Add multiple provider network extension

This commit adds the ability to associate multiple provider
networks with a single network.

Implements blueprint map-networks-to-multiple-provider-networks

Change-Id: I3c70fb2426899f728a401566debab7f66e7246bc
Author: Aaron Rosen 2013-06-19 16:09:05 -07:00
parent 589071a1fb
commit d16e185d34
9 changed files with 582 additions and 129 deletions
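
For context, a create-network request using the new extension carries a
'segments' list in place of the three flat provider:* attributes. A request
body along these lines (all values hypothetical, mirroring the new tests
below) exercises the new code path:

    body = {'network': {
        'name': 'multi-net',
        'tenant_id': 'tenant_one',
        'segments': [
            {'provider:network_type': 'vlan',
             'provider:physical_network': 'physnet1',
             'provider:segmentation_id': 666},
            {'provider:network_type': 'stt',
             'provider:physical_network': 'physnet1'}]}}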


@@ -21,16 +21,19 @@
     "create_network": "",
     "get_network": "rule:admin_or_owner or rule:shared or rule:external",
     "get_network:router:external": "rule:regular_user",
+    "get_network:segments": "rule:admin_only",
     "get_network:provider:network_type": "rule:admin_only",
     "get_network:provider:physical_network": "rule:admin_only",
     "get_network:provider:segmentation_id": "rule:admin_only",
     "get_network:queue_id": "rule:admin_only",
     "create_network:shared": "rule:admin_only",
     "create_network:router:external": "rule:admin_only",
+    "create_network:segments": "rule:admin_only",
     "create_network:provider:network_type": "rule:admin_only",
     "create_network:provider:physical_network": "rule:admin_only",
     "create_network:provider:segmentation_id": "rule:admin_only",
     "update_network": "rule:admin_or_owner",
+    "update_network:segments": "rule:admin_only",
     "update_network:provider:network_type": "rule:admin_only",
     "update_network:provider:physical_network": "rule:admin_only",
     "update_network:provider:segmentation_id": "rule:admin_only",


@@ -0,0 +1,102 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""add multiprovider

Revision ID: 3c6e57a23db4
Revises: 86cf4d88bd3
Create Date: 2013-07-10 12:43:35.769283

"""

# revision identifiers, used by Alembic.
revision = '3c6e57a23db4'
down_revision = '86cf4d88bd3'

# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
    'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.create_table(
        'nvp_multi_provider_networks',
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('network_id'),
        mysql_engine='InnoDB'
    )

    op.create_table('rename_nvp_network_bindings',
                    sa.Column('network_id', sa.String(length=36),
                              primary_key=True),
                    sa.Column('binding_type',
                              sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
                                      name=(
                                          'nvp_network_bindings_binding_type')),
                              nullable=False, primary_key=True),
                    sa.Column('phy_uuid', sa.String(36), primary_key=True,
                              nullable=True),
                    sa.Column('vlan_id', sa.Integer, primary_key=True,
                              nullable=True, autoincrement=False))

    # copy data from nvp_network_bindings into rename_nvp_network_bindings
    op.execute("INSERT INTO rename_nvp_network_bindings SELECT network_id, "
               "binding_type, phy_uuid, vlan_id from nvp_network_bindings")

    op.drop_table('nvp_network_bindings')
    op.rename_table('rename_nvp_network_bindings', 'nvp_network_bindings')


def downgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    # Delete the multi_provider_network entries from nvp_network_bindings
    op.execute("DELETE from nvp_network_bindings WHERE network_id IN "
               "(SELECT network_id from nvp_multi_provider_networks)")

    # create a table with the previous (single-binding) schema
    op.create_table('rename_nvp_network_bindings',
                    sa.Column('network_id', sa.String(length=36),
                              primary_key=True),
                    sa.Column('binding_type',
                              sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
                                      name=(
                                          'nvp_network_bindings_binding_type')),
                              nullable=False),
                    sa.Column('phy_uuid', sa.String(36),
                              nullable=True),
                    sa.Column('vlan_id', sa.Integer,
                              nullable=True, autoincrement=False))

    # copy data from nvp_network_bindings into rename_nvp_network_bindings
    op.execute("INSERT INTO rename_nvp_network_bindings SELECT network_id, "
               "binding_type, phy_uuid, vlan_id from nvp_network_bindings")

    op.drop_table('nvp_network_bindings')
    op.rename_table('rename_nvp_network_bindings', 'nvp_network_bindings')
    op.drop_table('nvp_multi_provider_networks')
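
The migration above is gated on the active plugin via migration.should_run.
A minimal standalone sketch of that gate (not the neutron implementation,
just the semantics implied by the ['*'] comment above):

    def should_run(active_plugins, migration_for_plugins):
        # Run when the migration targets all plugins ('*') or when at
        # least one active plugin is listed for this migration.
        if '*' in migration_for_plugins:
            return True
        return bool(set(active_plugins or []) & set(migration_for_plugins))

    # Gating this migration on the NVP plugin returns True:
    print(should_run(['neutron.plugins.nicira.NeutronPlugin.NvpPluginV2'],
                     ['neutron.plugins.nicira.NeutronPlugin.NvpPluginV2']))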


@@ -0,0 +1,116 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2013 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import webob.exc

from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.common import exceptions as qexception
from neutron.extensions import providernet as pnet

SEGMENTS = 'segments'


class SegmentsSetInConjunctionWithProviders(qexception.InvalidInput):
    message = _("Segments and provider values cannot both be set.")


class SegmentsContainDuplicateEntry(qexception.InvalidInput):
    message = _("Duplicate segment entry in request.")


def _convert_and_validate_segments(segments, valid_values=None):
    unique = set()
    for segment in segments:
        unique.add(tuple(segment.iteritems()))
        network_type = segment.get(pnet.NETWORK_TYPE,
                                   attr.ATTR_NOT_SPECIFIED)
        segment[pnet.NETWORK_TYPE] = network_type
        physical_network = segment.get(pnet.PHYSICAL_NETWORK,
                                       attr.ATTR_NOT_SPECIFIED)
        segment[pnet.PHYSICAL_NETWORK] = physical_network
        segmentation_id = segment.get(pnet.SEGMENTATION_ID)
        if segmentation_id:
            segment[pnet.SEGMENTATION_ID] = attr.convert_to_int(
                segmentation_id)
        else:
            segment[pnet.SEGMENTATION_ID] = attr.ATTR_NOT_SPECIFIED
        if len(segment.keys()) != 3:
            msg = (_("Unrecognized attribute(s) '%s'") %
                   ', '.join(set(segment.keys()) -
                             set([pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
                                  pnet.SEGMENTATION_ID])))
            raise webob.exc.HTTPBadRequest(msg)
    if len(unique) != len(segments):
        raise SegmentsContainDuplicateEntry()


attr.validators['type:convert_segments'] = (
    _convert_and_validate_segments)


EXTENDED_ATTRIBUTES_2_0 = {
    'networks': {
        SEGMENTS: {'allow_post': True, 'allow_put': True,
                   'validate': {'type:convert_segments': None},
                   'convert_list_to': attr.convert_kvp_list_to_dict,
                   'default': attr.ATTR_NOT_SPECIFIED,
                   'enforce_policy': True,
                   'is_visible': True},
    }
}


class Multiprovidernet(extensions.ExtensionDescriptor):
    """Extension class supporting multiple provider networks.

    This class is used by neutron's extension framework to make
    metadata about the multiple provider network extension available to
    clients. No new resources are defined by this extension. Instead,
    the existing network resource's request and response messages are
    extended with attributes in the provider namespace.

    With admin rights, network dictionaries returned will also include
    provider attributes.
    """

    @classmethod
    def get_name(cls):
        return "Multi Provider Network"

    @classmethod
    def get_alias(cls):
        return "multi-provider"

    @classmethod
    def get_description(cls):
        return ("Expose mapping of virtual networks to multiple physical "
                "networks")

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/multi-provider/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2013-06-27T10:00:00-00:00"

    def get_extended_resources(self, version):
        if version == "2.0":
            return EXTENDED_ATTRIBUTES_2_0
        else:
            return {}
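
A standalone sketch (plain Python, no neutron imports) of the
duplicate-detection idea used by _convert_and_validate_segments above: each
segment dict is reduced to a hashable key and collected in a set, so any
repeated segment leaves the set smaller than the input list. Sorting the
items makes the key independent of dict ordering, a small hardening over
the iteritems() call above:

    def has_duplicate_segments(segments):
        unique = set(tuple(sorted(s.items())) for s in segments)
        return len(unique) != len(segments)

    assert has_duplicate_segments([
        {'provider:network_type': 'vlan',
         'provider:physical_network': 'physnet1',
         'provider:segmentation_id': 1},
        {'provider:network_type': 'vlan',
         'provider:physical_network': 'physnet1',
         'provider:segmentation_id': 1}])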


@@ -52,6 +52,7 @@ from neutron.db import quota_db  # noqa
 from neutron.db import securitygroups_db
 from neutron.extensions import extraroute
 from neutron.extensions import l3
+from neutron.extensions import multiprovidernet as mpnet
 from neutron.extensions import portbindings as pbin
 from neutron.extensions import portsecurity as psec
 from neutron.extensions import providernet as pnet
@@ -91,6 +92,7 @@ class NetworkTypes:
     GRE = 'gre'
     FLAT = 'flat'
     VLAN = 'vlan'
+    BRIDGE = 'bridge'


 def create_nvp_cluster(cluster_opts, concurrent_connections,
@@ -153,6 +155,7 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                                    "ext-gw-mode",
                                    "extraroute",
                                    "mac-learning",
+                                   "multi-provider",
                                    "network-gateway",
                                    "nvp-qos",
                                    "port-security",
@@ -401,18 +404,19 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
     def _nvp_find_lswitch_for_port(self, context, port_data):
         network = self._get_network(context, port_data['network_id'])
-        network_binding = nicira_db.get_network_binding(
+        network_bindings = nicira_db.get_network_bindings(
             context.session, port_data['network_id'])
         max_ports = self.nvp_opts.max_lp_per_overlay_ls
         allow_extra_lswitches = False
-        if (network_binding and
-            network_binding.binding_type in (NetworkTypes.FLAT,
-                                             NetworkTypes.VLAN)):
-            max_ports = self.nvp_opts.max_lp_per_bridged_ls
-            allow_extra_lswitches = True
+        for network_binding in network_bindings:
+            if network_binding.binding_type in (NetworkTypes.FLAT,
+                                                NetworkTypes.VLAN):
+                max_ports = self.nvp_opts.max_lp_per_bridged_ls
+                allow_extra_lswitches = True
+                break
         try:
             return self._handle_lswitch_selection(self.cluster, network,
-                                                  network_binding, max_ports,
+                                                  network_bindings, max_ports,
                                                   allow_extra_lswitches)
         except NvpApiClient.NvpApiException:
             err_desc = _("An exception occured while selecting logical "
@ -761,19 +765,17 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
nvp_exc.MaintenanceInProgress: nvp_exc.MaintenanceInProgress:
webob.exc.HTTPServiceUnavailable}) webob.exc.HTTPServiceUnavailable})
def _handle_provider_create(self, context, attrs): def _validate_provider_create(self, context, network):
# NOTE(salvatore-orlando): This method has been borrowed from if not attr.is_attr_set(network.get(mpnet.SEGMENTS)):
# the OpenvSwtich plugin, altough changed to match NVP specifics.
network_type = attrs.get(pnet.NETWORK_TYPE)
physical_network = attrs.get(pnet.PHYSICAL_NETWORK)
segmentation_id = attrs.get(pnet.SEGMENTATION_ID)
network_type_set = attr.is_attr_set(network_type)
physical_network_set = attr.is_attr_set(physical_network)
segmentation_id_set = attr.is_attr_set(segmentation_id)
if not (network_type_set or physical_network_set or
segmentation_id_set):
return return
for segment in network[mpnet.SEGMENTS]:
network_type = segment.get(pnet.NETWORK_TYPE)
physical_network = segment.get(pnet.PHYSICAL_NETWORK)
segmentation_id = segment.get(pnet.SEGMENTATION_ID)
network_type_set = attr.is_attr_set(network_type)
segmentation_id_set = attr.is_attr_set(segmentation_id)
err_msg = None err_msg = None
if not network_type_set: if not network_type_set:
err_msg = _("%s required") % pnet.NETWORK_TYPE err_msg = _("%s required") % pnet.NETWORK_TYPE
@@ -795,10 +797,11 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                                'max_id': constants.MAX_VLAN_TAG})
                 else:
                     # Verify segment is not already allocated
-                    binding = nicira_db.get_network_binding_by_vlanid(
-                        context.session, segmentation_id)
-                    if binding:
-                        raise q_exc.VlanIdInUse(vlan_id=segmentation_id,
-                                                physical_network=physical_network)
+                    bindings = nicira_db.get_network_bindings_by_vlanid(
+                        context.session, segmentation_id)
+                    if bindings:
+                        raise q_exc.VlanIdInUse(
+                            vlan_id=segmentation_id,
+                            physical_network=physical_network)
             elif network_type == NetworkTypes.L3_EXT:
                 if (segmentation_id_set and
@@ -809,28 +812,42 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                                'min_id': constants.MIN_VLAN_TAG,
                                'max_id': constants.MAX_VLAN_TAG})
             else:
-                err_msg = _("%(net_type_param)s %(net_type_value)s not "
-                            "supported") % {'net_type_param': pnet.NETWORK_TYPE,
-                                            'net_type_value': network_type}
+                err_msg = (_("%(net_type_param)s %(net_type_value)s not "
+                             "supported") %
+                           {'net_type_param': pnet.NETWORK_TYPE,
+                            'net_type_value': network_type})
             if err_msg:
                 raise q_exc.InvalidInput(error_message=err_msg)
             # TODO(salvatore-orlando): Validate tranport zone uuid
             # which should be specified in physical_network

-    def _extend_network_dict_provider(self, context, network, binding=None):
-        if not binding:
-            binding = nicira_db.get_network_binding(context.session,
-                                                    network['id'])
+    def _extend_network_dict_provider(self, context, network,
+                                      multiprovider=None, bindings=None):
+        if not bindings:
+            bindings = nicira_db.get_network_bindings(context.session,
+                                                      network['id'])
+        if not multiprovider:
+            multiprovider = nicira_db.is_multiprovider_network(
+                context.session, network['id'])
         # With NVP plugin 'normal' overlay networks will have no binding
         # TODO(salvatore-orlando) make sure users can specify a distinct
         # phy_uuid as 'provider network' for STT net type
-        if binding:
-            network[pnet.NETWORK_TYPE] = binding.binding_type
-            network[pnet.PHYSICAL_NETWORK] = binding.phy_uuid
-            network[pnet.SEGMENTATION_ID] = binding.vlan_id
+        if bindings:
+            if not multiprovider:
+                # network came in through the provider networks api
+                network[pnet.NETWORK_TYPE] = bindings[0].binding_type
+                network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid
+                network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id
+            else:
+                # network came in through the multiprovider networks api
+                network[mpnet.SEGMENTS] = [
+                    {pnet.NETWORK_TYPE: binding.binding_type,
+                     pnet.PHYSICAL_NETWORK: binding.phy_uuid,
+                     pnet.SEGMENTATION_ID: binding.vlan_id}
+                    for binding in bindings]

     def _handle_lswitch_selection(self, cluster, network,
-                                  network_binding, max_ports,
+                                  network_bindings, max_ports,
                                   allow_extra_lswitches):
         lswitches = nvplib.get_lswitches(cluster, network.id)
         try:
@ -853,12 +870,12 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
main_ls[0]['display_name'], main_ls[0]['display_name'],
network['tenant_id'], network['tenant_id'],
tags=tags) tags=tags)
transport_zone_config = self._convert_to_nvp_transport_zones(
cluster, bindings=network_bindings)
selected_lswitch = nvplib.create_lswitch( selected_lswitch = nvplib.create_lswitch(
cluster, network.tenant_id, cluster, network.tenant_id,
"%s-ext-%s" % (network.name, len(lswitches)), "%s-ext-%s" % (network.name, len(lswitches)),
network_binding.binding_type, transport_zone_config,
network_binding.phy_uuid,
network_binding.vlan_id,
network.id) network.id)
return selected_lswitch return selected_lswitch
else: else:
@@ -878,12 +895,86 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         # Consume from all consumers in a thread
         self.conn.consume_in_thread()

+    def _convert_to_nvp_transport_zones(self, cluster, network=None,
+                                        bindings=None):
+        nvp_transport_zones_config = []
+        # Convert fields from provider request to nvp format
+        if (network and not attr.is_attr_set(
+                network.get(mpnet.SEGMENTS))):
+            return [{"zone_uuid": cluster.default_tz_uuid,
+                     "transport_type": cfg.CONF.NVP.default_transport_type}]
+
+        # Convert fields from db to nvp format
+        if bindings:
+            for binding in bindings:
+                # a fresh dict per binding, so each entry is independent
+                transport_entry = {}
+                if binding.binding_type in [NetworkTypes.FLAT,
+                                            NetworkTypes.VLAN]:
+                    transport_entry['transport_type'] = NetworkTypes.BRIDGE
+                    transport_entry['binding_config'] = {}
+                    vlan_id = binding.vlan_id
+                    if vlan_id:
+                        transport_entry['binding_config'] = (
+                            {'vlan_translation': [{'transport': vlan_id}]})
+                else:
+                    transport_entry['transport_type'] = binding.binding_type
+                transport_entry['zone_uuid'] = binding.phy_uuid
+                nvp_transport_zones_config.append(transport_entry)
+            return nvp_transport_zones_config
+
+        # Convert fields from a multiprovider request to nvp format
+        for transport_zone in network.get(mpnet.SEGMENTS):
+            for value in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
+                          pnet.SEGMENTATION_ID]:
+                if transport_zone.get(value) == attr.ATTR_NOT_SPECIFIED:
+                    transport_zone[value] = None
+
+            transport_entry = {}
+            transport_type = transport_zone.get(pnet.NETWORK_TYPE)
+            if transport_type in [NetworkTypes.FLAT, NetworkTypes.VLAN]:
+                transport_entry['transport_type'] = NetworkTypes.BRIDGE
+                transport_entry['binding_config'] = {}
+                vlan_id = transport_zone.get(pnet.SEGMENTATION_ID)
+                if vlan_id:
+                    transport_entry['binding_config'] = (
+                        {'vlan_translation': [{'transport': vlan_id}]})
+            else:
+                transport_entry['transport_type'] = transport_type
+            transport_entry['zone_uuid'] = (
+                transport_zone[pnet.PHYSICAL_NETWORK] or
+                cluster.default_tz_uuid)
+            nvp_transport_zones_config.append(transport_entry)
+        return nvp_transport_zones_config
+
+    def _convert_to_transport_zones_dict(self, network):
+        """Converts the provider request body to multiprovider.
+
+        Returns True if the request is multiprovider, False if it uses
+        the single provider extension, and None if it uses neither.
+        """
+        if any(attr.is_attr_set(network.get(f))
+               for f in (pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
+                         pnet.SEGMENTATION_ID)):
+            if attr.is_attr_set(network.get(mpnet.SEGMENTS)):
+                raise mpnet.SegmentsSetInConjunctionWithProviders()
+            # convert the three provider:* values to a transport zone list
+            network[mpnet.SEGMENTS] = [
+                {pnet.NETWORK_TYPE: network[pnet.NETWORK_TYPE],
+                 pnet.PHYSICAL_NETWORK: network[pnet.PHYSICAL_NETWORK],
+                 pnet.SEGMENTATION_ID: network[pnet.SEGMENTATION_ID]}]
+            del network[pnet.NETWORK_TYPE]
+            del network[pnet.PHYSICAL_NETWORK]
+            del network[pnet.SEGMENTATION_ID]
+            return False
+        if attr.is_attr_set(network.get(mpnet.SEGMENTS)):
+            return True
+
     def create_network(self, context, network):
         net_data = network['network']
         tenant_id = self._get_tenant_id_for_create(context, net_data)
         self._ensure_default_security_group(context, tenant_id)
         # Process the provider network extension
-        self._handle_provider_create(context, net_data)
+        provider_type = self._convert_to_transport_zones_dict(net_data)
+        self._validate_provider_create(context, net_data)
         # Replace ATTR_NOT_SPECIFIED with None before sending to NVP
         for key, value in network['network'].iteritems():
             if value is attr.ATTR_NOT_SPECIFIED:
@@ -893,16 +984,14 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
             LOG.warning(_("Network with admin_state_up=False are not yet "
                           "supported by this plugin. Ignoring setting for "
                           "network %s"), net_data.get('name', '<unknown>'))
+        transport_zone_config = self._convert_to_nvp_transport_zones(
+            self.cluster, net_data)
         external = net_data.get(l3.EXTERNAL)
         if (not attr.is_attr_set(external) or
             attr.is_attr_set(external) and not external):
-            nvp_binding_type = net_data.get(pnet.NETWORK_TYPE)
-            if nvp_binding_type in ('flat', 'vlan'):
-                nvp_binding_type = 'bridge'
             lswitch = nvplib.create_lswitch(
                 self.cluster, tenant_id, net_data.get('name'),
-                nvp_binding_type, net_data.get(pnet.PHYSICAL_NETWORK),
-                net_data.get(pnet.SEGMENTATION_ID),
+                transport_zone_config,
                 shared=net_data.get(attr.SHARED))
             net_data['id'] = lswitch['uuid']
@@ -924,14 +1013,21 @@ class NvpPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
         self._process_network_queue_mapping(context, new_net)
         self._extend_network_qos_queue(context, new_net)

-        if net_data.get(pnet.NETWORK_TYPE):
-            net_binding = nicira_db.add_network_binding(
-                context.session, new_net['id'],
-                net_data.get(pnet.NETWORK_TYPE),
-                net_data.get(pnet.PHYSICAL_NETWORK),
-                net_data.get(pnet.SEGMENTATION_ID, 0))
+        if (net_data.get(mpnet.SEGMENTS) and
+                isinstance(provider_type, bool)):
+            net_bindings = []
+            for tz in net_data[mpnet.SEGMENTS]:
+                net_bindings.append(nicira_db.add_network_binding(
+                    context.session, new_net['id'],
+                    tz.get(pnet.NETWORK_TYPE),
+                    tz.get(pnet.PHYSICAL_NETWORK),
+                    tz.get(pnet.SEGMENTATION_ID, 0)))
+            if provider_type:
+                nicira_db.set_multiprovider_network(context.session,
+                                                    new_net['id'])
             self._extend_network_dict_provider(context, new_net,
-                                               net_binding)
+                                               provider_type,
+                                               net_bindings)
         self.schedule_network(context, new_net)
         return new_net
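
A hedged, standalone sketch of the segment-to-transport-zone mapping that
_convert_to_nvp_transport_zones implements above (the function name, UUIDs,
and the DEFAULT_TZ_UUID constant are hypothetical): flat and vlan segments
become 'bridge' transport zones, a VLAN id is expressed through a
vlan_translation binding_config, and other types pass through unchanged:

    DEFAULT_TZ_UUID = 'default-tz-uuid'  # stand-in for cluster.default_tz_uuid

    def to_transport_zones(segments):
        zones = []
        for segment in segments:
            entry = {'zone_uuid': (segment.get('provider:physical_network')
                                   or DEFAULT_TZ_UUID)}
            net_type = segment.get('provider:network_type')
            if net_type in ('flat', 'vlan'):
                entry['transport_type'] = 'bridge'
                vlan_id = segment.get('provider:segmentation_id')
                entry['binding_config'] = (
                    {'vlan_translation': [{'transport': vlan_id}]}
                    if vlan_id else {})
            else:
                entry['transport_type'] = net_type
            zones.append(entry)
        return zones

    print(to_transport_zones(
        [{'provider:network_type': 'vlan',
          'provider:physical_network': 'physnet1',
          'provider:segmentation_id': 666},
         {'provider:network_type': 'stt'}]))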


@@ -25,26 +25,18 @@ from neutron.plugins.nicira.dbexts import nicira_networkgw_db

 LOG = logging.getLogger(__name__)


-def get_network_binding(session, network_id):
+def get_network_bindings(session, network_id):
     session = session or db.get_session()
-    try:
-        binding = (session.query(nicira_models.NvpNetworkBinding).
-                   filter_by(network_id=network_id).
-                   one())
-        return binding
-    except exc.NoResultFound:
-        return
+    return (session.query(nicira_models.NvpNetworkBinding).
+            filter_by(network_id=network_id).
+            all())


-def get_network_binding_by_vlanid(session, vlan_id):
+def get_network_bindings_by_vlanid(session, vlan_id):
     session = session or db.get_session()
-    try:
-        binding = (session.query(nicira_models.NvpNetworkBinding).
-                   filter_by(vlan_id=vlan_id).
-                   one())
-        return binding
-    except exc.NoResultFound:
-        return
+    return (session.query(nicira_models.NvpNetworkBinding).
+            filter_by(vlan_id=vlan_id).
+            all())


 def add_network_binding(session, network_id, binding_type, phy_uuid, vlan_id):
@@ -88,3 +80,18 @@ def set_default_network_gateway(session, gw_id):
     gw = (session.query(nicira_networkgw_db.NetworkGateway).
           filter_by(id=gw_id).one())
     gw['default'] = True


+def set_multiprovider_network(session, network_id):
+    with session.begin(subtransactions=True):
+        multiprovider_network = nicira_models.MultiProviderNetworks(
+            network_id)
+        session.add(multiprovider_network)
+        return multiprovider_network


+def is_multiprovider_network(session, network_id):
+    with session.begin(subtransactions=True):
+        return bool(
+            session.query(nicira_models.MultiProviderNetworks).filter_by(
+                network_id=network_id).first())
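
The switch from .one() to .all() matters once a network can carry several
bindings. A hedged, self-contained illustration with an in-memory SQLite
database and a hypothetical stand-in model (not the neutron schema):

    import sqlalchemy as sa
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.orm.exc import MultipleResultsFound

    Base = declarative_base()

    class Binding(Base):  # hypothetical stand-in for NvpNetworkBinding
        __tablename__ = 'bindings'
        id = sa.Column(sa.Integer, primary_key=True)
        network_id = sa.Column(sa.String(36))

    engine = sa.create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add_all([Binding(network_id='net-1'),
                     Binding(network_id='net-1')])
    session.commit()

    query = session.query(Binding).filter_by(network_id='net-1')
    print(len(query.all()))      # 2: what get_network_bindings now returns
    try:
        query.one()              # the old get_network_binding() call path
    except MultipleResultsFound:
        print('one() raises once a network has more than one binding')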


@@ -29,15 +29,18 @@ class NvpNetworkBinding(model_base.BASEV2):
     """
     __tablename__ = 'nvp_network_bindings'

+    # TODO(arosen) - it might be worthwhile refactoring how this data
+    # is stored later so every column does not need to be a primary key.
     network_id = Column(String(36),
                         ForeignKey('networks.id', ondelete="CASCADE"),
                         primary_key=True)
     # 'flat', 'vlan', stt' or 'gre'
     binding_type = Column(Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
                                name='nvp_network_bindings_binding_type'),
-                          nullable=False)
-    phy_uuid = Column(String(36))
-    vlan_id = Column(Integer)
+                          nullable=False, primary_key=True)
+    phy_uuid = Column(String(36), primary_key=True, nullable=True)
+    vlan_id = Column(Integer, primary_key=True, nullable=True,
+                     autoincrement=False)

     def __init__(self, network_id, binding_type, phy_uuid, vlan_id):
         self.network_id = network_id
@@ -64,3 +67,15 @@ class NeutronNvpPortMapping(model_base.BASEV2):
     def __init__(self, quantum_id, nvp_id):
         self.quantum_id = quantum_id
         self.nvp_id = nvp_id


+class MultiProviderNetworks(model_base.BASEV2):
+    """Networks that were provisioned through the multiprovider extension."""
+
+    __tablename__ = 'nvp_multi_provider_networks'
+    network_id = Column(String(36),
+                        ForeignKey('networks.id', ondelete="CASCADE"),
+                        primary_key=True)
+
+    def __init__(self, network_id):
+        self.network_id = network_id


@@ -24,8 +24,6 @@ import hashlib
 import inspect
 import json

-from oslo.config import cfg
-
 #FIXME(danwent): I'd like this file to get to the point where it has
 # no neutron-specific logic in it
 from neutron.common import constants
@@ -217,27 +215,14 @@ def get_lswitches(cluster, neutron_net_id):

 def create_lswitch(cluster, tenant_id, display_name,
-                   transport_type=None,
-                   transport_zone_uuid=None,
-                   vlan_id=None,
+                   transport_zones_config,
                    neutron_net_id=None,
                    shared=None,
                    **kwargs):
-    nvp_binding_type = transport_type
-    if transport_type in ('flat', 'vlan'):
-        nvp_binding_type = 'bridge'
-    transport_zone_config = (
-        {"zone_uuid": (transport_zone_uuid or
-                       cluster.default_tz_uuid),
-         "transport_type": (nvp_binding_type or
-                            cfg.CONF.NVP.default_transport_type)})
     lswitch_obj = {"display_name": _check_and_truncate_name(display_name),
-                   "transport_zones": [transport_zone_config],
+                   "transport_zones": transport_zones_config,
                    "tags": [{"tag": tenant_id, "scope": "os_tid"},
                             {"tag": NEUTRON_VERSION, "scope": "quantum"}]}
-    if nvp_binding_type == 'bridge' and vlan_id:
-        transport_zone_config["binding_config"] = {"vlan_translation":
-                                                   [{"transport": vlan_id}]}
     if neutron_net_id:
         lswitch_obj["tags"].append({"tag": neutron_net_id,
                                     "scope": "quantum_net_id"})


@@ -25,6 +25,7 @@ from neutron.common import exceptions as ntn_exc
 import neutron.common.test_lib as test_lib
 from neutron import context
 from neutron.extensions import l3
+from neutron.extensions import multiprovidernet as mpnet
 from neutron.extensions import portbindings
 from neutron.extensions import providernet as pnet
 from neutron.extensions import securitygroup as secgrp
@@ -1232,3 +1233,105 @@ class TestNiciraNetworkGateway(test_l2_gw.NetworkGatewayDbTestCase,
     def test_delete_network_gateway(self):
         # The default gateway must still be there
         self._test_delete_network_gateway(1)


class TestNiciraMultiProviderNetworks(NiciraPluginV2TestCase):

    def setUp(self, plugin=None):
        cfg.CONF.set_override('api_extensions_path', NVPEXT_PATH)
        super(TestNiciraMultiProviderNetworks, self).setUp()

    def test_create_network_provider(self):
        data = {'network': {'name': 'net1',
                            pnet.NETWORK_TYPE: 'vlan',
                            pnet.PHYSICAL_NETWORK: 'physnet1',
                            pnet.SEGMENTATION_ID: 1,
                            'tenant_id': 'tenant_one'}}
        network_req = self.new_create_request('networks', data)
        network = self.deserialize(self.fmt,
                                   network_req.get_response(self.api))
        self.assertEqual(network['network'][pnet.NETWORK_TYPE], 'vlan')
        self.assertEqual(network['network'][pnet.PHYSICAL_NETWORK],
                         'physnet1')
        self.assertEqual(network['network'][pnet.SEGMENTATION_ID], 1)
        self.assertNotIn(mpnet.SEGMENTS, network['network'])

    def test_create_network_single_multiple_provider(self):
        data = {'network': {'name': 'net1',
                            mpnet.SEGMENTS:
                            [{pnet.NETWORK_TYPE: 'vlan',
                              pnet.PHYSICAL_NETWORK: 'physnet1',
                              pnet.SEGMENTATION_ID: 1}],
                            'tenant_id': 'tenant_one'}}
        net_req = self.new_create_request('networks', data)
        network = self.deserialize(self.fmt, net_req.get_response(self.api))
        for provider_field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
                               pnet.SEGMENTATION_ID]:
            self.assertNotIn(provider_field, network['network'])
        tz = network['network'][mpnet.SEGMENTS][0]
        self.assertEqual(tz[pnet.NETWORK_TYPE], 'vlan')
        self.assertEqual(tz[pnet.PHYSICAL_NETWORK], 'physnet1')
        self.assertEqual(tz[pnet.SEGMENTATION_ID], 1)

        # Tests get_network()
        net_req = self.new_show_request('networks', network['network']['id'])
        network = self.deserialize(self.fmt, net_req.get_response(self.api))
        tz = network['network'][mpnet.SEGMENTS][0]
        self.assertEqual(tz[pnet.NETWORK_TYPE], 'vlan')
        self.assertEqual(tz[pnet.PHYSICAL_NETWORK], 'physnet1')
        self.assertEqual(tz[pnet.SEGMENTATION_ID], 1)

    def test_create_network_multiprovider(self):
        data = {'network': {'name': 'net1',
                            mpnet.SEGMENTS:
                            [{pnet.NETWORK_TYPE: 'vlan',
                              pnet.PHYSICAL_NETWORK: 'physnet1',
                              pnet.SEGMENTATION_ID: 1},
                             {pnet.NETWORK_TYPE: 'stt',
                              pnet.PHYSICAL_NETWORK: 'physnet1'}],
                            'tenant_id': 'tenant_one'}}
        network_req = self.new_create_request('networks', data)
        network = self.deserialize(self.fmt,
                                   network_req.get_response(self.api))
        # Compare the requested segments with the returned ones pairwise,
        # rather than comparing each returned segment with itself.
        expected = data['network'][mpnet.SEGMENTS]
        returned = network['network'][mpnet.SEGMENTS]
        for exp_tz, ret_tz in zip(expected, returned):
            for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
                          pnet.SEGMENTATION_ID]:
                self.assertEqual(exp_tz.get(field), ret_tz.get(field))

        # Tests get_network()
        net_req = self.new_show_request('networks', network['network']['id'])
        network = self.deserialize(self.fmt, net_req.get_response(self.api))
        returned = network['network'][mpnet.SEGMENTS]
        for exp_tz, ret_tz in zip(expected, returned):
            for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
                          pnet.SEGMENTATION_ID]:
                self.assertEqual(exp_tz.get(field), ret_tz.get(field))

    def test_create_network_with_provider_and_multiprovider_fail(self):
        data = {'network': {'name': 'net1',
                            mpnet.SEGMENTS:
                            [{pnet.NETWORK_TYPE: 'vlan',
                              pnet.PHYSICAL_NETWORK: 'physnet1',
                              pnet.SEGMENTATION_ID: 1}],
                            pnet.NETWORK_TYPE: 'vlan',
                            pnet.PHYSICAL_NETWORK: 'physnet1',
                            pnet.SEGMENTATION_ID: 1,
                            'tenant_id': 'tenant_one'}}
        network_req = self.new_create_request('networks', data)
        res = network_req.get_response(self.api)
        self.assertEqual(res.status_int, 400)

    def test_create_network_duplicate_segments(self):
        data = {'network': {'name': 'net1',
                            mpnet.SEGMENTS:
                            [{pnet.NETWORK_TYPE: 'vlan',
                              pnet.PHYSICAL_NETWORK: 'physnet1',
                              pnet.SEGMENTATION_ID: 1},
                             {pnet.NETWORK_TYPE: 'vlan',
                              pnet.PHYSICAL_NETWORK: 'physnet1',
                              pnet.SEGMENTATION_ID: 1}],
                            'tenant_id': 'tenant_one'}}
        network_req = self.new_create_request('networks', data)
        res = network_req.get_response(self.api)
        self.assertEqual(res.status_int, 400)


@@ -254,8 +254,10 @@ class TestNvplibL2Gateway(NvplibTestCase):
     def test_plug_l2_gw_port_attachment(self):
         tenant_id = 'pippo'
         node_uuid = _uuid()
+        transport_zones_config = [{'zone_uuid': _uuid(),
+                                   'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster, tenant_id,
-                                        'fake-switch')
+                                        'fake-switch', transport_zones_config)
         gw_id = self._create_gw_service(node_uuid, 'fake-gw')['uuid']
         lport = nvplib.create_lport(self.fake_cluster,
                                     lswitch['uuid'],
@@ -283,9 +285,12 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
     def test_create_and_get_lswitches_single(self):
         tenant_id = 'pippo'
+        transport_zones_config = [{'zone_uuid': _uuid(),
+                                   'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster,
                                         tenant_id,
-                                        'fake-switch')
+                                        'fake-switch',
+                                        transport_zones_config)
         res_lswitch = nvplib.get_lswitches(self.fake_cluster,
                                            lswitch['uuid'])
         self.assertEqual(len(res_lswitch), 1)
@@ -294,9 +299,12 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
     def test_create_and_get_lswitches_single_name_exceeds_40_chars(self):
         tenant_id = 'pippo'
+        transport_zones_config = [{'zone_uuid': _uuid(),
+                                   'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster,
                                         tenant_id,
-                                        '*' * 50)
+                                        '*' * 50,
+                                        transport_zones_config)
         res_lswitch = nvplib.get_lswitches(self.fake_cluster,
                                            lswitch['uuid'])
         self.assertEqual(len(res_lswitch), 1)
@@ -305,12 +313,16 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
     def test_create_and_get_lswitches_multiple(self):
         tenant_id = 'pippo'
+        transport_zones_config = [{'zone_uuid': _uuid(),
+                                   'transport_type': 'stt'}]
         main_lswitch = nvplib.create_lswitch(
             self.fake_cluster, tenant_id, 'fake-switch',
+            transport_zones_config,
             tags=[{'scope': 'multi_lswitch', 'tag': 'True'}])
         # Create secondary lswitch
         nvplib.create_lswitch(
             self.fake_cluster, tenant_id, 'fake-switch-2',
+            transport_zones_config,
             neutron_net_id=main_lswitch['uuid'])
         res_lswitch = nvplib.get_lswitches(self.fake_cluster,
                                            main_lswitch['uuid'])
@@ -329,9 +341,12 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
     def test_update_lswitch(self):
         new_name = 'new-name'
         new_tags = [{'scope': 'new_tag', 'tag': 'xxx'}]
+        transport_zones_config = [{'zone_uuid': _uuid(),
+                                   'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster,
                                         'pippo',
-                                        'fake-switch')
+                                        'fake-switch',
+                                        transport_zones_config)
         nvplib.update_lswitch(self.fake_cluster, lswitch['uuid'],
                               new_name, tags=new_tags)
         res_lswitch = nvplib.get_lswitches(self.fake_cluster,
@@ -349,9 +364,12 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
                           'foo', 'bar')

     def test_delete_networks(self):
+        transport_zones_config = [{'zone_uuid': _uuid(),
+                                   'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster,
                                         'pippo',
-                                        'fake-switch')
+                                        'fake-switch',
+                                        transport_zones_config)
         nvplib.delete_networks(self.fake_cluster, lswitch['uuid'],
                                [lswitch['uuid']])
         self.assertRaises(exceptions.NotFound,
@@ -842,8 +860,11 @@ class TestNvplibLogicalRouters(NvplibTestCase):
     def test_plug_lrouter_port_patch_attachment(self):
         tenant_id = 'pippo'
+        transport_zones_config = [{'zone_uuid': _uuid(),
+                                   'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster,
-                                        tenant_id, 'fake-switch')
+                                        tenant_id, 'fake-switch',
+                                        transport_zones_config)
         lport = nvplib.create_lport(self.fake_cluster, lswitch['uuid'],
                                     tenant_id, 'xyz',
                                     'name', 'device_id', True)
@@ -1215,8 +1236,11 @@ class TestNvplibLogicalPorts(NvplibTestCase):
     def _create_switch_and_port(self, tenant_id='pippo',
                                 neutron_port_id='whatever'):
+        transport_zones_config = [{'zone_uuid': _uuid(),
+                                   'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster,
-                                        tenant_id, 'fake-switch')
+                                        tenant_id, 'fake-switch',
+                                        transport_zones_config)
         lport = nvplib.create_lport(self.fake_cluster, lswitch['uuid'],
                                     tenant_id, neutron_port_id,
                                     'name', 'device_id', True)
@@ -1252,8 +1276,10 @@ class TestNvplibLogicalPorts(NvplibTestCase):
     def test_get_port_by_tag_not_found_returns_None(self):
         tenant_id = 'pippo'
         neutron_port_id = 'whatever'
+        transport_zones_config = [{'zone_uuid': _uuid(),
+                                   'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster, tenant_id,
-                                        'fake-switch')
+                                        'fake-switch', transport_zones_config)
         lport = nvplib.get_port_by_neutron_tag(self.fake_cluster,
                                                lswitch['uuid'],
                                                neutron_port_id)