Merge "Add DB mappings with NSX logical switches"

Jenkins 2014-02-12 23:32:48 +00:00 committed by Gerrit Code Review
commit 4d63a13681
12 changed files with 319 additions and 95 deletions

View File

@@ -0,0 +1,59 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nsx_switch_mappings
Revision ID: 3d3cb89d84ee
Revises: 1421183d533f
Create Date: 2014-01-07 15:37:41.323020
"""
# revision identifiers, used by Alembic.
revision = '3d3cb89d84ee'
down_revision = '1421183d533f'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
# Create table for network mappings
op.create_table(
'neutron_nsx_network_mappings',
sa.Column('neutron_id', sa.String(length=36), nullable=False),
sa.Column('nsx_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['neutron_id'], ['networks.id'],
ondelete='CASCADE'),
# There might be multiple switches for a neutron network
sa.PrimaryKeyConstraint('neutron_id', 'nsx_id'),
)
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_table('neutron_nsx_network_mappings')
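
Note: migration.should_run lives in neutron.db.migration and is not part of this diff. The sketch below shows the gating behavior the migration relies on, as assumed from the migration_for_plugins comment above; it is not the helper's actual source.

def should_run(active_plugins, migration_for_plugins):
    # Assumed behavior: run unconditionally when the migration declares
    # ['*']; otherwise run only if one of the active plugins is listed.
    if migration_for_plugins == ['*']:
        return True
    return bool(set(active_plugins or []) & set(migration_for_plugins))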

View File

@@ -22,6 +22,7 @@
 import logging
 import os
+import uuid
 from oslo.config import cfg
 from sqlalchemy import exc as sql_exc
@@ -394,9 +395,9 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                 allow_extra_lswitches = True
                 break
         try:
-            return self._handle_lswitch_selection(self.cluster, network,
-                                                  network_bindings, max_ports,
-                                                  allow_extra_lswitches)
+            return self._handle_lswitch_selection(
+                context, self.cluster, network, network_bindings,
+                max_ports, allow_extra_lswitches)
         except NvpApiClient.NvpApiException:
             err_desc = _("An exception occurred while selecting logical "
                          "switch for the port")
@@ -823,12 +824,12 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                  pnet.SEGMENTATION_ID: binding.vlan_id}
                 for binding in bindings]

-    def _handle_lswitch_selection(self, cluster, network,
-                                  network_bindings, max_ports,
-                                  allow_extra_lswitches):
-        lswitches = nvplib.get_lswitches(cluster, network.id)
+    def _handle_lswitch_selection(self, context, cluster, network,
+                                  network_bindings, max_ports,
+                                  allow_extra_lswitches):
+        lswitches = nsx_utils.fetch_nsx_switches(
+            context.session, cluster, network.id)
         try:
-            # TODO(salvatore-orlando) find main_ls too!
             return [ls for ls in lswitches
                     if (ls['_relations']['LogicalSwitchStatus']
                         ['lport_count'] < max_ports)].pop(0)
@@ -837,23 +838,35 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
             LOG.debug(_("No switch has available ports (%d checked)"),
                       len(lswitches))
             if allow_extra_lswitches:
-                main_ls = [ls for ls in lswitches if ls['uuid'] == network.id]
-                tag_dict = dict((x['scope'], x['tag']) for x in main_ls[0]['tags'])
-                if 'multi_lswitch' not in tag_dict:
-                    tags = main_ls[0]['tags']
+                # The 'main' logical switch is either the only one available
+                # or the one where the 'multi_lswitch' tag was set
+                while lswitches:
+                    main_ls = lswitches.pop(0)
+                    tag_dict = dict((x['scope'], x['tag'])
+                                    for x in main_ls['tags'])
+                    if 'multi_lswitch' in tag_dict:
+                        break
+                else:
+                    # by construction this statement is hit if there is only one
+                    # logical switch and the multi_lswitch tag has not been set.
+                    # The tag must therefore be added.
+                    tags = main_ls['tags']
                     tags.append({'tag': 'True', 'scope': 'multi_lswitch'})
                     nvplib.update_lswitch(cluster,
-                                          main_ls[0]['uuid'],
-                                          main_ls[0]['display_name'],
+                                          main_ls['uuid'],
+                                          main_ls['display_name'],
                                           network['tenant_id'],
                                           tags=tags)
                 transport_zone_config = self._convert_to_nvp_transport_zones(
                     cluster, network, bindings=network_bindings)
                 selected_lswitch = nvplib.create_lswitch(
-                    cluster, network.tenant_id,
+                    cluster, network.id, network.tenant_id,
                     "%s-ext-%s" % (network.name, len(lswitches)),
-                    transport_zone_config,
-                    network.id)
+                    transport_zone_config)
+                # add a mapping between the neutron network and the newly
+                # created logical switch
+                nicira_db.add_neutron_nsx_network_mapping(
+                    context.session, network.id, selected_lswitch['uuid'])
                 return selected_lswitch
             else:
                 LOG.error(_("Maximum number of logical ports reached for "
@@ -952,19 +965,21 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         transport_zone_config = self._convert_to_nvp_transport_zones(
             self.cluster, net_data)
         external = net_data.get(ext_net_extn.EXTERNAL)
+        # NOTE(salv-orlando): Pre-generating uuid for Neutron
+        # network. This will be removed once the network create operation
+        # becomes an asynchronous task
+        net_data['id'] = str(uuid.uuid4())
         if (not attr.is_attr_set(external) or
             attr.is_attr_set(external) and not external):
             lswitch = nvplib.create_lswitch(
-                self.cluster, tenant_id, net_data.get('name'),
+                self.cluster, net_data['id'],
+                tenant_id, net_data.get('name'),
                 transport_zone_config,
                 shared=net_data.get(attr.SHARED))
-            net_data['id'] = lswitch['uuid']
         with context.session.begin(subtransactions=True):
             new_net = super(NvpPluginV2, self).create_network(context,
                                                               network)
-            # Ensure there's an id in net_data
-            net_data['id'] = new_net['id']
             # Process port security extension
             self._process_network_port_security_create(
                 context, net_data, new_net)
@@ -977,7 +992,12 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
                     self.get_qos_queue(context, net_queue_id)
                 self._process_network_queue_mapping(
                     context, new_net, net_queue_id)
+            # Add mapping between neutron network and NSX switch
+            if (not attr.is_attr_set(external) or
+                attr.is_attr_set(external) and not external):
+                nicira_db.add_neutron_nsx_network_mapping(
+                    context.session, new_net['id'],
+                    lswitch['uuid'])
             if (net_data.get(mpnet.SEGMENTS) and
                 isinstance(provider_type, bool)):
                 net_bindings = []
@@ -1007,7 +1027,11 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         for port in router_iface_ports:
             nvp_switch_id, nvp_port_id = nsx_utils.get_nsx_switch_and_port_id(
                 context.session, self.cluster, id)
+        # Before removing entry from Neutron DB, retrieve NSX switch
+        # identifiers for removing them from backend
+        if not external:
+            lswitch_ids = nsx_utils.get_nsx_switch_ids(
+                context.session, self.cluster, id)
         super(NvpPluginV2, self).delete_network(context, id)
         # clean up network owned ports
         for port in router_iface_ports:
@@ -1036,8 +1060,6 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         # Do not go to NVP for external networks
         if not external:
             try:
-                lswitch_ids = [ls['uuid'] for ls in
-                               nvplib.get_lswitches(self.cluster, id)]
                 nvplib.delete_networks(self.cluster, id, lswitch_ids)
                 LOG.debug(_("delete_network completed for tenant: %s"),
                           context.tenant_id)
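
Note: the neutron_nsx_network_mappings rows carry ON DELETE CASCADE on networks.id, so they vanish the moment the superclass deletes the network row. The ordering above is therefore deliberate; a condensed sketch of the constraint, using only names from this diff:

# 1. Read the NSX ids while the mapping rows still exist
lswitch_ids = nsx_utils.get_nsx_switch_ids(context.session, self.cluster, id)
# 2. Delete the Neutron network (cascades the mapping rows away)
super(NvpPluginV2, self).delete_network(context, id)
# 3. Backend cleanup remains possible thanks to step 1
nvplib.delete_networks(self.cluster, id, lswitch_ids)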

View File

@@ -1604,16 +1604,12 @@ class VcnsCallbacks(object):
 def _process_base_create_lswitch_args(*args, **kwargs):
     tags = [{"tag": nvplib.NEUTRON_VERSION, "scope": "quantum"}]
-    if args[1]:
-        tags.append({"tag": args[1], "scope": "os_tid"})
-    switch_name = args[2]
-    tz_config = args[3]
-    if "neutron_net_id" in kwargs or len(args) >= 5:
-        neutron_net_id = kwargs.get('neutron_net_id')
-        if neutron_net_id is None:
-            neutron_net_id = args[4]
-        tags.append({"tag": neutron_net_id,
-                     "scope": "quantum_net_id"})
+    tags.append({"tag": args[1],
+                 "scope": "quantum_net_id"})
+    if args[2]:
+        tags.append({"tag": args[2], "scope": "os_tid"})
+    switch_name = args[3]
+    tz_config = args[4]
     if kwargs.get("shared", False) or len(args) >= 6:
         tags.append({"tag": "true", "scope": "shared"})
     if kwargs.get("tags"):
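
Note: this makes the neutron network id a mandatory second positional argument (args[1]) and shifts the tenant id, switch name and transport zone config one slot to the right; the old neutron_net_id kwarg/trailing-argument handling disappears. An illustrative before/after call, mirroring the updated tests further below:

# Before: neutron_net_id was optional, via kwarg or fifth positional
nsp._process_base_create_lswitch_args(cluster, tenant_id, name, tz_config,
                                      neutron_net_id='net-id')
# After: the network id always follows the cluster argument
nsp._process_base_create_lswitch_args(cluster, 'net-id', tenant_id, name,
                                      tz_config)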

View File

@@ -25,6 +25,54 @@ from neutron.plugins.nicira import nvplib

 LOG = log.getLogger(__name__)


+def fetch_nsx_switches(session, cluster, neutron_net_id):
+    """Retrieve logical switches for a neutron network.
+
+    This function is optimized for fetching all the lswitches always
+    with a single NSX query.
+    If there is more than 1 logical switch (chained switches use case)
+    NSX lswitches are queried by 'quantum_net_id' tag. Otherwise the NSX
+    lswitch is directly retrieved by id (more efficient).
+    """
+    nsx_switch_ids = get_nsx_switch_ids(session, cluster, neutron_net_id)
+    if len(nsx_switch_ids) > 1:
+        lswitches = nvplib.get_lswitches(cluster, neutron_net_id)
+    else:
+        lswitches = [nvplib.get_lswitch_by_id(
+            cluster, nsx_switch_ids[0])]
+    return lswitches
+
+
+def get_nsx_switch_ids(session, cluster, neutron_network_id):
+    """Return the NSX switch ids for a given neutron network.
+
+    First lookup for mappings in Neutron database. If no mapping is
+    found, query the NSX backend and add the mappings.
+    """
+    nsx_switch_ids = nicira_db.get_nsx_switch_ids(
+        session, neutron_network_id)
+    if not nsx_switch_ids:
+        # Find logical switches from backend.
+        # This is a rather expensive query, but it won't be executed
+        # more than once for each network in Neutron's lifetime
+        nsx_switches = nvplib.get_lswitches(cluster, neutron_network_id)
+        if not nsx_switches:
+            LOG.warn(_("Unable to find NSX switches for Neutron network %s"),
+                     neutron_network_id)
+            return
+        nsx_switch_ids = []
+        with session.begin(subtransactions=True):
+            for nsx_switch in nsx_switches:
+                nsx_switch_id = nsx_switch['uuid']
+                nsx_switch_ids.append(nsx_switch_id)
+                # Create DB mapping
+                nicira_db.add_neutron_nsx_network_mapping(
+                    session,
+                    neutron_network_id,
+                    nsx_switch_id)
+    return nsx_switch_ids
+
+
 def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
     """Return the NSX switch and port uuids for a given neutron port.

View File

@@ -249,8 +249,9 @@ class NvpSynchronizer():
         if not lswitches:
             # Try to get logical switches from nvp
             try:
-                lswitches = nvplib.get_lswitches(
-                    self._cluster, neutron_network_data['id'])
+                lswitches = nsx_utils.fetch_nsx_switches(
+                    context.session, self._cluster,
+                    neutron_network_data['id'])
             except exceptions.NetworkNotFound:
                 # TODO(salv-orlando): We should be catching
                 # NvpApiClient.ResourceNotFound here

View File

@@ -48,6 +48,14 @@ def add_network_binding(session, network_id, binding_type, phy_uuid, vlan_id):
     return binding


+def add_neutron_nsx_network_mapping(session, neutron_id, nsx_switch_id):
+    with session.begin(subtransactions=True):
+        mapping = nicira_models.NeutronNsxNetworkMapping(
+            neutron_id=neutron_id, nsx_id=nsx_switch_id)
+        session.add(mapping)
+        return mapping
+
+
 def add_neutron_nsx_port_mapping(session, neutron_id,
                                  nsx_switch_id, nsx_port_id):
     session.begin(subtransactions=True)
@@ -74,6 +82,14 @@ def add_neutron_nsx_port_mapping(session, neutron_id,
     return mapping


+def get_nsx_switch_ids(session, neutron_id):
+    # This function returns a list of NSX switch identifiers because of
+    # the possibility of chained logical switches
+    return [mapping['nsx_id'] for mapping in
+            session.query(nicira_models.NeutronNsxNetworkMapping).filter_by(
+                neutron_id=neutron_id)]
+
+
 def get_nsx_switch_and_port_id(session, neutron_id):
     try:
         mapping = (session.query(nicira_models.NeutronNsxPortMapping).
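
Note: a minimal round trip through the new accessors (session is a standard Neutron DB session; the uuids are illustrative):

# Chained switches: multiple rows per network are legal, since the
# composite primary key only forbids duplicate (neutron_id, nsx_id) pairs.
nicira_db.add_neutron_nsx_network_mapping(session, net_id, 'ls-uuid-1')
nicira_db.add_neutron_nsx_network_mapping(session, net_id, 'ls-uuid-2')
result = nicira_db.get_nsx_switch_ids(session, net_id)
assert sorted(result) == ['ls-uuid-1', 'ls-uuid-2']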

View File

@@ -57,6 +57,19 @@ class NvpNetworkBinding(model_base.BASEV2):
                                          self.vlan_id)


+class NeutronNsxNetworkMapping(model_base.BASEV2):
+    """Maps neutron network identifiers to NSX identifiers.
+
+    Because of chained logical switches more than one mapping might exist
+    for a single Neutron network.
+    """
+    __tablename__ = 'neutron_nsx_network_mappings'
+    neutron_id = Column(String(36),
+                        ForeignKey('networks.id', ondelete='CASCADE'),
+                        primary_key=True)
+    nsx_id = Column(String(36), primary_key=True)
+
+
 class NeutronNsxPortMapping(model_base.BASEV2):
     """Represents the mapping between neutron and nvp port uuids."""

View File

@@ -202,7 +202,29 @@ def get_all_query_pages(path, c):
 # -------------------------------------------------------------------
 # Network functions
 # -------------------------------------------------------------------
+def get_lswitch_by_id(cluster, lswitch_id):
+    try:
+        lswitch_uri_path = _build_uri_path(
+            LSWITCH_RESOURCE, lswitch_id,
+            relations="LogicalSwitchStatus")
+        return do_request(HTTP_GET, lswitch_uri_path, cluster=cluster)
+    except exception.NotFound:
+        # FIXME(salv-orlando): this should not raise a neutron exception
+        raise exception.NetworkNotFound(net_id=lswitch_id)
+
+
 def get_lswitches(cluster, neutron_net_id):
+
+    def lookup_switches_by_tag():
+        # Fetch extra logical switches
+        lswitch_query_path = _build_uri_path(
+            LSWITCH_RESOURCE,
+            fields="uuid,display_name,tags,lport_count",
+            relations="LogicalSwitchStatus",
+            filters={'tag': neutron_net_id,
+                     'tag_scope': 'quantum_net_id'})
+        return get_all_query_pages(lswitch_query_path, cluster)
+
     lswitch_uri_path = _build_uri_path(LSWITCH_RESOURCE, neutron_net_id,
                                        relations="LogicalSwitchStatus")
     results = []
@@ -212,33 +234,30 @@ def get_lswitches(cluster, neutron_net_id):
             for tag in ls['tags']:
                 if (tag['scope'] == "multi_lswitch" and
                     tag['tag'] == "True"):
-                    # Fetch extra logical switches
-                    extra_lswitch_uri_path = _build_uri_path(
-                        LSWITCH_RESOURCE,
-                        fields="uuid,display_name,tags,lport_count",
-                        relations="LogicalSwitchStatus",
-                        filters={'tag': neutron_net_id,
-                                 'tag_scope': 'quantum_net_id'})
-                    extra_switches = get_all_query_pages(extra_lswitch_uri_path,
-                                                         cluster)
-                    results.extend(extra_switches)
-        return results
+                    results.extend(lookup_switches_by_tag())
     except exception.NotFound:
+        # This is legit if the neutron network was created using
+        # a post-Havana version of the plugin
+        results.extend(lookup_switches_by_tag())
+    if results:
+        return results
+    else:
         raise exception.NetworkNotFound(net_id=neutron_net_id)


-def create_lswitch(cluster, tenant_id, display_name,
+def create_lswitch(cluster, neutron_net_id, tenant_id, display_name,
                    transport_zones_config,
-                   neutron_net_id=None,
                    shared=None,
                    **kwargs):
+    # The tag scope adopts a slightly different naming convention for
+    # historical reasons
     lswitch_obj = {"display_name": utils.check_and_truncate(display_name),
                    "transport_zones": transport_zones_config,
                    "tags": [{"tag": tenant_id, "scope": "os_tid"},
+                            {"tag": neutron_net_id, "scope": "quantum_net_id"},
                             {"tag": NEUTRON_VERSION, "scope": "quantum"}]}
-    if neutron_net_id:
-        lswitch_obj["tags"].append({"tag": neutron_net_id,
-                                    "scope": "quantum_net_id"})
+    # TODO(salv-orlando): Now that we have async status synchronization
+    # this tag is perhaps not needed anymore
    if shared:
         lswitch_obj["tags"].append({"tag": "true",
                                     "scope": "shared"})

View File

@@ -259,12 +259,14 @@ class TestProxyCreateLswitch(base.BaseTestCase):
         ]
         self.tags = [
             {'scope': 'quantum', 'tag': nvplib.NEUTRON_VERSION},
+            {'scope': 'quantum_net_id', 'tag': 'foo_id'},
             {'scope': 'os_tid', 'tag': self.tenant_id}
         ]
         self.cluster = None

     def test_create_lswitch_with_basic_args(self):
         result = nsp._process_base_create_lswitch_args(self.cluster,
+                                                       'foo_id',
                                                        self.tenant_id,
                                                        self.display_name,
                                                        self.tz_config)
@@ -272,26 +274,9 @@ class TestProxyCreateLswitch(base.BaseTestCase):
         self.assertEqual(self.tz_config, result[1])
         self.assertEqual(self.tags, result[2])

-    def test_create_lswitch_with_neutron_net_id_as_kwarg(self):
-        result = nsp._process_base_create_lswitch_args(self.cluster,
-                                                       self.tenant_id,
-                                                       self.display_name,
-                                                       self.tz_config,
-                                                       neutron_net_id='foo')
-        expected = self.tags + [{'scope': 'quantum_net_id', 'tag': 'foo'}]
-        self.assertEqual(expected, result[2])
-
-    def test_create_lswitch_with_neutron_net_id_as_arg(self):
-        result = nsp._process_base_create_lswitch_args(self.cluster,
-                                                       self.tenant_id,
-                                                       self.display_name,
-                                                       self.tz_config,
-                                                       'foo')
-        expected = self.tags + [{'scope': 'quantum_net_id', 'tag': 'foo'}]
-        self.assertEqual(expected, result[2])
-
     def test_create_lswitch_with_shared_as_kwarg(self):
         result = nsp._process_base_create_lswitch_args(self.cluster,
+                                                       'foo_id',
                                                        self.tenant_id,
                                                        self.display_name,
                                                        self.tz_config,
@@ -301,19 +286,19 @@ class TestProxyCreateLswitch(base.BaseTestCase):
     def test_create_lswitch_with_shared_as_arg(self):
         result = nsp._process_base_create_lswitch_args(self.cluster,
+                                                       'foo_id',
                                                        self.tenant_id,
                                                        self.display_name,
                                                        self.tz_config,
-                                                       'foo',
                                                        True)
-        additional_tags = [{'scope': 'quantum_net_id', 'tag': 'foo'},
-                           {'scope': 'shared', 'tag': 'true'}]
+        additional_tags = [{'scope': 'shared', 'tag': 'true'}]
         expected = self.tags + additional_tags
         self.assertEqual(expected, result[2])

     def test_create_lswitch_with_additional_tags(self):
         more_tags = [{'scope': 'foo_scope', 'tag': 'foo_tag'}]
         result = nsp._process_base_create_lswitch_args(self.cluster,
+                                                       'foo_id',
                                                        self.tenant_id,
                                                        self.display_name,
                                                        self.tz_config,

View File

@@ -39,6 +39,17 @@ class NsxUtilsTestCase(base.BaseTestCase):
                    module_name='dbexts.nicira_db')).start()
         self.addCleanup(mock.patch.stopall)

+    def _mock_network_mapping_db_calls(self, ret_value):
+        # Mock relevant db calls
+        # This will allow for avoiding setting up the plugin
+        # for creating db entries
+        mock.patch(nicira_method('get_nsx_switch_ids',
+                                 module_name='dbexts.nicira_db'),
+                   return_value=ret_value).start()
+        mock.patch(nicira_method('add_neutron_nsx_network_mapping',
+                                 module_name='dbexts.nicira_db')).start()
+        self.addCleanup(mock.patch.stopall)
+
     def _verify_get_nsx_switch_and_port_id(self, exp_ls_uuid, exp_lp_uuid):
         # The nvplib and db calls are mocked, therefore the cluster
         # and the neutron_port_id parameters can be set to None
@@ -47,6 +58,16 @@ class NsxUtilsTestCase(base.BaseTestCase):
         self.assertEqual(exp_ls_uuid, ls_uuid)
         self.assertEqual(exp_lp_uuid, lp_uuid)

+    def _verify_get_nsx_switch_ids(self, exp_ls_uuids):
+        # The nvplib and db calls are mocked, therefore the cluster
+        # and the neutron_network_id parameters can be set to None
+        ls_uuids = nsx_utils.get_nsx_switch_ids(
+            db_api.get_session(), None, None)
+        for ls_uuid in ls_uuids or []:
+            self.assertIn(ls_uuid, exp_ls_uuids)
+            exp_ls_uuids.remove(ls_uuid)
+        self.assertFalse(exp_ls_uuids)
+
     def test_get_nsx_switch_and_port_id_from_db_mappings(self):
         # This test is representative of the 'standard' case in which both the
         # switch and the port mappings were stored in the neutron db
@@ -94,3 +115,28 @@ class NsxUtilsTestCase(base.BaseTestCase):
         with mock.patch(nicira_method('query_lswitch_lports'),
                         return_value=[]):
             self._verify_get_nsx_switch_and_port_id(None, None)
+
+    def test_get_nsx_switch_ids_from_db_mappings(self):
+        # This test is representative of the 'standard' case in which the
+        # lswitch mappings were stored in the neutron db
+        exp_ls_uuids = [uuidutils.generate_uuid()]
+        self._mock_network_mapping_db_calls(exp_ls_uuids)
+        self._verify_get_nsx_switch_ids(exp_ls_uuids)
+
+    def test_get_nsx_switch_ids_no_db_mapping(self):
+        # This test is representative of the case where db mappings were not
+        # found for a given network identifier
+        exp_ls_uuids = [uuidutils.generate_uuid()]
+        self._mock_network_mapping_db_calls(None)
+        with mock.patch(nicira_method('get_lswitches'),
+                        return_value=[{'uuid': uuid}
+                                      for uuid in exp_ls_uuids]):
+            self._verify_get_nsx_switch_ids(exp_ls_uuids)
+
+    def test_get_nsx_switch_ids_no_mapping_returns_None(self):
+        # This test verifies that the function returns None if the mappings
+        # are not found both in the db and in the backend
+        self._mock_network_mapping_db_calls(None)
+        with mock.patch(nicira_method('get_lswitches'),
+                        return_value=[]):
+            self._verify_get_nsx_switch_ids(None)

View File

@@ -379,7 +379,9 @@ class NvpSyncTestCase(base.BaseTestCase):
     def _test_sync(self, exp_net_status,
                    exp_port_status, exp_router_status,
                    action_callback=None, sp=None):
-        neutron_net_id = ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
+        ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
+        neutron_net_id = self._get_tag_dict(
+            self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id']
         lp_uuid = self.fc._fake_lswitch_lport_dict.keys()[0]
         neutron_port_id = self._get_tag_dict(
             self.fc._fake_lswitch_lport_dict[lp_uuid]['tags'])['q_port_id']
@@ -540,7 +542,9 @@ class NvpSyncTestCase(base.BaseTestCase):
         ctx = context.get_admin_context()
         with self._populate_data(ctx):
             # Put a network down to verify synchronization
-            q_net_id = ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
+            ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
+            q_net_id = self._get_tag_dict(
+                self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id']
             self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false'
             q_net_data = self._plugin._get_network(ctx, q_net_id)
             self._plugin._synchronizer.synchronize_network(ctx, q_net_data)
@@ -558,7 +562,9 @@ class NvpSyncTestCase(base.BaseTestCase):
         ctx = context.get_admin_context()
         with self._populate_data(ctx):
             # Put a network down to verify punctual synchronization
-            q_net_id = ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
+            ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
+            q_net_id = self._get_tag_dict(
+                self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id']
             self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false'
             q_net_data = self._plugin.get_network(ctx, q_net_id)
             self.assertEqual(constants.NET_STATUS_DOWN, q_net_data['status'])

View File

@@ -262,7 +262,7 @@ class TestNvplibL2Gateway(NvplibTestCase):
         node_uuid = _uuid()
         transport_zones_config = [{'zone_uuid': _uuid(),
                                    'transport_type': 'stt'}]
-        lswitch = nvplib.create_lswitch(self.fake_cluster, tenant_id,
+        lswitch = nvplib.create_lswitch(self.fake_cluster, _uuid(), tenant_id,
                                         'fake-switch', transport_zones_config)
         gw_id = self._create_gw_service(node_uuid, 'fake-gw')['uuid']
         lport = nvplib.create_lport(self.fake_cluster,
@@ -294,6 +294,7 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
         transport_zones_config = [{'zone_uuid': _uuid(),
                                    'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster,
+                                        _uuid(),
                                         tenant_id,
                                         'fake-switch',
                                         transport_zones_config)
@@ -309,6 +310,7 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
                                    'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster,
                                         tenant_id,
+                                        _uuid(),
                                         '*' * 50,
                                         transport_zones_config)
         res_lswitch = nvplib.get_lswitches(self.fake_cluster,
@@ -321,28 +323,36 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
         tenant_id = 'pippo'
         transport_zones_config = [{'zone_uuid': _uuid(),
                                    'transport_type': 'stt'}]
+        network_id = _uuid()
         main_lswitch = nvplib.create_lswitch(
-            self.fake_cluster, tenant_id, 'fake-switch',
-            transport_zones_config,
+            self.fake_cluster, network_id,
+            tenant_id, 'fake-switch', transport_zones_config,
             tags=[{'scope': 'multi_lswitch', 'tag': 'True'}])
         # Create secondary lswitch
-        nvplib.create_lswitch(
-            self.fake_cluster, tenant_id, 'fake-switch-2',
-            transport_zones_config,
-            neutron_net_id=main_lswitch['uuid'])
+        second_lswitch = nvplib.create_lswitch(
+            self.fake_cluster, network_id,
+            tenant_id, 'fake-switch-2', transport_zones_config)
         res_lswitch = nvplib.get_lswitches(self.fake_cluster,
-                                           main_lswitch['uuid'])
+                                           network_id)
         self.assertEqual(len(res_lswitch), 2)
-        self.assertEqual(res_lswitch[0]['uuid'],
-                         main_lswitch['uuid'])
-        switch_1_tags = self._build_tag_dict(res_lswitch[0]['tags'])
-        switch_2_tags = self._build_tag_dict(res_lswitch[1]['tags'])
-        self.assertIn('multi_lswitch', switch_1_tags)
-        self.assertNotIn('multi_lswitch', switch_2_tags)
-        self.assertNotIn('quantum_net_id', switch_1_tags)
-        self.assertIn('quantum_net_id', switch_2_tags)
-        self.assertEqual(switch_2_tags['quantum_net_id'],
-                         main_lswitch['uuid'])
+        switch_uuids = [ls['uuid'] for ls in res_lswitch]
+        self.assertIn(main_lswitch['uuid'], switch_uuids)
+        self.assertIn(second_lswitch['uuid'], switch_uuids)
+        for ls in res_lswitch:
+            if ls['uuid'] == main_lswitch['uuid']:
+                main_ls = ls
+            else:
+                second_ls = ls
+        main_ls_tags = self._build_tag_dict(main_ls['tags'])
+        second_ls_tags = self._build_tag_dict(second_ls['tags'])
+        self.assertIn('multi_lswitch', main_ls_tags)
+        self.assertNotIn('multi_lswitch', second_ls_tags)
+        self.assertIn('quantum_net_id', main_ls_tags)
+        self.assertIn('quantum_net_id', second_ls_tags)
+        self.assertEqual(main_ls_tags['quantum_net_id'],
+                         network_id)
+        self.assertEqual(second_ls_tags['quantum_net_id'],
+                         network_id)

     def test_update_lswitch(self):
         new_name = 'new-name'
@@ -350,6 +360,7 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
         transport_zones_config = [{'zone_uuid': _uuid(),
                                    'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster,
+                                        _uuid(),
                                         'pippo',
                                         'fake-switch',
                                         transport_zones_config)
@@ -373,6 +384,7 @@ class TestNvplibLogicalSwitches(NvplibTestCase):
         transport_zones_config = [{'zone_uuid': _uuid(),
                                    'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster,
+                                        _uuid(),
                                         'pippo',
                                         'fake-switch',
                                         transport_zones_config)
@@ -933,6 +945,7 @@ class TestNvplibLogicalRouters(NvplibTestCase):
         transport_zones_config = [{'zone_uuid': _uuid(),
                                    'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster,
+                                        _uuid(),
                                         tenant_id, 'fake-switch',
                                         transport_zones_config)
         lport = nvplib.create_lport(self.fake_cluster, lswitch['uuid'],
@@ -1310,7 +1323,7 @@ class TestNvplibLogicalPorts(NvplibTestCase):
         transport_zones_config = [{'zone_uuid': _uuid(),
                                    'transport_type': 'stt'}]
         lswitch = nvplib.create_lswitch(self.fake_cluster,
-                                        tenant_id, 'fake-switch',
+                                        _uuid(), tenant_id, 'fake-switch',
                                         transport_zones_config)
         lport = nvplib.create_lport(self.fake_cluster, lswitch['uuid'],
                                     tenant_id, neutron_port_id,
@@ -1349,7 +1362,7 @@ class TestNvplibLogicalPorts(NvplibTestCase):
         neutron_port_id = 'whatever'
         transport_zones_config = [{'zone_uuid': _uuid(),
                                    'transport_type': 'stt'}]
-        lswitch = nvplib.create_lswitch(self.fake_cluster, tenant_id,
+        lswitch = nvplib.create_lswitch(self.fake_cluster, tenant_id, _uuid(),
                                         'fake-switch', transport_zones_config)
         lport = nvplib.get_port_by_neutron_tag(self.fake_cluster,
                                                lswitch['uuid'],