NSX: Remove logic for creating chained logical switches

This patch removes the logic for creating chained logical switches in
the NSX backend for Neutron networks with flat or VLAN bindings.
Switch chaining is no longer useful, as the backend limitation that
once required it was removed long ago.

Existing networks backed by chained switches will continue to work.
The configuration parameters capping the number of ports per logical
switch are still honoured for backward compatibility, but they will be
marked for deprecation (one possible marking is sketched after the
change summary below).

Closes-Bug: #1376037

Change-Id: I92b84677afa15d13085d59be9de8060bf876d85f
Salvatore Orlando 2015-01-13 14:11:21 -08:00
parent c6ca2b56f5
commit 3fb04ed62b
2 changed files with 26 additions and 72 deletions
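
The commit message notes that the max-ports options will be marked for
deprecation. A minimal sketch of one way to do that with oslo.config
follows; the option names are the plugin's real ones, but the defaults
and help text are illustrative, and the deprecated_for_removal flag
assumes an oslo.config release that supports it.

    # Hypothetical sketch only, not part of this commit.
    from oslo_config import cfg

    deprecated_nsx_opts = [
        cfg.IntOpt('max_lp_per_bridged_ls',
                   default=5000,
                   deprecated_for_removal=True,
                   help="Maximum number of ports per chained (bridged) "
                        "logical switch; honoured only for networks "
                        "that already use chained switches."),
        cfg.IntOpt('max_lp_per_overlay_ls',
                   default=256,
                   deprecated_for_removal=True,
                   help="Maximum number of ports per overlay logical "
                        "switch."),
    ]

    # oslo.config then warns when a deprecated option is set in the
    # configuration file.
    cfg.CONF.register_opts(deprecated_nsx_opts, group='NSX')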


@@ -373,25 +373,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
     def _nsx_find_lswitch_for_port(self, context, port_data):
         network = self._get_network(context, port_data['network_id'])
-        network_bindings = nsx_db.get_network_bindings(
-            context.session, port_data['network_id'])
-        max_ports = self.nsx_opts.max_lp_per_overlay_ls
-        allow_extra_lswitches = False
-        for network_binding in network_bindings:
-            if network_binding.binding_type in (c_utils.NetworkTypes.FLAT,
-                                                c_utils.NetworkTypes.VLAN):
-                max_ports = self.nsx_opts.max_lp_per_bridged_ls
-                allow_extra_lswitches = True
-                break
-        try:
-            return self._handle_lswitch_selection(
-                context, self.cluster, network, network_bindings,
-                max_ports, allow_extra_lswitches)
-        except api_exc.NsxApiException:
-            err_desc = _("An exception occurred while selecting logical "
-                         "switch for the port")
-            LOG.exception(err_desc)
-            raise nsx_exc.NsxPluginException(err_msg=err_desc)
+        return self._handle_lswitch_selection(
+            context, self.cluster, network)

     def _nsx_create_port_helper(self, session, ls_uuid, port_data,
                                 do_port_security=True):
@@ -844,53 +827,31 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         super(NsxPluginV2, self).extend_port_dict_binding(port_res, port_db)
         port_res[pbin.VNIC_TYPE] = pbin.VNIC_NORMAL

-    def _handle_lswitch_selection(self, context, cluster, network,
-                                  network_bindings, max_ports,
-                                  allow_extra_lswitches):
+    def _handle_lswitch_selection(self, context, cluster, network):
+        # NOTE(salv-orlando): This method used to select a NSX logical switch
+        # with an available port, and create a new logical switch if
+        # necessary. As there is no more need to perform switch chaining in
+        # NSX, the logic for creating a new logical switch has been removed.
+        max_ports = self.nsx_opts.max_lp_per_overlay_ls
+        network_bindings = nsx_db.get_network_bindings(
+            context.session, network['id'])
+        for network_binding in network_bindings:
+            if network_binding.binding_type in (c_utils.NetworkTypes.FLAT,
+                                                c_utils.NetworkTypes.VLAN):
+                max_ports = self.nsx_opts.max_lp_per_bridged_ls
+        # This is still necessary as there could be chained switches in
+        # the deployment and the code needs to find the first one with
+        # an available slot for a port
         lswitches = nsx_utils.fetch_nsx_switches(
-            context.session, cluster, network.id)
+            context.session, cluster, network['id'])
         try:
             return [ls for ls in lswitches
                     if (ls['_relations']['LogicalSwitchStatus']
                         ['lport_count'] < max_ports)].pop(0)
         except IndexError:
-            # Too bad, no switch available
+            # Too bad, no switch where a port can be created
             LOG.debug("No switch has available ports (%d checked)",
                       len(lswitches))
-            if allow_extra_lswitches:
-                # The 'main' logical switch is either the only one available
-                # or the one where the 'multi_lswitch' tag was set
-                while lswitches:
-                    main_ls = lswitches.pop(0)
-                    tag_dict = dict((x['scope'], x['tag'])
-                                    for x in main_ls['tags'])
-                    if 'multi_lswitch' in tag_dict:
-                        break
-                else:
-                    # by construction this statement is hit if there is only one
-                    # logical switch and the multi_lswitch tag has not been set.
-                    # The tag must therefore be added.
-                    tags = main_ls['tags']
-                    tags.append({'tag': 'True', 'scope': 'multi_lswitch'})
-                    switchlib.update_lswitch(cluster,
-                                             main_ls['uuid'],
-                                             main_ls['display_name'],
-                                             network['tenant_id'],
-                                             tags=tags)
-                transport_zone_config = self._convert_to_nsx_transport_zones(
-                    cluster, network, bindings=network_bindings)
-                selected_lswitch = switchlib.create_lswitch(
-                    cluster, network.id, network.tenant_id,
-                    "%s-ext-%s" % (network.name, len(lswitches)),
-                    transport_zone_config)
-                # add a mapping between the neutron network and the newly
-                # created logical switch
-                nsx_db.add_neutron_nsx_network_mapping(
-                    context.session, network.id, selected_lswitch['uuid'])
-                return selected_lswitch
-            else:
-                LOG.error(_LE("Maximum number of logical ports reached for "
-                              "logical network %s"), network.id)
-                raise nsx_exc.NoMorePortsException(network=network.id)
+            raise nsx_exc.NoMorePortsException(network=network['id'])

     def _convert_to_nsx_transport_zones(self, cluster, network=None,
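
The surviving logic above is just a first-fit scan over the (possibly
still chained) switches. A self-contained illustration of that scan,
with a hypothetical select_lswitch helper and fake switch payloads that
mirror the _relations/LogicalSwitchStatus shape the plugin consumes:

    # Standalone sketch of the first-fit selection kept by
    # _handle_lswitch_selection; not NSX API code.
    def select_lswitch(lswitches, max_ports):
        try:
            # The first switch whose port count is below the limit wins.
            return [ls for ls in lswitches
                    if (ls['_relations']['LogicalSwitchStatus']
                        ['lport_count'] < max_ports)].pop(0)
        except IndexError:
            raise RuntimeError("No switch has available ports "
                               "(%d checked)" % len(lswitches))

    switches = [
        {'uuid': 'ls-1',
         '_relations': {'LogicalSwitchStatus': {'lport_count': 256}}},
        {'uuid': 'ls-2',
         '_relations': {'LogicalSwitchStatus': {'lport_count': 12}}},
    ]
    # ls-1 is full (256 is not < 256), so ls-2 is selected.
    assert select_lswitch(switches, max_ports=256)['uuid'] == 'ls-2'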


@@ -131,9 +131,9 @@ class TestPortsV2(NsxPluginV2TestCase,
     VIF_TYPE = portbindings.VIF_TYPE_OVS
     HAS_PORT_FILTER = True

-    def test_exhaust_ports_overlay_network(self):
-        cfg.CONF.set_override('max_lp_per_overlay_ls', 1, group='NSX')
+    def _test_exhaust_ports(self, providernet_args=None):
         with self.network(name='testnet',
+                          providernet_args=providernet_args,
                           arg_list=(pnet.NETWORK_TYPE,
                                     pnet.PHYSICAL_NETWORK,
                                     pnet.SEGMENTATION_ID)) as net:
@@ -142,22 +142,15 @@ class TestPortsV2(NsxPluginV2TestCase,
                     # creating another port should see an exception
                     self._create_port('json', net['network']['id'], 400)

+    def test_exhaust_ports_overlay_network(self):
+        cfg.CONF.set_override('max_lp_per_overlay_ls', 1, group='NSX')
+        self._test_exhaust_ports()
+
     def test_exhaust_ports_bridged_network(self):
         cfg.CONF.set_override('max_lp_per_bridged_ls', 1, group="NSX")
         providernet_args = {pnet.NETWORK_TYPE: 'flat',
                             pnet.PHYSICAL_NETWORK: 'tzuuid'}
-        with self.network(name='testnet',
-                          providernet_args=providernet_args,
-                          arg_list=(pnet.NETWORK_TYPE,
-                                    pnet.PHYSICAL_NETWORK,
-                                    pnet.SEGMENTATION_ID)) as net:
-            with self.subnet(network=net) as sub:
-                with self.port(subnet=sub):
-                    with self.port(subnet=sub):
-                        plugin = manager.NeutronManager.get_plugin()
-                        ls = nsxlib.switch.get_lswitches(plugin.cluster,
-                                                         net['network']['id'])
-                        self.assertEqual(len(ls), 2)
+        self._test_exhaust_ports(providernet_args=providernet_args)

     def test_update_port_delete_ip(self):
         # This test case overrides the default because the nsx plugin
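
The test change is a plain de-duplication: two near-identical tests
collapse into one parametrized helper, and the assertion that chaining
produced two switches is dropped along with the feature. A generic,
self-contained sketch of the pattern (plain unittest; class and
argument values here are hypothetical, not the Neutron test suite's):

    import unittest

    class ExhaustPortsTests(unittest.TestCase):

        def _test_exhaust_ports(self, providernet_args=None):
            # Shared body: everything that was duplicated lives here;
            # the provider-network arguments are the only per-variant
            # difference.
            args = providernet_args or {}
            self.assertIsInstance(args, dict)

        def test_exhaust_ports_overlay_network(self):
            self._test_exhaust_ports()

        def test_exhaust_ports_bridged_network(self):
            self._test_exhaust_ports(
                providernet_args={'provider:network_type': 'flat'})

    if __name__ == '__main__':
        unittest.main()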