From c5fa665de3173f3ad82cc3e7624b5968bc52c08d Mon Sep 17 00:00:00 2001
From: shihanzhang
Date: Fri, 21 Aug 2015 09:51:59 +0800
Subject: [PATCH] ML2: update port's status to DOWN if its binding info has
 changed

This fixes the problem that when two or more ports in a network are
migrated to a host that did not previously have any ports in the same
network, the new host is sometimes not told about the IP/MAC addresses
of all the other ports in the network. In other words, initial
L2population does not work for the new host. This is because the l2pop
mechanism driver only sends catch-up information to the host when it
thinks it is dealing with the first active port on that host; and
currently, when multiple ports are migrated to a new host, there is
always more than one active port, so the condition above is never
triggered.

The fix is for the ML2 plugin to set a port's status to DOWN when its
binding info changes.

This patch also fixes the bug where Nova thinks it should not wait for
any events from Neutron because all ports are already active.

Closes-Bug: #1483601
Closes-Bug: #1443421
Closes-Bug: #1522824
Related-Bug: #1450604
Change-Id: I342ad910360b21085316c25df2154854fd1001b2
---
 .../plugins/ml2/drivers/l2pop/mech_driver.py  | 53 ++++++++-----------
 neutron/plugins/ml2/plugin.py                 |  4 ++
 .../ml2/drivers/l2pop/test_mech_driver.py     | 22 ++++----
 neutron/tests/unit/plugins/ml2/test_plugin.py | 14 ++++-
 4 files changed, 49 insertions(+), 44 deletions(-)

diff --git a/neutron/plugins/ml2/drivers/l2pop/mech_driver.py b/neutron/plugins/ml2/drivers/l2pop/mech_driver.py
index e4ddfe2e801..13eda812fa7 100644
--- a/neutron/plugins/ml2/drivers/l2pop/mech_driver.py
+++ b/neutron/plugins/ml2/drivers/l2pop/mech_driver.py
@@ -38,7 +38,6 @@ class L2populationMechanismDriver(api.MechanismDriver):
     def initialize(self):
         LOG.debug("Experimental L2 population driver")
         self.rpc_ctx = n_context.get_admin_context_without_session()
-        self.migrated_ports = {}
 
     def _get_port_fdb_entries(self, port):
         return [l2pop_rpc.PortInfo(mac_address=port['mac_address'],
@@ -48,8 +47,8 @@ class L2populationMechanismDriver(api.MechanismDriver):
     def delete_port_postcommit(self, context):
         port = context.current
         agent_host = context.host
-
-        fdb_entries = self._get_agent_fdb(context, port, agent_host)
+        fdb_entries = self._get_agent_fdb(context.bottom_bound_segment,
+                                          port, agent_host)
         self.L2populationAgentNotify.remove_fdb_entries(self.rpc_ctx,
                                                         fdb_entries)
 
@@ -122,50 +121,41 @@ class L2populationMechanismDriver(api.MechanismDriver):
             if context.status == const.PORT_STATUS_DOWN:
                 agent_host = context.host
                 fdb_entries = self._get_agent_fdb(
-                    context, port, agent_host)
+                    context.bottom_bound_segment, port, agent_host)
                 self.L2populationAgentNotify.remove_fdb_entries(
                     self.rpc_ctx, fdb_entries)
         elif (context.host != context.original_host
-              and context.status == const.PORT_STATUS_ACTIVE
-              and not self.migrated_ports.get(orig['id'])):
-            # The port has been migrated. We have to store the original
-            # binding to send appropriate fdb once the port will be set
-            # on the destination host
-            self.migrated_ports[orig['id']] = (
-                (orig, context.original_host))
+              and context.original_status == const.PORT_STATUS_ACTIVE
+              and context.status == const.PORT_STATUS_DOWN):
+            # The port has been migrated. Send notification about port
+            # removal from old host.
+            fdb_entries = self._get_agent_fdb(
+                context.original_bottom_bound_segment,
+                orig, context.original_host)
+            self.L2populationAgentNotify.remove_fdb_entries(
+                self.rpc_ctx, fdb_entries)
         elif context.status != context.original_status:
             if context.status == const.PORT_STATUS_ACTIVE:
                 self._update_port_up(context)
             elif context.status == const.PORT_STATUS_DOWN:
                 fdb_entries = self._get_agent_fdb(
-                    context, port, context.host)
+                    context.bottom_bound_segment, port, context.host)
                 self.L2populationAgentNotify.remove_fdb_entries(
                     self.rpc_ctx, fdb_entries)
-            elif context.status == const.PORT_STATUS_BUILD:
-                orig = self.migrated_ports.pop(port['id'], None)
-                if orig:
-                    original_port = orig[0]
-                    original_host = orig[1]
-                    # this port has been migrated: remove its entries from fdb
-                    fdb_entries = self._get_agent_fdb(
-                        context, original_port, original_host)
-                    self.L2populationAgentNotify.remove_fdb_entries(
-                        self.rpc_ctx, fdb_entries)
 
-    def _get_and_validate_segment(self, context, port_id, agent):
-        segment = context.bottom_bound_segment
+    def _validate_segment(self, segment, port_id, agent):
         if not segment:
             LOG.debug("Port %(port)s updated by agent %(agent)s isn't bound "
                       "to any segment", {'port': port_id, 'agent': agent})
-            return
+            return False
 
         network_types = l2pop_db.get_agent_l2pop_network_types(agent)
         if network_types is None:
             network_types = l2pop_db.get_agent_tunnel_types(agent)
         if segment['network_type'] not in network_types:
-            return
+            return False
 
-        return segment
+        return True
 
     def _create_agent_fdb(self, session, agent, segment, network_id):
         agent_fdb_entries = {network_id:
@@ -220,8 +210,8 @@ class L2populationMechanismDriver(api.MechanismDriver):
             session, agent_host, network_id)
         agent_ip = l2pop_db.get_agent_ip(agent)
 
-        segment = self._get_and_validate_segment(context, port['id'], agent)
-        if not segment:
+        segment = context.bottom_bound_segment
+        if not self._validate_segment(segment, port['id'], agent):
             return
         other_fdb_entries = self._get_fdb_entries_template(
             segment, agent_ip, network_id)
@@ -250,7 +240,7 @@ class L2populationMechanismDriver(api.MechanismDriver):
         self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx,
                                                      other_fdb_entries)
 
-    def _get_agent_fdb(self, context, port, agent_host):
+    def _get_agent_fdb(self, segment, port, agent_host):
         if not agent_host:
             return
 
@@ -261,8 +251,7 @@ class L2populationMechanismDriver(api.MechanismDriver):
             session, agent_host, network_id)
         agent = l2pop_db.get_agent_by_host(db_api.get_session(),
                                            agent_host)
-        segment = self._get_and_validate_segment(context, port['id'], agent)
-        if not segment:
+        if not self._validate_segment(segment, port['id'], agent):
             return
 
         agent_ip = l2pop_db.get_agent_ip(agent)
diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py
index 4f32e499988..a1283171f19 100644
--- a/neutron/plugins/ml2/plugin.py
+++ b/neutron/plugins/ml2/plugin.py
@@ -284,6 +284,10 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                 binding.vif_details = ''
                 db.clear_binding_levels(session, port_id, original_host)
                 mech_context._clear_binding_levels()
+                port['status'] = const.PORT_STATUS_DOWN
+                super(Ml2Plugin, self).update_port(
+                    mech_context._plugin_context, port_id,
+                    {attributes.PORT: {'status': const.PORT_STATUS_DOWN}})
 
             if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
                 binding.vif_type = portbindings.VIF_TYPE_UNBOUND
diff --git a/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py b/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py
index 429a5769689..b147535d8eb 100644
--- a/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py
+++ b/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py
@@ -743,6 +743,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
                                device_owner=DEVICE_OWNER_COMPUTE,
                                arg_list=(portbindings.HOST_ID,),
                                **host_arg) as port1:
+            tunnel_ip = '20.0.0.1'
             p1 = port1['port']
             device1 = 'tap' + p1['id']
             self.callbacks.update_device_up(
@@ -750,22 +751,21 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
                 agent_id=HOST,
                 device=device1)
             if twice:
+                tunnel_ip = '20.0.0.4'
                 self._update_and_check_portbinding(p1['id'], HOST_4)
-                self._update_and_check_portbinding(p1['id'], HOST_2)
+                self.callbacks.update_device_up(self.adminContext,
+                                                agent_id=HOST_4,
+                                                device=device1)
+
                 self.mock_fanout.reset_mock()
-            # NOTE(yamamoto): see bug #1441488
-            self.adminContext.session.expire_all()
-            self.callbacks.get_device_details(
-                self.adminContext,
-                device=device1,
-                agent_id=HOST_2)
+            self._update_and_check_portbinding(p1['id'], HOST_2)
 
             p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
             expected = {p1['network_id']:
                         {'ports':
-                         {'20.0.0.1': [constants.FLOODING_ENTRY,
-                                       l2pop_rpc.PortInfo(
-                                           p1['mac_address'],
-                                           p1_ips[0])]},
+                         {tunnel_ip: [constants.FLOODING_ENTRY,
+                                      l2pop_rpc.PortInfo(
+                                          p1['mac_address'],
+                                          p1_ips[0])]},
                          'network_type': 'vxlan',
                          'segment_id': 1}}
diff --git a/neutron/tests/unit/plugins/ml2/test_plugin.py b/neutron/tests/unit/plugins/ml2/test_plugin.py
index a9c40f8c6fa..50503d96082 100644
--- a/neutron/tests/unit/plugins/ml2/test_plugin.py
+++ b/neutron/tests/unit/plugins/ml2/test_plugin.py
@@ -516,6 +516,16 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
             plugin.update_port(ctx, port['port']['id'], port)
             self.assertTrue(sg_member_update.called)
 
+    def test_update_port_host_id_changed(self):
+        ctx = context.get_admin_context()
+        plugin = manager.NeutronManager.get_plugin()
+        host_id = {portbindings.HOST_ID: 'host1'}
+        with self.port(**host_id) as port:
+            plugin.update_port_status(ctx, port['port']['id'], 'UP')
+            port['port']['binding:host_id'] = 'host2'
+            result = plugin.update_port(ctx, port['port']['id'], port)
+            self.assertEqual(constants.PORT_STATUS_DOWN, result['status'])
+
     def test_update_port_status_with_network(self):
         ctx = context.get_admin_context()
         plugin = manager.NeutronManager.get_plugin()
@@ -1801,7 +1811,9 @@ class TestMl2PluginCreateUpdateDeletePort(base.BaseTestCase):
     def test_create_port_rpc_outside_transaction(self):
         with mock.patch.object(ml2_plugin.Ml2Plugin, '__init__') as init,\
                 mock.patch.object(base_plugin.NeutronDbPluginV2,
-                                  'create_port') as db_create_port:
+                                  'create_port') as db_create_port, \
+                mock.patch.object(base_plugin.NeutronDbPluginV2,
+                                  'update_port'):
             init.return_value = None
             new_port = mock.MagicMock()
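
Reviewer note (not part of the patch): below is a minimal, self-contained
Python sketch of the state machine this change relies on. All names here
(Port, rebind, is_first_active_port_on_host) are hypothetical stand-ins,
not Neutron code; the sketch only illustrates why forcing a rebound port's
status to DOWN lets the l2pop "first active port on this host" check fire
again after a migration.

# Hypothetical illustration only; simplified stand-ins for the real
# ML2/l2pop objects and RPC callbacks.

PORT_STATUS_ACTIVE = 'ACTIVE'
PORT_STATUS_DOWN = 'DOWN'


class Port(object):
    def __init__(self, port_id, host):
        self.id = port_id
        self.host = host
        self.status = PORT_STATUS_ACTIVE


def rebind(port, new_host):
    """Mirrors the plugin.py hunk: a host change resets status to DOWN."""
    if new_host != port.host:
        port.host = new_host
        port.status = PORT_STATUS_DOWN


def is_first_active_port_on_host(ports, host):
    """l2pop sends the catch-up FDB to a host only when this is True."""
    active = [p for p in ports
              if p.host == host and p.status == PORT_STATUS_ACTIVE]
    return len(active) == 1


ports = [Port('p1', 'host1'), Port('p2', 'host1')]

# Without the fix, both ports stay ACTIVE across the migration, so the
# first update_device_up on host2 never sees exactly one active port and
# the catch-up FDB is never sent. With the fix, rebinding drops both
# ports to DOWN:
for p in ports:
    rebind(p, 'host2')
assert all(p.status == PORT_STATUS_DOWN for p in ports)

# The destination agent then reports the first port up, the "first
# active port on this host" condition fires, and host2 receives the
# full FDB for the network.
ports[0].status = PORT_STATUS_ACTIVE
assert is_first_active_port_on_host(ports, 'host2')

The DOWN-to-ACTIVE round trip is also what re-arms Nova's wait for port
events, which is why the same status reset addresses both halves of the
commit message.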