ML2: update port's status to DOWN if its binding info has changed
This fixes the problem that when two or more ports in a network
are migrated to a host that did not previously have any ports in
the same network, the new host is sometimes not told about the
IP/MAC addresses of all the other ports in the network. In other
words, initial L2population does not work for the new host.
This is because the l2pop mechanism driver only sends catch-up
information to the host when it thinks it is dealing with the first
active port on that host; and currently, when multiple ports are
migrated to a new host, there is always more than one active port so
the condition above is never triggered.
The fix is for the ML2 plugin to set a port's status to DOWN when
its binding info changes.
This patch also fixes the bug where nova thinks it should not wait
for any events from neutron because all ports are already active.
Closes-bug: #1483601
Closes-bug: #1443421
Closes-Bug: #1522824
Related-Bug: #1450604
(cherry picked from commit c5fa665de3)
Conflicts: neutron/plugins/ml2/drivers/l2pop/mech_driver.py
Change-Id: I342ad910360b21085316c25df2154854fd1001b2
This commit is contained in:
parent
8caf401aaa
commit
a38cb93dde
|
@ -39,7 +39,6 @@ class L2populationMechanismDriver(api.MechanismDriver,
|
|||
def initialize(self):
|
||||
LOG.debug("Experimental L2 population driver")
|
||||
self.rpc_ctx = n_context.get_admin_context_without_session()
|
||||
self.migrated_ports = {}
|
||||
|
||||
def _get_port_fdb_entries(self, port):
|
||||
return [l2pop_rpc.PortInfo(mac_address=port['mac_address'],
|
||||
|
@ -126,13 +125,14 @@ class L2populationMechanismDriver(api.MechanismDriver,
|
|||
self.L2populationAgentNotify.remove_fdb_entries(
|
||||
self.rpc_ctx, fdb_entries)
|
||||
elif (context.host != context.original_host
|
||||
and context.status == const.PORT_STATUS_ACTIVE
|
||||
and not self.migrated_ports.get(orig['id'])):
|
||||
# The port has been migrated. We have to store the original
|
||||
# binding to send appropriate fdb once the port will be set
|
||||
# on the destination host
|
||||
self.migrated_ports[orig['id']] = (
|
||||
(orig, context.original_host))
|
||||
and context.original_status == const.PORT_STATUS_ACTIVE
|
||||
and context.status == const.PORT_STATUS_DOWN):
|
||||
# The port has been migrated. Send notification about port
|
||||
# removal from old host.
|
||||
fdb_entries = self._get_agent_fdb(
|
||||
context, orig, context.original_host)
|
||||
self.L2populationAgentNotify.remove_fdb_entries(
|
||||
self.rpc_ctx, fdb_entries)
|
||||
elif context.status != context.original_status:
|
||||
if context.status == const.PORT_STATUS_ACTIVE:
|
||||
self._update_port_up(context)
|
||||
|
@ -141,16 +141,6 @@ class L2populationMechanismDriver(api.MechanismDriver,
|
|||
context, port, context.host)
|
||||
self.L2populationAgentNotify.remove_fdb_entries(
|
||||
self.rpc_ctx, fdb_entries)
|
||||
elif context.status == const.PORT_STATUS_BUILD:
|
||||
orig = self.migrated_ports.pop(port['id'], None)
|
||||
if orig:
|
||||
original_port = orig[0]
|
||||
original_host = orig[1]
|
||||
# this port has been migrated: remove its entries from fdb
|
||||
fdb_entries = self._get_agent_fdb(
|
||||
context, original_port, original_host)
|
||||
self.L2populationAgentNotify.remove_fdb_entries(
|
||||
self.rpc_ctx, fdb_entries)
|
||||
|
||||
def _get_port_infos(self, context, port, agent_host):
|
||||
if not agent_host:
|
||||
|
@ -164,8 +154,12 @@ class L2populationMechanismDriver(api.MechanismDriver,
|
|||
return
|
||||
|
||||
agent_ip = self.get_agent_ip(agent)
|
||||
|
||||
segment = context.bottom_bound_segment
|
||||
# If a port has migrated, when we send remove_fdb_entries to original
|
||||
# host, we should use context.original_bottom_bound_segment
|
||||
if context.host == agent_host:
|
||||
segment = context.bottom_bound_segment
|
||||
else:
|
||||
segment = context.original_bottom_bound_segment
|
||||
if not segment:
|
||||
LOG.debug("Port %(port)s updated by agent %(agent)s isn't bound "
|
||||
"to any segment", {'port': port['id'], 'agent': agent})
|
||||
|
|
|
@ -251,6 +251,10 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
|
|||
binding.vif_details = ''
|
||||
db.clear_binding_levels(session, port_id, original_host)
|
||||
mech_context._clear_binding_levels()
|
||||
port['status'] = const.PORT_STATUS_DOWN
|
||||
super(Ml2Plugin, self).update_port(
|
||||
mech_context._plugin_context, port_id,
|
||||
{attributes.PORT: {'status': const.PORT_STATUS_DOWN}})
|
||||
|
||||
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
|
||||
binding.vif_type = portbindings.VIF_TYPE_UNBOUND
|
||||
|
|
|
@ -767,6 +767,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
|
|||
device_owner=DEVICE_OWNER_COMPUTE,
|
||||
arg_list=(portbindings.HOST_ID,),
|
||||
**host_arg) as port1:
|
||||
tunnel_ip = '20.0.0.1'
|
||||
p1 = port1['port']
|
||||
device1 = 'tap' + p1['id']
|
||||
self.callbacks.update_device_up(
|
||||
|
@ -774,22 +775,21 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
|
|||
agent_id=HOST,
|
||||
device=device1)
|
||||
if twice:
|
||||
tunnel_ip = '20.0.0.4'
|
||||
self._update_and_check_portbinding(p1['id'], HOST_4)
|
||||
self._update_and_check_portbinding(p1['id'], HOST_2)
|
||||
self.callbacks.update_device_up(self.adminContext,
|
||||
agent_id=HOST_4,
|
||||
device=device1)
|
||||
|
||||
self.mock_fanout.reset_mock()
|
||||
# NOTE(yamamoto): see bug #1441488
|
||||
self.adminContext.session.expire_all()
|
||||
self.callbacks.get_device_details(
|
||||
self.adminContext,
|
||||
device=device1,
|
||||
agent_id=HOST_2)
|
||||
self._update_and_check_portbinding(p1['id'], HOST_2)
|
||||
p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
|
||||
expected = {p1['network_id']:
|
||||
{'ports':
|
||||
{'20.0.0.1': [constants.FLOODING_ENTRY,
|
||||
l2pop_rpc.PortInfo(
|
||||
p1['mac_address'],
|
||||
p1_ips[0])]},
|
||||
{tunnel_ip: [constants.FLOODING_ENTRY,
|
||||
l2pop_rpc.PortInfo(
|
||||
p1['mac_address'],
|
||||
p1_ips[0])]},
|
||||
'network_type': 'vxlan',
|
||||
'segment_id': 1}}
|
||||
|
||||
|
|
|
@ -496,6 +496,16 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
|
|||
plugin.update_port(ctx, port['port']['id'], port)
|
||||
self.assertTrue(sg_member_update.called)
|
||||
|
||||
def test_update_port_host_id_changed(self):
|
||||
ctx = context.get_admin_context()
|
||||
plugin = manager.NeutronManager.get_plugin()
|
||||
host_id = {portbindings.HOST_ID: 'host1'}
|
||||
with self.port(**host_id) as port:
|
||||
plugin.update_port_status(ctx, port['port']['id'], 'UP')
|
||||
port['port']['binding:host_id'] = 'host2'
|
||||
result = plugin.update_port(ctx, port['port']['id'], port)
|
||||
self.assertEqual(constants.PORT_STATUS_DOWN, result['status'])
|
||||
|
||||
def test_update_port_status_with_network(self):
|
||||
ctx = context.get_admin_context()
|
||||
plugin = manager.NeutronManager.get_plugin()
|
||||
|
@ -1867,7 +1877,9 @@ class TestMl2PluginCreateUpdateDeletePort(base.BaseTestCase):
|
|||
def test_create_port_rpc_outside_transaction(self):
|
||||
with mock.patch.object(ml2_plugin.Ml2Plugin, '__init__') as init,\
|
||||
mock.patch.object(base_plugin.NeutronDbPluginV2,
|
||||
'create_port') as db_create_port:
|
||||
'create_port') as db_create_port, \
|
||||
mock.patch.object(base_plugin.NeutronDbPluginV2,
|
||||
'update_port'):
|
||||
init.return_value = None
|
||||
|
||||
new_port = mock.MagicMock()
|
||||
|
|
Loading…
Reference in New Issue