Fix update of trunk subports during live migration

During live migration, trunk subports were updated based only
on the "host_id" field. But when the RPC message to update subports
is sent, the trunk's port is still bound to the old host and has the
"migrating_to" field in binding:profile set to the new host.
Because of that, binding:host_id for the subport's port wasn't updated
properly and the port was set to DOWN on the new host.
That could even cause connectivity break if L2population is used in the
cloud.

This patch fixes that by updating the subport's binding:host_id field based
on the "migrating_to" field if it is available and not empty.

Closes-Bug: #1914747
Change-Id: I98e55242d381ada642ca0729e9aefdea7628c945
(cherry picked from commit e05101c8ac)
This commit is contained in:
Slawek Kaplonski 2021-02-05 13:32:23 +01:00
parent 40feff0e46
commit 366fae2d53
2 changed files with 43 additions and 0 deletions

View File

@ -128,6 +128,12 @@ class TrunkSkeleton(object):
trunk_port_id = trunk.port_id trunk_port_id = trunk.port_id
trunk_port = self.core_plugin.get_port(context, trunk_port_id) trunk_port = self.core_plugin.get_port(context, trunk_port_id)
trunk_host = trunk_port.get(portbindings.HOST_ID) trunk_host = trunk_port.get(portbindings.HOST_ID)
migrating_to_host = trunk_port.get(
portbindings.PROFILE, {}).get('migrating_to')
if migrating_to_host and trunk_host != migrating_to_host:
# Trunk is migrating now, so lets update host of the subports
# to the new host already
trunk_host = migrating_to_host
# NOTE(status_police) Set the trunk in BUILD state before # NOTE(status_police) Set the trunk in BUILD state before
# processing subport bindings. The trunk will stay in BUILD # processing subport bindings. The trunk will stay in BUILD

View File

@ -103,6 +103,43 @@ class TrunkSkeletonTest(test_plugin.Ml2PluginV2TestCase):
for port in updated_subports[trunk['id']]: for port in updated_subports[trunk['id']]:
self.assertEqual('trunk_host_id', port[portbindings.HOST_ID]) self.assertEqual('trunk_host_id', port[portbindings.HOST_ID])
def test_update_subport_bindings_during_migration(self):
with self.port() as _parent_port:
parent_port = _parent_port
trunk = self._create_test_trunk(parent_port)
subports = []
for vid in range(0, 3):
with self.port() as new_port:
obj = trunk_obj.SubPort(
context=self.context,
trunk_id=trunk['id'],
port_id=new_port['port']['id'],
segmentation_type='vlan',
segmentation_id=vid)
subports.append(obj)
expected_calls = [
mock.call(
mock.ANY, subport['port_id'],
{'port': {portbindings.HOST_ID: 'new_trunk_host_id',
'device_owner': constants.TRUNK_SUBPORT_OWNER}})
for subport in subports]
test_obj = server.TrunkSkeleton()
test_obj._trunk_plugin = self.trunk_plugin
test_obj._core_plugin = self.core_plugin
port_data = {
portbindings.HOST_ID: 'trunk_host_id',
portbindings.PROFILE: {'migrating_to': 'new_trunk_host_id'}}
with mock.patch.object(
self.core_plugin, "get_port",
return_value=port_data), \
mock.patch.object(
test_obj, "_safe_update_trunk"):
test_obj.update_subport_bindings(self.context, subports=subports)
for expected_call in expected_calls:
self.assertIn(expected_call, self.mock_update_port.mock_calls)
def test__handle_port_binding_binding_error(self): def test__handle_port_binding_binding_error(self):
with self.port() as _trunk_port: with self.port() as _trunk_port:
trunk = self._create_test_trunk(_trunk_port) trunk = self._create_test_trunk(_trunk_port)