Update allocation in binding profile during migration
If the server has a port with a resource allocation and the server is migrated, then when the port is bound to the destination host, the allocation key in the binding:profile needs to be updated to point to the resource provider that provides resources for this port on the destination host. This patch extends the migrate_instance_finish() network API method to pass the updated resource providers of the ports during migration. Change-Id: I220fa02ee916728e241503084b14984bab4b0c3b blueprint: support-move-ops-with-qos-ports
This commit is contained in:
parent
f7f5e1846c
commit
a2984b647a
@ -4203,8 +4203,8 @@ class ComputeManager(manager.Manager):
|
|||||||
self.compute_rpcapi.finish_revert_resize(context, instance,
|
self.compute_rpcapi.finish_revert_resize(context, instance,
|
||||||
migration, migration.source_compute, request_spec)
|
migration, migration.source_compute, request_spec)
|
||||||
|
|
||||||
def _finish_revert_resize_network_migrate_finish(self, context, instance,
|
def _finish_revert_resize_network_migrate_finish(
|
||||||
migration):
|
self, context, instance, migration, provider_mappings):
|
||||||
"""Causes port binding to be updated. In some Neutron or port
|
"""Causes port binding to be updated. In some Neutron or port
|
||||||
configurations - see NetworkModel.get_bind_time_events() - we
|
configurations - see NetworkModel.get_bind_time_events() - we
|
||||||
expect the vif-plugged event from Neutron immediately and wait for it.
|
expect the vif-plugged event from Neutron immediately and wait for it.
|
||||||
@ -4214,6 +4214,8 @@ class ComputeManager(manager.Manager):
|
|||||||
:param context: The request context.
|
:param context: The request context.
|
||||||
:param instance: The instance undergoing the revert resize.
|
:param instance: The instance undergoing the revert resize.
|
||||||
:param migration: The Migration object of the resize being reverted.
|
:param migration: The Migration object of the resize being reverted.
|
||||||
|
:param provider_mappings: a dict of list of resource provider uuids
|
||||||
|
keyed by port uuid
|
||||||
:raises: eventlet.timeout.Timeout or
|
:raises: eventlet.timeout.Timeout or
|
||||||
exception.VirtualInterfacePlugException.
|
exception.VirtualInterfacePlugException.
|
||||||
"""
|
"""
|
||||||
@ -4238,9 +4240,8 @@ class ComputeManager(manager.Manager):
|
|||||||
# the migration.dest_compute to source host at here.
|
# the migration.dest_compute to source host at here.
|
||||||
with utils.temporary_mutation(
|
with utils.temporary_mutation(
|
||||||
migration, dest_compute=migration.source_compute):
|
migration, dest_compute=migration.source_compute):
|
||||||
self.network_api.migrate_instance_finish(context,
|
self.network_api.migrate_instance_finish(
|
||||||
instance,
|
context, instance, migration, provider_mappings)
|
||||||
migration)
|
|
||||||
except eventlet.timeout.Timeout:
|
except eventlet.timeout.Timeout:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error('Timeout waiting for Neutron events: %s', events,
|
LOG.error('Timeout waiting for Neutron events: %s', events,
|
||||||
@ -4292,10 +4293,12 @@ class ComputeManager(manager.Manager):
|
|||||||
'migration_uuid': migration.uuid})
|
'migration_uuid': migration.uuid})
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
provider_mappings = self._get_request_group_mapping(request_spec)
|
||||||
|
|
||||||
self.network_api.setup_networks_on_host(context, instance,
|
self.network_api.setup_networks_on_host(context, instance,
|
||||||
migration.source_compute)
|
migration.source_compute)
|
||||||
self._finish_revert_resize_network_migrate_finish(
|
self._finish_revert_resize_network_migrate_finish(
|
||||||
context, instance, migration)
|
context, instance, migration, provider_mappings)
|
||||||
network_info = self.network_api.get_instance_nw_info(context,
|
network_info = self.network_api.get_instance_nw_info(context,
|
||||||
instance)
|
instance)
|
||||||
|
|
||||||
@ -4746,7 +4749,7 @@ class ComputeManager(manager.Manager):
|
|||||||
context, bdm.attachment_id)
|
context, bdm.attachment_id)
|
||||||
|
|
||||||
def _finish_resize(self, context, instance, migration, disk_info,
|
def _finish_resize(self, context, instance, migration, disk_info,
|
||||||
image_meta, bdms):
|
image_meta, bdms, request_spec):
|
||||||
resize_instance = False # indicates disks have been resized
|
resize_instance = False # indicates disks have been resized
|
||||||
old_instance_type_id = migration['old_instance_type_id']
|
old_instance_type_id = migration['old_instance_type_id']
|
||||||
new_instance_type_id = migration['new_instance_type_id']
|
new_instance_type_id = migration['new_instance_type_id']
|
||||||
@ -4772,11 +4775,12 @@ class ComputeManager(manager.Manager):
|
|||||||
# NOTE(tr3buchet): setup networks on destination host
|
# NOTE(tr3buchet): setup networks on destination host
|
||||||
self.network_api.setup_networks_on_host(context, instance,
|
self.network_api.setup_networks_on_host(context, instance,
|
||||||
migration.dest_compute)
|
migration.dest_compute)
|
||||||
|
provider_mappings = self._get_request_group_mapping(request_spec)
|
||||||
|
|
||||||
# For neutron, migrate_instance_finish updates port bindings for this
|
# For neutron, migrate_instance_finish updates port bindings for this
|
||||||
# host including any PCI devices claimed for SR-IOV ports.
|
# host including any PCI devices claimed for SR-IOV ports.
|
||||||
self.network_api.migrate_instance_finish(context,
|
self.network_api.migrate_instance_finish(
|
||||||
instance,
|
context, instance, migration, provider_mappings)
|
||||||
migration)
|
|
||||||
|
|
||||||
network_info = self.network_api.get_instance_nw_info(context, instance)
|
network_info = self.network_api.get_instance_nw_info(context, instance)
|
||||||
|
|
||||||
@ -4850,7 +4854,7 @@ class ComputeManager(manager.Manager):
|
|||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
self._finish_resize_helper(context, disk_info, image, instance,
|
self._finish_resize_helper(context, disk_info, image, instance,
|
||||||
migration)
|
migration, request_spec)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
# At this point, resize_instance (which runs on the source) has
|
# At this point, resize_instance (which runs on the source) has
|
||||||
@ -4871,7 +4875,7 @@ class ComputeManager(manager.Manager):
|
|||||||
context, instance, migration)
|
context, instance, migration)
|
||||||
|
|
||||||
def _finish_resize_helper(self, context, disk_info, image, instance,
|
def _finish_resize_helper(self, context, disk_info, image, instance,
|
||||||
migration):
|
migration, request_spec):
|
||||||
"""Completes the migration process.
|
"""Completes the migration process.
|
||||||
|
|
||||||
The caller must revert the instance's allocations if the migration
|
The caller must revert the instance's allocations if the migration
|
||||||
@ -4883,7 +4887,8 @@ class ComputeManager(manager.Manager):
|
|||||||
with self._error_out_instance_on_exception(context, instance):
|
with self._error_out_instance_on_exception(context, instance):
|
||||||
image_meta = objects.ImageMeta.from_dict(image)
|
image_meta = objects.ImageMeta.from_dict(image)
|
||||||
network_info = self._finish_resize(context, instance, migration,
|
network_info = self._finish_resize(context, instance, migration,
|
||||||
disk_info, image_meta, bdms)
|
disk_info, image_meta, bdms,
|
||||||
|
request_spec)
|
||||||
|
|
||||||
# TODO(melwitt): We should clean up instance console tokens here. The
|
# TODO(melwitt): We should clean up instance console tokens here. The
|
||||||
# instance is on a new host and will need to establish a new console
|
# instance is on a new host and will need to establish a new console
|
||||||
@ -7242,9 +7247,9 @@ class ComputeManager(manager.Manager):
|
|||||||
migration = {'source_compute': instance.host,
|
migration = {'source_compute': instance.host,
|
||||||
'dest_compute': self.host,
|
'dest_compute': self.host,
|
||||||
'migration_type': 'live-migration'}
|
'migration_type': 'live-migration'}
|
||||||
self.network_api.migrate_instance_finish(context,
|
# TODO(gibi): calculate and pass resource_provider_mapping
|
||||||
instance,
|
self.network_api.migrate_instance_finish(
|
||||||
migration)
|
context, instance, migration, provider_mappings=None)
|
||||||
|
|
||||||
network_info = self.network_api.get_instance_nw_info(context, instance)
|
network_info = self.network_api.get_instance_nw_info(context, instance)
|
||||||
self._notify_about_instance_usage(
|
self._notify_about_instance_usage(
|
||||||
|
@ -502,7 +502,8 @@ class API(base_api.NetworkAPI):
|
|||||||
|
|
||||||
self.network_rpcapi.migrate_instance_start(context, **args)
|
self.network_rpcapi.migrate_instance_start(context, **args)
|
||||||
|
|
||||||
def migrate_instance_finish(self, context, instance, migration):
|
def migrate_instance_finish(
|
||||||
|
self, context, instance, migration, provider_mappings):
|
||||||
"""Finish migrating the network of an instance."""
|
"""Finish migrating the network of an instance."""
|
||||||
flavor = instance.get_flavor()
|
flavor = instance.get_flavor()
|
||||||
args = dict(
|
args = dict(
|
||||||
@ -524,9 +525,9 @@ class API(base_api.NetworkAPI):
|
|||||||
def setup_instance_network_on_host(self, context, instance, host,
|
def setup_instance_network_on_host(self, context, instance, host,
|
||||||
migration=None):
|
migration=None):
|
||||||
"""Setup network for specified instance on host."""
|
"""Setup network for specified instance on host."""
|
||||||
self.migrate_instance_finish(context, instance,
|
self.migrate_instance_finish(
|
||||||
{'source_compute': None,
|
context, instance, {'source_compute': None, 'dest_compute': host},
|
||||||
'dest_compute': host})
|
None)
|
||||||
|
|
||||||
def cleanup_instance_network_on_host(self, context, instance, host):
|
def cleanup_instance_network_on_host(self, context, instance, host):
|
||||||
"""Cleanup network for specified instance on host."""
|
"""Cleanup network for specified instance on host."""
|
||||||
|
@ -343,8 +343,16 @@ class NetworkAPI(base.Base):
|
|||||||
"""Start to migrate the network of an instance."""
|
"""Start to migrate the network of an instance."""
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
def migrate_instance_finish(self, context, instance, migration):
|
def migrate_instance_finish(
|
||||||
"""Finish migrating the network of an instance."""
|
self, context, instance, migration, provider_mappings):
|
||||||
|
"""Finish migrating the network of an instance.
|
||||||
|
|
||||||
|
:param context: The request context.
|
||||||
|
:param instance: nova.objects.instance.Instance object.
|
||||||
|
:param migration: nova.objects.migration.Migration object.
|
||||||
|
:param provider_mappings: a dict of list of resource provider uuids
|
||||||
|
keyed by port uuid
|
||||||
|
"""
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
def setup_instance_network_on_host(self, context, instance, host,
|
def setup_instance_network_on_host(self, context, instance, host,
|
||||||
|
@ -2749,11 +2749,12 @@ class API(base_api.NetworkAPI):
|
|||||||
'Error: %s', vif['id'], dest_host, resp.status_code,
|
'Error: %s', vif['id'], dest_host, resp.status_code,
|
||||||
resp.text)
|
resp.text)
|
||||||
|
|
||||||
def migrate_instance_finish(self, context, instance, migration):
|
def migrate_instance_finish(
|
||||||
|
self, context, instance, migration, provider_mappings):
|
||||||
"""Finish migrating the network of an instance."""
|
"""Finish migrating the network of an instance."""
|
||||||
self._update_port_binding_for_instance(context, instance,
|
self._update_port_binding_for_instance(
|
||||||
migration['dest_compute'],
|
context, instance, migration['dest_compute'], migration=migration,
|
||||||
migration=migration)
|
provider_mappings=provider_mappings)
|
||||||
|
|
||||||
def add_network_to_project(self, context, project_id, network_uuid=None):
|
def add_network_to_project(self, context, project_id, network_uuid=None):
|
||||||
"""Force add a network to the project."""
|
"""Force add a network to the project."""
|
||||||
@ -3224,8 +3225,10 @@ class API(base_api.NetworkAPI):
|
|||||||
migration.get('status') == 'reverted')
|
migration.get('status') == 'reverted')
|
||||||
return instance.migration_context.get_pci_mapping_for_migration(revert)
|
return instance.migration_context.get_pci_mapping_for_migration(revert)
|
||||||
|
|
||||||
def _update_port_binding_for_instance(self, context, instance, host,
|
def _update_port_binding_for_instance(
|
||||||
migration=None):
|
self, context, instance, host, migration=None,
|
||||||
|
provider_mappings=None):
|
||||||
|
|
||||||
neutron = get_client(context, admin=True)
|
neutron = get_client(context, admin=True)
|
||||||
search_opts = {'device_id': instance.uuid,
|
search_opts = {'device_id': instance.uuid,
|
||||||
'tenant_id': instance.project_id}
|
'tenant_id': instance.project_id}
|
||||||
@ -3244,9 +3247,6 @@ class API(base_api.NetworkAPI):
|
|||||||
vif_type = p.get('binding:vif_type')
|
vif_type = p.get('binding:vif_type')
|
||||||
if (p.get(constants.BINDING_HOST_ID) != host or
|
if (p.get(constants.BINDING_HOST_ID) != host or
|
||||||
vif_type in FAILED_VIF_TYPES):
|
vif_type in FAILED_VIF_TYPES):
|
||||||
# TODO(gibi): To support ports with resource request during
|
|
||||||
# server move operations we need to take care of 'allocation'
|
|
||||||
# key in the binding profile per binding.
|
|
||||||
|
|
||||||
updates[constants.BINDING_HOST_ID] = host
|
updates[constants.BINDING_HOST_ID] = host
|
||||||
# If the host changed, the AZ could have also changed so we
|
# If the host changed, the AZ could have also changed so we
|
||||||
@ -3286,6 +3286,28 @@ class API(base_api.NetworkAPI):
|
|||||||
reason=_("Unable to correlate PCI slot %s") %
|
reason=_("Unable to correlate PCI slot %s") %
|
||||||
pci_slot)
|
pci_slot)
|
||||||
|
|
||||||
|
if p.get('resource_request'):
|
||||||
|
if not provider_mappings:
|
||||||
|
# NOTE(gibi): This should not happen as the API level
|
||||||
|
# minimum compute service version check ensures that the
|
||||||
|
# compute services already send the RequestSpec during
|
||||||
|
# the move operations between the source and the
|
||||||
|
# destination and the dest compute calculates the
|
||||||
|
# mapping based on that.
|
||||||
|
raise exception.PortUpdateFailed(
|
||||||
|
port_id=p['id'],
|
||||||
|
reason=_("Provider mappings wasn't provided for the "
|
||||||
|
"port with resource request"))
|
||||||
|
|
||||||
|
# NOTE(gibi): In the resource provider mapping there can be
|
||||||
|
# more than one RP fulfilling a request group. But resource
|
||||||
|
# requests of a Neutron port is always mapped to a
|
||||||
|
# numbered request group that is always fulfilled by one
|
||||||
|
# resource provider. So we only pass that single RP UUID here.
|
||||||
|
binding_profile[constants.ALLOCATION] = \
|
||||||
|
provider_mappings[p['id']][0]
|
||||||
|
updates[constants.BINDING_PROFILE] = binding_profile
|
||||||
|
|
||||||
port_updates.append((p['id'], updates))
|
port_updates.append((p['id'], updates))
|
||||||
|
|
||||||
# Avoid rolling back updates if we catch an error above.
|
# Avoid rolling back updates if we catch an error above.
|
||||||
|
@ -4866,7 +4866,7 @@ class ComputeTestCase(BaseTestCase,
|
|||||||
mock_setup.assert_called_once_with(self.context, instance,
|
mock_setup.assert_called_once_with(self.context, instance,
|
||||||
'fake-mini')
|
'fake-mini')
|
||||||
mock_net_mig.assert_called_once_with(self.context,
|
mock_net_mig.assert_called_once_with(self.context,
|
||||||
test.MatchType(objects.Instance), migration)
|
test.MatchType(objects.Instance), migration, None)
|
||||||
mock_get_nw.assert_called_once_with(self.context, instance)
|
mock_get_nw.assert_called_once_with(self.context, instance)
|
||||||
mock_notify.assert_has_calls([
|
mock_notify.assert_has_calls([
|
||||||
mock.call(self.context, instance, 'finish_resize.start',
|
mock.call(self.context, instance, 'finish_resize.start',
|
||||||
|
@ -5250,7 +5250,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
|
|||||||
source_compute='fake-source',
|
source_compute='fake-source',
|
||||||
dest_compute='fake-dest')
|
dest_compute='fake-dest')
|
||||||
|
|
||||||
def fake_migrate_instance_finish(context, instance, migration):
|
def fake_migrate_instance_finish(
|
||||||
|
context, instance, migration, mapping):
|
||||||
# NOTE(artom) This looks weird, but it's checking that the
|
# NOTE(artom) This looks weird, but it's checking that the
|
||||||
# temporaty_mutation() context manager did its job.
|
# temporaty_mutation() context manager did its job.
|
||||||
self.assertEqual(migration.dest_compute, migration.source_compute)
|
self.assertEqual(migration.dest_compute, migration.source_compute)
|
||||||
@ -5263,12 +5264,12 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
|
|||||||
side_effect=fake_migrate_instance_finish)
|
side_effect=fake_migrate_instance_finish)
|
||||||
) as (mock_wait, mock_migrate_instance_finish):
|
) as (mock_wait, mock_migrate_instance_finish):
|
||||||
self.compute._finish_revert_resize_network_migrate_finish(
|
self.compute._finish_revert_resize_network_migrate_finish(
|
||||||
self.context, instance, migration)
|
self.context, instance, migration, mock.sentinel.mapping)
|
||||||
mock_wait.assert_called_once_with(
|
mock_wait.assert_called_once_with(
|
||||||
instance, events, deadline=CONF.vif_plugging_timeout,
|
instance, events, deadline=CONF.vif_plugging_timeout,
|
||||||
error_callback=self.compute._neutron_failed_migration_callback)
|
error_callback=self.compute._neutron_failed_migration_callback)
|
||||||
mock_migrate_instance_finish.assert_called_once_with(
|
mock_migrate_instance_finish.assert_called_once_with(
|
||||||
self.context, instance, migration)
|
self.context, instance, migration, mock.sentinel.mapping)
|
||||||
|
|
||||||
def test_finish_revert_resize_network_migrate_finish_wait(self):
|
def test_finish_revert_resize_network_migrate_finish_wait(self):
|
||||||
"""Test that we wait for bind-time events if we have a hybrid-plugged
|
"""Test that we wait for bind-time events if we have a hybrid-plugged
|
||||||
@ -7397,7 +7398,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
|
|||||||
def test_finish_revert_resize_network_calls_order(self):
|
def test_finish_revert_resize_network_calls_order(self):
|
||||||
self.nw_info = None
|
self.nw_info = None
|
||||||
|
|
||||||
def _migrate_instance_finish(context, instance, migration):
|
def _migrate_instance_finish(
|
||||||
|
context, instance, migration, provider_mappings):
|
||||||
# The migration.dest_compute is temporarily set to source_compute.
|
# The migration.dest_compute is temporarily set to source_compute.
|
||||||
self.assertEqual(migration.source_compute, migration.dest_compute)
|
self.assertEqual(migration.source_compute, migration.dest_compute)
|
||||||
self.nw_info = 'nw_info'
|
self.nw_info = 'nw_info'
|
||||||
@ -8446,7 +8448,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
|
|||||||
self.context, self.instance,
|
self.context, self.instance,
|
||||||
{'source_compute': cn_old,
|
{'source_compute': cn_old,
|
||||||
'dest_compute': self.compute.host,
|
'dest_compute': self.compute.host,
|
||||||
'migration_type': 'live-migration'})
|
'migration_type': 'live-migration'},
|
||||||
|
provider_mappings=None)
|
||||||
_get_instance_block_device_info.assert_called_once_with(
|
_get_instance_block_device_info.assert_called_once_with(
|
||||||
self.context, self.instance
|
self.context, self.instance
|
||||||
)
|
)
|
||||||
|
@ -279,14 +279,14 @@ class ApiTestCase(test.TestCase):
|
|||||||
arg1, arg2, expected = self._stub_migrate_instance_calls(
|
arg1, arg2, expected = self._stub_migrate_instance_calls(
|
||||||
'migrate_instance_finish', True, info)
|
'migrate_instance_finish', True, info)
|
||||||
expected['host'] = 'fake_compute_dest'
|
expected['host'] = 'fake_compute_dest'
|
||||||
self.network_api.migrate_instance_finish(self.context, arg1, arg2)
|
self.network_api.migrate_instance_finish(self.context, arg1, arg2, {})
|
||||||
self.assertEqual(info['kwargs'], expected)
|
self.assertEqual(info['kwargs'], expected)
|
||||||
|
|
||||||
def test_migrate_instance_finish_without_multihost(self):
|
def test_migrate_instance_finish_without_multihost(self):
|
||||||
info = {'kwargs': {}}
|
info = {'kwargs': {}}
|
||||||
arg1, arg2, expected = self._stub_migrate_instance_calls(
|
arg1, arg2, expected = self._stub_migrate_instance_calls(
|
||||||
'migrate_instance_finish', False, info)
|
'migrate_instance_finish', False, info)
|
||||||
self.network_api.migrate_instance_finish(self.context, arg1, arg2)
|
self.network_api.migrate_instance_finish(self.context, arg1, arg2, {})
|
||||||
self.assertEqual(info['kwargs'], expected)
|
self.assertEqual(info['kwargs'], expected)
|
||||||
|
|
||||||
def test_is_multi_host_instance_has_no_fixed_ip(self):
|
def test_is_multi_host_instance_has_no_fixed_ip(self):
|
||||||
@ -516,7 +516,8 @@ class ApiTestCase(test.TestCase):
|
|||||||
self.context, instance, 'fake_compute_source')
|
self.context, instance, 'fake_compute_source')
|
||||||
fake_migrate_finish.assert_called_once_with(
|
fake_migrate_finish.assert_called_once_with(
|
||||||
self.context, instance,
|
self.context, instance,
|
||||||
{'source_compute': None, 'dest_compute': 'fake_compute_source'})
|
{'source_compute': None, 'dest_compute': 'fake_compute_source'},
|
||||||
|
None)
|
||||||
|
|
||||||
@mock.patch('oslo_concurrency.lockutils.lock')
|
@mock.patch('oslo_concurrency.lockutils.lock')
|
||||||
@mock.patch.object(api.API, '_get_instance_nw_info')
|
@mock.patch.object(api.API, '_get_instance_nw_info')
|
||||||
|
@ -4531,6 +4531,57 @@ class TestNeutronv2WithMock(TestNeutronv2Base):
|
|||||||
constants.BINDING_HOST_ID],
|
constants.BINDING_HOST_ID],
|
||||||
'new-host')
|
'new-host')
|
||||||
|
|
||||||
|
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
|
||||||
|
def test_update_port_bindings_for_instance_with_resource_req(
|
||||||
|
self, get_client_mock):
|
||||||
|
|
||||||
|
instance = fake_instance.fake_instance_obj(self.context)
|
||||||
|
fake_ports = {'ports': [
|
||||||
|
{'id': 'fake-port-1',
|
||||||
|
'binding:vnic_type': 'normal',
|
||||||
|
constants.BINDING_HOST_ID: 'old-host',
|
||||||
|
constants.BINDING_PROFILE:
|
||||||
|
{'allocation': uuids.source_compute_rp},
|
||||||
|
'resource_request': mock.sentinel.resource_request}]}
|
||||||
|
migration = {'status': 'confirmed',
|
||||||
|
'migration_type': "migration"}
|
||||||
|
list_ports_mock = mock.Mock(return_value=fake_ports)
|
||||||
|
get_client_mock.return_value.list_ports = list_ports_mock
|
||||||
|
|
||||||
|
self.api._update_port_binding_for_instance(
|
||||||
|
self.context, instance, 'new-host', migration,
|
||||||
|
{'fake-port-1': [uuids.dest_compute_rp]})
|
||||||
|
get_client_mock.return_value.update_port.assert_called_once_with(
|
||||||
|
'fake-port-1',
|
||||||
|
{'port': {'device_owner': 'compute:None',
|
||||||
|
'binding:profile': {'allocation': uuids.dest_compute_rp},
|
||||||
|
'binding:host_id': 'new-host'}})
|
||||||
|
|
||||||
|
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
|
||||||
|
def test_update_port_bindings_for_instance_with_resource_req_no_mapping(
|
||||||
|
self, get_client_mock):
|
||||||
|
|
||||||
|
instance = fake_instance.fake_instance_obj(self.context)
|
||||||
|
fake_ports = {'ports': [
|
||||||
|
{'id': 'fake-port-1',
|
||||||
|
'binding:vnic_type': 'normal',
|
||||||
|
constants.BINDING_HOST_ID: 'old-host',
|
||||||
|
constants.BINDING_PROFILE:
|
||||||
|
{'allocation': uuids.source_compute_rp},
|
||||||
|
'resource_request': mock.sentinel.resource_request}]}
|
||||||
|
migration = {'status': 'confirmed',
|
||||||
|
'migration_type': "migration"}
|
||||||
|
list_ports_mock = mock.Mock(return_value=fake_ports)
|
||||||
|
get_client_mock.return_value.list_ports = list_ports_mock
|
||||||
|
|
||||||
|
ex = self.assertRaises(
|
||||||
|
exception.PortUpdateFailed,
|
||||||
|
self.api._update_port_binding_for_instance, self.context,
|
||||||
|
instance, 'new-host', migration, provider_mappings=None)
|
||||||
|
self.assertIn(
|
||||||
|
"Provider mappings wasn't provided for the port with resource "
|
||||||
|
"request", six.text_type(ex))
|
||||||
|
|
||||||
def test_get_pci_mapping_for_migration(self):
|
def test_get_pci_mapping_for_migration(self):
|
||||||
instance = fake_instance.fake_instance_obj(self.context)
|
instance = fake_instance.fake_instance_obj(self.context)
|
||||||
instance.migration_context = objects.MigrationContext()
|
instance.migration_context = objects.MigrationContext()
|
||||||
@ -6211,7 +6262,8 @@ class TestNeutronv2Portbinding(TestNeutronv2Base):
|
|||||||
'migrate_instance_finish',
|
'migrate_instance_finish',
|
||||||
self.context,
|
self.context,
|
||||||
instance,
|
instance,
|
||||||
migration)
|
migration,
|
||||||
|
{})
|
||||||
|
|
||||||
def test_migrate_instance_finish_binding_true_exception(self):
|
def test_migrate_instance_finish_binding_true_exception(self):
|
||||||
migration = {'source_compute': self.instance.get('host'),
|
migration = {'source_compute': self.instance.get('host'),
|
||||||
@ -6221,7 +6273,8 @@ class TestNeutronv2Portbinding(TestNeutronv2Base):
|
|||||||
'migrate_instance_finish',
|
'migrate_instance_finish',
|
||||||
self.context,
|
self.context,
|
||||||
instance,
|
instance,
|
||||||
migration)
|
migration,
|
||||||
|
{})
|
||||||
|
|
||||||
def test_setup_instance_network_on_host_true(self):
|
def test_setup_instance_network_on_host_true(self):
|
||||||
instance = self._fake_instance_object(self.instance)
|
instance = self._fake_instance_object(self.instance)
|
||||||
|
Loading…
Reference in New Issue
Block a user