Support reverting migration / resize with bandwidth
When a migration is reverted, the source host allocation held by the migration is swapped back to the instance. If the instance is bandwidth aware, the allocation key of the port's binding:profile also needs to be updated to point back to the resource providers of the source host's provider tree. To be able to do that, we have to re-calculate the request group - resource provider mapping for the source host, based on the resource requests in the neutron ports of the instance and the resource allocation of the instance on the source host. Alternatively, we could store such a mapping in the MigrationContext during the move operation.

blueprint: support-move-ops-with-qos-ports
Change-Id: Ib50b6b02208f5bd2972de8a6f8f685c19745514c
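The core of the change is recomputing the request group - resource provider mapping from data that already exists after the revert: the ports' resource requests and the instance's allocation on the source host. The sketch below illustrates that matching logic with plain dicts and sets; the function name and data shapes are simplified stand-ins for nova's RequestGroup, RequestSpec and placement structures, not the real API.

```python
# A minimal sketch of the revert-side recalculation, assuming simplified
# stand-ins for nova's objects. Each port-derived request group is matched
# to the resource provider on the source host whose allocation covers the
# group's resources and whose traits satisfy the group's required traits.

def map_groups_to_providers(request_groups, allocation, provider_traits):
    """Return {group index: rp_uuid} for the given source-host allocation.

    :param request_groups: list of {'resources': {...},
        'required_traits': set()} dicts (hypothetical stand-in)
    :param allocation: {rp_uuid: {'resources': {...}}}, i.e. the
        PUT /allocations body format
    :param provider_traits: {rp_uuid: set of trait names}
    """
    mapping = {}
    for index, group in enumerate(request_groups):
        for rp_uuid, alloc in allocation.items():
            resources_ok = all(
                alloc['resources'].get(rc, 0) >= amount
                for rc, amount in group['resources'].items())
            traits_ok = group['required_traits'] <= provider_traits[rp_uuid]
            if resources_ok and traits_ok:
                mapping[index] = rp_uuid
                break
    return mapping


# One bandwidth-requesting port mapped back to the source host's network
# agent RP after revert (names and values are made up for illustration):
allocation = {
    'source-agent-rp': {'resources': {'NET_BW_EGR_KILOBIT_PER_SEC': 1000}},
}
provider_traits = {'source-agent-rp': {'CUSTOM_PHYSNET1',
                                       'CUSTOM_VNIC_TYPE_NORMAL'}}
groups = [{'resources': {'NET_BW_EGR_KILOBIT_PER_SEC': 1000},
           'required_traits': {'CUSTOM_PHYSNET1', 'CUSTOM_VNIC_TYPE_NORMAL'}}]
print(map_groups_to_providers(groups, allocation, provider_traits))
# -> {0: 'source-agent-rp'}
```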
parent 5fa49cd0b8
commit 5114b61d52
@@ -3459,6 +3459,17 @@ class API(base.Base):
         reqspec.flavor = instance.old_flavor
         reqspec.save()
 
+        # TODO(gibi): do not directly overwrite the
+        # RequestSpec.requested_resources as others like cyborg might have
+        # added things there already
+        # NOTE(gibi): We need to collect the requested resource again as it
+        # is intentionally not persisted in nova. Note that this needs to be
+        # done here as the nova REST API code directly calls revert on the
+        # compute_api, skipping the conductor.
+        port_res_req = self.network_api.get_requested_resource_for_instance(
+            context, instance.uuid)
+        reqspec.requested_resources = port_res_req
+
         instance.task_state = task_states.RESIZE_REVERTING
         instance.save(expected_task_state=[None])
 
@@ -93,6 +93,7 @@ from nova import rpc
 from nova import safe_utils
 from nova.scheduler.client import query
 from nova.scheduler.client import report
+from nova.scheduler import utils as scheduler_utils
 from nova import utils
 from nova.virt import block_device as driver_block_device
 from nova.virt import configdrive
@@ -4293,7 +4294,27 @@ class ComputeManager(manager.Manager):
                           'migration_uuid': migration.uuid})
                 raise
 
-        provider_mappings = self._get_request_group_mapping(request_spec)
+        if request_spec:
+            # TODO(gibi): the _revert_allocation() call above already
+            # fetched the original allocation of the instance so we could
+            # avoid this second call to placement
+            # NOTE(gibi): We need to re-calculate the resource provider -
+            # port mapping as we have to have the neutron ports allocate
+            # from the source compute after revert.
+            allocs = self.reportclient.get_allocations_for_consumer(
+                context, instance.uuid)
+            scheduler_utils.fill_provider_mapping_based_on_allocation(
+                context, self.reportclient, request_spec, allocs)
+            provider_mappings = self._get_request_group_mapping(
+                request_spec)
+        else:
+            # NOTE(gibi): The compute RPC is pinned to be older than 5.2
+            # and therefore request_spec is not sent. We cannot calculate
+            # the provider mappings. If the instance has ports with
+            # resource requests then the port update will fail in
+            # _update_port_binding_for_instance() called via
+            # _finish_revert_resize_network_migrate_finish() below.
+            provider_mappings = None
 
         self.network_api.setup_networks_on_host(context, instance,
                                                 migration.source_compute)
@@ -1146,6 +1146,33 @@ def fill_provider_mapping(
     ar = jsonutils.loads(host_selection.allocation_request)
     allocs = ar['allocations']
 
+    fill_provider_mapping_based_on_allocation(
+        context, report_client, request_spec, allocs)
+
+
+def fill_provider_mapping_based_on_allocation(
+        context, report_client, request_spec, allocation):
+    """Fills out the request group - resource provider mapping in the
+    request spec based on the current allocation of the instance.
+
+    The fill_provider_mapping() variant is expected to be called in every
+    scenario when a Selection object is available from the scheduler.
+    However, in case of revert operations such a Selection does not exist.
+    In that case the mapping is calculated based on the allocation of the
+    source host the move operation is reverting to.
+
+    :param context: The security context
+    :param report_client: SchedulerReportClient instance to be used to
+        communicate with placement
+    :param request_spec: The RequestSpec object associated with the
+        operation
+    :param allocation: allocation dict of the instance, keyed by RP UUID.
+    """
+
+    # Exit early if this request spec does not require mappings.
+    if not request_spec.maps_requested_resources:
+        return
+
     # NOTE(gibi): Getting traits from placement for each instance in an
     # instance multi-create scenario is unnecessarily expensive. But
     # instance multi-create cannot be used with pre-created neutron ports
@@ -1158,9 +1185,9 @@ def fill_provider_mapping(
     provider_traits = {
         rp_uuid: report_client.get_provider_traits(
             context, rp_uuid).traits
-        for rp_uuid in allocs}
-    # NOTE(gibi): The allocs dict is in the format of the PUT /allocations
+        for rp_uuid in allocation}
+    # NOTE(gibi): The allocation dict is in the format of the PUT /allocations
     # and that format can change. The current format can be detected from
-    # host_selection.allocation_request_version
+    # the allocation_request_version key of the Selection object.
     request_spec.map_requested_resources_to_providers(
-        allocs, provider_traits)
+        allocation, provider_traits)
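For reference, the allocation argument consumed by the new helper uses the PUT /allocations body format mentioned in the NOTE above, the same shape the new unit tests construct. A minimal illustration (the provider UUID and values are made up):

```python
# Allocation of one instance, keyed by resource provider UUID, in the
# PUT /allocations body format (illustrative values only):
allocation = {
    '8e6e3b06-5f1e-4b1d-b1ce-7d2e77dd7c3f': {
        'resources': {
            'NET_BW_IGR_KILOBIT_PER_SEC': 1000,
            'NET_BW_EGR_KILOBIT_PER_SEC': 1000,
        },
    },
}
```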
@@ -6580,6 +6580,44 @@ class ServerMoveWithPortResourceRequestTest(
 
         self._delete_server_and_check_allocations(qos_port, server)
 
+    def test_migrate_revert_with_qos_port(self):
+        non_qos_port = self.neutron.port_1
+        qos_port = self.neutron.port_with_resource_request
+
+        server = self._create_server_with_ports(non_qos_port, qos_port)
+
+        # check that the server allocates from the current host properly
+        self._check_allocation(
+            server, self.compute1_rp_uuid, non_qos_port, qos_port)
+
+        self.api.post_server_action(server['id'], {'migrate': None})
+        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+
+        migration_uuid = self.get_migration_uuid_for_instance(server['id'])
+
+        # check that the server allocates from the new host properly
+        self._check_allocation(
+            server, self.compute2_rp_uuid, non_qos_port, qos_port,
+            migration_uuid, source_compute_rp_uuid=self.compute1_rp_uuid)
+
+        self.api.post_server_action(server['id'], {'revertResize': None})
+        self._wait_for_state_change(self.api, server, 'ACTIVE')
+
+        # check that the allocation is moved back to the source host
+        self._check_allocation(
+            server, self.compute1_rp_uuid, non_qos_port, qos_port)
+
+        # check that the target host allocation is cleaned up
+        self.assertRequestMatchesUsage(
+            {'VCPU': 0, 'MEMORY_MB': 0, 'DISK_GB': 0,
+             'NET_BW_IGR_KILOBIT_PER_SEC': 0,
+             'NET_BW_EGR_KILOBIT_PER_SEC': 0},
+            self.compute2_rp_uuid)
+        migration_allocations = self.placement_api.get(
+            '/allocations/%s' % migration_uuid).body['allocations']
+        self.assertEqual({}, migration_allocations)
+
+        self._delete_server_and_check_allocations(qos_port, server)
+
 
 class PortResourceRequestReSchedulingTest(
         PortResourceRequestBasedSchedulingTestBase):
@@ -1649,14 +1649,18 @@ class _ComputeAPIUnitTestMixIn(object):
     def test_confirm_resize_with_migration_ref(self):
         self._test_confirm_resize(mig_ref_passed=True)
 
+    @mock.patch('nova.network.neutronv2.api.API.'
+                'get_requested_resource_for_instance',
+                return_value=mock.sentinel.res_req)
     @mock.patch('nova.availability_zones.get_host_availability_zone',
                 return_value='nova')
     @mock.patch('nova.objects.Quotas.check_deltas')
     @mock.patch('nova.objects.Migration.get_by_instance_and_status')
     @mock.patch('nova.context.RequestContext.elevated')
     @mock.patch('nova.objects.RequestSpec.get_by_instance_uuid')
-    def _test_revert_resize(self, mock_get_reqspec, mock_elevated,
-                            mock_get_migration, mock_check, mock_get_host_az):
+    def _test_revert_resize(
+            self, mock_get_reqspec, mock_elevated, mock_get_migration,
+            mock_check, mock_get_host_az, mock_get_requested_resources):
         params = dict(vm_state=vm_states.RESIZED)
         fake_inst = self._create_instance_obj(params=params)
         fake_inst.old_flavor = fake_inst.flavor
@@ -1696,19 +1700,26 @@ class _ComputeAPIUnitTestMixIn(object):
             mock_revert_resize.assert_called_once_with(
                 self.context, fake_inst, fake_mig, 'compute-dest',
                 mock_get_reqspec.return_value)
+            mock_get_requested_resources.assert_called_once_with(
+                self.context, fake_inst.uuid)
+            self.assertEqual(
+                mock.sentinel.res_req,
+                mock_get_reqspec.return_value.requested_resources)
 
     def test_revert_resize(self):
         self._test_revert_resize()
 
+    @mock.patch('nova.network.neutronv2.api.API.'
+                'get_requested_resource_for_instance')
     @mock.patch('nova.availability_zones.get_host_availability_zone',
                 return_value='nova')
     @mock.patch('nova.objects.Quotas.check_deltas')
     @mock.patch('nova.objects.Migration.get_by_instance_and_status')
     @mock.patch('nova.context.RequestContext.elevated')
     @mock.patch('nova.objects.RequestSpec')
-    def test_revert_resize_concurrent_fail(self, mock_reqspec, mock_elevated,
-                                           mock_get_migration, mock_check,
-                                           mock_get_host_az):
+    def test_revert_resize_concurrent_fail(
+            self, mock_reqspec, mock_elevated, mock_get_migration, mock_check,
+            mock_get_host_az, mock_get_requested_resources):
         params = dict(vm_state=vm_states.RESIZED)
         fake_inst = self._create_instance_obj(params=params)
         fake_inst.old_flavor = fake_inst.flavor
@@ -7586,6 +7586,78 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
         do_revert_resize()
         do_finish_revert_resize()
 
+    @mock.patch.object(objects.Instance, 'drop_migration_context')
+    @mock.patch('nova.compute.manager.ComputeManager.'
+                '_finish_revert_resize_network_migrate_finish')
+    @mock.patch('nova.scheduler.utils.'
+                'fill_provider_mapping_based_on_allocation')
+    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+                'get_allocations_for_consumer')
+    @mock.patch('nova.compute.manager.ComputeManager._revert_allocation')
+    @mock.patch.object(objects.Instance, 'save')
+    @mock.patch('nova.compute.manager.ComputeManager.'
+                '_set_instance_info')
+    @mock.patch('nova.compute.manager.ComputeManager.'
+                '_notify_about_instance_usage')
+    @mock.patch.object(compute_utils, 'notify_about_instance_action')
+    @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+    def test_finish_revert_resize_recalc_group_rp_mapping(
+            self, mock_get_bdms, mock_notify_action, mock_notify_usage,
+            mock_set_instance_info, mock_instance_save,
+            mock_revert_allocation, mock_get_allocations,
+            mock_fill_provider_mapping, mock_network_migrate_finish,
+            mock_drop_migration_context):
+
+        mock_get_bdms.return_value = objects.BlockDeviceMappingList()
+        request_spec = objects.RequestSpec()
+        mock_get_allocations.return_value = mock.sentinel.allocation
+
+        with mock.patch.object(
+                self.compute.network_api, 'get_instance_nw_info'):
+            self.compute.finish_revert_resize(
+                self.context, self.instance, self.migration, request_spec)
+
+        mock_get_allocations.assert_called_once_with(
+            self.context, self.instance.uuid)
+        mock_fill_provider_mapping.assert_called_once_with(
+            self.context, self.compute.reportclient, request_spec,
+            mock.sentinel.allocation)
+
+    @mock.patch.object(objects.Instance, 'drop_migration_context')
+    @mock.patch('nova.compute.manager.ComputeManager.'
+                '_finish_revert_resize_network_migrate_finish')
+    @mock.patch('nova.scheduler.utils.'
+                'fill_provider_mapping_based_on_allocation')
+    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+                'get_allocations_for_consumer')
+    @mock.patch('nova.compute.manager.ComputeManager._revert_allocation')
+    @mock.patch.object(objects.Instance, 'save')
+    @mock.patch('nova.compute.manager.ComputeManager.'
+                '_set_instance_info')
+    @mock.patch('nova.compute.manager.ComputeManager.'
+                '_notify_about_instance_usage')
+    @mock.patch.object(compute_utils, 'notify_about_instance_action')
+    @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+    def test_finish_revert_resize_recalc_group_rp_mapping_missing_request_spec(
+            self, mock_get_bdms, mock_notify_action, mock_notify_usage,
+            mock_set_instance_info, mock_instance_save,
+            mock_revert_allocation, mock_get_allocations,
+            mock_fill_provider_mapping, mock_network_migrate_finish,
+            mock_drop_migration_context):
+
+        mock_get_bdms.return_value = objects.BlockDeviceMappingList()
+        mock_get_allocations.return_value = mock.sentinel.allocation
+
+        with mock.patch.object(
+                self.compute.network_api, 'get_instance_nw_info'):
+            # This is the case when the compute is pinned to use older than
+            # RPC version 5.2
+            self.compute.finish_revert_resize(
+                self.context, self.instance, self.migration,
+                request_spec=None)
+
+        mock_get_allocations.assert_not_called()
+        mock_fill_provider_mapping.assert_not_called()
+        mock_network_migrate_finish.assert_called_once_with(
+            self.context, self.instance, self.migration, None)
+
     def test_confirm_resize_deletes_allocations(self):
         @mock.patch('nova.objects.Instance.get_by_uuid')
         @mock.patch('nova.objects.Migration.get_by_id')
@@ -13,6 +13,7 @@
 import ddt
 import mock
 import os_resource_classes as orc
+from oslo_serialization import jsonutils
 from oslo_utils.fixture import uuidsentinel as uuids
 import six
 
@@ -1206,6 +1207,88 @@ class TestUtils(TestUtilsBase):
             utils.get_weight_multiplier(host1, 'cpu_weight_multiplier', 1.0)
         )
 
+    @mock.patch('nova.scheduler.utils.'
+                'fill_provider_mapping_based_on_allocation')
+    def test_fill_provider_mapping_returns_early_if_nothing_to_do(
+            self, mock_fill_provider):
+        context = nova_context.RequestContext()
+        request_spec = objects.RequestSpec()
+        # set up the request so that there is nothing to do
+        request_spec.requested_resources = []
+        report_client = mock.sentinel.report_client
+        selection = objects.Selection()
+
+        utils.fill_provider_mapping(
+            context, report_client, request_spec, selection)
+
+        mock_fill_provider.assert_not_called()
+
+    @mock.patch('nova.scheduler.utils.'
+                'fill_provider_mapping_based_on_allocation')
+    def test_fill_provider_mapping(self, mock_fill_provider):
+        context = nova_context.RequestContext()
+        request_spec = objects.RequestSpec()
+        request_spec.requested_resources = [objects.RequestGroup()]
+        report_client = mock.sentinel.report_client
+        allocs = {
+            uuids.rp_uuid: {
+                'resources': {
+                    'NET_BW_EGR_KILOBIT_PER_SEC': 1,
+                }
+            }
+        }
+        allocation_req = {'allocations': allocs}
+        selection = objects.Selection(
+            allocation_request=jsonutils.dumps(allocation_req))
+
+        utils.fill_provider_mapping(
+            context, report_client, request_spec, selection)
+
+        mock_fill_provider.assert_called_once_with(
+            context, report_client, request_spec, allocs)
+
+    @mock.patch.object(objects.RequestSpec,
+                       'map_requested_resources_to_providers')
+    def test_fill_provider_mapping_based_on_allocation_returns_early(
+            self, mock_map):
+        context = nova_context.RequestContext()
+        request_spec = objects.RequestSpec()
+        # set up the request so that there is nothing to do
+        request_spec.requested_resources = []
+        report_client = mock.sentinel.report_client
+        allocation = mock.sentinel.allocation
+
+        utils.fill_provider_mapping_based_on_allocation(
+            context, report_client, request_spec, allocation)
+
+        mock_map.assert_not_called()
+
+    @mock.patch('nova.scheduler.client.report.SchedulerReportClient')
+    @mock.patch.object(objects.RequestSpec,
+                       'map_requested_resources_to_providers')
+    def test_fill_provider_mapping_based_on_allocation(
+            self, mock_map, mock_report_client):
+        context = nova_context.RequestContext()
+        request_spec = objects.RequestSpec()
+        request_spec.requested_resources = [objects.RequestGroup()]
+        allocation = {
+            uuids.rp_uuid: {
+                'resources': {
+                    'NET_BW_EGR_KILOBIT_PER_SEC': 1,
+                }
+            }
+        }
+        traits = ['CUSTOM_PHYSNET1', 'CUSTOM_VNIC_TYPE_NORMAL']
+        mock_report_client.get_provider_traits.return_value = report.TraitInfo(
+            traits=['CUSTOM_PHYSNET1', 'CUSTOM_VNIC_TYPE_NORMAL'],
+            generation=0)
+
+        utils.fill_provider_mapping_based_on_allocation(
+            context, mock_report_client, request_spec, allocation)
+
+        mock_map.assert_called_once_with(allocation, {uuids.rp_uuid: traits})
+
 
 class TestEncryptedMemoryTranslation(TestUtilsBase):
     flavor_name = 'm1.test'