Merge "Use Selection object to fill request group mapping"
commit 10b375990d
@@ -677,7 +677,7 @@ class ComputeTaskManager(base.Base):
                 # the allocation of the instance to another host
                 # TODO(gibi): handle if the below call raises
                 self._fill_provider_mapping(
-                    context, instance.uuid, request_spec)
+                    context, instance.uuid, request_spec, host)
             else:
                 # Some deployments use different schedulers that do not
                 # use Placement, so they will not have an
@@ -1264,7 +1264,8 @@ class ComputeTaskManager(base.Base):
         with obj_target_cell(inst, cell0):
             inst.destroy()
 
-    def _fill_provider_mapping(self, context, instance_uuid, request_spec):
+    def _fill_provider_mapping(
+            self, context, instance_uuid, request_spec, host_selection):
         """Fills out the request group - resource provider mapping in the
         request spec.
 
@@ -1276,26 +1277,26 @@ class ComputeTaskManager(base.Base):
         replaced with a simpler code that copies the group - RP
         mapping out from the Selection object returned by the scheduler's
         select_destinations call.
 
+        :param context: The security context
+        :param instance_uuid: The UUID of the instance for which the provider
+            mapping is filled
+        :param request_spec: The RequestSpec object associated with the
+            operation
+        :param host_selection: The Selection object returned by the scheduler
+            for this operation
         """
         # Exit early if this request spec does not require mappings.
         if not request_spec.maps_requested_resources:
             return
-        # TODO(mriedem): Could we use the Selection.allocation_request here
-        # to get the resource providers rather than making an API call to
-        # placement per instance being scheduled? Granted that is a
-        # PUT /allocations/{consumer_id} *request* payload rather than a
-        # *response* but at least currently they are in the same format and
-        # could make this faster.
-        allocs = self.report_client.get_allocs_for_consumer(
-            context, instance_uuid)['allocations']
-        if not allocs:
-            # Technically out-of-tree scheduler drivers can still not create
-            # allocations in placement so move on if there are no allocations
-            # for the instance.
-            LOG.debug('No allocations found for instance after scheduling. '
-                      'Assuming the scheduler driver is not using Placement.',
-                      instance_uuid=instance_uuid)
-            return
+        # Technically out-of-tree scheduler drivers can still not create
+        # allocations in placement but if request_spec.maps_requested_resources
+        # is not empty and the scheduling succeeded then placement has to be
+        # involved
+        ar = jsonutils.loads(host_selection.allocation_request)
+        allocs = ar['allocations']
         # TODO(mriedem): Short-term we can optimize this by passing a cache by
         # reference of the RP->traits mapping because if we are processing
         # a multi-create request we could have the same RPs being used for
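The core of the change is in the hunk above: instead of issuing a GET /allocations/{consumer_uuid} round trip to placement for every instance being scheduled, the conductor now decodes the allocation request JSON that the scheduler already attached to its Selection answer. A minimal standalone sketch of the new flow, using an illustrative stand-in class and the stdlib json module rather than Nova's real objects.Selection and oslo's jsonutils:

    import json

    class FakeSelection(object):
        """Illustrative stand-in for nova.objects.Selection."""
        def __init__(self, allocation_request):
            # JSON text of the PUT /allocations payload the scheduler chose
            self.allocation_request = allocation_request

    selection = FakeSelection(json.dumps({
        "allocations": {
            "rp-uuid-1": {  # keyed by resource provider UUID
                "resources": {"VCPU": 1, "MEMORY_MB": 1024, "DISK_GB": 100},
            },
        },
    }))

    # No placement round trip: the payload travels with the scheduler's answer.
    allocs = json.loads(selection.allocation_request)["allocations"]
    print(list(allocs))  # ['rp-uuid-1'] - the providers to look traits up for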
@@ -1308,6 +1309,9 @@ class ComputeTaskManager(base.Base):
             rp_uuid: self.report_client.get_provider_traits(
                 context, rp_uuid).traits
             for rp_uuid in allocs}
+        # NOTE(gibi): The allocs dict is in the format of the PUT /allocations
+        # and that format can change. The current format can be detected from
+        # host_selection.allocation_request_version
         request_spec.map_requested_resources_to_providers(
             allocs, provider_traits)
 
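The NOTE added here is the main caveat of the approach: the decoded dict follows the versioned PUT /allocations wire format, and the applicable microversion travels alongside it in host_selection.allocation_request_version. A hedged sketch of what version-aware handling could look like; the 1.12 boundary (where placement switched the payload from a list of dicts to a dict keyed by provider UUID) is correct to the best of my knowledge, but the helper itself is illustrative and not Nova code:

    import json

    def provider_uuids(allocation_request, allocation_request_version):
        """Extract resource provider UUIDs from a serialized allocation request."""
        payload = json.loads(allocation_request)["allocations"]
        major, minor = (int(p) for p in allocation_request_version.split("."))
        if (major, minor) >= (1, 12):
            # dict keyed by resource provider UUID (the format used above)
            return list(payload)
        # older list-of-dicts format, as in the pre-change test fixtures below
        return [entry["resource_provider"]["uuid"] for entry in payload]

    print(provider_uuids('{"allocations": {"rp-1": {"resources": {}}}}', "1.28"))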
@@ -1431,7 +1435,7 @@ class ComputeTaskManager(base.Base):
             # map allocations to resource providers in the request spec.
             try:
                 self._fill_provider_mapping(
-                    context, instance.uuid, request_spec)
+                    context, instance.uuid, request_spec, host)
             except Exception as exc:
                 # If anything failed here we need to cleanup and bail out.
                 with excutils.save_and_reraise_exception():
@@ -66,28 +66,28 @@ from nova.volume import cinder
 CONF = conf.CONF
 
 
-fake_alloc1 = {"allocations": [
-    {"resource_provider": {"uuid": uuids.host1},
-     "resources": {"VCPU": 1,
-                   "MEMORY_MB": 1024,
-                   "DISK_GB": 100}
-    }]}
-fake_alloc2 = {"allocations": [
-    {"resource_provider": {"uuid": uuids.host2},
-     "resources": {"VCPU": 1,
-                   "MEMORY_MB": 1024,
-                   "DISK_GB": 100}
-    }]}
-fake_alloc3 = {"allocations": [
-    {"resource_provider": {"uuid": uuids.host3},
-     "resources": {"VCPU": 1,
-                   "MEMORY_MB": 1024,
-                   "DISK_GB": 100}
-    }]}
+fake_alloc1 = {"allocations": {
+    uuids.host1: {
+        "resources": {"VCPU": 1,
+                      "MEMORY_MB": 1024,
+                      "DISK_GB": 100}
+    }}}
+fake_alloc2 = {"allocations": {
+    uuids.host2: {
+        "resources": {"VCPU": 1,
+                      "MEMORY_MB": 1024,
+                      "DISK_GB": 100}
+    }}}
+fake_alloc3 = {"allocations": {
+    uuids.host3: {
+        "resources": {"VCPU": 1,
+                      "MEMORY_MB": 1024,
+                      "DISK_GB": 100}
+    }}}
 fake_alloc_json1 = jsonutils.dumps(fake_alloc1)
 fake_alloc_json2 = jsonutils.dumps(fake_alloc2)
 fake_alloc_json3 = jsonutils.dumps(fake_alloc3)
-fake_alloc_version = "1.23"
+fake_alloc_version = "1.28"
 fake_selection1 = objects.Selection(service_host="host1", nodename="node1",
     cell_uuid=uuids.cell, limits=None, allocation_request=fake_alloc_json1,
     allocation_request_version=fake_alloc_version)
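The fixture rewrite tracks the same format change: the old fixtures used the list-of-dicts shape while claiming version "1.23", which the new production code would misparse, so they now use the dict shape keyed by provider UUID and bump the claimed version to "1.28". Both shapes carry the same information, as this small converter (illustrative only, not part of the commit) shows:

    def to_dict_format(old_style):
        """Rewrite a list-of-dicts allocations payload into the dict shape."""
        return {"allocations": {
            entry["resource_provider"]["uuid"]: {"resources": entry["resources"]}
            for entry in old_style["allocations"]}}

    old_style = {"allocations": [
        {"resource_provider": {"uuid": "host1-uuid"},
         "resources": {"VCPU": 1, "MEMORY_MB": 1024, "DISK_GB": 100}}]}

    assert to_dict_format(old_style) == {"allocations": {
        "host1-uuid": {"resources": {"VCPU": 1, "MEMORY_MB": 1024,
                                     "DISK_GB": 100}}}}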
@@ -992,7 +992,7 @@ class _BaseTaskTestCase(object):
             security_groups='security_groups',
             block_device_mapping='block_device_mapping',
             legacy_bdm=False,
-            host_lists=fake_host_lists1,
+            host_lists=copy.deepcopy(fake_host_lists1),
             request_spec=request_spec)
 
         expected_build_run_host_list = copy.copy(fake_host_lists1[0])
@@ -1018,7 +1018,8 @@ class _BaseTaskTestCase(object):
             host_list=expected_build_run_host_list)
 
         mock_rp_mapping.assert_called_once_with(
-            self.context, instance.uuid, mock.ANY)
+            self.context, instance.uuid, mock.ANY,
+            test.MatchType(objects.Selection))
         actual_request_spec = mock_rp_mapping.mock_calls[0][1][2]
         self.assertEqual(
             rg1.resources,
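test.MatchType lets the assertion above pin down only the type of the new fourth argument, without constructing the exact Selection the conductor passes. A minimal stand-in showing the idea (Nova ships its own helper in nova/test.py; this sketch is not that implementation):

    class MatchType(object):
        """Compares equal to any object of the wanted type."""
        def __init__(self, wanted_type):
            self.wanted_type = wanted_type

        def __eq__(self, other):
            return type(other) == self.wanted_type

    assert MatchType(str) == "any string at all"   # equality is a type check
    assert not (MatchType(str) == 42)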
@@ -1655,11 +1656,12 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
     @mock.patch('nova.availability_zones.get_host_availability_zone')
     @mock.patch('nova.compute.rpcapi.ComputeAPI.build_and_run_instance')
     @mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations')
-    def _do_schedule_and_build_instances_test(self, params,
-                                              select_destinations,
-                                              build_and_run_instance,
-                                              get_az):
-        select_destinations.return_value = [[fake_selection1]]
+    def _do_schedule_and_build_instances_test(
+            self, params, select_destinations, build_and_run_instance,
+            get_az, host_list=None):
+        if not host_list:
+            host_list = copy.deepcopy(fake_host_lists1)
+        select_destinations.return_value = host_list
         get_az.return_value = 'myaz'
         details = {}
 
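The switch from the literal [[fake_selection1]] to a deep copy of fake_host_lists1 matters presumably because the code under test consumes host candidates destructively (popping them off the per-instance list), so handing every test the module-level fixture itself would let one run corrupt the fixture for the next. The same hazard in miniature, with illustrative stand-ins:

    import copy

    fake_host_lists = [["selection-for-host1"]]  # module-level fixture stand-in

    def consume_first_host(host_lists):
        # the code under test pops candidates off the per-instance host list
        return host_lists[0].pop(0)

    consume_first_host(copy.deepcopy(fake_host_lists))
    assert fake_host_lists == [["selection-for-host1"]]  # fixture left intact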
@@ -2132,27 +2134,21 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
 
     @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                 'get_provider_traits')
-    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
-                'get_allocs_for_consumer')
     @mock.patch('nova.objects.request_spec.RequestSpec.'
                 'map_requested_resources_to_providers')
     def test_schedule_and_build_instances_fill_request_spec(
-            self, mock_map, mock_get_allocs, mock_traits):
+            self, mock_map, mock_traits):
         # makes sure there is some request group in the spec to be mapped
         self.params['request_specs'][0].requested_resources = [
             objects.RequestGroup()]
 
-        mock_get_allocs.return_value = {
-            'allocations': {uuids.rp1: mock.sentinel.rp1_allocs}}
         mock_traits.return_value.traits = ['TRAIT1']
 
-        instance_uuid = self._do_schedule_and_build_instances_test(
-            self.params)
+        self._do_schedule_and_build_instances_test(self.params)
 
-        mock_map.assert_called_once_with({uuids.rp1: mock.sentinel.rp1_allocs},
-                                         {uuids.rp1: ['TRAIT1']})
-        mock_get_allocs.assert_called_once_with(mock.ANY, instance_uuid)
-        mock_traits.assert_called_once_with(mock.ANY, uuids.rp1)
+        mock_map.assert_called_once_with(fake_alloc1['allocations'],
+                                         {uuids.host1: ['TRAIT1']})
+        mock_traits.assert_called_once_with(mock.ANY, uuids.host1)
 
     @mock.patch('nova.conductor.manager.ComputeTaskManager.'
                 '_cleanup_build_artifacts')
@@ -2183,23 +2179,6 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
         self.params['request_specs'][0].requested_resources = []
         self._do_schedule_and_build_instances_test(self.params)
 
-    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
-                'get_provider_traits', new_callable=mock.NonCallableMock)
-    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
-                'get_allocs_for_consumer', return_value={'allocations': {}})
-    @mock.patch('nova.objects.request_spec.RequestSpec.'
-                'map_requested_resources_to_providers',
-                new_callable=mock.NonCallableMock)
-    def test_schedule_and_build_instances_fill_request_spec_no_allocs(
-            self, mock_map, mock_get_allocs, mock_traits):
-        """Tests to make sure _fill_provider_mapping handles a scheduler
-        driver which does not use placement (so there are no allocations).
-        """
-        self.params['request_specs'][0].requested_resources = [
-            objects.RequestGroup()]
-        self._do_schedule_and_build_instances_test(self.params)
-        mock_get_allocs.assert_called_once()
-
     @mock.patch('nova.objects.CellMapping.get_by_uuid')
     def test_bury_in_cell0_no_cell0(self, mock_cm_get):
         mock_cm_get.side_effect = exc.CellMappingNotFound(uuid='0')