_schedule_instances() supporting a RequestSpec object

When we merged the use of RequestSpec objects for all the operations, we left
a lot of TODOs about passing the object itself to the scheduler.utils
methods instead of the legacy dictionaries.

Other changes will follow to rework how we use the objects in the conductor
(and to fix how we pass them to scheduler.utils), but for the moment this
change only modifies _schedule_instances() and setup_instance_group() so
that they accept a RequestSpec object directly.
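
The resulting call shape, roughly (a sketch using the names from this
change, not a verbatim excerpt):

    # Before: the conductor threaded the two legacy dicts through
    hosts = self._schedule_instances(context, request_spec,
                                     filter_properties, instance_uuids)

    # After: callers hydrate the object once and pass it through
    spec_obj = objects.RequestSpec.from_primitives(
        context, request_spec, filter_properties)
    hosts = self._schedule_instances(context, spec_obj, instance_uuids)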

Change-Id: I434af8e4ad991ac114dd67d66797a562d16bafe2
Partially-Implements: blueprint request-spec-use-by-compute
Sylvain Bauza 2017-05-30 11:56:09 +02:00
parent 3e02460e2e
commit e211fca55a
9 changed files with 113 additions and 159 deletions

View File

@ -537,13 +537,18 @@ class ComputeTaskManager(base.Base):
# check retry policy. Rather ugly use of instances[0]...
# but if we've exceeded max retries... then we really only
# have a single instance.
# TODO(sbauza): Provide directly the RequestSpec object
# when _set_vm_state_and_notify() and populate_retry()
# accept it
request_spec = scheduler_utils.build_request_spec(
context, image, instances)
scheduler_utils.populate_retry(
filter_properties, instances[0].uuid)
instance_uuids = [instance.uuid for instance in instances]
spec_obj = objects.RequestSpec.from_primitives(
context, request_spec, filter_properties)
hosts = self._schedule_instances(
context, request_spec, filter_properties, instance_uuids)
context, spec_obj, instance_uuids)
except Exception as exc:
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
for instance in instances:
@ -612,16 +617,11 @@ class ComputeTaskManager(base.Base):
block_device_mapping=bdms, node=host['nodename'],
limits=host['limits'])
def _schedule_instances(self, context, request_spec, filter_properties,
instance_uuids=None):
scheduler_utils.setup_instance_group(context, request_spec,
filter_properties)
# TODO(sbauza): Hydrate here the object until we modify the
# scheduler.utils methods to directly use the RequestSpec object
spec_obj = objects.RequestSpec.from_primitives(
context, request_spec, filter_properties)
hosts = self.scheduler_client.select_destinations(context, spec_obj,
instance_uuids)
def _schedule_instances(self, context, request_spec,
instance_uuids=None):
scheduler_utils.setup_instance_group(context, request_spec)
hosts = self.scheduler_client.select_destinations(context,
request_spec, instance_uuids)
return hosts
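
Since the rendering above interleaves the removed and added lines, here is
the method after this change in full (reconstructed from the hunk above;
hedged to this rendering):

    def _schedule_instances(self, context, request_spec,
                            instance_uuids=None):
        # The group fields are now set up on the RequestSpec itself
        scheduler_utils.setup_instance_group(context, request_spec)
        hosts = self.scheduler_client.select_destinations(
            context, request_spec, instance_uuids)
        return hosts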
@targets_cell
@ -676,8 +676,7 @@ class ComputeTaskManager(base.Base):
# is not forced to be the original host
request_spec.reset_forced_destinations()
# TODO(sbauza): Provide directly the RequestSpec object
# when _schedule_instances(),
# populate_filter_properties and populate_retry()
# when populate_filter_properties and populate_retry()
# accept it
filter_properties = request_spec.\
to_legacy_filter_properties_dict()
@ -685,9 +684,10 @@ class ComputeTaskManager(base.Base):
to_legacy_request_spec_dict()
scheduler_utils.populate_retry(filter_properties,
instance.uuid)
hosts = self._schedule_instances(
context, request_spec, filter_properties,
[instance.uuid])
request_spec = objects.RequestSpec.from_primitives(
context, request_spec, filter_properties)
hosts = self._schedule_instances(context, request_spec,
[instance.uuid])
host_state = hosts[0]
scheduler_utils.populate_filter_properties(
filter_properties, host_state)
@ -733,9 +733,13 @@ class ComputeTaskManager(base.Base):
# NOTE(sbauza): We were unable to find an original
# RequestSpec object - probably because the instance is old.
# We need to fake one up the old way.
# TODO(sbauza): Provide directly the RequestSpec object
# when _set_vm_state_and_notify() accepts it
filter_properties = {'ignore_hosts': [instance.host]}
request_spec = scheduler_utils.build_request_spec(
context, image_ref, [instance])
request_spec = objects.RequestSpec.from_primitives(
context, request_spec, filter_properties)
else:
# NOTE(sbauza): Augment the RequestSpec object by excluding
# the source host so that the scheduler does not pick it
@ -745,21 +749,15 @@ class ComputeTaskManager(base.Base):
# if we want to make sure that the next destination
# is not forced to be the original host
request_spec.reset_forced_destinations()
# TODO(sbauza): Provide directly the RequestSpec object
# when _schedule_instances() and _set_vm_state_and_notify()
# accept it
filter_properties = request_spec.\
to_legacy_filter_properties_dict()
request_spec = request_spec.to_legacy_request_spec_dict()
try:
hosts = self._schedule_instances(
context, request_spec, filter_properties,
[instance.uuid])
hosts = self._schedule_instances(context, request_spec,
[instance.uuid])
host_dict = hosts.pop(0)
host, node, limits = (host_dict['host'],
host_dict['nodename'],
host_dict['limits'])
except exception.NoValidHost as ex:
request_spec = request_spec.to_legacy_request_spec_dict()
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify(context, instance.uuid,
'rebuild_server',
@ -768,6 +766,7 @@ class ComputeTaskManager(base.Base):
LOG.warning(_LW("No valid host found for rebuild"),
instance=instance)
except exception.UnsupportedPolicyException as ex:
request_spec = request_spec.to_legacy_request_spec_dict()
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify(context, instance.uuid,
'rebuild_server',
@ -903,13 +902,11 @@ class ComputeTaskManager(base.Base):
request_specs, image,
admin_password, injected_files,
requested_networks, block_device_mapping):
legacy_spec = request_specs[0].to_legacy_request_spec_dict()
# Add all the UUIDs for the instances
instance_uuids = [spec.instance_uuid for spec in request_specs]
try:
hosts = self._schedule_instances(context, legacy_spec,
request_specs[0].to_legacy_filter_properties_dict(),
instance_uuids)
hosts = self._schedule_instances(context, request_specs[0],
instance_uuids)
except Exception as exc:
LOG.exception(_LE('Failed to schedule instances'))
self._bury_in_cell0(context, request_specs[0], exc,
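
A note on the hydration helper used throughout this file:
RequestSpec.from_primitives() builds the object from the two legacy
dictionaries. A hedged sketch of its contract (the real implementation
lives in nova.objects.request_spec; the comments are illustrative):

    spec_obj = objects.RequestSpec.from_primitives(
        context,
        request_spec,       # legacy dict, e.g. from build_request_spec()
        filter_properties)  # legacy dict, e.g. {'ignore_hosts': [...]}
    # spec_obj is a hydrated RequestSpec, including instance_group data
    # when the filter properties carried group scheduling hints.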

View File

@ -156,11 +156,6 @@ class LiveMigrationTask(base.TaskBase):
image = utils.get_image_from_system_metadata(
self.instance.system_metadata)
filter_properties = {'ignore_hosts': attempted_hosts}
# TODO(sbauza): Remove that once setup_instance_group() accepts a
# RequestSpec object
request_spec = {'instance_properties': {'uuid': self.instance.uuid}}
scheduler_utils.setup_instance_group(self.context, request_spec,
filter_properties)
if not self.request_spec:
# NOTE(sbauza): We were unable to find an original RequestSpec
# object - probably because the instance is old.
@ -177,7 +172,7 @@ class LiveMigrationTask(base.TaskBase):
# if we want to make sure that the next destination
# is not forced to be the original host
request_spec.reset_forced_destinations()
scheduler_utils.setup_instance_group(self.context, request_spec)
host = None
while host is None:
self._check_not_over_max_retries(attempted_hosts)
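
Before this change, _find_destination() had to fake a legacy request spec
just to satisfy setup_instance_group()'s dict-based signature; with the
object-based signature that stub (and its TODO) can simply go away. A
before/after sketch using the names from the hunks above:

    # Removed workaround: a stub dict faked up only for the helper
    request_spec = {'instance_properties': {'uuid': self.instance.uuid}}
    scheduler_utils.setup_instance_group(self.context, request_spec,
                                         filter_properties)

    # New call: the real RequestSpec object, once forced destinations
    # have been reset
    request_spec.reset_forced_destinations()
    scheduler_utils.setup_instance_group(self.context, request_spec)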

View File

@ -43,8 +43,7 @@ class MigrationTask(base.TaskBase):
# object in the signature and all the scheduler.utils methods too
legacy_spec = self.request_spec.to_legacy_request_spec_dict()
legacy_props = self.request_spec.to_legacy_filter_properties_dict()
scheduler_utils.setup_instance_group(self.context, legacy_spec,
legacy_props)
scheduler_utils.setup_instance_group(self.context, self.request_spec)
scheduler_utils.populate_retry(legacy_props,
self.instance.uuid)
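
MigrationTask._execute() is left in a transitional state: populate_retry()
still wants the legacy dict, so both representations coexist for now. The
resulting sequence (hedged sketch assembled from the hunk above):

    legacy_spec = self.request_spec.to_legacy_request_spec_dict()
    legacy_props = self.request_spec.to_legacy_filter_properties_dict()
    # The object goes to the group helper, the dict to populate_retry()
    scheduler_utils.setup_instance_group(self.context, self.request_spec)
    scheduler_utils.populate_retry(legacy_props, self.instance.uuid)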

View File

@ -568,20 +568,15 @@ def _set_or_delete_marker_for_migrate_instances(context, marker=None):
def _create_minimal_request_spec(context, instance):
image = instance.image_meta
# TODO(sbauza): Modify that once setup_instance_group() accepts a
# RequestSpec object
request_spec = {'instance_properties': {'uuid': instance.uuid}}
filter_properties = {}
scheduler_utils.setup_instance_group(context, request_spec,
filter_properties)
# This is an old instance. Let's try to populate a RequestSpec
# object using the existing information we have previously saved.
request_spec = objects.RequestSpec.from_components(
context, instance.uuid, image,
instance.flavor, instance.numa_topology,
instance.pci_requests,
filter_properties, None, instance.availability_zone
{}, None, instance.availability_zone
)
scheduler_utils.setup_instance_group(context, request_spec)
request_spec.create()
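
Reconstructed from the hunk above, the online-migration helper now reads
(hedged to this rendering; surrounding code elided):

    def _create_minimal_request_spec(context, instance):
        image = instance.image_meta
        # This is an old instance. Let's try to populate a RequestSpec
        # object using the existing information we have previously saved.
        request_spec = objects.RequestSpec.from_components(
            context, instance.uuid, image,
            instance.flavor, instance.numa_topology,
            instance.pci_requests,
            {}, None, instance.availability_zone
        )
        scheduler_utils.setup_instance_group(context, request_spec)
        request_spec.create()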

View File

@ -333,25 +333,23 @@ def _get_group_details(context, instance_uuid, user_group_hosts=None):
policies=group.policies, members=group.members)
def setup_instance_group(context, request_spec, filter_properties):
def setup_instance_group(context, request_spec):
"""Add group_hosts and group_policies fields to filter_properties dict
based on instance uuids provided in request_spec, if those instances are
belonging to a group.
:param request_spec: Request spec
:param filter_properties: Filter properties
"""
group_hosts = filter_properties.get('group_hosts')
# NOTE(sbauza) If there are multiple instance UUIDs, it's a boot
# request and they will all be in the same group, so it's safe to
# only check the first one.
instance_uuid = request_spec.get('instance_properties', {}).get('uuid')
if request_spec.instance_group and request_spec.instance_group.hosts:
group_hosts = request_spec.instance_group.hosts
else:
group_hosts = None
instance_uuid = request_spec.instance_uuid
group_info = _get_group_details(context, instance_uuid, group_hosts)
if group_info is not None:
filter_properties['group_updated'] = True
filter_properties['group_hosts'] = group_info.hosts
filter_properties['group_policies'] = group_info.policies
filter_properties['group_members'] = group_info.members
request_spec.instance_group.hosts = list(group_info.hosts)
request_spec.instance_group.policies = group_info.policies
request_spec.instance_group.members = group_info.members
def retry_on_timeout(retries=1):
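
Putting the hunk back together, the helper after this change mutates the
spec's instance_group in place instead of writing group_* keys into
filter_properties (reconstruction; docstring and NOTE comment elided):

    def setup_instance_group(context, request_spec):
        if request_spec.instance_group and request_spec.instance_group.hosts:
            group_hosts = request_spec.instance_group.hosts
        else:
            group_hosts = None
        instance_uuid = request_spec.instance_uuid
        group_info = _get_group_details(context, instance_uuid, group_hosts)
        if group_info is not None:
            # Update the spec's group fields rather than filter_properties
            request_spec.instance_group.hosts = list(group_info.hosts)
            request_spec.instance_group.policies = group_info.policies
            request_spec.instance_group.members = group_info.members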

View File

@ -253,9 +253,8 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
self.context, self.fake_spec)
self.fake_spec.reset_forced_destinations()
self.task.scheduler_client.select_destinations(
self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
@ -292,11 +291,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
self.assertEqual("host1", task._find_destination())
get_image.assert_called_once_with(self.instance.system_metadata)
fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
setup_ig.assert_called_once_with(
self.context, fake_props,
{'ignore_hosts': [self.instance_host]}
)
setup_ig.assert_called_once_with(self.context, another_spec)
select_dest.assert_called_once_with(self.context, another_spec,
[self.instance.uuid])
check_compat.assert_called_once_with("host1")
@ -313,9 +308,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
scheduler_utils.setup_instance_group(self.context, self.fake_spec)
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec, [self.instance.uuid]).AndReturn(
[{'host': 'host1'}])
@ -336,9 +329,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
scheduler_utils.setup_instance_group(self.context, self.fake_spec)
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec, [self.instance.uuid]).AndReturn(
[{'host': 'host1'}])
@ -374,9 +365,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
scheduler_utils.setup_instance_group(self.context, self.fake_spec)
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec, [self.instance.uuid]).AndReturn(
[{'host': 'host1'}])
@ -405,9 +394,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
scheduler_utils.setup_instance_group(self.context, self.fake_spec)
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec, [self.instance.uuid]).AndReturn(
[{'host': 'host1'}])
@ -435,9 +422,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
scheduler_utils.setup_instance_group(self.context, self.fake_spec)
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec, [self.instance.uuid]).AndReturn(
[{'host': 'host1'}])
@ -458,9 +443,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
'select_destinations')
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
scheduler_utils.setup_instance_group(self.context, self.fake_spec)
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec, [self.instance.uuid]).AndRaise(
exception.NoValidHost(reason=""))

View File

@ -55,25 +55,21 @@ class MigrationTaskTestCase(test.NoDBTestCase):
scheduler_client.SchedulerClient())
@mock.patch('nova.availability_zones.get_host_availability_zone')
@mock.patch.object(objects.RequestSpec, 'from_components')
@mock.patch.object(scheduler_utils, 'setup_instance_group')
@mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations')
@mock.patch.object(compute_rpcapi.ComputeAPI, 'prep_resize')
@mock.patch.object(objects.Quotas, 'from_reservations')
def test_execute(self, quotas_mock, prep_resize_mock,
sel_dest_mock, sig_mock, request_spec_from_components,
az_mock):
sel_dest_mock, sig_mock, az_mock):
sel_dest_mock.return_value = self.hosts
az_mock.return_value = 'myaz'
task = self._generate_task()
request_spec_from_components.return_value = self.request_spec
legacy_request_spec = self.request_spec.to_legacy_request_spec_dict()
task.execute()
quotas_mock.assert_called_once_with(self.context, self.reservations,
instance=self.instance)
sig_mock.assert_called_once_with(self.context, legacy_request_spec,
self.filter_properties)
sig_mock.assert_called_once_with(self.context, self.request_spec)
task.scheduler_client.select_destinations.assert_called_once_with(
self.context, self.request_spec, [self.instance.uuid])
prep_resize_mock.assert_called_once_with(

View File

@ -382,7 +382,11 @@ class _BaseTaskTestCase(object):
@mock.patch('nova.objects.BuildRequest.get_by_instance_uuid')
@mock.patch('nova.availability_zones.get_host_availability_zone')
@mock.patch('nova.objects.Instance.save')
def test_build_instances(self, mock_save, mock_getaz, mock_buildreq):
@mock.patch.object(objects.RequestSpec, 'from_primitives')
def test_build_instances(self, mock_fp, mock_save, mock_getaz,
mock_buildreq):
fake_spec = objects.RequestSpec()
mock_fp.return_value = fake_spec
instance_type = flavors.get_default_flavor()
# NOTE(danms): Avoid datetime timezone issues with converted flavors
instance_type.created_at = None
@ -407,7 +411,7 @@ class _BaseTaskTestCase(object):
'num_instances': 2}
filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}}
self.conductor_manager._schedule_instances(self.context,
spec, filter_properties, [uuids.fake, uuids.fake]).AndReturn(
fake_spec, [uuids.fake, uuids.fake]).AndReturn(
[{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}])
db.block_device_mapping_get_all_by_instance(self.context,
@ -472,6 +476,7 @@ class _BaseTaskTestCase(object):
mock_getaz.assert_has_calls([
mock.call(self.context, 'host1'),
mock.call(self.context, 'host2')])
mock_fp.assert_called_once_with(self.context, spec, filter_properties)
@mock.patch.object(scheduler_utils, 'build_request_spec')
@mock.patch.object(scheduler_utils, 'setup_instance_group')
@ -925,21 +930,25 @@ class _BaseTaskTestCase(object):
@mock.patch.object(scheduler_utils, 'populate_filter_properties')
@mock.patch.object(scheduler_utils, 'populate_retry')
@mock.patch.object(self.conductor_manager, '_schedule_instances')
@mock.patch.object(objects.RequestSpec, 'from_primitives')
@mock.patch.object(objects.RequestSpec, 'to_legacy_request_spec_dict')
@mock.patch.object(objects.RequestSpec,
'to_legacy_filter_properties_dict')
@mock.patch.object(objects.RequestSpec, 'reset_forced_destinations')
def do_test(reset_forced_destinations,
to_filtprops, to_reqspec, sched_instances,
to_filtprops, to_reqspec, from_primitives, sched_instances,
populate_retry, populate_filter_properties,
unshelve_instance):
to_filtprops.return_value = filter_properties
to_reqspec.return_value = request_spec
from_primitives.return_value = fake_spec
sched_instances.return_value = [host]
self.conductor.unshelve_instance(self.context, instance, fake_spec)
reset_forced_destinations.assert_called_once_with()
sched_instances.assert_called_once_with(self.context, request_spec,
filter_properties, [instance.uuid])
from_primitives.assert_called_once_with(self.context, request_spec,
filter_properties)
sched_instances.assert_called_once_with(self.context, fake_spec,
[instance.uuid])
# NOTE(sbauza): Since the instance is dehydrated when passing
# through the RPC API, we can only assert mock.ANY for it
unshelve_instance.assert_called_once_with(
@ -995,12 +1004,13 @@ class _BaseTaskTestCase(object):
self.conductor_manager.unshelve_instance(self.context, instance)
self.assertEqual(1, unshelve_mock.call_count)
def test_unshelve_instance_schedule_and_rebuild(self):
@mock.patch.object(objects.RequestSpec, 'from_primitives')
def test_unshelve_instance_schedule_and_rebuild(self, fp):
fake_spec = objects.RequestSpec()
fp.return_value = fake_spec
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
filter_properties = {'retry': {'num_attempts': 1,
'hosts': []}}
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
@ -1014,7 +1024,7 @@ class _BaseTaskTestCase(object):
scheduler_utils.build_request_spec(self.context, 'fake_image',
mox.IgnoreArg()).AndReturn('req_spec')
self.conductor_manager._schedule_instances(self.context,
'req_spec', filter_properties, [instance.uuid]).AndReturn(
fake_spec, [instance.uuid]).AndReturn(
[{'host': 'fake_host',
'nodename': 'fake_node',
'limits': {}}])
@ -1031,6 +1041,7 @@ class _BaseTaskTestCase(object):
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
fp.assert_called_once_with(self.context, 'req_spec', mock.ANY)
def test_unshelve_instance_schedule_and_rebuild_novalid_host(self):
instance = self._create_fake_instance_obj()
@ -1038,8 +1049,7 @@ class _BaseTaskTestCase(object):
instance.save()
system_metadata = instance.system_metadata
def fake_schedule_instances(context, image, filter_properties,
*instances):
def fake_schedule_instances(context, request_spec, *instances):
raise exc.NoValidHost(reason='')
with test.nested(
@ -1081,12 +1091,13 @@ class _BaseTaskTestCase(object):
self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
self.assertIsNone(instance.task_state)
def test_unshelve_instance_schedule_and_rebuild_volume_backed(self):
@mock.patch.object(objects.RequestSpec, 'from_primitives')
def test_unshelve_instance_schedule_and_rebuild_volume_backed(self, fp):
fake_spec = objects.RequestSpec()
fp.return_value = fake_spec
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
filter_properties = {'retry': {'num_attempts': 1,
'hosts': []}}
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
@ -1097,7 +1108,7 @@ class _BaseTaskTestCase(object):
scheduler_utils.build_request_spec(self.context, None,
mox.IgnoreArg()).AndReturn('req_spec')
self.conductor_manager._schedule_instances(self.context,
'req_spec', filter_properties, [instance.uuid]).AndReturn(
fake_spec, [instance.uuid]).AndReturn(
[{'host': 'fake_host',
'nodename': 'fake_node',
'limits': {}}])
@ -1113,6 +1124,7 @@ class _BaseTaskTestCase(object):
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
fp.assert_called_once_with(self.context, 'req_spec', mock.ANY)
def test_rebuild_instance(self):
inst_obj = self._create_fake_instance_obj()
@ -1238,7 +1250,7 @@ class _BaseTaskTestCase(object):
updates = {'vm_state': vm_states.ACTIVE, 'task_state': None}
state_mock.assert_called_once_with(self.context, inst_obj.uuid,
'rebuild_server', updates,
exception, request_spec)
exception, mock.ANY)
self.assertFalse(select_dest_mock.called)
self.assertFalse(rebuild_mock.called)
@ -1275,10 +1287,7 @@ class _BaseTaskTestCase(object):
expected_host = 'thebesthost'
expected_node = 'thebestnode'
expected_limits = 'fake-limits'
request_spec = {}
filter_properties = {'ignore_hosts': [(inst_obj.host)]}
fake_spec = objects.RequestSpec(ignore_hosts=[])
augmented_spec = objects.RequestSpec(ignore_hosts=[inst_obj.host])
rebuild_args, compute_args = self._prepare_rebuild_args(
{'host': None, 'node': expected_node, 'limits': expected_limits,
'request_spec': fake_spec})
@ -1287,30 +1296,19 @@ class _BaseTaskTestCase(object):
'rebuild_instance'),
mock.patch.object(scheduler_utils, 'setup_instance_group',
return_value=False),
mock.patch.object(objects.RequestSpec, 'from_primitives',
return_value=augmented_spec),
mock.patch.object(self.conductor_manager.scheduler_client,
'select_destinations',
return_value=[{'host': expected_host,
'nodename': expected_node,
'limits': expected_limits}]),
mock.patch.object(fake_spec, 'reset_forced_destinations'),
mock.patch.object(fake_spec, 'to_legacy_request_spec_dict',
return_value=request_spec),
mock.patch.object(fake_spec, 'to_legacy_filter_properties_dict',
return_value=filter_properties),
) as (rebuild_mock, sig_mock, fp_mock, select_dest_mock, reset_fd,
to_reqspec, to_filtprops):
) as (rebuild_mock, sig_mock, select_dest_mock, reset_fd):
self.conductor_manager.rebuild_instance(context=self.context,
instance=inst_obj,
**rebuild_args)
reset_fd.assert_called_once_with()
to_reqspec.assert_called_once_with()
to_filtprops.assert_called_once_with()
fp_mock.assert_called_once_with(self.context, request_spec,
filter_properties)
select_dest_mock.assert_called_once_with(self.context,
augmented_spec, [inst_obj.uuid])
fake_spec, [inst_obj.uuid])
compute_args['host'] = expected_host
rebuild_mock.assert_called_once_with(self.context,
instance=inst_obj,
@ -1339,7 +1337,8 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
cell_mapping=None, project_id=self.ctxt.project_id)
im.create()
params['request_specs'] = [objects.RequestSpec(
instance_uuid=build_request.instance_uuid)]
instance_uuid=build_request.instance_uuid,
instance_group=None)]
params['image'] = {'fake_data': 'should_pass_silently'}
params['admin_password'] = 'admin_password'
params['injected_files'] = 'injected_files'
@ -1446,7 +1445,8 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
cell_mapping=None, project_id=self.ctxt.project_id)
im2.create()
params['request_specs'].append(objects.RequestSpec(
instance_uuid=build_request.instance_uuid))
instance_uuid=build_request.instance_uuid,
instance_group=None))
# Now let's have some fun and delete the third build request before
# passing the object on to schedule_and_build_instances so that the
@ -1886,10 +1886,6 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
cell_mapping=objects.CellMapping.get_by_uuid(self.context,
uuids.cell1))
# Filter properties are populated during code execution
legacy_filter_props = {'retry': {'num_attempts': 1,
'hosts': []}}
self.assertRaises(exc.NoValidHost,
self.conductor._cold_migrate,
self.context, inst_obj,
@ -1898,8 +1894,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
metadata_mock.assert_called_with({})
quotas_mock.assert_called_once_with(self.context, [resvs],
instance=inst_obj)
sig_mock.assert_called_once_with(self.context, legacy_request_spec,
legacy_filter_props)
sig_mock.assert_called_once_with(self.context, fake_spec)
notify_mock.assert_called_once_with(self.context, inst_obj.uuid,
'migrate_server', updates,
exc_info, legacy_request_spec)
@ -1936,10 +1931,6 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
spec_fc_mock.return_value = fake_spec
legacy_request_spec = fake_spec.to_legacy_request_spec_dict()
# Filter properties are populated during code execution
legacy_filter_props = {'retry': {'num_attempts': 1,
'hosts': []}}
im_mock.return_value = objects.InstanceMapping(
cell_mapping=objects.CellMapping.get_by_uuid(self.context,
uuids.cell1))
@ -1957,8 +1948,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
metadata_mock.assert_called_with({})
quotas_mock.assert_called_once_with(self.context, [resvs],
instance=inst_obj)
sig_mock.assert_called_once_with(self.context, legacy_request_spec,
legacy_filter_props)
sig_mock.assert_called_once_with(self.context, fake_spec)
notify_mock.assert_called_once_with(self.context, inst_obj.uuid,
'migrate_server', updates,
exc_info, legacy_request_spec)
@ -2093,8 +2083,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
metadata_mock.assert_called_with({})
quotas_mock.assert_called_once_with(self.context, [resvs],
instance=inst_obj)
sig_mock.assert_called_once_with(self.context, legacy_request_spec,
legacy_filter_props)
sig_mock.assert_called_once_with(self.context, fake_spec)
select_dest_mock.assert_called_once_with(
self.context, fake_spec, [inst_obj.uuid])
prep_resize_mock.assert_called_once_with(
@ -2180,7 +2169,10 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
self.assertIn('resize', nvh.message)
@mock.patch('nova.objects.BuildRequest.get_by_instance_uuid')
def test_build_instances_instance_not_found(self, _mock_buildreq):
@mock.patch.object(objects.RequestSpec, 'from_primitives')
def test_build_instances_instance_not_found(self, fp, _mock_buildreq):
fake_spec = objects.RequestSpec()
fp.return_value = fake_spec
instances = [fake_instance.fake_instance_obj(self.context)
for i in range(2)]
self.mox.StubOutWithMock(instances[0], 'save')
@ -2198,7 +2190,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}}
inst_uuids = [inst.uuid for inst in instances]
self.conductor_manager._schedule_instances(self.context,
spec, filter_properties, inst_uuids).AndReturn(
fake_spec, inst_uuids).AndReturn(
[{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}])
instances[0].save().AndRaise(
@ -2232,6 +2224,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
fp.assert_called_once_with(self.context, spec, filter_properties)
@mock.patch.object(scheduler_utils, 'setup_instance_group')
@mock.patch.object(scheduler_utils, 'build_request_spec')
@ -2245,12 +2238,14 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
spec = {'fake': 'specs',
'instance_properties': instances[0]}
build_request_spec.return_value = spec
fake_spec = objects.RequestSpec()
with test.nested(
mock.patch.object(instances[0], 'save',
side_effect=exc.InstanceInfoCacheNotFound(
instance_uuid=instances[0].uuid)),
mock.patch.object(instances[1], 'save'),
mock.patch.object(objects.RequestSpec, 'from_primitives'),
mock.patch.object(objects.RequestSpec, 'from_primitives',
return_value=fake_spec),
mock.patch.object(self.conductor_manager.scheduler_client,
'select_destinations', return_value=destinations),
mock.patch.object(self.conductor_manager.compute_rpcapi,
@ -2274,11 +2269,8 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
block_device_mapping='block_device_mapping',
legacy_bdm=False)
# NOTE(sbauza): Due to populate_retry() later in the code,
# filter_properties is dynamically modified
setup_instance_group.assert_called_once_with(
self.context, spec, {'retry': {'num_attempts': 1,
'hosts': []}})
self.context, fake_spec)
get_buildreq.return_value.destroy.assert_called_once_with()
build_and_run_instance.assert_called_once_with(self.context,
instance=instances[1], host='host2', image={'fake-data':
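
The test updates in this file all follow one pattern: patch
RequestSpec.from_primitives to return a sentinel spec, then assert that the
sentinel (rather than the legacy dicts) is what reaches
_schedule_instances() or select_destinations(). A hypothetical miniature of
that mocking pattern (self-contained sketch, not a verbatim test):

    import mock

    from nova import objects
    from nova import test


    class FromPrimitivesPatternTest(test.NoDBTestCase):
        @mock.patch.object(objects.RequestSpec, 'from_primitives')
        def test_sentinel_spec_is_threaded_through(self, mock_fp):
            fake_spec = objects.RequestSpec()
            mock_fp.return_value = fake_spec
            # Any code under test that hydrates via from_primitives() now
            # receives the sentinel, so asserting on fake_spec proves the
            # object itself (not the legacy dicts) reached the scheduler.
            hydrated = objects.RequestSpec.from_primitives(
                mock.sentinel.ctxt, {'fake': 'spec'}, {'fake': 'props'})
            self.assertIs(fake_spec, hydrated)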

View File

@ -334,43 +334,42 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
self._get_group_details_with_filter_not_configured(policy)
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_in_filter_properties(self, mock_ggd):
def test_setup_instance_group_in_request_spec(self, mock_ggd):
mock_ggd.return_value = scheduler_utils.GroupDetails(
hosts=set(['hostA', 'hostB']), policies=['policy'],
members=['instance1'])
spec = {'instance_properties': {'uuid': uuids.instance}}
filter_props = {'group_hosts': ['hostC']}
spec = objects.RequestSpec(instance_uuid=uuids.instance)
spec.instance_group = objects.InstanceGroup(hosts=['hostC'])
scheduler_utils.setup_instance_group(self.context, spec, filter_props)
scheduler_utils.setup_instance_group(self.context, spec)
mock_ggd.assert_called_once_with(self.context, uuids.instance,
['hostC'])
expected_filter_props = {'group_updated': True,
'group_hosts': set(['hostA', 'hostB']),
'group_policies': ['policy'],
'group_members': ['instance1']}
self.assertEqual(expected_filter_props, filter_props)
# The hosts list is built from a set, so sort it before comparing.
self.assertEqual(['hostA', 'hostB'], sorted(spec.instance_group.hosts))
self.assertEqual(['policy'], spec.instance_group.policies)
self.assertEqual(['instance1'], spec.instance_group.members)
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_with_no_group(self, mock_ggd):
mock_ggd.return_value = None
spec = {'instance_properties': {'uuid': uuids.instance}}
filter_props = {'group_hosts': ['hostC']}
spec = objects.RequestSpec(instance_uuid=uuids.instance)
spec.instance_group = objects.InstanceGroup(hosts=['hostC'])
scheduler_utils.setup_instance_group(self.context, spec, filter_props)
scheduler_utils.setup_instance_group(self.context, spec)
mock_ggd.assert_called_once_with(self.context, uuids.instance,
['hostC'])
self.assertNotIn('group_updated', filter_props)
self.assertNotIn('group_policies', filter_props)
self.assertEqual(['hostC'], filter_props['group_hosts'])
# Make sure the field isn't touched by setup_instance_group().
self.assertFalse(spec.instance_group.obj_attr_is_set('policies'))
self.assertEqual(['hostC'], spec.instance_group.hosts)
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_with_filter_not_configured(self, mock_ggd):
mock_ggd.side_effect = exception.NoValidHost(reason='whatever')
spec = {'instance_properties': {'uuid': uuids.instance}}
filter_props = {'group_hosts': ['hostC']}
spec = objects.RequestSpec(instance_uuid=uuids.instance)
spec.instance_group = objects.InstanceGroup(hosts=['hostC'])
self.assertRaises(exception.NoValidHost,
scheduler_utils.setup_instance_group,
self.context, spec, filter_props)
self.context, spec)