# (Removed: web file-browser residue — repository title, topic-picker help
# text, and "4504 lines / 215 KiB" size banner captured by the scraper.
# This file is a chunk of nova/tests/unit/conductor/test_conductor.py.)
# Copyright 2012 IBM Corp.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the conductor service."""
import copy
import mock
from oslo_db import exception as db_exc
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_versionedobjects import exception as ovo_exc
from nova.accelerator import cyborg
from nova import block_device
from nova.compute import flavors
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import api as conductor_api
from nova.conductor import manager as conductor_manager
from nova.conductor import rpcapi as conductor_rpcapi
from nova.conductor.tasks import live_migrate
from nova.conductor.tasks import migrate
from nova import conf
from nova import context
from nova.db import api as db
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import api_models
from nova import exception as exc
from nova.image import glance as image_api
from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import request_spec
from nova.scheduler.client import query
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests import fixtures
from nova.tests.fixtures import cyborg as cyborg_fixture
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.compute import test_compute
from nova.tests.unit import fake_build_request
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_request_spec
from nova.tests.unit import fake_server_actions
from nova.tests.unit import utils as test_utils
from nova import utils
from nova.volume import cinder
CONF = conf.CONF

# Canned allocation candidates, one per fake host: a single resource
# provider serving a standard VCPU/RAM/disk request, plus a "mappings"
# entry tying port1 to that same provider.
fake_alloc1 = {
    "allocations": {
        uuids.host1: {
            "resources": {"VCPU": 1,
                          "MEMORY_MB": 1024,
                          "DISK_GB": 100}
        }
    },
    "mappings": {
        uuids.port1: [uuids.host1]
    }
}
fake_alloc2 = {
    "allocations": {
        uuids.host2: {
            "resources": {"VCPU": 1,
                          "MEMORY_MB": 1024,
                          "DISK_GB": 100}
        }
    },
    "mappings": {
        uuids.port1: [uuids.host2]
    }
}
fake_alloc3 = {
    "allocations": {
        uuids.host3: {
            "resources": {"VCPU": 1,
                          "MEMORY_MB": 1024,
                          "DISK_GB": 100}
        }
    },
    "mappings": {
        uuids.port1: [uuids.host3]
    }
}
# JSON-serialized forms as they would be carried inside a Selection object,
# together with the allocation request version string they were built for.
fake_alloc_json1 = jsonutils.dumps(fake_alloc1)
fake_alloc_json2 = jsonutils.dumps(fake_alloc2)
fake_alloc_json3 = jsonutils.dumps(fake_alloc3)
fake_alloc_version = "1.28"
# Scheduler Selection objects for host1/2/3 carrying the allocation
# requests above.
fake_selection1 = objects.Selection(service_host="host1", nodename="node1",
        cell_uuid=uuids.cell, limits=None, allocation_request=fake_alloc_json1,
        allocation_request_version=fake_alloc_version)
fake_selection2 = objects.Selection(service_host="host2", nodename="node2",
        cell_uuid=uuids.cell, limits=None, allocation_request=fake_alloc_json2,
        allocation_request_version=fake_alloc_version)
fake_selection3 = objects.Selection(service_host="host3", nodename="node3",
        cell_uuid=uuids.cell, limits=None, allocation_request=fake_alloc_json3,
        allocation_request_version=fake_alloc_version)
# Host lists as returned by the scheduler: one inner list of Selections per
# requested instance (selected host first, then alternates).
fake_host_lists1 = [[fake_selection1]]
fake_host_lists2 = [[fake_selection1], [fake_selection2]]
fake_host_lists_alt = [[fake_selection1, fake_selection2, fake_selection3]]
class FakeContext(context.RequestContext):
    """RequestContext whose elevated() is memoized.

    Repeated calls return the identical object, so tests can assert that a
    specific elevated context was propagated through the code under test.
    """

    def elevated(self):
        """Return a consistent elevated context so we can detect it."""
        try:
            return self._elevated
        except AttributeError:
            self._elevated = super(FakeContext, self).elevated()
            return self._elevated
class _BaseTestCase(object):
    """Mixin providing common setup for the conductor test cases below.

    Subclasses combine this with test.TestCase and choose which conductor
    entry point (manager, RPC API or API facade) self.conductor points at.
    """

    def setUp(self):
        super(_BaseTestCase, self).setUp()
        self.user_id = fakes.FAKE_USER_ID
        self.project_id = fakes.FAKE_PROJECT_ID
        self.context = FakeContext(self.user_id, self.project_id)

        self.useFixture(fixtures.NotificationFixture(self))

        # Make any context deserialized from an RPC message be our
        # FakeContext so identity checks against self.context still hold on
        # the "server" side of a call.
        self.stub_out('nova.rpc.RequestContextSerializer.deserialize_context',
                      lambda *args, **kwargs: self.context)
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
class ConductorTestCase(_BaseTestCase, test.TestCase):
    """Conductor Manager Tests."""

    def setUp(self):
        super(ConductorTestCase, self).setUp()
        # Exercise the manager directly, with no RPC layer in between.
        self.conductor = conductor_manager.ConductorManager()
        self.conductor_manager = self.conductor

    def _test_object_action(self, is_classmethod, raise_exception):
        # Helper: dispatch an instance method ('foo') or classmethod ('bar')
        # of a freshly registered object through the conductor's object
        # dispatch machinery and verify the result comes back (or, when
        # raise_exception is set, that the exception propagates - the
        # callers assert it is wrapped as messaging.ExpectedException).
        class TestObject(obj_base.NovaObject):
            def foo(self, raise_exception=False):
                if raise_exception:
                    raise Exception('test')
                else:
                    return 'test'

            @classmethod
            def bar(cls, context, raise_exception=False):
                if raise_exception:
                    raise Exception('test')
                else:
                    return 'test'

        obj_base.NovaObjectRegistry.register(TestObject)

        obj = TestObject()
        # NOTE(danms): After a trip over RPC, any tuple will be a list,
        # so use a list here to make sure we can handle it
        fake_args = []
        if is_classmethod:
            versions = {'TestObject': '1.0'}
            result = self.conductor.object_class_action_versions(
                self.context, TestObject.obj_name(), 'bar', versions,
                fake_args, {'raise_exception': raise_exception})
        else:
            updates, result = self.conductor.object_action(
                self.context, obj, 'foo', fake_args,
                {'raise_exception': raise_exception})
        self.assertEqual('test', result)

    def test_object_action(self):
        self._test_object_action(False, False)

    def test_object_action_on_raise(self):
        # Exceptions raised inside the remoted method must surface as
        # messaging.ExpectedException on the conductor side.
        self.assertRaises(messaging.ExpectedException,
                          self._test_object_action, False, True)

    def test_object_class_action(self):
        self._test_object_action(True, False)

    def test_object_class_action_on_raise(self):
        self.assertRaises(messaging.ExpectedException,
                          self._test_object_action, True, True)

    def test_object_action_copies_object(self):
        class TestObject(obj_base.NovaObject):
            fields = {'dict': fields.DictOfStringsField()}

            def touch_dict(self):
                self.dict['foo'] = 'bar'
                self.obj_reset_changes()

        obj_base.NovaObjectRegistry.register(TestObject)

        obj = TestObject()
        obj.dict = {}
        obj.obj_reset_changes()
        updates, result = self.conductor.object_action(
            self.context, obj, 'touch_dict', tuple(), {})
        # NOTE(danms): If conductor did not properly copy the object, then
        # the new and reference copies of the nested dict object will be
        # the same, and thus 'dict' will not be reported as changed
        self.assertIn('dict', updates)
        self.assertEqual({'foo': 'bar'}, updates['dict'])

    def test_object_class_action_versions(self):
        @obj_base.NovaObjectRegistry.register
        class TestObject(obj_base.NovaObject):
            VERSION = '1.10'

            @classmethod
            def foo(cls, context):
                return cls()

        versions = {
            'TestObject': '1.2',
            'OtherObj': '1.0',
        }
        with mock.patch.object(self.conductor_manager,
                               '_object_dispatch') as m:
            m.return_value = TestObject()
            m.return_value.obj_to_primitive = mock.MagicMock()
            self.conductor.object_class_action_versions(
                self.context, TestObject.obj_name(), 'foo', versions,
                tuple(), {})
            # The returned object must be backported to the version the
            # caller asked for in its version manifest.
            m.return_value.obj_to_primitive.assert_called_once_with(
                target_version='1.2', version_manifest=versions)

    def test_object_class_action_versions_old_object(self):
        # Make sure we return older than requested objects unmodified,
        # see bug #1596119.
        @obj_base.NovaObjectRegistry.register
        class TestObject(obj_base.NovaObject):
            VERSION = '1.10'

            @classmethod
            def foo(cls, context):
                return cls()

        versions = {
            'TestObject': '1.10',
            'OtherObj': '1.0',
        }
        with mock.patch.object(self.conductor_manager,
                               '_object_dispatch') as m:
            m.return_value = TestObject()
            m.return_value.VERSION = '1.9'
            m.return_value.obj_to_primitive = mock.MagicMock()
            obj = self.conductor.object_class_action_versions(
                self.context, TestObject.obj_name(), 'foo', versions,
                tuple(), {})
            # No backport attempted; the 1.9 object is passed through.
            self.assertFalse(m.return_value.obj_to_primitive.called)
            self.assertEqual('1.9', obj.VERSION)

    def test_object_class_action_versions_major_version_diff(self):
        # A major-version mismatch between the returned object and the
        # requested target cannot be reconciled by backporting.
        @obj_base.NovaObjectRegistry.register
        class TestObject(obj_base.NovaObject):
            VERSION = '2.10'

            @classmethod
            def foo(cls, context):
                return cls()

        versions = {
            'TestObject': '2.10',
            'OtherObj': '1.0',
        }
        with mock.patch.object(self.conductor_manager,
                               '_object_dispatch') as m:
            m.return_value = TestObject()
            m.return_value.VERSION = '1.9'
            self.assertRaises(
                ovo_exc.InvalidTargetVersion,
                self.conductor.object_class_action_versions,
                self.context, TestObject.obj_name(), 'foo', versions,
                tuple(), {})

    def test_reset(self):
        # reset() must clear the cached minimum service version.
        with mock.patch.object(objects.Service, 'clear_min_version_cache'
                               ) as mock_clear_cache:
            self.conductor.reset()
            mock_clear_cache.assert_called_once_with()

    def test_provider_fw_rule_get_all(self):
        # This RPC method always returns an empty list.
        result = self.conductor.provider_fw_rule_get_all(self.context)
        self.assertEqual([], result)

    def test_conductor_host(self):
        self.assertTrue(hasattr(self.conductor_manager, 'host'))
        self.assertEqual(CONF.host, self.conductor_manager.host)
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
    """Conductor RPC API Tests."""

    def setUp(self):
        super(ConductorRPCAPITestCase, self).setUp()
        # Run a real conductor service and point self.conductor at the RPC
        # API, so the shared _BaseTestCase tests exercise the RPC layer.
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor_manager = self.conductor_service.manager
        self.conductor = conductor_rpcapi.ConductorAPI()
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
    """Conductor API Tests."""

    def setUp(self):
        super(ConductorAPITestCase, self).setUp()
        # Exercise the top-level conductor API facade, backed by a running
        # conductor service.
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor = conductor_api.API()
        self.conductor_manager = self.conductor_service.manager

    def test_wait_until_ready(self):
        # Stub ping() to time out on the first 14 attempts, recording the
        # timeout value used on each attempt.
        timeouts = []
        calls = dict(count=0)

        def fake_ping(self, context, message, timeout):
            timeouts.append(timeout)
            calls['count'] += 1
            if calls['count'] < 15:
                raise messaging.MessagingTimeout("fake")

        self.stub_out('nova.baserpc.BaseAPI.ping', fake_ping)

        self.conductor.wait_until_ready(self.context)

        # The first batch of attempts uses a 10 second timeout, after which
        # wait_until_ready falls back to waiting with timeout=None.
        self.assertEqual(timeouts.count(10), 10)
        self.assertIn(None, timeouts)
class _BaseTaskTestCase(object):
    def setUp(self):
        """Common setup for the conductor task (ComputeTaskManager) tests."""
        super(_BaseTaskTestCase, self).setUp()
        self.user_id = fakes.FAKE_USER_ID
        self.project_id = fakes.FAKE_PROJECT_ID
        self.context = FakeContext(self.user_id, self.project_id)
        fake_server_actions.stub_out_action_events(self)
        self.request_spec = objects.RequestSpec()

        # Any context deserialized from RPC becomes our FakeContext, so
        # identity assertions against self.context hold across the
        # conductor boundary.
        self.stub_out('nova.rpc.RequestContextSerializer.deserialize_context',
                      lambda *args, **kwargs: self.context)
        self.useFixture(fixtures.SpawnIsSynchronousFixture())

        # These RequestSpec healing helpers hit external services; mock
        # them out for the whole test and expose the mocks for assertions.
        _p = mock.patch('nova.compute.utils.heal_reqspec_is_bfv')
        self.heal_reqspec_is_bfv_mock = _p.start()
        self.addCleanup(_p.stop)

        _p = mock.patch('nova.objects.RequestSpec.ensure_network_information')
        self.ensure_network_information_mock = _p.start()
        self.addCleanup(_p.stop)
def _prepare_rebuild_args(self, update_args=None):
# Args that don't get passed in to the method but do get passed to RPC
migration = update_args and update_args.pop('migration', None)
node = update_args and update_args.pop('node', None)
limits = update_args and update_args.pop('limits', None)
rebuild_args = {'new_pass': 'admin_password',
'injected_files': 'files_to_inject',
'image_ref': uuids.image_ref,
'orig_image_ref': uuids.orig_image_ref,
'orig_sys_metadata': 'orig_sys_meta',
'bdms': {},
'recreate': False,
'on_shared_storage': False,
'preserve_ephemeral': False,
'host': 'compute-host',
'request_spec': None}
if update_args:
rebuild_args.update(update_args)
compute_rebuild_args = copy.deepcopy(rebuild_args)
compute_rebuild_args['migration'] = migration
compute_rebuild_args['node'] = node
compute_rebuild_args['limits'] = limits
compute_rebuild_args['accel_uuids'] = []
return rebuild_args, compute_rebuild_args
    @mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid')
    @mock.patch.object(objects.RequestSpec, 'save')
    @mock.patch.object(migrate.MigrationTask, 'execute')
    @mock.patch.object(utils, 'get_image_from_system_metadata')
    @mock.patch.object(objects.RequestSpec, 'from_components')
    def _test_cold_migrate(self, spec_from_components, get_image_from_metadata,
                           migration_task_execute, spec_save, get_im,
                           clean_shutdown=True):
        # Helper: drive a cold migrate through whichever conductor entry
        # point self.conductor is (API facade vs manager) and verify the
        # MigrationTask was executed and the request spec saved.
        get_im.return_value.cell_mapping = (
            objects.CellMappingList.get_all(self.context)[0])
        get_image_from_metadata.return_value = 'image'
        inst = fake_instance.fake_db_instance(image_ref='image_ref')
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst, [])
        inst_obj.system_metadata = {'image_hw_disk_bus': 'scsi'}
        flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
        flavor.extra_specs = {'extra_specs': 'fake'}
        inst_obj.flavor = flavor

        fake_spec = fake_request_spec.fake_spec_obj()
        spec_from_components.return_value = fake_spec

        scheduler_hint = {'filter_properties': {}}

        if isinstance(self.conductor, conductor_api.ComputeTaskAPI):
            # The API method is actually 'resize_instance'. It gets
            # converted into 'migrate_server' when doing RPC.
            self.conductor.resize_instance(
                self.context, inst_obj, scheduler_hint, flavor, [],
                clean_shutdown, host_list=None)
        else:
            self.conductor.migrate_server(
                self.context, inst_obj, scheduler_hint,
                False, False, flavor, None, None, [],
                clean_shutdown)

        get_image_from_metadata.assert_called_once_with(
            inst_obj.system_metadata)
        migration_task_execute.assert_called_once_with()
        spec_save.assert_called_once_with()
    def test_cold_migrate(self):
        # Default path: clean_shutdown=True.
        self._test_cold_migrate()
    def test_cold_migrate_forced_shutdown(self):
        # Same flow with clean_shutdown=False.
        self._test_cold_migrate(clean_shutdown=False)
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_create_and_bind_arqs')
    @mock.patch.object(compute_rpcapi.ComputeAPI, 'build_and_run_instance')
    @mock.patch.object(db, 'block_device_mapping_get_all_by_instance',
                       return_value=[])
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_schedule_instances')
    @mock.patch('nova.objects.BuildRequest.get_by_instance_uuid')
    @mock.patch('nova.availability_zones.get_host_availability_zone')
    @mock.patch('nova.objects.Instance.save')
    @mock.patch.object(objects.RequestSpec, 'from_primitives')
    def test_build_instances(self, mock_fp, mock_save, mock_getaz,
                             mock_buildreq, mock_schedule, mock_bdm,
                             mock_build, mock_create_bind_arqs):
        """Tests creating two instances and the scheduler returns a unique
        host/node combo for each instance.
        """
        fake_spec = objects.RequestSpec()
        mock_fp.return_value = fake_spec
        instance_type = objects.Flavor.get_by_name(self.context, 'm1.small')
        # NOTE(danms): Avoid datetime timezone issues with converted flavors
        instance_type.created_at = None
        instances = [objects.Instance(context=self.context,
                                      id=i,
                                      uuid=uuids.fake,
                                      flavor=instance_type) for i in range(2)]
        instance_type_p = obj_base.obj_to_primitive(instance_type)
        instance_properties = obj_base.obj_to_primitive(instances[0])
        instance_properties['system_metadata'] = flavors.save_flavor_info(
            {}, instance_type)
        # The legacy request spec dict we expect from_primitives to be
        # called with.
        spec = {'image': {'fake_data': 'should_pass_silently'},
                'instance_properties': instance_properties,
                'instance_type': instance_type_p,
                'num_instances': 2}
        filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}}
        sched_return = copy.deepcopy(fake_host_lists2)
        mock_schedule.return_value = sched_return
        # Per-instance filter properties after the scheduler selected
        # host1/node1 and host2/node2 respectively.
        filter_properties2 = {'retry': {'num_attempts': 1,
                                        'hosts': [['host1', 'node1']]},
                              'limits': {}}
        filter_properties3 = {'limits': {},
                              'retry': {'num_attempts': 1,
                                        'hosts': [['host2', 'node2']]}}
        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(fixtures.CastAsCallFixture(self))

        mock_getaz.return_value = 'myaz'
        mock_create_bind_arqs.return_value = mock.sentinel

        self.conductor.build_instances(self.context,
                instances=instances,
                image={'fake_data': 'should_pass_silently'},
                filter_properties={},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping='block_device_mapping',
                legacy_bdm=False, host_lists=None)

        mock_getaz.assert_has_calls([
            mock.call(self.context, 'host1'),
            mock.call(self.context, 'host2')])
        # A RequestSpec is built from primitives once before calling the
        # scheduler to get hosts and then once per instance we're building.
        mock_fp.assert_has_calls([
            mock.call(self.context, spec, filter_properties),
            mock.call(self.context, spec, filter_properties2),
            mock.call(self.context, spec, filter_properties3)])
        mock_schedule.assert_called_once_with(
            self.context, fake_spec, [uuids.fake, uuids.fake],
            return_alternates=True)
        mock_bdm.assert_has_calls([mock.call(self.context, instances[0].uuid),
                                   mock.call(self.context, instances[1].uuid)])
        # Each instance is built on its own host with its own alternate
        # list and the accel_uuids returned by _create_and_bind_arqs.
        mock_build.assert_has_calls([
            mock.call(self.context, instance=mock.ANY, host='host1',
                      image={'fake_data': 'should_pass_silently'},
                      request_spec=fake_spec,
                      filter_properties=filter_properties2,
                      admin_password='admin_password',
                      injected_files='injected_files',
                      requested_networks=None,
                      security_groups='security_groups',
                      block_device_mapping=mock.ANY,
                      node='node1', limits=None, host_list=sched_return[0],
                      accel_uuids=mock.sentinel),
            mock.call(self.context, instance=mock.ANY, host='host2',
                      image={'fake_data': 'should_pass_silently'},
                      request_spec=fake_spec,
                      filter_properties=filter_properties3,
                      admin_password='admin_password',
                      injected_files='injected_files',
                      requested_networks=None,
                      security_groups='security_groups',
                      block_device_mapping=mock.ANY,
                      node='node2', limits=None, host_list=sched_return[1],
                      accel_uuids=mock.sentinel)])
        mock_create_bind_arqs.assert_has_calls([
            mock.call(self.context, instances[0].uuid,
                      instances[0].flavor.extra_specs, 'node1', mock.ANY),
            mock.call(self.context, instances[1].uuid,
                      instances[1].flavor.extra_specs, 'node2', mock.ANY),
            ])
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_cleanup_when_reschedule_fails')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_create_and_bind_arqs')
    @mock.patch.object(compute_rpcapi.ComputeAPI, 'build_and_run_instance')
    @mock.patch.object(db, 'block_device_mapping_get_all_by_instance',
                       return_value=[])
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_schedule_instances')
    @mock.patch('nova.objects.BuildRequest.get_by_instance_uuid')
    @mock.patch('nova.availability_zones.get_host_availability_zone')
    @mock.patch('nova.objects.Instance.save')
    @mock.patch.object(objects.RequestSpec, 'from_primitives')
    def test_build_instances_arq_failure(self, mock_fp, mock_save, mock_getaz,
                                         mock_buildreq, mock_schedule,
                                         mock_bdm, mock_build,
                                         mock_create_bind_arqs, mock_cleanup):
        """If _create_and_bind_arqs throws an exception,
        _destroy_build_request must be called for each instance.
        """
        fake_spec = objects.RequestSpec()
        mock_fp.return_value = fake_spec
        instance_type = objects.Flavor.get_by_name(self.context, 'm1.small')
        # NOTE(danms): Avoid datetime timezone issues with converted flavors
        instance_type.created_at = None
        instances = [objects.Instance(context=self.context,
                                      id=i,
                                      uuid=uuids.fake,
                                      flavor=instance_type) for i in range(2)]
        instance_properties = obj_base.obj_to_primitive(instances[0])
        instance_properties['system_metadata'] = flavors.save_flavor_info(
            {}, instance_type)
        sched_return = copy.deepcopy(fake_host_lists2)
        mock_schedule.return_value = sched_return
        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(fixtures.CastAsCallFixture(self))

        mock_getaz.return_value = 'myaz'
        # Make accelerator request binding fail for every instance.
        mock_create_bind_arqs.side_effect = (
            exc.AcceleratorRequestBindingFailed(arqs=[], msg=''))

        self.conductor.build_instances(self.context,
                instances=instances,
                image={'fake_data': 'should_pass_silently'},
                filter_properties={},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping='block_device_mapping',
                legacy_bdm=False, host_lists=None)

        mock_create_bind_arqs.assert_has_calls([
            mock.call(self.context, instances[0].uuid,
                      instances[0].flavor.extra_specs, 'node1', mock.ANY),
            mock.call(self.context, instances[1].uuid,
                      instances[1].flavor.extra_specs, 'node2', mock.ANY),
            ])
        # Comparing instances fails because the instance objects have changed
        # in the above flow. So, we compare the fields instead.
        mock_cleanup.assert_has_calls([
            mock.call(self.context, test.MatchType(objects.Instance),
                      test.MatchType(exc.AcceleratorRequestBindingFailed),
                      test.MatchType(dict), None),
            mock.call(self.context, test.MatchType(objects.Instance),
                      test.MatchType(exc.AcceleratorRequestBindingFailed),
                      test.MatchType(dict), None),
            ])
        call_list = mock_cleanup.call_args_list
        for idx, instance in enumerate(instances):
            actual_inst = call_list[idx][0][1]
            self.assertEqual(actual_inst['uuid'], instance['uuid'])
            self.assertEqual(actual_inst['flavor']['extra_specs'], {})
    @mock.patch.object(scheduler_utils, 'build_request_spec')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_cleanup_allocated_networks')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_destroy_build_request')
    def test_build_instances_scheduler_failure(
            self, dest_build_req_mock, cleanup_mock, sd_mock, state_mock,
            sig_mock, bs_mock):
        # When select_destinations raises NoValidHost, each instance must
        # be set to ERROR, have its networks cleaned up and its build
        # request destroyed - and a missing build request is tolerated.
        instances = [fake_instance.fake_instance_obj(self.context)
                     for i in range(2)]
        image = {'fake-data': 'should_pass_silently'}
        spec = {'fake': 'specs',
                'instance_properties': instances[0]}
        exception = exc.NoValidHost(reason='fake-reason')

        # First instance's build request is already gone; second is fine.
        dest_build_req_mock.side_effect = (
            exc.BuildRequestNotFound(uuid='fake'),
            None)
        bs_mock.return_value = spec
        sd_mock.side_effect = exception
        updates = {'vm_state': vm_states.ERROR, 'task_state': None}

        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(fixtures.CastAsCallFixture(self))

        self.conductor.build_instances(
            self.context,
            instances=instances,
            image=image,
            filter_properties={},
            admin_password='admin_password',
            injected_files='injected_files',
            requested_networks=None,
            security_groups='security_groups',
            block_device_mapping='block_device_mapping',
            legacy_bdm=False)

        set_state_calls = []
        cleanup_network_calls = []
        dest_build_req_calls = []
        for instance in instances:
            set_state_calls.append(mock.call(
                self.context, instance.uuid, 'compute_task', 'build_instances',
                updates, exception, spec))
            cleanup_network_calls.append(mock.call(
                self.context, mock.ANY, None))
            dest_build_req_calls.append(
                mock.call(self.context, test.MatchType(type(instance))))
        state_mock.assert_has_calls(set_state_calls)
        cleanup_mock.assert_has_calls(cleanup_network_calls)
        dest_build_req_mock.assert_has_calls(dest_build_req_calls)
    def test_build_instances_retry_exceeded(self):
        # populate_retry() raising MaxRetriesExceeded must put the instance
        # into ERROR, notify, and clean up its allocated networks.
        instances = [fake_instance.fake_instance_obj(self.context)]
        image = {'fake-data': 'should_pass_silently'}
        filter_properties = {'retry': {'num_attempts': 10, 'hosts': []}}
        updates = {'vm_state': vm_states.ERROR, 'task_state': None}

        @mock.patch.object(conductor_manager.ComputeTaskManager,
                           '_cleanup_allocated_networks')
        @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify')
        @mock.patch.object(scheduler_utils, 'build_request_spec')
        @mock.patch.object(scheduler_utils, 'populate_retry')
        def _test(populate_retry, build_spec,
                  set_vm_state_and_notify, cleanup_mock):
            # build_instances() is a cast, we need to wait for it to
            # complete
            self.useFixture(fixtures.CastAsCallFixture(self))

            populate_retry.side_effect = exc.MaxRetriesExceeded(
                reason="Too many try")

            self.conductor.build_instances(
                self.context,
                instances=instances,
                image=image,
                filter_properties=filter_properties,
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping='block_device_mapping',
                legacy_bdm=False)

            populate_retry.assert_called_once_with(
                filter_properties, instances[0].uuid)
            set_vm_state_and_notify.assert_called_once_with(
                self.context, instances[0].uuid, 'compute_task',
                'build_instances', updates, mock.ANY, build_spec.return_value)
            cleanup_mock.assert_called_once_with(self.context, mock.ANY, None)

        _test()
    @mock.patch.object(scheduler_utils, 'build_request_spec')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_set_vm_state_and_notify')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_cleanup_allocated_networks')
    def test_build_instances_scheduler_group_failure(
            self, cleanup_mock, state_mock, sig_mock, bs_mock):
        # setup_instance_group() raising UnsupportedPolicyException must
        # set every instance to ERROR and clean up its networks.
        instances = [fake_instance.fake_instance_obj(self.context)
                     for i in range(2)]
        image = {'fake-data': 'should_pass_silently'}
        spec = {'fake': 'specs',
                'instance_properties': instances[0]}

        bs_mock.return_value = spec
        exception = exc.UnsupportedPolicyException(reason='fake-reason')
        sig_mock.side_effect = exception
        updates = {'vm_state': vm_states.ERROR, 'task_state': None}

        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(fixtures.CastAsCallFixture(self))

        self.conductor.build_instances(
            context=self.context,
            instances=instances,
            image=image,
            filter_properties={},
            admin_password='admin_password',
            injected_files='injected_files',
            requested_networks=None,
            security_groups='security_groups',
            block_device_mapping='block_device_mapping',
            legacy_bdm=False)

        set_state_calls = []
        cleanup_network_calls = []
        for instance in instances:
            set_state_calls.append(mock.call(
                self.context, instance.uuid, 'build_instances', updates,
                exception, spec))
            cleanup_network_calls.append(mock.call(
                self.context, mock.ANY, None))
        state_mock.assert_has_calls(set_state_calls)
        cleanup_mock.assert_has_calls(cleanup_network_calls)
@mock.patch.object(objects.BuildRequest, 'get_by_instance_uuid')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid',
side_effect=exc.InstanceMappingNotFound(uuid='fake'))
@mock.patch.object(objects.HostMapping, 'get_by_host')
@mock.patch.object(query.SchedulerQueryClient, 'select_destinations')
@mock.patch.object(conductor_manager.ComputeTaskManager,
'_set_vm_state_and_notify')
def test_build_instances_no_instance_mapping(self, _mock_set_state,
mock_select_dests, mock_get_by_host, mock_get_inst_map_by_uuid,
_mock_save, _mock_buildreq):
mock_select_dests.return_value = [[fake_selection1], [fake_selection2]]
instances = [fake_instance.fake_instance_obj(self.context)
for i in range(2)]
image = {'fake-data': 'should_pass_silently'}
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(fixtures.CastAsCallFixture(self))
with mock.patch.object(self.conductor_manager.compute_rpcapi,
'build_and_run_instance'):
self.conductor.build_instances(
context=self.context,
instances=instances,
image=image,
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
mock_get_inst_map_by_uuid.assert_has_calls([
mock.call(self.context, instances[0].uuid),
mock.call(self.context, instances[1].uuid)])
self.assertFalse(mock_get_by_host.called)
@mock.patch('nova.compute.utils.notify_about_compute_task_error')
@mock.patch.object(objects.Instance, 'save')
def test_build_instances_exhaust_host_list(self, _mock_save, mock_notify):
# A list of three alternate hosts for one instance
host_lists = copy.deepcopy(fake_host_lists_alt)
instance = fake_instance.fake_instance_obj(self.context)
image = {'fake-data': 'should_pass_silently'}
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(fixtures.CastAsCallFixture(self))
self.conductor.build_instances(
context=self.context,
instances=[instance], image=image,
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping=None,
legacy_bdm=None,
host_lists=host_lists
)
# Since claim_resources() is mocked to always return False, we will run
# out of alternate hosts, and complain about MaxRetriesExceeded.
mock_notify.assert_called_once_with(
self.context, 'build_instances',
instance.uuid, test.MatchType(dict), 'error',
test.MatchType(exc.MaxRetriesExceeded))
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_destroy_build_request')
    @mock.patch.object(conductor_manager.LOG, 'debug')
    @mock.patch("nova.scheduler.utils.claim_resources", return_value=True)
    @mock.patch.object(objects.Instance, 'save')
    def test_build_instances_logs_selected_and_alts(self, _mock_save,
                                                    mock_claim, mock_debug,
                                                    mock_destroy):
        # A list of three alternate hosts for one instance
        host_lists = copy.deepcopy(fake_host_lists_alt)
        expected_host = host_lists[0][0]
        expected_alts = host_lists[0][1:]
        instance = fake_instance.fake_instance_obj(self.context)
        image = {'fake-data': 'should_pass_silently'}

        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(fixtures.CastAsCallFixture(self))
        with mock.patch.object(self.conductor_manager.compute_rpcapi,
                               'build_and_run_instance'):
            self.conductor.build_instances(context=self.context,
                    instances=[instance], image=image, filter_properties={},
                    admin_password='admin_password',
                    injected_files='injected_files', requested_networks=None,
                    security_groups='security_groups',
                    block_device_mapping=None, legacy_bdm=None,
                    host_lists=host_lists)

        # The last LOG.debug call should record the selected host name and the
        # list of alternates.
        last_call = mock_debug.call_args_list[-1][0]
        self.assertIn(expected_host.service_host, last_call)
        expected_alt_hosts = [(alt.service_host, alt.nodename)
                              for alt in expected_alts]
        self.assertIn(expected_alt_hosts, last_call)
    @mock.patch.object(objects.BuildRequest, 'get_by_instance_uuid')
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid')
    @mock.patch.object(objects.HostMapping, 'get_by_host',
                       side_effect=exc.HostMappingNotFound(name='fake'))
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_set_vm_state_and_notify')
    def test_build_instances_no_host_mapping(self, _mock_set_state,
            mock_select_dests, mock_get_by_host, mock_get_inst_map_by_uuid,
            _mock_save, mock_buildreq):
        # When the selected host has no HostMapping, each instance's
        # instance mapping must be destroyed.
        mock_select_dests.return_value = [[fake_selection1], [fake_selection2]]
        num_instances = 2
        instances = [fake_instance.fake_instance_obj(self.context)
                     for i in range(num_instances)]
        inst_mapping_mocks = [mock.Mock() for i in range(num_instances)]
        mock_get_inst_map_by_uuid.side_effect = inst_mapping_mocks
        image = {'fake-data': 'should_pass_silently'}

        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(fixtures.CastAsCallFixture(self))
        with mock.patch.object(self.conductor_manager.compute_rpcapi,
                               'build_and_run_instance'):
            self.conductor.build_instances(
                context=self.context,
                instances=instances,
                image=image,
                filter_properties={},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping='block_device_mapping',
                legacy_bdm=False)

        for instance in instances:
            mock_get_inst_map_by_uuid.assert_any_call(self.context,
                                                      instance.uuid)
        for inst_mapping in inst_mapping_mocks:
            inst_mapping.destroy.assert_called_once_with()

        mock_get_by_host.assert_has_calls([mock.call(self.context, 'host1'),
                                           mock.call(self.context, 'host2')])
    @mock.patch.object(objects.BuildRequest, 'get_by_instance_uuid')
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid')
    @mock.patch.object(objects.HostMapping, 'get_by_host')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_set_vm_state_and_notify')
    def test_build_instances_update_instance_mapping(self, _mock_set_state,
            mock_select_dests, mock_get_by_host, mock_get_inst_map_by_uuid,
            _mock_save, _mock_buildreq):
        # Each instance mapping must be pointed at the cell of the host the
        # scheduler selected, and saved.
        mock_select_dests.return_value = [[fake_selection1], [fake_selection2]]
        mock_get_by_host.side_effect = [
            objects.HostMapping(cell_mapping=objects.CellMapping(id=1)),
            objects.HostMapping(cell_mapping=objects.CellMapping(id=2))]
        num_instances = 2
        instances = [fake_instance.fake_instance_obj(self.context)
                     for i in range(num_instances)]
        inst_mapping_mocks = [mock.Mock() for i in range(num_instances)]
        mock_get_inst_map_by_uuid.side_effect = inst_mapping_mocks
        image = {'fake-data': 'should_pass_silently'}

        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(fixtures.CastAsCallFixture(self))
        with mock.patch.object(self.conductor_manager.compute_rpcapi,
                               'build_and_run_instance'):
            self.conductor.build_instances(
                context=self.context,
                instances=instances,
                image=image,
                filter_properties={},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping='block_device_mapping',
                legacy_bdm=False)

        for instance in instances:
            mock_get_inst_map_by_uuid.assert_any_call(self.context,
                                                      instance.uuid)
        for inst_mapping in inst_mapping_mocks:
            inst_mapping.save.assert_called_once_with()

        self.assertEqual(1, inst_mapping_mocks[0].cell_mapping.id)
        self.assertEqual(2, inst_mapping_mocks[1].cell_mapping.id)
        mock_get_by_host.assert_has_calls([mock.call(self.context, 'host1'),
                                           mock.call(self.context, 'host2')])
@mock.patch.object(objects.Instance, 'save', new=mock.MagicMock())
@mock.patch.object(objects.BuildRequest, 'get_by_instance_uuid')
@mock.patch.object(query.SchedulerQueryClient, 'select_destinations')
@mock.patch.object(conductor_manager.ComputeTaskManager,
'_set_vm_state_and_notify', new=mock.MagicMock())
def test_build_instances_destroy_build_request(self, mock_select_dests,
mock_build_req_get):
mock_select_dests.return_value = [[fake_selection1], [fake_selection2]]
num_instances = 2
instances = [fake_instance.fake_instance_obj(self.context)
for i in range(num_instances)]
build_req_mocks = [mock.Mock() for i in range(num_instances)]
mock_build_req_get.side_effect = build_req_mocks
image = {'fake-data': 'should_pass_silently'}
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(fixtures.CastAsCallFixture(self))
@mock.patch.object(self.conductor_manager.compute_rpcapi,
'build_and_run_instance', new=mock.MagicMock())
@mock.patch.object(self.conductor_manager,
'_populate_instance_mapping', new=mock.MagicMock())
def do_test():
self.conductor.build_instances(
context=self.context,
instances=instances,
image=image,
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False,
host_lists=None)
do_test()
for build_req in build_req_mocks:
build_req.destroy.assert_called_once_with()
    @mock.patch.object(objects.Instance, 'save', new=mock.MagicMock())
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_set_vm_state_and_notify', new=mock.MagicMock())
    def test_build_instances_reschedule_ignores_build_request(self,
            mock_select_dests):
        """A reschedule must proceed even though the BuildRequest is gone.

        The retry dict with num_attempts=1 in filter_properties marks this
        call as a reschedule; by then the API has normally deleted the
        BuildRequest already, so BuildRequestNotFound must be tolerated.
        """
        # This test calls build_instances as if it was a reschedule. This means
        # that the exc.BuildRequestNotFound() exception raised by
        # conductor_manager._destroy_build_request() should not cause the
        # build to stop.
        mock_select_dests.return_value = [[fake_selection1]]
        instance = fake_instance.fake_instance_obj(self.context)
        image = {'fake-data': 'should_pass_silently'}
        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(fixtures.CastAsCallFixture(self))

        @mock.patch.object(self.conductor_manager.compute_rpcapi,
                           'build_and_run_instance')
        @mock.patch.object(self.conductor_manager,
                           '_populate_instance_mapping')
        @mock.patch.object(self.conductor_manager,
                           '_destroy_build_request',
                           side_effect=exc.BuildRequestNotFound(uuid='fake'))
        def do_test(mock_destroy_build_req, mock_pop_inst_map,
                    mock_build_and_run):
            self.conductor.build_instances(
                context=self.context,
                instances=[instance],
                image=image,
                filter_properties={'retry': {'num_attempts': 1, 'hosts': []}},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping='block_device_mapping',
                legacy_bdm=False, host_lists=None)
            # The first host of the freshly selected host list is consumed
            # for this attempt; the rest is passed on as alternates.
            expected_build_run_host_list = copy.copy(fake_host_lists1[0])
            if expected_build_run_host_list:
                expected_build_run_host_list.pop(0)
            mock_build_and_run.assert_called_once_with(
                self.context,
                instance=mock.ANY,
                host='host1',
                image=image,
                request_spec=mock.ANY,
                # The retry count is bumped and the chosen host recorded.
                filter_properties={'retry': {'num_attempts': 2,
                                             'hosts': [['host1', 'node1']]},
                                   'limits': {}},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping=test.MatchType(
                    objects.BlockDeviceMappingList),
                node='node1', limits=None,
                host_list=expected_build_run_host_list,
                accel_uuids=[])
            # On a reschedule neither the instance mapping nor the build
            # request are touched again.
            mock_pop_inst_map.assert_not_called()
            mock_destroy_build_req.assert_not_called()
        do_test()
    @mock.patch.object(objects.Instance, 'save', new=mock.MagicMock())
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_set_vm_state_and_notify', new=mock.MagicMock())
    def test_build_instances_reschedule_recalculates_provider_mapping(self,
            mock_select_dests):
        """On a reschedule with pre-supplied alternates the conductor claims
        resources on the alternate and re-runs fill_provider_mapping so the
        request spec's requested_resources map to the new host's providers.
        """
        rg1 = objects.RequestGroup(resources={"CUSTOM_FOO": 1})
        request_spec = objects.RequestSpec(requested_resources=[rg1])
        mock_select_dests.return_value = [[fake_selection1]]
        instance = fake_instance.fake_instance_obj(self.context)
        image = {'fake-data': 'should_pass_silently'}
        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(fixtures.CastAsCallFixture(self))

        @mock.patch('nova.scheduler.utils.'
                    'fill_provider_mapping')
        @mock.patch('nova.scheduler.utils.claim_resources')
        @mock.patch('nova.objects.request_spec.RequestSpec.from_primitives',
                    return_value=request_spec)
        @mock.patch.object(self.conductor_manager.compute_rpcapi,
                           'build_and_run_instance')
        @mock.patch.object(self.conductor_manager,
                           '_populate_instance_mapping')
        @mock.patch.object(self.conductor_manager, '_destroy_build_request')
        def do_test(mock_destroy_build_req, mock_pop_inst_map,
                    mock_build_and_run, mock_request_spec_from_primitives,
                    mock_claim, mock_rp_mapping):
            self.conductor.build_instances(
                context=self.context,
                instances=[instance],
                image=image,
                filter_properties={'retry': {'num_attempts': 1, 'hosts': []}},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping='block_device_mapping',
                legacy_bdm=False,
                # deepcopy: build_instances pops hosts from the list in place
                host_lists=copy.deepcopy(fake_host_lists1),
                request_spec=request_spec)
            # The first alternate is consumed for this attempt.
            expected_build_run_host_list = copy.copy(fake_host_lists1[0])
            if expected_build_run_host_list:
                expected_build_run_host_list.pop(0)
            mock_build_and_run.assert_called_once_with(
                self.context,
                instance=mock.ANY,
                host='host1',
                image=image,
                request_spec=request_spec,
                filter_properties={'retry': {'num_attempts': 2,
                                             'hosts': [['host1', 'node1']]},
                                   'limits': {}},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping=test.MatchType(
                    objects.BlockDeviceMappingList),
                node='node1',
                limits=None,
                host_list=expected_build_run_host_list,
                accel_uuids=[])
            # fill_provider_mapping ran exactly once, against a spec that
            # still carries the original requested resource groups.
            mock_rp_mapping.assert_called_once_with(
                test.MatchType(objects.RequestSpec),
                test.MatchType(objects.Selection))
            actual_request_spec = mock_rp_mapping.mock_calls[0][1][0]
            self.assertEqual(
                rg1.resources,
                actual_request_spec.requested_resources[0].resources)
        do_test()
    @mock.patch.object(objects.Instance, 'save', new=mock.MagicMock())
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_set_vm_state_and_notify', new=mock.MagicMock())
    def test_build_instances_reschedule_not_recalc_mapping_if_claim_fails(
            self, mock_select_dests):
        """If the resource claim on the first alternate fails, the conductor
        moves on to the next alternate and only recalculates the provider
        mapping for the host whose claim succeeded.
        """
        rg1 = objects.RequestGroup(resources={"CUSTOM_FOO": 1})
        request_spec = objects.RequestSpec(requested_resources=[rg1])
        mock_select_dests.return_value = [[fake_selection1]]
        instance = fake_instance.fake_instance_obj(self.context)
        image = {'fake-data': 'should_pass_silently'}
        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(fixtures.CastAsCallFixture(self))

        @mock.patch('nova.scheduler.utils.'
                    'fill_provider_mapping')
        @mock.patch('nova.scheduler.utils.claim_resources',
                    # simulate that the first claim fails during re-schedule
                    side_effect=[False, True])
        @mock.patch('nova.objects.request_spec.RequestSpec.from_primitives',
                    return_value=request_spec)
        @mock.patch.object(self.conductor_manager.compute_rpcapi,
                           'build_and_run_instance')
        @mock.patch.object(self.conductor_manager,
                           '_populate_instance_mapping')
        @mock.patch.object(self.conductor_manager, '_destroy_build_request')
        def do_test(mock_destroy_build_req, mock_pop_inst_map,
                    mock_build_and_run, mock_request_spec_from_primitives,
                    mock_claim, mock_rp_mapping):
            self.conductor.build_instances(
                context=self.context,
                instances=[instance],
                image=image,
                filter_properties={'retry': {'num_attempts': 1, 'hosts': []}},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping='block_device_mapping',
                legacy_bdm=False,
                # deepcopy: build_instances pops hosts from the list in place
                host_lists=copy.deepcopy(fake_host_lists_alt),
                request_spec=request_spec)
            expected_build_run_host_list = copy.copy(fake_host_lists_alt[0])
            if expected_build_run_host_list:
                # first is consumed but the claim fails so the conductor takes
                # the next host
                expected_build_run_host_list.pop(0)
                # second is consumed and claim succeeds
                expected_build_run_host_list.pop(0)
            # The build therefore lands on host2/node2.
            mock_build_and_run.assert_called_with(
                self.context,
                instance=mock.ANY,
                host='host2',
                image=image,
                request_spec=request_spec,
                filter_properties={'retry': {'num_attempts': 2,
                                             'hosts': [['host2', 'node2']]},
                                   'limits': {}},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping=test.MatchType(
                    objects.BlockDeviceMappingList),
                node='node2',
                limits=None,
                host_list=expected_build_run_host_list,
                accel_uuids=[])
            # called only once when the claim succeeded
            mock_rp_mapping.assert_called_once_with(
                test.MatchType(objects.RequestSpec),
                test.MatchType(objects.Selection))
            actual_request_spec = mock_rp_mapping.mock_calls[0][1][0]
            self.assertEqual(
                rg1.resources,
                actual_request_spec.requested_resources[0].resources)
        do_test()
@mock.patch.object(cinder.API, 'attachment_get')
@mock.patch.object(cinder.API, 'attachment_create')
@mock.patch.object(block_device_obj.BlockDeviceMapping, 'save')
def test_validate_existing_attachment_ids_with_missing_attachments(self,
mock_bdm_save, mock_attachment_create, mock_attachment_get):
instance = self._create_fake_instance_obj()
bdms = [
block_device.BlockDeviceDict({
'boot_index': 0,
'guest_format': None,
'connection_info': None,
'device_type': u'disk',
'source_type': 'image',
'destination_type': 'volume',
'volume_size': 1,
'image_id': 1,
'device_name': '/dev/vdb',
'attachment_id': uuids.attachment,
'volume_id': uuids.volume
})]
bdms = block_device_obj.block_device_make_list_from_dicts(
self.context, bdms)
mock_attachment_get.side_effect = exc.VolumeAttachmentNotFound(
attachment_id=uuids.attachment)
mock_attachment_create.return_value = {'id': uuids.new_attachment}
self.assertEqual(uuids.attachment, bdms[0].attachment_id)
self.conductor_manager._validate_existing_attachment_ids(self.context,
instance,
bdms)
mock_attachment_get.assert_called_once_with(self.context,
uuids.attachment)
mock_attachment_create.assert_called_once_with(self.context,
uuids.volume,
instance.uuid)
mock_bdm_save.assert_called_once()
self.assertEqual(uuids.new_attachment, bdms[0].attachment_id)
@mock.patch.object(cinder.API, 'attachment_get')
@mock.patch.object(cinder.API, 'attachment_create')
@mock.patch.object(block_device_obj.BlockDeviceMapping, 'save')
def test_validate_existing_attachment_ids_with_attachments_present(self,
mock_bdm_save, mock_attachment_create, mock_attachment_get):
instance = self._create_fake_instance_obj()
bdms = [
block_device.BlockDeviceDict({
'boot_index': 0,
'guest_format': None,
'connection_info': None,
'device_type': u'disk',
'source_type': 'image',
'destination_type': 'volume',
'volume_size': 1,
'image_id': 1,
'device_name': '/dev/vdb',
'attachment_id': uuids.attachment,
'volume_id': uuids.volume
})]
bdms = block_device_obj.block_device_make_list_from_dicts(
self.context, bdms)
mock_attachment_get.return_value = {
"attachment": {
"status": "attaching",
"detached_at": "2015-09-16T09:28:52.000000",
"connection_info": {},
"attached_at": "2015-09-16T09:28:52.000000",
"attach_mode": "ro",
"instance": instance.uuid,
"volume_id": uuids.volume,
"id": uuids.attachment
}}
self.assertEqual(uuids.attachment, bdms[0].attachment_id)
self.conductor_manager._validate_existing_attachment_ids(self.context,
instance,
bdms)
mock_attachment_get.assert_called_once_with(self.context,
uuids.attachment)
mock_attachment_create.assert_not_called()
mock_bdm_save.assert_not_called()
self.assertEqual(uuids.attachment, bdms[0].attachment_id)
@mock.patch.object(compute_rpcapi.ComputeAPI, 'unshelve_instance')
@mock.patch.object(compute_rpcapi.ComputeAPI, 'start_instance')
def test_unshelve_instance_on_host(self, mock_start, mock_unshelve):
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED
instance.task_state = task_states.UNSHELVING
instance.save()
system_metadata = instance.system_metadata
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(
self.context, instance, self.request_spec)
mock_start.assert_called_once_with(self.context, instance)
mock_unshelve.assert_not_called()
    def test_unshelve_offload_instance_on_host_with_request_spec(self):
        """Unshelving an offloaded instance schedules it and casts
        unshelve_instance to the selected host with filter properties
        derived from the (reset) request spec.

        The test runs both against ComputeTaskManager directly and through
        the conductor RPC API, hence the isinstance() branch below.
        """
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.task_state = task_states.UNSHELVING
        instance.save()
        system_metadata = instance.system_metadata
        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_image_id'] = 'fake_image_id'
        system_metadata['shelved_host'] = 'fake-mini'
        fake_spec = fake_request_spec.fake_spec_obj()
        fake_spec.flavor = instance.flavor
        # FIXME(sbauza): Modify the fake RequestSpec object to either add a
        # non-empty SchedulerRetries object or nullify the field
        fake_spec.retry = None
        # FIXME(sbauza): Modify the fake RequestSpec object to either add a
        # non-empty SchedulerLimits object or nullify the field
        fake_spec.limits = None
        # FIXME(sbauza): Modify the fake RequestSpec object to either add a
        # non-empty InstanceGroup object or nullify the field
        fake_spec.instance_group = None
        filter_properties = fake_spec.to_legacy_filter_properties_dict()
        host = {'host': 'host1', 'nodename': 'node1', 'limits': {}}
        # unshelve_instance() is a cast, we need to wait for it to complete
        self.useFixture(fixtures.CastAsCallFixture(self))

        @mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid')
        @mock.patch.object(self.conductor_manager.compute_rpcapi,
                           'unshelve_instance')
        @mock.patch.object(scheduler_utils, 'populate_filter_properties')
        @mock.patch.object(self.conductor_manager, '_schedule_instances')
        @mock.patch.object(objects.RequestSpec,
                           'to_legacy_filter_properties_dict')
        @mock.patch.object(objects.RequestSpec, 'reset_forced_destinations')
        def do_test(reset_forced_destinations,
                    to_filtprops, sched_instances, populate_filter_properties,
                    unshelve_instance, get_by_instance_uuid):
            cell_mapping = objects.CellMapping.get_by_uuid(self.context,
                                                           uuids.cell1)
            get_by_instance_uuid.return_value = objects.InstanceMapping(
                cell_mapping=cell_mapping)
            to_filtprops.return_value = filter_properties
            sched_instances.return_value = [[fake_selection1]]
            self.conductor.unshelve_instance(self.context, instance, fake_spec)
            # The fake_spec already has a project_id set which doesn't match
            # the instance.project_id so the spec's project_id won't be
            # overridden using the instance.project_id.
            self.assertNotEqual(fake_spec.project_id, instance.project_id)
            reset_forced_destinations.assert_called_once_with()
            # The fake_spec is only going to modified by reference for
            # ComputeTaskManager.
            if isinstance(self.conductor,
                          conductor_manager.ComputeTaskManager):
                self.ensure_network_information_mock.assert_called_once_with(
                    test.MatchType(objects.Instance))
                self.heal_reqspec_is_bfv_mock.assert_called_once_with(
                    self.context, fake_spec, instance)
                sched_instances.assert_called_once_with(
                    self.context, fake_spec, [instance.uuid],
                    return_alternates=False)
                self.assertEqual(cell_mapping,
                                 fake_spec.requested_destination.cell)
            else:
                # RPC API tests won't have the same request spec or instance
                # since they go over the wire.
                self.ensure_network_information_mock.assert_called_once_with(
                    test.MatchType(objects.Instance))
                self.heal_reqspec_is_bfv_mock.assert_called_once_with(
                    self.context, test.MatchType(objects.RequestSpec),
                    test.MatchType(objects.Instance))
                sched_instances.assert_called_once_with(
                    self.context, test.MatchType(objects.RequestSpec),
                    [instance.uuid], return_alternates=False)
            # NOTE(sbauza): Since the instance is dehydrated when passing
            # through the RPC API, we can only assert mock.ANY for it
            unshelve_instance.assert_called_once_with(
                self.context, mock.ANY, host['host'],
                test.MatchType(objects.RequestSpec), image=mock.ANY,
                accel_uuids=[],
                filter_properties=filter_properties, node=host['nodename']
            )
        do_test()
@mock.patch('nova.compute.utils.add_instance_fault_from_exc')
@mock.patch.object(image_api.API, 'get',
side_effect=exc.ImageNotFound(image_id=uuids.image))
def test_unshelve_offloaded_instance_glance_image_not_found(
self, mock_get, add_instance_fault_from_exc):
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.task_state = task_states.UNSHELVING
instance.save()
system_metadata = instance.system_metadata
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_host'] = 'fake-mini'
system_metadata['shelved_image_id'] = uuids.image
reason = ('Unshelve attempted but the image %s '
'cannot be found.') % uuids.image
self.assertRaises(
exc.UnshelveException,
self.conductor_manager.unshelve_instance,
self.context, instance, self.request_spec)
add_instance_fault_from_exc.assert_called_once_with(
self.context, instance, mock_get.side_effect, mock.ANY,
fault_message=reason)
self.assertEqual(instance.vm_state, vm_states.ERROR)
mock_get.assert_called_once_with(self.context, uuids.image,
show_deleted=False)
def test_unshelve_offloaded_instance_image_id_is_none(self):
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.task_state = task_states.UNSHELVING
# 'shelved_image_id' is None for volumebacked instance
instance.system_metadata['shelved_image_id'] = None
self.request_spec.flavor = instance.flavor
with test.nested(
mock.patch.object(self.conductor_manager,
'_schedule_instances'),
mock.patch.object(self.conductor_manager.compute_rpcapi,
'unshelve_instance'),
mock.patch.object(objects.InstanceMapping,
'get_by_instance_uuid'),
) as (schedule_mock, unshelve_mock, get_by_instance_uuid):
schedule_mock.return_value = [[fake_selection1]]
get_by_instance_uuid.return_value = objects.InstanceMapping(
cell_mapping=objects.CellMapping.get_by_uuid(
self.context, uuids.cell1))
self.conductor_manager.unshelve_instance(
self.context, instance, self.request_spec)
self.assertEqual(1, unshelve_mock.call_count)
    @mock.patch.object(compute_rpcapi.ComputeAPI, 'unshelve_instance')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_schedule_instances',
                       return_value=[[objects.Selection(
                           service_host='fake_host', nodename='fake_node',
                           limits=None)]])
    @mock.patch.object(image_api.API, 'get', return_value='fake_image')
    @mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid')
    def test_unshelve_instance_schedule_and_rebuild(
            self, mock_im, mock_get, mock_schedule, mock_unshelve):
        """Happy-path unshelve of an offloaded instance: the shelved image
        is fetched, the instance is scheduled, the cell of the selected
        host is stored on the spec's requested_destination, and
        unshelve_instance is cast to the chosen host.
        """
        fake_spec = objects.RequestSpec()
        # Set requested_destination to test setting cell_mapping in
        # existing object.
        fake_spec.requested_destination = objects.Destination(
            host="dummy", cell=None)
        cell_mapping = objects.CellMapping.get_by_uuid(self.context,
                                                       uuids.cell1)
        mock_im.return_value = objects.InstanceMapping(
            cell_mapping=cell_mapping)
        instance = self._create_fake_instance_obj()
        fake_spec.flavor = instance.flavor
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.save()
        system_metadata = instance.system_metadata
        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_image_id'] = 'fake_image_id'
        system_metadata['shelved_host'] = 'fake-mini'
        self.conductor_manager.unshelve_instance(
            self.context, instance, fake_spec)
        # The pre-existing Destination object was updated in place.
        self.assertEqual(cell_mapping, fake_spec.requested_destination.cell)
        mock_get.assert_called_once_with(
            self.context, 'fake_image_id', show_deleted=False)
        mock_schedule.assert_called_once_with(
            self.context, fake_spec, [instance.uuid], return_alternates=False)
        mock_unshelve.assert_called_once_with(
            self.context, instance, 'fake_host', fake_spec, image='fake_image',
            filter_properties=dict(
                # populate_filter_properties adds limits={}
                fake_spec.to_legacy_filter_properties_dict(), limits={}),
            accel_uuids=[],
            node='fake_node')
def test_unshelve_instance_schedule_and_rebuild_novalid_host(self):
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
system_metadata = instance.system_metadata
self.request_spec.flavor = instance.flavor
def fake_schedule_instances(context, request_spec, *instances,
**kwargs):
raise exc.NoValidHost(reason='')
with test.nested(
mock.patch.object(self.conductor_manager.image_api, 'get',
return_value='fake_image'),
mock.patch.object(self.conductor_manager, '_schedule_instances',
fake_schedule_instances),
mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid'),
mock.patch.object(objects.Instance, 'save')
) as (_get_image, _schedule_instances, get_by_instance_uuid, save):
get_by_instance_uuid.return_value = objects.InstanceMapping(
cell_mapping=objects.CellMapping.get_by_uuid(
self.context, uuids.cell1))
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(
self.context, instance, self.request_spec)
_get_image.assert_has_calls([mock.call(self.context,
system_metadata['shelved_image_id'],
show_deleted=False)])
self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
@mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid')
@mock.patch.object(conductor_manager.ComputeTaskManager,
'_schedule_instances',
side_effect=messaging.MessagingTimeout())
@mock.patch.object(image_api.API, 'get', return_value='fake_image')
@mock.patch.object(objects.Instance, 'save')
def test_unshelve_instance_schedule_and_rebuild_messaging_exception(
self, mock_save, mock_get_image, mock_schedule_instances, mock_im):
mock_im.return_value = objects.InstanceMapping(
cell_mapping=objects.CellMapping.get_by_uuid(self.context,
uuids.cell1))
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.task_state = task_states.UNSHELVING
instance.save()
system_metadata = instance.system_metadata
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.request_spec.flavor = instance.flavor
self.assertRaises(messaging.MessagingTimeout,
self.conductor_manager.unshelve_instance,
self.context, instance, self.request_spec)
mock_get_image.assert_has_calls([mock.call(self.context,
system_metadata['shelved_image_id'],
show_deleted=False)])
self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
self.assertIsNone(instance.task_state)
@mock.patch.object(compute_rpcapi.ComputeAPI, 'unshelve_instance')
@mock.patch.object(conductor_manager.ComputeTaskManager,
'_schedule_instances', return_value=[[
objects.Selection(service_host='fake_host',
nodename='fake_node',
limits=None)]])
@mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid')
def test_unshelve_instance_schedule_and_rebuild_volume_backed(
self, mock_im, mock_schedule, mock_unshelve):
fake_spec = objects.RequestSpec()
mock_im.return_value = objects.InstanceMapping(
cell_mapping=objects.CellMapping.get_by_uuid(self.context,
uuids.cell1))
instance = self._create_fake_instance_obj()
fake_spec.flavor = instance.flavor
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
system_metadata = instance.system_metadata
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(
self.context, instance, fake_spec)
mock_schedule.assert_called_once_with(
self.context, fake_spec, [instance.uuid], return_alternates=False)
mock_unshelve.assert_called_once_with(
self.context, instance, 'fake_host', fake_spec, image=None,
accel_uuids=[], filter_properties={'limits': {}}, node='fake_node')
@mock.patch('nova.scheduler.utils.fill_provider_mapping')
@mock.patch('nova.network.neutron.API.get_requested_resource_for_instance')
@mock.patch.object(conductor_manager.ComputeTaskManager,
'_schedule_instances', )
def test_unshelve_instance_resource_request(
self, mock_schedule, mock_get_res_req, mock_fill_provider_mapping):
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
request_spec = objects.RequestSpec()
request_spec.flavor = instance.flavor
selection = objects.Selection(
service_host='fake_host',
nodename='fake_node',
limits=None)
mock_schedule.return_value = [[selection]]
res_req = [objects.RequestGroup()]
mock_get_res_req.return_value = res_req
self.conductor_manager.unshelve_instance(
self.context, instance, request_spec)
self.assertEqual(res_req, request_spec.requested_resources)
mock_get_res_req.assert_called_once_with(self.context, instance.uuid)
mock_schedule.assert_called_once_with(
self.context, request_spec, [instance.uuid],
return_alternates=False)
mock_fill_provider_mapping.assert_called_once_with(
request_spec, selection)
    @mock.patch('nova.accelerator.cyborg.get_device_profile_request_groups')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_create_and_bind_arq_for_instance', )
    @mock.patch('nova.scheduler.utils.fill_provider_mapping')
    @mock.patch('nova.network.neutron.API.get_requested_resource_for_instance')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_schedule_instances', )
    def test_unshelve_instance_resource_request_with_device_profile(
            self, mock_schedule, mock_get_res_req, mock_fill_provider_mapping,
            mock_get_arq, mock_get_dp):
        """When the flavor requests a Cyborg device profile, the profile's
        request groups end up in the spec's requested_resources during
        unshelve.
        """
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.save()
        request_spec = objects.RequestSpec()
        request_spec.flavor = instance.flavor
        # The device profile is requested via flavor extra specs.
        request_spec.flavor.extra_specs = {'accel:device_profile': 'mydp'}
        selection = objects.Selection(
            service_host='fake_host',
            nodename='fake_node',
            limits=None)
        mock_schedule.return_value = [[selection]]
        # No neutron port resource requests in this scenario.
        mock_get_res_req.return_value = []
        dp_groups = [objects.RequestGroup(requester_id='deviceprofile2'),
                     objects.RequestGroup(requester_id='deviceprofile3')]
        mock_get_dp.return_value = dp_groups
        mock_get_arq.return_value = []
        self.conductor_manager.unshelve_instance(
            self.context, instance, request_spec)
        self.assertEqual(dp_groups, request_spec.requested_resources)
        mock_get_res_req.assert_called_once_with(self.context, instance.uuid)
        mock_schedule.assert_called_once_with(
            self.context, request_spec, [instance.uuid],
            return_alternates=False)
        mock_fill_provider_mapping.assert_called_once_with(
            request_spec, selection)
    @mock.patch('nova.accelerator.cyborg._CyborgClient.delete_arqs_by_uuid')
    @mock.patch('nova.accelerator.cyborg.get_device_profile_request_groups')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_create_and_bind_arq_for_instance', )
    @mock.patch('nova.scheduler.utils.fill_provider_mapping')
    @mock.patch('nova.network.neutron.API.get_requested_resource_for_instance')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_schedule_instances', )
    def test_unshelve_instance_resource_request_with_arq_bind_fail(
            self, mock_schedule, mock_get_res_req, mock_fill_provider_mapping,
            mock_get_arqs, mock_get_dp, mock_del_arqs):
        """If binding the Cyborg accelerator requests fails during unshelve,
        the exception propagates and the created ARQs are cleaned up.
        """
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.save()
        request_spec = objects.RequestSpec()
        request_spec.flavor = instance.flavor
        request_spec.flavor.extra_specs = {'accel:device_profile': 'mydp'}
        selection = objects.Selection(
            service_host='fake_host',
            nodename='fake_node',
            limits=None)
        mock_schedule.return_value = [[selection]]
        dp_groups = [objects.RequestGroup(requester_id='deviceprofile2'),
                     objects.RequestGroup(requester_id='deviceprofile3')]
        mock_get_dp.return_value = dp_groups
        mock_get_res_req.return_value = []
        arqs = ["fake-arq-uuid"]
        # Simulate the ARQ bind failing after creation.
        mock_get_arqs.side_effect = exc.AcceleratorRequestBindingFailed(
            arqs=arqs, msg='')
        ex = self.assertRaises(exc.AcceleratorRequestBindingFailed,
                               self.conductor_manager.unshelve_instance,
                               context=self.context, instance=instance,
                               request_spec=request_spec)
        self.assertIn('Failed to bind accelerator requests', ex.message)
        # The orphaned ARQs are deleted on failure.
        mock_del_arqs.assert_called_with(arqs)
def test_rebuild_instance(self):
inst_obj = self._create_fake_instance_obj()
rebuild_args, compute_args = self._prepare_rebuild_args(
{'host': inst_obj.host})
with test.nested(
mock.patch.object(self.conductor_manager.compute_rpcapi,
'rebuild_instance'),
mock.patch.object(self.conductor_manager.query_client,
'select_destinations'),
mock.patch('nova.scheduler.utils.fill_provider_mapping',
new_callable=mock.NonCallableMock),
mock.patch('nova.network.neutron.API.'
'get_requested_resource_for_instance',
new_callable=mock.NonCallableMock)
) as (rebuild_mock, select_dest_mock, fill_provider_mock,
get_resources_mock):
self.conductor_manager.rebuild_instance(context=self.context,
instance=inst_obj,
**rebuild_args)
self.assertFalse(select_dest_mock.called)
rebuild_mock.assert_called_once_with(self.context,
instance=inst_obj,
**compute_args)
    @mock.patch('nova.compute.utils.notify_about_instance_rebuild')
    def test_rebuild_instance_with_scheduler(self, mock_notify):
        """An evacuate-style rebuild (no target host given) goes through the
        scheduler; the selected host is put into the compute RPC call and a
        rebuild.scheduled notification is emitted.
        """
        inst_obj = self._create_fake_instance_obj()
        inst_obj.host = 'noselect'
        expected_host = 'thebesthost'
        expected_node = 'thebestnode'
        expected_limits = None
        fake_selection = objects.Selection(service_host=expected_host,
                                           nodename=expected_node, limits=None)
        rebuild_args, compute_args = self._prepare_rebuild_args(
            {'host': None, 'node': expected_node, 'limits': expected_limits})
        fake_spec = objects.RequestSpec()
        rebuild_args['request_spec'] = fake_spec
        inst_uuids = [inst_obj.uuid]
        with test.nested(
            mock.patch.object(self.conductor_manager.compute_rpcapi,
                              'rebuild_instance'),
            mock.patch.object(scheduler_utils, 'setup_instance_group',
                              return_value=False),
            mock.patch.object(self.conductor_manager.query_client,
                              'select_destinations',
                              return_value=[[fake_selection]])
        ) as (rebuild_mock, sig_mock, select_dest_mock):
            self.conductor_manager.rebuild_instance(context=self.context,
                                                    instance=inst_obj,
                                                    **rebuild_args)
            self.ensure_network_information_mock.assert_called_once_with(
                inst_obj)
            self.heal_reqspec_is_bfv_mock.assert_called_once_with(
                self.context, fake_spec, inst_obj)
            select_dest_mock.assert_called_once_with(self.context, fake_spec,
                    inst_uuids, return_objects=True, return_alternates=False)
            # The scheduler's choice is forwarded to the compute RPC.
            compute_args['host'] = expected_host
            compute_args['request_spec'] = fake_spec
            rebuild_mock.assert_called_once_with(self.context,
                                                 instance=inst_obj,
                                                 **compute_args)
            self.assertEqual(inst_obj.project_id, fake_spec.project_id)
        # Both the legacy and the versioned notification are sent.
        self.assertEqual('compute.instance.rebuild.scheduled',
                         self.notifier.notifications[0].event_type)
        mock_notify.assert_called_once_with(
            self.context, inst_obj, 'thebesthost', action='rebuild_scheduled',
            source='nova-conductor')
@mock.patch('nova.accelerator.cyborg._CyborgClient.'
'get_arq_uuids_for_instance')
@mock.patch('nova.compute.utils.notify_about_instance_rebuild')
def test_rebuild_instance_with_device_profile(self, mock_notify,
mock_get_arqs):
inst_obj = self._create_fake_instance_obj()
inst_obj.flavor.extra_specs = {'accel:device_profile': 'mydp'}
inst_obj.host = 'noselect'
expected_host = 'thebesthost'
expected_node = 'thebestnode'
expected_limits = None
fake_selection = objects.Selection(service_host=expected_host,
nodename=expected_node, limits=None)
rebuild_args, compute_args = self._prepare_rebuild_args(
{'host': None, 'node': expected_node, 'limits': expected_limits})
fake_spec = objects.RequestSpec()
rebuild_args['request_spec'] = fake_spec
inst_uuids = [inst_obj.uuid]
arqs = ['fake-arq-uuid']
mock_get_arqs.return_value = arqs
with test.nested(
mock.patch.object(self.conductor_manager.compute_rpcapi,
'rebuild_instance'),
mock.patch.object(scheduler_utils, 'setup_instance_group',
return_value=False),
mock.patch.object(self.conductor_manager.query_client,
'select_destinations',
return_value=[[fake_selection]])
) as (rebuild_mock, sig_mock, select_dest_mock):
self.conductor_manager.rebuild_instance(context=self.context,
instance=inst_obj,
**rebuild_args)
self.ensure_network_information_mock.assert_called_once_with(
inst_obj)
self.heal_reqspec_is_bfv_mock.assert_called_once_with(
self.context, fake_spec, inst_obj)
select_dest_mock.assert_called_once_with(self.context, fake_spec,
inst_uuids, return_objects=True, return_alternates=False)
compute_args['host'] = expected_host
compute_args['request_spec'] = fake_spec
compute_args['accel_uuids'] = arqs
rebuild_mock.assert_called_once_with(self.context,
instance=inst_obj,
**compute_args)
self.assertEqual(inst_obj.project_id, fake_spec.project_id)
self.assertEqual('compute.instance.rebuild.scheduled',
self.notifier.notifications[0].event_type)
mock_notify.assert_called_once_with(
self.context, inst_obj, 'thebesthost', action='rebuild_scheduled',
source='nova-conductor')
def test_rebuild_instance_with_scheduler_no_host(self):
inst_obj = self._create_fake_instance_obj()
inst_obj.host = 'noselect'
rebuild_args, _ = self._prepare_rebuild_args({'host': None})
fake_spec = objects.RequestSpec()
rebuild_args['request_spec'] = fake_spec
with test.nested(
mock.patch.object(self.conductor_manager.compute_rpcapi,
'rebuild_instance'),
mock.patch.object(scheduler_utils, 'setup_instance_group',
return_value=False),
mock.patch.object(self.conductor_manager.query_client,
'select_destinations',
side_effect=exc.NoValidHost(reason='')),
mock.patch.object(scheduler_utils, 'set_vm_state_and_notify')
) as (rebuild_mock, sig_mock,
select_dest_mock, set_vm_state_and_notify_mock):
self.assertRaises(exc.NoValidHost,
self.conductor_manager.rebuild_instance,
context=self.context, instance=inst_obj,
**rebuild_args)
select_dest_mock.assert_called_once_with(self.context, fake_spec,
[inst_obj.uuid], return_objects=True,
return_alternates=False)
self.assertEqual(
set_vm_state_and_notify_mock.call_args[0][4]['vm_state'],
vm_states.ERROR)
self.assertFalse(rebuild_mock.called)
    @mock.patch.object(conductor_manager.compute_rpcapi.ComputeAPI,
                       'rebuild_instance')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(conductor_manager.query.SchedulerQueryClient,
                       'select_destinations')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_set_vm_state_and_notify')
    def test_rebuild_instance_with_scheduler_group_failure(self,
                                                           state_mock,
                                                           select_dest_mock,
                                                           sig_mock,
                                                           rebuild_mock):
        """If setup_instance_group raises UnsupportedPolicyException, the
        instance is put into ERROR, the scheduler is never consulted, the
        rebuild is never cast, and the evacuation migration record is
        flipped to 'error'.
        """
        inst_obj = self._create_fake_instance_obj()
        rebuild_args, _ = self._prepare_rebuild_args({'host': None})
        rebuild_args['request_spec'] = self.request_spec
        exception = exc.UnsupportedPolicyException(reason='')
        sig_mock.side_effect = exception
        # rebuild_instance() is dispatched as a cast; turn casts into calls
        # so the test waits for the conductor to finish processing it.
        self.useFixture(fixtures.CastAsCallFixture(self))
        # Create the migration record (normally created by the compute API).
        migration = objects.Migration(self.context,
                                      source_compute=inst_obj.host,
                                      source_node=inst_obj.node,
                                      instance_uuid=inst_obj.uuid,
                                      status='accepted',
                                      migration_type='evacuation')
        migration.create()
        self.assertRaises(exc.UnsupportedPolicyException,
                          self.conductor.rebuild_instance,
                          self.context,
                          inst_obj,
                          **rebuild_args)
        updates = {'vm_state': vm_states.ERROR, 'task_state': None}
        state_mock.assert_called_once_with(self.context, inst_obj.uuid,
                                           'rebuild_server', updates,
                                           exception, mock.ANY)
        self.assertFalse(select_dest_mock.called)
        self.assertFalse(rebuild_mock.called)
        # Assert the migration status was updated.
        migration = objects.Migration.get_by_id(self.context, migration.id)
        self.assertEqual('error', migration.status)
def test_rebuild_instance_fill_provider_mapping_raises(self):
inst_obj = self._create_fake_instance_obj()
rebuild_args, _ = self._prepare_rebuild_args(
{'host': None, 'recreate': True})
fake_spec = objects.RequestSpec()
fake_spec.flavor = inst_obj.flavor
rebuild_args['request_spec'] = fake_spec
with test.nested(
mock.patch.object(self.conductor_manager.compute_rpcapi,
'rebuild_instance'),
mock.patch.object(scheduler_utils, 'setup_instance_group',
return_value=False),
mock.patch.object(self.conductor_manager.query_client,
'select_destinations'),
mock.patch.object(scheduler_utils, 'set_vm_state_and_notify'),
mock.patch.object(scheduler_utils, 'fill_provider_mapping',
side_effect=ValueError(
'No valid group - RP mapping is found'))
) as (rebuild_mock, sig_mock,
select_dest_mock, set_vm_state_and_notify_mock,
fill_mapping_mock):
self.assertRaises(ValueError,
self.conductor_manager.rebuild_instance,
context=self.context, instance=inst_obj,
**rebuild_args)
select_dest_mock.assert_called_once_with(self.context, fake_spec,
[inst_obj.uuid], return_objects=True,
return_alternates=False)
set_vm_state_and_notify_mock.assert_called_once_with(
self.context, inst_obj.uuid, 'compute_task', 'rebuild_server',
{'vm_state': vm_states.ERROR, 'task_state': None},
test.MatchType(ValueError), fake_spec)
self.assertFalse(rebuild_mock.called)
def test_rebuild_instance_evacuate_migration_record(self):
inst_obj = self._create_fake_instance_obj()
migration = objects.Migration(context=self.context,
source_compute=inst_obj.host,
source_node=inst_obj.node,
instance_uuid=inst_obj.uuid,
status='accepted',
migration_type='evacuation')
rebuild_args, compute_args = self._prepare_rebuild_args(
{'host': inst_obj.host, 'migration': migration})
with test.nested(
mock.patch.object(self.conductor_manager.compute_rpcapi,
'rebuild_instance'),
mock.patch.object(self.conductor_manager.query_client,
'select_destinations'),
mock.patch.object(objects.Migration, 'get_by_instance_and_status',
return_value=migration)
) as (rebuild_mock, select_dest_mock, get_migration_mock):
self.conductor_manager.rebuild_instance(context=self.context,
instance=inst_obj,
**rebuild_args)
self.assertFalse(select_dest_mock.called)
rebuild_mock.assert_called_once_with(self.context,
instance=inst_obj,
**compute_args)
    @mock.patch('nova.compute.utils.notify_about_instance_rebuild')
    def test_evacuate_instance_with_request_spec(self, mock_notify):
        """An evacuation (recreate=True) with a request spec resets any
        forced destinations, overwrites ignore_hosts with the source host,
        pins the requested destination cell, fills the provider mapping
        from the scheduler's selection, and casts the rebuild to the
        selected host.
        """
        inst_obj = self._create_fake_instance_obj()
        inst_obj.host = 'noselect'
        expected_host = 'thebesthost'
        expected_node = 'thebestnode'
        expected_limits = None
        fake_selection = objects.Selection(service_host=expected_host,
                                           nodename=expected_node, limits=None)
        fake_spec = objects.RequestSpec(ignore_hosts=[uuids.ignored_host])
        fake_spec.flavor = inst_obj.flavor
        rebuild_args, compute_args = self._prepare_rebuild_args(
            {'host': None, 'node': expected_node, 'limits': expected_limits,
             'request_spec': fake_spec, 'recreate': True})
        with test.nested(
            mock.patch.object(self.conductor_manager.compute_rpcapi,
                              'rebuild_instance'),
            mock.patch.object(scheduler_utils, 'setup_instance_group',
                              return_value=False),
            mock.patch.object(self.conductor_manager.query_client,
                              'select_destinations',
                              return_value=[[fake_selection]]),
            mock.patch.object(fake_spec, 'reset_forced_destinations'),
            mock.patch('nova.scheduler.utils.fill_provider_mapping'),
            mock.patch('nova.network.neutron.API.'
                       'get_requested_resource_for_instance',
                       return_value=[])
        ) as (rebuild_mock, sig_mock, select_dest_mock, reset_fd,
              fill_rp_mapping_mock, get_req_res_mock):
            self.conductor_manager.rebuild_instance(context=self.context,
                                                    instance=inst_obj,
                                                    **rebuild_args)
            reset_fd.assert_called_once_with()
            # The RequestSpec.ignore_hosts field should be overwritten.
            self.assertEqual([inst_obj.host], fake_spec.ignore_hosts)
            # The RequestSpec.requested_destination.cell field should be set.
            self.assertIn('requested_destination', fake_spec)
            self.assertIn('cell', fake_spec.requested_destination)
            self.assertIsNotNone(fake_spec.requested_destination.cell)
            select_dest_mock.assert_called_once_with(self.context,
                    fake_spec, [inst_obj.uuid], return_objects=True,
                    return_alternates=False)
            compute_args['host'] = expected_host
            compute_args['request_spec'] = fake_spec
            rebuild_mock.assert_called_once_with(self.context,
                                                 instance=inst_obj,
                                                 **compute_args)
            get_req_res_mock.assert_called_once_with(
                self.context, inst_obj.uuid)
            fill_rp_mapping_mock.assert_called_once_with(
                fake_spec, fake_selection)
        self.assertEqual('compute.instance.rebuild.scheduled',
                         self.notifier.notifications[0].event_type)
        mock_notify.assert_called_once_with(
            self.context, inst_obj, 'thebesthost', action='rebuild_scheduled',
            source='nova-conductor')
    @mock.patch(
        'nova.accelerator.cyborg._CyborgClient.delete_arqs_for_instance')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_create_and_bind_arq_for_instance', )
    @mock.patch('nova.accelerator.cyborg.get_device_profile_request_groups')
    @mock.patch('nova.compute.utils.notify_about_instance_rebuild')
    def test_evacuate_instance_with_request_spec_device_profile(
            self, mock_notify, mock_get_dp, mock_get_arqs, mock_del_arqs):
        """An evacuation of an instance whose flavor requests a Cyborg
        device profile: old ARQs are deleted, fresh ARQs are created and
        bound, and their uuids are passed along with the rebuild cast.
        """
        inst_obj = self._create_fake_instance_obj()
        inst_obj.host = 'noselect'
        expected_host = 'thebesthost'
        expected_node = 'thebestnode'
        expected_limits = None
        fake_selection = objects.Selection(service_host=expected_host,
                                           nodename=expected_node, limits=None)
        fake_spec = objects.RequestSpec(ignore_hosts=[uuids.ignored_host])
        fake_spec.flavor = inst_obj.flavor
        fake_spec.flavor.extra_specs = {'accel:device_profile': 'mydp'}
        rebuild_args, compute_args = self._prepare_rebuild_args(
            {'host': None, 'node': expected_node, 'limits': expected_limits,
             'request_spec': fake_spec, 'recreate': True})
        dp_groups = [objects.RequestGroup(requester_id='deviceprofile2'),
                     objects.RequestGroup(requester_id='deviceprofile3')]
        mock_get_dp.return_value = dp_groups
        arqs = ['fake-arq-uuid']
        mock_get_arqs.return_value = arqs
        with test.nested(
            mock.patch.object(self.conductor_manager.compute_rpcapi,
                              'rebuild_instance'),
            mock.patch.object(scheduler_utils, 'setup_instance_group',
                              return_value=False),
            mock.patch.object(self.conductor_manager.query_client,
                              'select_destinations',
                              return_value=[[fake_selection]]),
            mock.patch.object(fake_spec, 'reset_forced_destinations'),
            mock.patch('nova.scheduler.utils.fill_provider_mapping'),
            mock.patch('nova.network.neutron.API.'
                       'get_requested_resource_for_instance',
                       return_value=[])
        ) as (rebuild_mock, sig_mock, select_dest_mock, reset_fd,
              fill_rp_mapping_mock, get_req_res_mock):
            self.conductor_manager.rebuild_instance(context=self.context,
                                                    instance=inst_obj,
                                                    **rebuild_args)
            reset_fd.assert_called_once_with()
            # The RequestSpec.ignore_hosts field should be overwritten.
            self.assertEqual([inst_obj.host], fake_spec.ignore_hosts)
            # The RequestSpec.requested_destination.cell field should be set.
            self.assertIn('requested_destination', fake_spec)
            self.assertIn('cell', fake_spec.requested_destination)
            self.assertIsNotNone(fake_spec.requested_destination.cell)
            select_dest_mock.assert_called_once_with(self.context,
                    fake_spec, [inst_obj.uuid], return_objects=True,
                    return_alternates=False)
            compute_args['host'] = expected_host
            compute_args['request_spec'] = fake_spec
            # The newly-bound ARQ uuids ride along to the compute host.
            compute_args['accel_uuids'] = arqs
            rebuild_mock.assert_called_once_with(self.context,
                                                 instance=inst_obj,
                                                 **compute_args)
            get_req_res_mock.assert_called_once_with(
                self.context, inst_obj.uuid)
            fill_rp_mapping_mock.assert_called_once_with(
                fake_spec, fake_selection)
            # Stale ARQs from the source host are cleaned up.
            mock_del_arqs.assert_called_once()
        self.assertEqual('compute.instance.rebuild.scheduled',
                         self.notifier.notifications[0].event_type)
        mock_notify.assert_called_once_with(
            self.context, inst_obj, 'thebesthost', action='rebuild_scheduled',
            source='nova-conductor')
    @mock.patch('nova.accelerator.cyborg._CyborgClient.delete_arqs_by_uuid')
    @mock.patch(
        'nova.accelerator.cyborg._CyborgClient.delete_arqs_for_instance')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_create_and_bind_arq_for_instance', )
    @mock.patch('nova.accelerator.cyborg.get_device_profile_request_groups')
    @mock.patch('nova.compute.utils.notify_about_instance_rebuild')
    def test_evacuate_instance_with_request_spec_arq_bind_fail(
            self, mock_notify, mock_get_dp, mock_get_arqs,
            mock_del_arqs_instance, mock_del_arqs):
        """If binding accelerator requests fails during an evacuation, the
        AcceleratorRequestBindingFailed exception propagates to the caller
        after scheduling and provider-mapping steps have completed.
        """
        inst_obj = self._create_fake_instance_obj()
        inst_obj.host = 'noselect'
        expected_host = 'thebesthost'
        expected_node = 'thebestnode'
        expected_limits = None
        fake_selection = objects.Selection(service_host=expected_host,
                                           nodename=expected_node, limits=None)
        fake_spec = objects.RequestSpec(ignore_hosts=[uuids.ignored_host])
        fake_spec.flavor = inst_obj.flavor
        fake_spec.flavor.extra_specs = {'accel:device_profile': 'mydp'}
        dp_groups = [objects.RequestGroup(requester_id='deviceprofile2'),
                     objects.RequestGroup(requester_id='deviceprofile3')]
        mock_get_dp.return_value = dp_groups
        fake_spec.requested_resources = dp_groups
        rebuild_args, compute_args = self._prepare_rebuild_args(
            {'host': None, 'node': expected_node, 'limits': expected_limits,
             'request_spec': fake_spec, 'recreate': True})
        arqs = ['fake-arq-uuid']
        # Simulate Cyborg failing to bind the created ARQs.
        mock_get_arqs.side_effect = exc.AcceleratorRequestBindingFailed(
            arqs=arqs, msg='')
        with test.nested(
            mock.patch.object(scheduler_utils, 'setup_instance_group',
                              return_value=False),
            mock.patch.object(self.conductor_manager.query_client,
                              'select_destinations',
                              return_value=[[fake_selection]]),
            mock.patch.object(fake_spec, 'reset_forced_destinations'),
            mock.patch('nova.scheduler.utils.fill_provider_mapping'),
            mock.patch('nova.network.neutron.API.'
                       'get_requested_resource_for_instance',
                       return_value=[])
        ) as (sig_mock, select_dest_mock, reset_fd,
              fill_rp_mapping_mock, get_req_res_mock):
            ex = self.assertRaises(exc.AcceleratorRequestBindingFailed,
                                   self.conductor_manager.rebuild_instance,
                                   context=self.context, instance=inst_obj,
                                   **rebuild_args)
            reset_fd.assert_called_once_with()
            # The RequestSpec.ignore_hosts field should be overwritten.
            self.assertEqual([inst_obj.host], fake_spec.ignore_hosts)
            # The RequestSpec.requested_destination.cell field should be set.
            self.assertIn('requested_destination', fake_spec)
            self.assertIn('cell', fake_spec.requested_destination)
            self.assertIsNotNone(fake_spec.requested_destination.cell)
            select_dest_mock.assert_called_once_with(self.context,
                    fake_spec, [inst_obj.uuid], return_objects=True,
                    return_alternates=False)
            compute_args['host'] = expected_host
            compute_args['request_spec'] = fake_spec
            get_req_res_mock.assert_called_once_with(
                self.context, inst_obj.uuid)
            fill_rp_mapping_mock.assert_called_once_with(
                fake_spec, fake_selection)
        self.assertIn('Failed to bind accelerator requests', ex.message)