You can not select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
3389 lines
161 KiB
3389 lines
161 KiB
# Copyright 2012 IBM Corp. |
|
# Copyright 2013 Red Hat, Inc. |
|
# |
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may |
|
# not use this file except in compliance with the License. You may obtain |
|
# a copy of the License at |
|
# |
|
# http://www.apache.org/licenses/LICENSE-2.0 |
|
# |
|
# Unless required by applicable law or agreed to in writing, software |
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
|
# License for the specific language governing permissions and limitations |
|
# under the License. |
|
|
|
"""Tests for the conductor service.""" |
|
|
|
import copy |
|
|
|
import mock |
|
import oslo_messaging as messaging |
|
from oslo_serialization import jsonutils |
|
from oslo_utils.fixture import uuidsentinel as uuids |
|
from oslo_utils import timeutils |
|
from oslo_versionedobjects import exception as ovo_exc |
|
import six |
|
|
|
from nova import block_device |
|
from nova.compute import flavors |
|
from nova.compute import rpcapi as compute_rpcapi |
|
from nova.compute import task_states |
|
from nova.compute import vm_states |
|
from nova.conductor import api as conductor_api |
|
from nova.conductor import manager as conductor_manager |
|
from nova.conductor import rpcapi as conductor_rpcapi |
|
from nova.conductor.tasks import live_migrate |
|
from nova.conductor.tasks import migrate |
|
from nova import conf |
|
from nova import context |
|
from nova.db import api as db |
|
from nova.db.sqlalchemy import api as db_api |
|
from nova.db.sqlalchemy import api_models |
|
from nova import exception as exc |
|
from nova.image import api as image_api |
|
from nova import objects |
|
from nova.objects import base as obj_base |
|
from nova.objects import block_device as block_device_obj |
|
from nova.objects import fields |
|
from nova.scheduler.client import query |
|
from nova.scheduler import utils as scheduler_utils |
|
from nova import test |
|
from nova.tests import fixtures |
|
from nova.tests.unit.api.openstack import fakes |
|
from nova.tests.unit import cast_as_call |
|
from nova.tests.unit.compute import test_compute |
|
from nova.tests.unit import fake_build_request |
|
from nova.tests.unit import fake_instance |
|
from nova.tests.unit import fake_notifier |
|
from nova.tests.unit import fake_request_spec |
|
from nova.tests.unit import fake_server_actions |
|
from nova.tests.unit import utils as test_utils |
|
from nova import utils |
|
from nova.volume import cinder |
|
|
|
|
|
CONF = conf.CONF |
|
|
|
|
|
fake_alloc1 = {"allocations": { |
|
uuids.host1: { |
|
"resources": {"VCPU": 1, |
|
"MEMORY_MB": 1024, |
|
"DISK_GB": 100} |
|
}}} |
|
fake_alloc2 = {"allocations": { |
|
uuids.host2: { |
|
"resources": {"VCPU": 1, |
|
"MEMORY_MB": 1024, |
|
"DISK_GB": 100} |
|
}}} |
|
fake_alloc3 = {"allocations": { |
|
uuids.host3: { |
|
"resources": {"VCPU": 1, |
|
"MEMORY_MB": 1024, |
|
"DISK_GB": 100} |
|
}}} |
|
fake_alloc_json1 = jsonutils.dumps(fake_alloc1) |
|
fake_alloc_json2 = jsonutils.dumps(fake_alloc2) |
|
fake_alloc_json3 = jsonutils.dumps(fake_alloc3) |
|
fake_alloc_version = "1.28" |
|
fake_selection1 = objects.Selection(service_host="host1", nodename="node1", |
|
cell_uuid=uuids.cell, limits=None, allocation_request=fake_alloc_json1, |
|
allocation_request_version=fake_alloc_version) |
|
fake_selection2 = objects.Selection(service_host="host2", nodename="node2", |
|
cell_uuid=uuids.cell, limits=None, allocation_request=fake_alloc_json2, |
|
allocation_request_version=fake_alloc_version) |
|
fake_selection3 = objects.Selection(service_host="host3", nodename="node3", |
|
cell_uuid=uuids.cell, limits=None, allocation_request=fake_alloc_json3, |
|
allocation_request_version=fake_alloc_version) |
|
fake_host_lists1 = [[fake_selection1]] |
|
fake_host_lists2 = [[fake_selection1], [fake_selection2]] |
|
fake_host_lists_alt = [[fake_selection1, fake_selection2, fake_selection3]] |
|
|
|
|
|
class FakeContext(context.RequestContext): |
|
def elevated(self): |
|
"""Return a consistent elevated context so we can detect it.""" |
|
if not hasattr(self, '_elevated'): |
|
self._elevated = super(FakeContext, self).elevated() |
|
return self._elevated |
|
|
|
|
|
class _BaseTestCase(object): |
|
def setUp(self): |
|
super(_BaseTestCase, self).setUp() |
|
self.user_id = fakes.FAKE_USER_ID |
|
self.project_id = fakes.FAKE_PROJECT_ID |
|
self.context = FakeContext(self.user_id, self.project_id) |
|
|
|
fake_notifier.stub_notifier(self) |
|
self.addCleanup(fake_notifier.reset) |
|
|
|
self.stub_out('nova.rpc.RequestContextSerializer.deserialize_context', |
|
lambda *args, **kwargs: self.context) |
|
|
|
self.useFixture(fixtures.SpawnIsSynchronousFixture()) |
|
|
|
|
|
class ConductorTestCase(_BaseTestCase, test.TestCase): |
|
"""Conductor Manager Tests.""" |
|
def setUp(self): |
|
super(ConductorTestCase, self).setUp() |
|
self.conductor = conductor_manager.ConductorManager() |
|
self.conductor_manager = self.conductor |
|
|
|
def _test_object_action(self, is_classmethod, raise_exception): |
|
class TestObject(obj_base.NovaObject): |
|
def foo(self, raise_exception=False): |
|
if raise_exception: |
|
raise Exception('test') |
|
else: |
|
return 'test' |
|
|
|
@classmethod |
|
def bar(cls, context, raise_exception=False): |
|
if raise_exception: |
|
raise Exception('test') |
|
else: |
|
return 'test' |
|
|
|
obj_base.NovaObjectRegistry.register(TestObject) |
|
|
|
obj = TestObject() |
|
# NOTE(danms): After a trip over RPC, any tuple will be a list, |
|
# so use a list here to make sure we can handle it |
|
fake_args = [] |
|
if is_classmethod: |
|
versions = {'TestObject': '1.0'} |
|
result = self.conductor.object_class_action_versions( |
|
self.context, TestObject.obj_name(), 'bar', versions, |
|
fake_args, {'raise_exception': raise_exception}) |
|
else: |
|
updates, result = self.conductor.object_action( |
|
self.context, obj, 'foo', fake_args, |
|
{'raise_exception': raise_exception}) |
|
self.assertEqual('test', result) |
|
|
|
def test_object_action(self): |
|
self._test_object_action(False, False) |
|
|
|
def test_object_action_on_raise(self): |
|
self.assertRaises(messaging.ExpectedException, |
|
self._test_object_action, False, True) |
|
|
|
def test_object_class_action(self): |
|
self._test_object_action(True, False) |
|
|
|
def test_object_class_action_on_raise(self): |
|
self.assertRaises(messaging.ExpectedException, |
|
self._test_object_action, True, True) |
|
|
|
def test_object_action_copies_object(self): |
|
class TestObject(obj_base.NovaObject): |
|
fields = {'dict': fields.DictOfStringsField()} |
|
|
|
def touch_dict(self): |
|
self.dict['foo'] = 'bar' |
|
self.obj_reset_changes() |
|
|
|
obj_base.NovaObjectRegistry.register(TestObject) |
|
|
|
obj = TestObject() |
|
obj.dict = {} |
|
obj.obj_reset_changes() |
|
updates, result = self.conductor.object_action( |
|
self.context, obj, 'touch_dict', tuple(), {}) |
|
# NOTE(danms): If conductor did not properly copy the object, then |
|
# the new and reference copies of the nested dict object will be |
|
# the same, and thus 'dict' will not be reported as changed |
|
self.assertIn('dict', updates) |
|
self.assertEqual({'foo': 'bar'}, updates['dict']) |
|
|
|
def test_object_class_action_versions(self): |
|
@obj_base.NovaObjectRegistry.register |
|
class TestObject(obj_base.NovaObject): |
|
VERSION = '1.10' |
|
|
|
@classmethod |
|
def foo(cls, context): |
|
return cls() |
|
|
|
versions = { |
|
'TestObject': '1.2', |
|
'OtherObj': '1.0', |
|
} |
|
with mock.patch.object(self.conductor_manager, |
|
'_object_dispatch') as m: |
|
m.return_value = TestObject() |
|
m.return_value.obj_to_primitive = mock.MagicMock() |
|
self.conductor.object_class_action_versions( |
|
self.context, TestObject.obj_name(), 'foo', versions, |
|
tuple(), {}) |
|
m.return_value.obj_to_primitive.assert_called_once_with( |
|
target_version='1.2', version_manifest=versions) |
|
|
|
def test_object_class_action_versions_old_object(self): |
|
# Make sure we return older than requested objects unmodified, |
|
# see bug #1596119. |
|
@obj_base.NovaObjectRegistry.register |
|
class TestObject(obj_base.NovaObject): |
|
VERSION = '1.10' |
|
|
|
@classmethod |
|
def foo(cls, context): |
|
return cls() |
|
|
|
versions = { |
|
'TestObject': '1.10', |
|
'OtherObj': '1.0', |
|
} |
|
with mock.patch.object(self.conductor_manager, |
|
'_object_dispatch') as m: |
|
m.return_value = TestObject() |
|
m.return_value.VERSION = '1.9' |
|
m.return_value.obj_to_primitive = mock.MagicMock() |
|
obj = self.conductor.object_class_action_versions( |
|
self.context, TestObject.obj_name(), 'foo', versions, |
|
tuple(), {}) |
|
self.assertFalse(m.return_value.obj_to_primitive.called) |
|
self.assertEqual('1.9', obj.VERSION) |
|
|
|
def test_object_class_action_versions_major_version_diff(self): |
|
@obj_base.NovaObjectRegistry.register |
|
class TestObject(obj_base.NovaObject): |
|
VERSION = '2.10' |
|
|
|
@classmethod |
|
def foo(cls, context): |
|
return cls() |
|
|
|
versions = { |
|
'TestObject': '2.10', |
|
'OtherObj': '1.0', |
|
} |
|
with mock.patch.object(self.conductor_manager, |
|
'_object_dispatch') as m: |
|
m.return_value = TestObject() |
|
m.return_value.VERSION = '1.9' |
|
self.assertRaises( |
|
ovo_exc.InvalidTargetVersion, |
|
self.conductor.object_class_action_versions, |
|
self.context, TestObject.obj_name(), 'foo', versions, |
|
tuple(), {}) |
|
|
|
def test_reset(self): |
|
with mock.patch.object(objects.Service, 'clear_min_version_cache' |
|
) as mock_clear_cache: |
|
self.conductor.reset() |
|
mock_clear_cache.assert_called_once_with() |
|
|
|
def test_provider_fw_rule_get_all(self): |
|
result = self.conductor.provider_fw_rule_get_all(self.context) |
|
self.assertEqual([], result) |
|
|
|
def test_conductor_host(self): |
|
self.assertTrue(hasattr(self.conductor_manager, 'host')) |
|
self.assertEqual(CONF.host, self.conductor_manager.host) |
|
|
|
|
|
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase): |
|
"""Conductor RPC API Tests.""" |
|
def setUp(self): |
|
super(ConductorRPCAPITestCase, self).setUp() |
|
self.conductor_service = self.start_service( |
|
'conductor', manager='nova.conductor.manager.ConductorManager') |
|
self.conductor_manager = self.conductor_service.manager |
|
self.conductor = conductor_rpcapi.ConductorAPI() |
|
|
|
|
|
class ConductorAPITestCase(_BaseTestCase, test.TestCase): |
|
"""Conductor API Tests.""" |
|
def setUp(self): |
|
super(ConductorAPITestCase, self).setUp() |
|
self.conductor_service = self.start_service( |
|
'conductor', manager='nova.conductor.manager.ConductorManager') |
|
self.conductor = conductor_api.API() |
|
self.conductor_manager = self.conductor_service.manager |
|
|
|
def test_wait_until_ready(self): |
|
timeouts = [] |
|
calls = dict(count=0) |
|
|
|
def fake_ping(self, context, message, timeout): |
|
timeouts.append(timeout) |
|
calls['count'] += 1 |
|
if calls['count'] < 15: |
|
raise messaging.MessagingTimeout("fake") |
|
|
|
self.stub_out('nova.baserpc.BaseAPI.ping', fake_ping) |
|
|
|
self.conductor.wait_until_ready(self.context) |
|
|
|
self.assertEqual(timeouts.count(10), 10) |
|
self.assertIn(None, timeouts) |
|
|
|
|
|
class _BaseTaskTestCase(object): |
|
def setUp(self): |
|
super(_BaseTaskTestCase, self).setUp() |
|
self.user_id = fakes.FAKE_USER_ID |
|
self.project_id = fakes.FAKE_PROJECT_ID |
|
self.context = FakeContext(self.user_id, self.project_id) |
|
fake_server_actions.stub_out_action_events(self) |
|
self.request_spec = objects.RequestSpec() |
|
|
|
self.stub_out('nova.rpc.RequestContextSerializer.deserialize_context', |
|
lambda *args, **kwargs: self.context) |
|
|
|
self.useFixture(fixtures.SpawnIsSynchronousFixture()) |
|
_p = mock.patch('nova.compute.utils.heal_reqspec_is_bfv') |
|
self.heal_reqspec_is_bfv_mock = _p.start() |
|
self.addCleanup(_p.stop) |
|
|
|
_p = mock.patch('nova.objects.RequestSpec.ensure_network_metadata') |
|
self.ensure_network_metadata_mock = _p.start() |
|
self.addCleanup(_p.stop) |
|
|
|
def _prepare_rebuild_args(self, update_args=None): |
|
# Args that don't get passed in to the method but do get passed to RPC |
|
migration = update_args and update_args.pop('migration', None) |
|
node = update_args and update_args.pop('node', None) |
|
limits = update_args and update_args.pop('limits', None) |
|
|
|
rebuild_args = {'new_pass': 'admin_password', |
|
'injected_files': 'files_to_inject', |
|
'image_ref': uuids.image_ref, |
|
'orig_image_ref': uuids.orig_image_ref, |
|
'orig_sys_metadata': 'orig_sys_meta', |
|
'bdms': {}, |
|
'recreate': False, |
|
'on_shared_storage': False, |
|
'preserve_ephemeral': False, |
|
'host': 'compute-host', |
|
'request_spec': None} |
|
if update_args: |
|
rebuild_args.update(update_args) |
|
compute_rebuild_args = copy.deepcopy(rebuild_args) |
|
compute_rebuild_args['migration'] = migration |
|
compute_rebuild_args['node'] = node |
|
compute_rebuild_args['limits'] = limits |
|
|
|
return rebuild_args, compute_rebuild_args |
|
|
|
@mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid') |
|
@mock.patch.object(objects.RequestSpec, 'save') |
|
@mock.patch.object(migrate.MigrationTask, 'execute') |
|
@mock.patch.object(utils, 'get_image_from_system_metadata') |
|
@mock.patch.object(objects.RequestSpec, 'from_components') |
|
def _test_cold_migrate(self, spec_from_components, get_image_from_metadata, |
|
migration_task_execute, spec_save, get_im, |
|
clean_shutdown=True): |
|
get_im.return_value.cell_mapping = ( |
|
objects.CellMappingList.get_all(self.context)[0]) |
|
get_image_from_metadata.return_value = 'image' |
|
inst = fake_instance.fake_db_instance(image_ref='image_ref') |
|
inst_obj = objects.Instance._from_db_object( |
|
self.context, objects.Instance(), inst, []) |
|
inst_obj.system_metadata = {'image_hw_disk_bus': 'scsi'} |
|
flavor = flavors.get_default_flavor() |
|
flavor.extra_specs = {'extra_specs': 'fake'} |
|
inst_obj.flavor = flavor |
|
|
|
fake_spec = fake_request_spec.fake_spec_obj() |
|
spec_from_components.return_value = fake_spec |
|
|
|
scheduler_hint = {'filter_properties': {}} |
|
|
|
if isinstance(self.conductor, conductor_api.ComputeTaskAPI): |
|
# The API method is actually 'resize_instance'. It gets |
|
# converted into 'migrate_server' when doing RPC. |
|
self.conductor.resize_instance( |
|
self.context, inst_obj, {}, scheduler_hint, flavor, [], |
|
clean_shutdown, host_list=None) |
|
else: |
|
self.conductor.migrate_server( |
|
self.context, inst_obj, scheduler_hint, |
|
False, False, flavor, None, None, [], |
|
clean_shutdown) |
|
|
|
get_image_from_metadata.assert_called_once_with( |
|
inst_obj.system_metadata) |
|
migration_task_execute.assert_called_once_with() |
|
spec_save.assert_called_once_with() |
|
|
|
def test_cold_migrate(self): |
|
self._test_cold_migrate() |
|
|
|
def test_cold_migrate_forced_shutdown(self): |
|
self._test_cold_migrate(clean_shutdown=False) |
|
|
|
@mock.patch.object(compute_rpcapi.ComputeAPI, 'build_and_run_instance') |
|
@mock.patch.object(db, 'block_device_mapping_get_all_by_instance', |
|
return_value=[]) |
|
@mock.patch.object(conductor_manager.ComputeTaskManager, |
|
'_schedule_instances') |
|
@mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') |
|
@mock.patch('nova.availability_zones.get_host_availability_zone') |
|
@mock.patch('nova.objects.Instance.save') |
|
@mock.patch.object(objects.RequestSpec, 'from_primitives') |
|
def test_build_instances(self, mock_fp, mock_save, mock_getaz, |
|
mock_buildreq, mock_schedule, mock_bdm, |
|
mock_build): |
|
"""Tests creating two instances and the scheduler returns a unique |
|
host/node combo for each instance. |
|
""" |
|
fake_spec = objects.RequestSpec() |
|
mock_fp.return_value = fake_spec |
|
instance_type = flavors.get_default_flavor() |
|
# NOTE(danms): Avoid datetime timezone issues with converted flavors |
|
instance_type.created_at = None |
|
instances = [objects.Instance(context=self.context, |
|
id=i, |
|
uuid=uuids.fake, |
|
flavor=instance_type) for i in range(2)] |
|
instance_type_p = obj_base.obj_to_primitive(instance_type) |
|
instance_properties = obj_base.obj_to_primitive(instances[0]) |
|
instance_properties['system_metadata'] = flavors.save_flavor_info( |
|
{}, instance_type) |
|
|
|
spec = {'image': {'fake_data': 'should_pass_silently'}, |
|
'instance_properties': instance_properties, |
|
'instance_type': instance_type_p, |
|
'num_instances': 2} |
|
filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}} |
|
sched_return = copy.deepcopy(fake_host_lists2) |
|
mock_schedule.return_value = sched_return |
|
filter_properties2 = {'retry': {'num_attempts': 1, |
|
'hosts': [['host1', 'node1']]}, |
|
'limits': {}} |
|
filter_properties3 = {'limits': {}, |
|
'retry': {'num_attempts': 1, |
|
'hosts': [['host2', 'node2']]}} |
|
|
|
# build_instances() is a cast, we need to wait for it to complete |
|
self.useFixture(cast_as_call.CastAsCall(self)) |
|
|
|
mock_getaz.return_value = 'myaz' |
|
|
|
self.conductor.build_instances(self.context, |
|
instances=instances, |
|
image={'fake_data': 'should_pass_silently'}, |
|
filter_properties={}, |
|
admin_password='admin_password', |
|
injected_files='injected_files', |
|
requested_networks=None, |
|
security_groups='security_groups', |
|
block_device_mapping='block_device_mapping', |
|
legacy_bdm=False, host_lists=None) |
|
mock_getaz.assert_has_calls([ |
|
mock.call(self.context, 'host1'), |
|
mock.call(self.context, 'host2')]) |
|
# A RequestSpec is built from primitives once before calling the |
|
# scheduler to get hosts and then once per instance we're building. |
|
mock_fp.assert_has_calls([ |
|
mock.call(self.context, spec, filter_properties), |
|
mock.call(self.context, spec, filter_properties2), |
|
mock.call(self.context, spec, filter_properties3)]) |
|
mock_schedule.assert_called_once_with( |
|
self.context, fake_spec, [uuids.fake, uuids.fake], |
|
return_alternates=True) |
|
mock_bdm.assert_has_calls([mock.call(self.context, instances[0].uuid), |
|
mock.call(self.context, instances[1].uuid)]) |
|
mock_build.assert_has_calls([ |
|
mock.call(self.context, instance=mock.ANY, host='host1', |
|
image={'fake_data': 'should_pass_silently'}, |
|
request_spec=fake_spec, |
|
filter_properties=filter_properties2, |
|
admin_password='admin_password', |
|
injected_files='injected_files', |
|
requested_networks=None, |
|
security_groups='security_groups', |
|
block_device_mapping=mock.ANY, |
|
node='node1', limits=None, host_list=sched_return[0]), |
|
mock.call(self.context, instance=mock.ANY, host='host2', |
|
image={'fake_data': 'should_pass_silently'}, |
|
request_spec=fake_spec, |
|
filter_properties=filter_properties3, |
|
admin_password='admin_password', |
|
injected_files='injected_files', |
|
requested_networks=None, |
|
security_groups='security_groups', |
|
block_device_mapping=mock.ANY, |
|
node='node2', limits=None, host_list=sched_return[1])]) |
|
|
|
@mock.patch.object(scheduler_utils, 'build_request_spec') |
|
@mock.patch.object(scheduler_utils, 'setup_instance_group') |
|
@mock.patch.object(scheduler_utils, 'set_vm_state_and_notify') |
|
@mock.patch.object(query.SchedulerQueryClient, 'select_destinations') |
|
@mock.patch.object(conductor_manager.ComputeTaskManager, |
|
'_cleanup_allocated_networks') |
|
@mock.patch.object(conductor_manager.ComputeTaskManager, |
|
'_destroy_build_request') |
|
def test_build_instances_scheduler_failure( |
|
self, dest_build_req_mock, cleanup_mock, sd_mock, state_mock, |
|
sig_mock, bs_mock): |
|
instances = [fake_instance.fake_instance_obj(self.context) |
|
for i in range(2)] |
|
image = {'fake-data': 'should_pass_silently'} |
|
spec = {'fake': 'specs', |
|
'instance_properties': instances[0]} |
|
exception = exc.NoValidHost(reason='fake-reason') |
|
|
|
dest_build_req_mock.side_effect = ( |
|
exc.BuildRequestNotFound(uuid='fake'), |
|
None) |
|
bs_mock.return_value = spec |
|
sd_mock.side_effect = exception |
|
updates = {'vm_state': vm_states.ERROR, 'task_state': None} |
|
|
|
# build_instances() is a cast, we need to wait for it to complete |
|
self.useFixture(cast_as_call.CastAsCall(self)) |
|
|
|
self.conductor.build_instances( |
|
self.context, |
|
instances=instances, |
|
image=image, |
|
filter_properties={}, |
|
admin_password='admin_password', |
|
injected_files='injected_files', |
|
requested_networks=None, |
|
security_groups='security_groups', |
|
block_device_mapping='block_device_mapping', |
|
legacy_bdm=False) |
|
|
|
set_state_calls = [] |
|
cleanup_network_calls = [] |
|
dest_build_req_calls = [] |
|
for instance in instances: |
|
set_state_calls.append(mock.call( |
|
self.context, instance.uuid, 'compute_task', 'build_instances', |
|
updates, exception, spec)) |
|
cleanup_network_calls.append(mock.call( |
|
self.context, mock.ANY, None)) |
|
dest_build_req_calls.append( |
|
mock.call(self.context, test.MatchType(type(instance)))) |
|
state_mock.assert_has_calls(set_state_calls) |
|
cleanup_mock.assert_has_calls(cleanup_network_calls) |
|
dest_build_req_mock.assert_has_calls(dest_build_req_calls) |
|
|
|
def test_build_instances_retry_exceeded(self): |
|
instances = [fake_instance.fake_instance_obj(self.context)] |
|
image = {'fake-data': 'should_pass_silently'} |
|
filter_properties = {'retry': {'num_attempts': 10, 'hosts': []}} |
|
updates = {'vm_state': vm_states.ERROR, 'task_state': None} |
|
|
|
@mock.patch.object(conductor_manager.ComputeTaskManager, |
|
'_cleanup_allocated_networks') |
|
@mock.patch.object(scheduler_utils, 'set_vm_state_and_notify') |
|
@mock.patch.object(scheduler_utils, 'build_request_spec') |
|
@mock.patch.object(scheduler_utils, 'populate_retry') |
|
def _test(populate_retry, build_spec, |
|
set_vm_state_and_notify, cleanup_mock): |
|
# build_instances() is a cast, we need to wait for it to |
|
# complete |
|
self.useFixture(cast_as_call.CastAsCall(self)) |
|
|
|
populate_retry.side_effect = exc.MaxRetriesExceeded( |
|
reason="Too many try") |
|
|
|
self.conductor.build_instances( |
|
self.context, |
|
instances=instances, |
|
image=image, |
|
filter_properties=filter_properties, |
|
admin_password='admin_password', |
|
injected_files='injected_files', |
|
requested_networks=None, |
|
security_groups='security_groups', |
|
block_device_mapping='block_device_mapping', |
|
legacy_bdm=False) |
|
|
|
populate_retry.assert_called_once_with( |
|
filter_properties, instances[0].uuid) |
|
set_vm_state_and_notify.assert_called_once_with( |
|
self.context, instances[0].uuid, 'compute_task', |
|
'build_instances', updates, mock.ANY, build_spec.return_value) |
|
cleanup_mock.assert_called_once_with(self.context, mock.ANY, None) |
|
|
|
_test() |
|
|
|
@mock.patch.object(scheduler_utils, 'build_request_spec') |
|
@mock.patch.object(scheduler_utils, 'setup_instance_group') |
|
@mock.patch.object(conductor_manager.ComputeTaskManager, |
|
'_set_vm_state_and_notify') |
|
@mock.patch.object(conductor_manager.ComputeTaskManager, |
|
'_cleanup_allocated_networks') |
|
def test_build_instances_scheduler_group_failure( |
|
self, cleanup_mock, state_mock, sig_mock, bs_mock): |
|
instances = [fake_instance.fake_instance_obj(self.context) |
|
for i in range(2)] |
|
image = {'fake-data': 'should_pass_silently'} |
|
spec = {'fake': 'specs', |
|
'instance_properties': instances[0]} |
|
|
|
bs_mock.return_value = spec |
|
exception = exc.UnsupportedPolicyException(reason='fake-reason') |
|
sig_mock.side_effect = exception |
|
|
|
updates = {'vm_state': vm_states.ERROR, 'task_state': None} |
|
|
|
# build_instances() is a cast, we need to wait for it to complete |
|
self.useFixture(cast_as_call.CastAsCall(self)) |
|
|
|
self.conductor.build_instances( |
|
context=self.context, |
|
instances=instances, |
|
image=image, |
|
filter_properties={}, |
|
admin_password='admin_password', |
|
injected_files='injected_files', |
|
requested_networks=None, |
|
security_groups='security_groups', |
|
block_device_mapping='block_device_mapping', |
|
legacy_bdm=False) |
|
set_state_calls = [] |
|
cleanup_network_calls = [] |
|
for instance in instances: |
|
set_state_calls.append(mock.call( |
|
self.context, instance.uuid, 'build_instances', updates, |
|
exception, spec)) |
|
cleanup_network_calls.append(mock.call( |
|
self.context, mock.ANY, None)) |
|
state_mock.assert_has_calls(set_state_calls) |
|
cleanup_mock.assert_has_calls(cleanup_network_calls) |
|
|
|
@mock.patch.object(objects.BuildRequest, 'get_by_instance_uuid') |
|
@mock.patch.object(objects.Instance, 'save') |
|
@mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid', |
|
side_effect=exc.InstanceMappingNotFound(uuid='fake')) |
|
@mock.patch.object(objects.HostMapping, 'get_by_host') |
|
@mock.patch.object(query.SchedulerQueryClient, 'select_destinations') |
|
@mock.patch.object(conductor_manager.ComputeTaskManager, |
|
'_set_vm_state_and_notify') |
|
def test_build_instances_no_instance_mapping(self, _mock_set_state, |
|
mock_select_dests, mock_get_by_host, mock_get_inst_map_by_uuid, |
|
_mock_save, _mock_buildreq): |
|
|
|
mock_select_dests.return_value = [[fake_selection1], [fake_selection2]] |
|
|
|
instances = [fake_instance.fake_instance_obj(self.context) |
|
for i in range(2)] |
|
image = {'fake-data': 'should_pass_silently'} |
|
|
|
# build_instances() is a cast, we need to wait for it to complete |
|
self.useFixture(cast_as_call.CastAsCall(self)) |
|
|
|
with mock.patch.object(self.conductor_manager.compute_rpcapi, |
|
'build_and_run_instance'): |
|
self.conductor.build_instances( |
|
context=self.context, |
|
instances=instances, |
|
image=image, |
|
filter_properties={}, |
|
admin_password='admin_password', |
|
injected_files='injected_files', |
|
requested_networks=None, |
|
security_groups='security_groups', |
|
block_device_mapping='block_device_mapping', |
|
legacy_bdm=False) |
|
mock_get_inst_map_by_uuid.assert_has_calls([ |
|
mock.call(self.context, instances[0].uuid), |
|
mock.call(self.context, instances[1].uuid)]) |
|
self.assertFalse(mock_get_by_host.called) |
|
|
|
@mock.patch("nova.scheduler.utils.claim_resources", return_value=False) |
|
@mock.patch.object(objects.Instance, 'save') |
|
def test_build_instances_exhaust_host_list(self, _mock_save, mock_claim): |
|
# A list of three alternate hosts for one instance |
|
host_lists = copy.deepcopy(fake_host_lists_alt) |
|
instance = fake_instance.fake_instance_obj(self.context) |
|
image = {'fake-data': 'should_pass_silently'} |
|
expected_claim_count = len(host_lists[0]) |
|
|
|
# build_instances() is a cast, we need to wait for it to complete |
|
self.useFixture(cast_as_call.CastAsCall(self)) |
|
# Since claim_resources() is mocked to always return False, we will run |
|
# out of alternate hosts, and MaxRetriesExceeded should be raised. |
|
self.assertRaises(exc.MaxRetriesExceeded, |
|
self.conductor.build_instances, context=self.context, |
|
instances=[instance], image=image, filter_properties={}, |
|
admin_password='admin_password', |
|
injected_files='injected_files', requested_networks=None, |
|
security_groups='security_groups', |
|
block_device_mapping=None, legacy_bdm=None, |
|
host_lists=host_lists) |
|
self.assertEqual(expected_claim_count, mock_claim.call_count) |
|
|
|
@mock.patch.object(conductor_manager.ComputeTaskManager, |
|
'_destroy_build_request') |
|
@mock.patch.object(conductor_manager.LOG, 'debug') |
|
@mock.patch("nova.scheduler.utils.claim_resources", return_value=True) |
|
@mock.patch.object(objects.Instance, 'save') |
|
def test_build_instances_logs_selected_and_alts(self, _mock_save, |
|
mock_claim, mock_debug, mock_destroy): |
|
# A list of three alternate hosts for one instance |
|
host_lists = copy.deepcopy(fake_host_lists_alt) |
|
expected_host = host_lists[0][0] |
|
expected_alts = host_lists[0][1:] |
|
instance = fake_instance.fake_instance_obj(self.context) |
|
image = {'fake-data': 'should_pass_silently'} |
|
|
|
# build_instances() is a cast, we need to wait for it to complete |
|
self.useFixture(cast_as_call.CastAsCall(self)) |
|
with mock.patch.object(self.conductor_manager.compute_rpcapi, |
|
'build_and_run_instance'): |
|
self.conductor.build_instances(context=self.context, |
|
instances=[instance], image=image, filter_properties={}, |
|
admin_password='admin_password', |
|
injected_files='injected_files', requested_networks=None, |
|
security_groups='security_groups', |
|
block_device_mapping=None, legacy_bdm=None, |
|
host_lists=host_lists) |
|
# The last LOG.debug call should record the selected host name and the |
|
# list of alternates. |
|
last_call = mock_debug.call_args_list[-1][0] |
|
self.assertIn(expected_host.service_host, last_call) |
|
expected_alt_hosts = [(alt.service_host, alt.nodename) |
|
for alt in expected_alts] |
|
self.assertIn(expected_alt_hosts, last_call) |
|
|
|
@mock.patch.object(objects.BuildRequest, 'get_by_instance_uuid') |
|
@mock.patch.object(objects.Instance, 'save') |
|
@mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid') |
|
@mock.patch.object(objects.HostMapping, 'get_by_host', |
|
side_effect=exc.HostMappingNotFound(name='fake')) |
|
@mock.patch.object(query.SchedulerQueryClient, 'select_destinations') |
|
@mock.patch.object(conductor_manager.ComputeTaskManager, |
|
'_set_vm_state_and_notify') |
|
def test_build_instances_no_host_mapping(self, _mock_set_state, |
|
mock_select_dests, mock_get_by_host, mock_get_inst_map_by_uuid, |
|
_mock_save, mock_buildreq): |
|
|
|
mock_select_dests.return_value = [[fake_selection1], [fake_selection2]] |
|
num_instances = 2 |
|
instances = [fake_instance.fake_instance_obj(self.context) |
|
for i in range(num_instances)] |
|
inst_mapping_mocks = [mock.Mock() for i in range(num_instances)] |
|
mock_get_inst_map_by_uuid.side_effect = inst_mapping_mocks |
|
image = {'fake-data': 'should_pass_silently'} |
|
|
|
# build_instances() is a cast, we need to wait for it to complete |
|
self.useFixture(cast_as_call.CastAsCall(self)) |
|
|
|
with mock.patch.object(self.conductor_manager.compute_rpcapi, |
|
'build_and_run_instance'): |
|
self.conductor.build_instances( |
|
context=self.context, |
|
instances=instances, |
|
image=image, |
|
filter_properties={}, |
|
admin_password='admin_password', |
|
injected_files='injected_files', |
|
requested_networks=None, |
|
security_groups='security_groups', |
|
block_device_mapping='block_device_mapping', |
|
legacy_bdm=False) |
|
for instance in instances: |
|
mock_get_inst_map_by_uuid.assert_any_call(self.context, |
|
instance.uuid) |
|
|
|
for inst_mapping in inst_mapping_mocks: |
|
inst_mapping.destroy.assert_called_once_with() |
|
|
|
mock_get_by_host.assert_has_calls([mock.call(self.context, 'host1'), |
|
mock.call(self.context, 'host2')]) |
|
|
|
@mock.patch.object(objects.BuildRequest, 'get_by_instance_uuid') |
|
@mock.patch.object(objects.Instance, 'save') |
|
@mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid') |
|
@mock.patch.object(objects.HostMapping, 'get_by_host') |
|
@mock.patch.object(query.SchedulerQueryClient, 'select_destinations') |
|
@mock.patch.object(conductor_manager.ComputeTaskManager, |
|
'_set_vm_state_and_notify') |
|
def test_build_instances_update_instance_mapping(self, _mock_set_state, |
|
mock_select_dests, mock_get_by_host, mock_get_inst_map_by_uuid, |
|
_mock_save, _mock_buildreq): |
|
|
|
mock_select_dests.return_value = [[fake_selection1], [fake_selection2]] |
|
mock_get_by_host.side_effect = [ |
|
objects.HostMapping(cell_mapping=objects.CellMapping(id=1)), |
|
objects.HostMapping(cell_mapping=objects.CellMapping(id=2))] |
|
|
|
num_instances = 2 |
|
instances = [fake_instance.fake_instance_obj(self.context) |
|
for i in range(num_instances)] |
|
inst_mapping_mocks = [mock.Mock() for i in range(num_instances)] |
|
mock_get_inst_map_by_uuid.side_effect = inst_mapping_mocks |
|
image = {'fake-data': 'should_pass_silently'} |
|
|
|
# build_instances() is a cast, we need to wait for it to complete |
|
self.useFixture(cast_as_call.CastAsCall(self)) |
|
|
|
with mock.patch.object(self.conductor_manager.compute_rpcapi, |
|
'build_and_run_instance'): |
|
self.conductor.build_instances( |
|
context=self.context, |
|
instances=instances, |
|
image=image, |
|
filter_properties={}, |
|
admin_password='admin_password', |
|
injected_files='injected_files', |
|
requested_networks=None, |
|
security_groups='security_groups', |
|
block_device_mapping='block_device_mapping', |
|
legacy_bdm=False) |
|
for instance in instances: |
|
mock_get_inst_map_by_uuid.assert_any_call(self.context, |
|
instance.uuid) |
|
|
|
for inst_mapping in inst_mapping_mocks: |
|
inst_mapping.save.assert_called_once_with() |
|
|
|
self.assertEqual(1, inst_mapping_mocks[0].cell_mapping.id) |
|
self.assertEqual(2, inst_mapping_mocks[1].cell_mapping.id) |
|
mock_get_by_host.assert_has_calls([mock.call(self.context, 'host1'), |
|
mock.call(self.context, 'host2')]) |
|
|
|
@mock.patch.object(objects.Instance, 'save', new=mock.MagicMock()) |
|
@mock.patch.object(objects.BuildRequest, 'get_by_instance_uuid') |
|
@mock.patch.object(query.SchedulerQueryClient, 'select_destinations') |
|
@mock.patch.object(conductor_manager.ComputeTaskManager, |
|
'_set_vm_state_and_notify', new=mock.MagicMock()) |
|
def test_build_instances_destroy_build_request(self, mock_select_dests, |
|
mock_build_req_get): |
|
|
|
mock_select_dests.return_value = [[fake_selection1], [fake_selection2]] |
|
num_instances = 2 |
|
instances = [fake_instance.fake_instance_obj(self.context) |
|
for i in range(num_instances)] |
|
build_req_mocks = [mock.Mock() for i in range(num_instances)] |
|
mock_build_req_get.side_effect = build_req_mocks |
|
image = {'fake-data': 'should_pass_silently'} |
|
|
|
# build_instances() is a cast, we need to wait for it to complete |
|
self.useFixture(cast_as_call.CastAsCall(self)) |
|
|
|
@mock.patch.object(self.conductor_manager.compute_rpcapi, |
|
'build_and_run_instance', new=mock.MagicMock()) |
|
@mock.patch.object(self.conductor_manager, |
|
'_populate_instance_mapping', new=mock.MagicMock()) |
|
def do_test(): |
|
self.conductor.build_instances( |
|
context=self.context, |
|
instances=instances, |
|
image=image, |
|
filter_properties={}, |
|
admin_password='admin_password', |
|
injected_files='injected_files', |
|
requested_networks=None, |
|
security_groups='security_groups', |
|
block_device_mapping='block_device_mapping', |
|
legacy_bdm=False, |
|
host_lists=None) |
|
|
|
do_test() |
|
|
|
for build_req in build_req_mocks: |
|
build_req.destroy.assert_called_once_with() |
|
|
|
@mock.patch.object(objects.Instance, 'save', new=mock.MagicMock()) |
|
@mock.patch.object(query.SchedulerQueryClient, 'select_destinations') |
|
@mock.patch.object(conductor_manager.ComputeTaskManager, |
|
'_set_vm_state_and_notify', new=mock.MagicMock()) |
|
def test_build_instances_reschedule_ignores_build_request(self, |
|
mock_select_dests): |
|
# This test calls build_instances as if it was a reschedule. This means |
|
# that the exc.BuildRequestNotFound() exception raised by |
|
# conductor_manager._destroy_build_request() should not cause the |
|
# build to stop. |
|
|
|
mock_select_dests.return_value = [[fake_selection1]] |
|
instance = fake_instance.fake_instance_obj(self.context) |
|
image = {'fake-data': 'should_pass_silently'} |
|
|
|
# build_instances() is a cast, we need to wait for it to complete |
|
self.useFixture(cast_as_call.CastAsCall(self)) |
|
|
|
@mock.patch.object(self.conductor_manager.compute_rpcapi, |
|
'build_and_run_instance') |
|
@mock.patch.object(self.conductor_manager, |
|
'_populate_instance_mapping') |
|
@mock.patch.object(self.conductor_manager, |
|
'_destroy_build_request', |
|
side_effect=exc.BuildRequestNotFound(uuid='fake')) |
|
def do_test(mock_destroy_build_req, mock_pop_inst_map, |
|
mock_build_and_run): |
|
self.conductor.build_instances( |
|
context=self.context, |
|
instances=[instance], |
|
image=image, |
|
filter_properties={'retry': {'num_attempts': 1, 'hosts': []}}, |
|
admin_password='admin_password', |
|
injected_files='injected_files', |
|
requested_networks=None, |
|
security_groups='security_groups', |
|
block_device_mapping='block_device_mapping', |
|
legacy_bdm=False, host_lists=None) |
|
|
|
expected_build_run_host_list = copy.copy(fake_host_lists1[0]) |
|
if expected_build_run_host_list: |
|
expected_build_run_host_list.pop(0) |
|
mock_build_and_run.assert_called_once_with( |
|
self.context, |
|
instance=mock.ANY, |
|
host='host1', |
|
image=image, |
|
request_spec=mock.ANY, |
|
filter_properties={'retry': {'num_attempts': 2, |
|
'hosts': [['host1', 'node1']]}, |
|
'limits': {}}, |
|
admin_password='admin_password', |
|
injected_files='injected_files', |
|
requested_networks=None, |
|
security_groups='security_groups', |
|
block_device_mapping=test.MatchType( |
|
objects.BlockDeviceMappingList), |
|
node='node1', limits=None, |
|
host_list=expected_build_run_host_list) |
|
mock_pop_inst_map.assert_not_called() |
|
mock_destroy_build_req.assert_not_called() |
|
|
|
do_test() |
|
|
|
@mock.patch.object(objects.Instance, 'save', new=mock.MagicMock()) |
|
@mock.patch.object(query.SchedulerQueryClient, 'select_destinations') |
|
@mock.patch.object(conductor_manager.ComputeTaskManager, |
|
'_set_vm_state_and_notify', new=mock.MagicMock()) |
|
def test_build_instances_reschedule_recalculates_provider_mapping(self, |
|
mock_select_dests): |
|
|
|
rg1 = objects.RequestGroup(resources={"CUSTOM_FOO": 1}) |
|
request_spec = objects.RequestSpec(requested_resources=[rg1]) |
|
|
|
mock_select_dests.return_value = [[fake_selection1]] |
|
instance = fake_instance.fake_instance_obj(self.context) |
|
image = {'fake-data': 'should_pass_silently'} |
|
|
|
# build_instances() is a cast, we need to wait for it to complete |
|
self.useFixture(cast_as_call.CastAsCall(self)) |
|
|
|
@mock.patch('nova.conductor.manager.ComputeTaskManager.' |
|
'_fill_provider_mapping') |
|
@mock.patch('nova.scheduler.utils.claim_resources') |
|
@mock.patch('nova.objects.request_spec.RequestSpec.from_primitives', |
|
return_value=request_spec) |
|
@mock.patch.object(self.conductor_manager.compute_rpcapi, |
|
'build_and_run_instance') |
|
@mock.patch.object(self.conductor_manager, |
|
'_populate_instance_mapping') |
|
@mock.patch.object(self.conductor_manager, '_destroy_build_request') |
|
def do_test(mock_destroy_build_req, mock_pop_inst_map, |
|
mock_build_and_run, mock_request_spec_from_primitives, |
|
mock_claim, mock_rp_mapping): |
|
self.conductor.build_instances( |
|
context=self.context, |
|
instances=[instance], |
|
image=image, |
|
filter_properties={'retry': {'num_attempts': 1, 'hosts': []}}, |
|
admin_password='admin_password', |
|
injected_files='injected_files', |
|
requested_networks=None, |
|
security_groups='security_groups', |
|
block_device_mapping='block_device_mapping', |
|
legacy_bdm=False, |
|
host_lists=copy.deepcopy(fake_host_lists1), |
|
request_spec=request_spec) |
|
|
|
expected_build_run_host_list = copy.copy(fake_host_lists1[0]) |
|
if expected_build_run_host_list: |
|
expected_build_run_host_list.pop(0) |
|
mock_build_and_run.assert_called_once_with( |
|
self.context, |
|
instance=mock.ANY, |
|
host='host1', |
|
image=image, |
|
request_spec=request_spec, |
|
filter_properties={'retry': {'num_attempts': 2, |
|
'hosts': [['host1', 'node1']]}, |
|
'limits': {}}, |
|
admin_password='admin_password', |
|
injected_files='injected_files', |
|
requested_networks=None, |
|
security_groups='security_groups', |
|
block_device_mapping=test.MatchType( |
|
objects.BlockDeviceMappingList), |
|
node='node1', |
|
limits=None, |
|
host_list=expected_build_run_host_list) |
|
|
|
mock_rp_mapping.assert_called_once_with( |
|
self.context, instance.uuid, mock.ANY, |
|
test.MatchType(objects.Selection)) |
|
actual_request_spec = mock_rp_mapping.mock_calls[0][1][2] |
|
self.assertEqual( |
|
rg1.resources, |
|
actual_request_spec.requested_resources[0].resources) |
|
|
|
do_test() |
|
|
|
@mock.patch.object(cinder.API, 'attachment_get') |
|
@mock.patch.object(cinder.API, 'attachment_create') |
|
@mock.patch.object(block_device_obj.BlockDeviceMapping, 'save') |
|
def test_validate_existing_attachment_ids_with_missing_attachments(self, |
|
mock_bdm_save, mock_attachment_create, mock_attachment_get): |
|
instance = self._create_fake_instance_obj() |
|
bdms = [ |
|
block_device.BlockDeviceDict({ |
|
'boot_index': 0, |
|
'guest_format': None, |
|
'connection_info': None, |
|
'device_type': u'disk', |
|
'source_type': 'image', |
|
'destination_type': 'volume', |
|
'volume_size': 1, |
|
'image_id': 1, |
|
'device_name': '/dev/vdb', |
|
'attachment_id': uuids.attachment, |
|
'volume_id': uuids.volume |
|
})] |
|
bdms = block_device_obj.block_device_make_list_from_dicts( |
|
self.context, bdms) |
|
mock_attachment_get.side_effect = exc.VolumeAttachmentNotFound( |
|
attachment_id=uuids.attachment) |
|
mock_attachment_create.return_value = {'id': uuids.new_attachment} |
|
|
|
self.assertEqual(uuids.attachment, bdms[0].attachment_id) |
|
self.conductor_manager._validate_existing_attachment_ids(self.context, |
|
instance, |
|
bdms) |
|
mock_attachment_get.assert_called_once_with(self.context, |
|
uuids.attachment) |
|
mock_attachment_create.assert_called_once_with(self.context, |
|
uuids.volume, |
|
instance.uuid) |
|
mock_bdm_save.assert_called_once() |
|
self.assertEqual(uuids.new_attachment, bdms[0].attachment_id) |
|
|
|
@mock.patch.object(cinder.API, 'attachment_get') |
|
@mock.patch.object(cinder.API, 'attachment_create') |
|
@mock.patch.object(block_device_obj.BlockDeviceMapping, 'save') |
|
def test_validate_existing_attachment_ids_with_attachments_present(self, |
|
mock_bdm_save, mock_attachment_create, mock_attachment_get): |
|
instance = self._create_fake_instance_obj() |
|
bdms = [ |
|
block_device.BlockDeviceDict({ |
|
'boot_index': 0, |
|
'guest_format': None, |
|
'connection_info': None, |
|
'device_type': u'disk', |
|
'source_type': 'image', |
|
'destination_type': 'volume', |
|
'volume_size': 1, |
|
'image_id': 1, |
|
'device_name': '/dev/vdb', |
|
'attachment_id': uuids.attachment, |
|
'volume_id': uuids.volume |
|
})] |
|
bdms = block_device_obj.block_device_make_list_from_dicts( |
|
self.context, bdms) |
|
mock_attachment_get.return_value = { |
|
"attachment": { |
|
"status": "attaching", |
|
"detached_at": "2015-09-16T09:28:52.000000", |
|
"connection_info": {}, |
|
"attached_at": "2015-09-16T09:28:52.000000", |
|
"attach_mode": "ro", |
|
"instance": instance.uuid, |
|
"volume_id": uuids.volume, |
|
"id": uuids.attachment |
|
}} |
|
|
|
self.assertEqual(uuids.attachment, bdms[0].attachment_id) |
|
self.conductor_manager._validate_existing_attachment_ids(self.context, |
|
instance, |
|
bdms) |
|
mock_attachment_get.assert_called_once_with(self.context, |
|
uuids.attachment) |
|
mock_attachment_create.assert_not_called() |
|
mock_bdm_save.assert_not_called() |
|
self.assertEqual(uuids.attachment, bdms[0].attachment_id) |
|
|
|
@mock.patch.object(compute_rpcapi.ComputeAPI, 'unshelve_instance') |
|
@mock.patch.object(compute_rpcapi.ComputeAPI, 'start_instance') |
|
def test_unshelve_instance_on_host(self, mock_start, mock_unshelve): |
|
instance = self._create_fake_instance_obj() |
|
instance.vm_state = vm_states.SHELVED |
|
instance.task_state = task_states.UNSHELVING |
|
instance.save() |
|
system_metadata = instance.system_metadata |
|
|
|
system_metadata['shelved_at'] = timeutils.utcnow() |
|
system_metadata['shelved_image_id'] = 'fake_image_id' |
|
system_metadata['shelved_host'] = 'fake-mini' |
|
self.conductor_manager.unshelve_instance( |
|
self.context, instance, self.request_spec) |
|
|
|
mock_start.assert_called_once_with(self.context, instance) |
|
mock_unshelve.assert_not_called() |
|
|
|
def test_unshelve_offload_instance_on_host_with_request_spec(self): |
|
instance = self._create_fake_instance_obj() |
|
instance.vm_state = vm_states.SHELVED_OFFLOADED |
|
instance.task_state = task_states.UNSHELVING |
|
instance.save() |
|
system_metadata = instance.system_metadata |
|
|
|
system_metadata['shelved_at'] = timeutils.utcnow() |
|
system_metadata['shelved_image_id'] = 'fake_image_id' |
|
system_metadata['shelved_host'] = 'fake-mini' |
|
|
|
fake_spec = fake_request_spec.fake_spec_obj() |
|
# FIXME(sbauza): Modify the fake RequestSpec object to either add a |
|
# non-empty SchedulerRetries object or nullify the field |
|
fake_spec.retry = None |
|
# FIXME(sbauza): Modify the fake RequestSpec object to either add a |
|
# non-empty SchedulerLimits object or nullify the field |
|
fake_spec.limits = None |
|
# FIXME(sbauza): Modify the fake RequestSpec object to either add a |
|
# non-empty InstanceGroup object or nullify the field |
|
fake_spec.instance_group = None |
|
|
|
filter_properties = fake_spec.to_legacy_filter_properties_dict() |
|
|
|
host = {'host': 'host1', 'nodename': 'node1', 'limits': {}} |
|
|
|
# unshelve_instance() is a cast, we need to wait for it to complete |
|
self.useFixture(cast_as_call.CastAsCall(self)) |
|
|
|
@mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid') |
|
@mock.patch.object(self.conductor_manager.compute_rpcapi, |
|
'unshelve_instance') |
|
@mock.patch.object(scheduler_utils, 'populate_filter_properties') |
|
@mock.patch.object(self.conductor_manager, '_schedule_instances') |
|
@mock.patch.object(objects.RequestSpec, |
|
'to_legacy_filter_properties_dict') |
|
@mock.patch.object(objects.RequestSpec, 'reset_forced_destinations') |
|
def do_test(reset_forced_destinations, |
|
to_filtprops, sched_instances, populate_filter_properties, |
|
unshelve_instance, get_by_instance_uuid): |
|
cell_mapping = objects.CellMapping.get_by_uuid(self.context, |
|
uuids.cell1) |
|
get_by_instance_uuid.return_value = objects.InstanceMapping( |
|
cell_mapping=cell_mapping) |
|
to_filtprops.return_value = filter_properties |
|
sched_instances.return_value = [[fake_selection1]] |
|
self.conductor.unshelve_instance(self.context, instance, fake_spec) |
|
# The fake_spec already has a project_id set which doesn't match |
|
# the instance.project_id so the spec's project_id won't be |
|
# overridden using the instance.project_id. |
|
self.assertNotEqual(fake_spec.project_id, instance.project_id) |
|
reset_forced_destinations.assert_called_once_with() |
|
# The fake_spec is only going to modified by reference for |
|
# ComputeTaskManager. |
|
if isinstance(self.conductor, |
|
conductor_manager.ComputeTaskManager): |
|
self.ensure_network_metadata_mock.assert_called_once_with( |
|
test.MatchType(objects.Instance)) |
|
self.heal_reqspec_is_bfv_mock.assert_called_once_with( |
|
self.context, fake_spec, instance) |
|
sched_instances.assert_called_once_with( |
|
self.context, fake_spec, [instance.uuid], |
|
return_alternates=False) |
|
self.assertEqual(cell_mapping, |
|
fake_spec.requested_destination.cell) |
|
else: |
|
# RPC API tests won't have the same request spec or instance |
|
# since they go over the wire. |
|
self.ensure_network_metadata_mock.assert_called_once_with( |
|
test.MatchType(objects.Instance)) |
|
self.heal_reqspec_is_bfv_mock.assert_called_once_with( |
|
self.context, test.MatchType(objects.RequestSpec), |
|
test.MatchType(objects.Instance)) |
|
sched_instances.assert_called_once_with( |
|
self.context, test.MatchType(objects.RequestSpec), |
|
[instance.uuid], return_alternates=False) |
|
# NOTE(sbauza): Since the instance is dehydrated when passing |
|
# through the RPC API, we can only assert mock.ANY for it |
|
unshelve_instance.assert_called_once_with( |
|
self.context, mock.ANY, host['host'], image=mock.ANY, |
|
filter_properties=filter_properties, node=host['nodename'] |
|
) |
|
|
|
do_test() |
|
|
|
@mock.patch('nova.compute.utils.add_instance_fault_from_exc') |
|
@mock.patch.object(image_api.API, 'get', |
|
side_effect=exc.ImageNotFound(image_id=uuids.image)) |
|
def test_unshelve_offloaded_instance_glance_image_not_found( |
|
self, mock_get, add_instance_fault_from_exc): |
|
instance = self._create_fake_instance_obj() |
|
instance.vm_state = vm_states.SHELVED_OFFLOADED |
|
instance.task_state = task_states.UNSHELVING |
|
instance.save() |
|
system_metadata = instance.system_metadata |
|
|
|
system_metadata['shelved_at'] = timeutils.utcnow() |
|
system_metadata['shelved_host'] = 'fake-mini' |
|
system_metadata['shelved_image_id'] = uuids.image |
|
|
|
reason = ('Unshelve attempted but the image %s ' |
|
'cannot be found.') % uuids.image |
|
|
|
self.assertRaises( |
|
exc.UnshelveException, |
|
self.conductor_manager.unshelve_instance, |
|
self.context, instance, self.request_spec) |
|
add_instance_fault_from_exc.assert_called_once_with( |
|
self.context, instance, mock_get.side_effect, mock.ANY, |
|
fault_message=reason) |
|
self.assertEqual(instance.vm_state, vm_states.ERROR) |
|
mock_get.assert_called_once_with(self.context, uuids.image, |
|
show_deleted=False) |
|
|
|
def test_unshelve_offloaded_instance_image_id_is_none(self): |
|
|
|
instance = self._create_fake_instance_obj() |
|
instance.vm_state = vm_states.SHELVED_OFFLOADED |
|
instance.task_state = task_states.UNSHELVING |
|
# 'shelved_image_id' is None for volumebacked instance |
|
instance.system_metadata['shelved_image_id'] = None |
|
|
|
with test.nested( |
|
mock.patch.object(self.conductor_manager, |
|
'_schedule_instances'), |
|
mock.patch.object(self.conductor_manager.compute_rpcapi, |
|
'unshelve_instance'), |
|
mock.patch.object(objects.InstanceMapping, |
|
'get_by_instance_uuid'), |
|
) as (schedule_mock, unshelve_mock, get_by_instance_uuid): |
|
schedule_mock.return_value = [[fake_selection1]] |
|
get_by_instance_uuid.return_value = objects.InstanceMapping( |
|
cell_mapping=objects.CellMapping.get_by_uuid( |
|
self.context, uuids.cell1)) |
|
self.conductor_manager.unshelve_instance( |
|
self.context, instance, self.request_spec) |
|
self.assertEqual(1, unshelve_mock.call_count) |
|
|
|
@mock.patch.object(compute_rpcapi.ComputeAPI, 'unshelve_instance') |
|
@mock.patch.object(conductor_manager.ComputeTaskManager, |
|
'_schedule_instances', |
|
return_value=[[objects.Selection( |
|
service_host='fake_host', nodename='fake_node', |
|
limits=None)]]) |
|
@mock.patch.object(image_api.API, 'get', return_value='fake_image') |
|
@mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid') |
|
def test_unshelve_instance_schedule_and_rebuild( |
|
self, mock_im, mock_get, mock_schedule, mock_unshelve): |
|
fake_spec = objects.RequestSpec() |
|
# Set requested_destination to test setting cell_mapping in |
|
# existing object. |
|
fake_spec.requested_destination = objects.Destination( |
|
host="dummy", cell=None) |
|
cell_mapping = objects.CellMapping.get_by_uuid(self.context, |
|
uuids.cell1) |
|
mock_im.return_value = objects.InstanceMapping( |
|
cell_mapping=cell_mapping) |
|
instance = self._create_fake_instance_obj() |
|
instance.vm_state = vm_states.SHELVED_OFFLOADED |
|
instance.save() |
|
system_metadata = instance.system_metadata |
|
|
|
system_metadata['shelved_at'] = timeutils.utcnow() |
|
system_metadata['shelved_image_id'] = 'fake_image_id' |
|
system_metadata['shelved_host'] = 'fake-mini' |
|
self.conductor_manager.unshelve_instance( |
|
self.context, instance, fake_spec) |
|
self.assertEqual(cell_mapping, fake_spec.requested_destination.cell) |
|
mock_get.assert_called_once_with( |
|
self.context, 'fake_image_id', show_deleted=False) |
|
mock_schedule.assert_called_once_with( |
|
self.context, fake_spec, [instance.uuid], return_alternates=False) |
|
mock_unshelve.assert_called_once_with( |
|
self.context, instance, 'fake_host', image='fake_image', |
|
filter_properties=dict( |
|
# populate_filter_properties adds limits={} |
|
fake_spec.to_legacy_filter_properties_dict(), limits={}), |
|
node='fake_node') |
|
|
|
def test_unshelve_instance_schedule_and_rebuild_novalid_host(self): |
|
instance = self._create_fake_instance_obj() |
|
instance.vm_state = vm_states.SHELVED_OFFLOADED |
|
instance.save() |
|
system_metadata = instance.system_metadata |
|
|
|
def fake_schedule_instances(context, request_spec, *instances, |
|
**kwargs): |
|
raise exc.NoValidHost(reason='') |
|
|
|
with test.nested( |
|
mock.patch.object(self.conductor_manager.image_api, 'get', |
|
return_value='fake_image'), |
|
mock.patch.object(self.conductor_manager, '_schedule_instances', |
|
fake_schedule_instances), |
|
mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid'), |
|
mock.patch.object(objects.Instance, 'save') |
|
) as (_get_image, _schedule_instances, get_by_instance_uuid, save): |
|
get_by_instance_uuid.return_value = objects.InstanceMapping( |
|
cell_mapping=objects.CellMapping.get_by_uuid( |
|
self.context, uuids.cell1)) |
|
system_metadata['shelved_at'] = timeutils.utcnow() |
|
system_metadata['shelved_image_id'] = 'fake_image_id' |
|
system_metadata['shelved_host'] = 'fake-mini' |
|
self.conductor_manager.unshelve_instance( |
|
self.context, instance, self.request_spec) |
|
_get_image.assert_has_calls([mock.call(self.context, |
|
system_metadata['shelved_image_id'], |
|
show_deleted=False)]) |
|
self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state) |
|
|
|
@mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid') |
|
@mock.patch.object(conductor_manager.ComputeTaskManager, |
|
'_schedule_instances', |
|
side_effect=messaging.MessagingTimeout()) |
|
@mock.patch.object(image_api.API, 'get', return_value='fake_image') |
|
@mock.patch.object(objects.Instance, 'save') |
|
def test_unshelve_instance_schedule_and_rebuild_messaging_exception( |
|
self, mock_save, mock_get_image, mock_schedule_instances, mock_im): |
|
mock_im.return_value = objects.InstanceMapping( |
|
cell_mapping=objects.CellMapping.get_by_uuid(self.context, |
|
uuids.cell1)) |
|
instance = self._create_fake_instance_obj() |
|
instance.vm_state = vm_states.SHELVED_OFFLOADED |
|
instance.task_state = task_states.UNSHELVING |
|
instance.save() |
|
system_metadata = instance.system_metadata |
|
|
|
system_metadata['shelved_at'] = timeutils.utcnow() |
|
system_metadata['shelved_image_id'] = 'fake_image_id' |
|
system_metadata['shelved_host'] = 'fake-mini' |
|
self.assertRaises(messaging.MessagingTimeout, |
|
self.conductor_manager.unshelve_instance, |
|
self.context, instance, self.request_spec) |
|
mock_get_image.assert_has_calls([mock.call(self.context, |
|
system_metadata['shelved_image_id'], |
|
show_deleted=False)]) |
|
self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state) |
|
self.assertIsNone(instance.task_state) |
|
|
|
@mock.patch.object(compute_rpcapi.ComputeAPI, 'unshelve_instance') |
|
@mock.patch.object(conductor_manager.ComputeTaskManager, |
|
'_schedule_instances', return_value=[[ |
|
objects.Selection(service_host='fake_host', |
|
nodename='fake_node', |
|
limits=None)]]) |
|
@mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid') |
|
def test_unshelve_instance_schedule_and_rebuild_volume_backed( |
|
self, mock_im, mock_schedule, mock_unshelve): |
|
fake_spec = objects.RequestSpec() |
|
mock_im.return_value = objects.InstanceMapping( |
|
cell_mapping=objects.CellMapping.get_by_uuid(self.context, |
|
uuids.cell1)) |
|
instance = self._create_fake_instance_obj() |
|
instance.vm_state = vm_states.SHELVED_OFFLOADED |
|
instance.save() |
|
system_metadata = instance.system_metadata |
|
|
|
system_metadata['shelved_at'] = timeutils.utcnow() |
|
system_metadata['shelved_host'] = 'fake-mini' |
|
self.conductor_manager.unshelve_instance( |
|
self.context, instance, fake_spec) |
|
mock_schedule.assert_called_once_with( |
|
self.context, fake_spec, [instance.uuid], return_alternates=False) |
|
mock_unshelve.assert_called_once_with( |
|
self.context, instance, 'fake_host', image=None, |
|
filter_properties={'limits': {}}, node='fake_node') |
|
|
|
def test_rebuild_instance(self): |
|
inst_obj = self._create_fake_instance_obj() |
|
rebuild_args, compute_args = self._prepare_rebuild_args( |
|
{'host': inst_obj.host}) |
|
|
|
with test.nested( |
|
mock.patch.object(self.conductor_manager.compute_rpcapi, |
|
'rebuild_instance'), |
|
mock.patch.object(self.conductor_manager.query_client, |
|
'select_destinations') |
|
) as (rebuild_mock, select_dest_mock): |
|
self.conductor_manager.rebuild_instance(context=self.context, |
|
instance=inst_obj, |
|
**rebuild_args) |
|
self.assertFalse(select_dest_mock.called) |
|
rebuild_mock.assert_called_once_with(self.context, |
|
instance=inst_obj, |
|
**compute_args) |
|
|
|
@mock.patch('nova.compute.utils.notify_about_instance_rebuild') |
|
def test_rebuild_instance_with_scheduler(self, mock_notify): |
|
inst_obj = self._create_fake_instance_obj() |
|
inst_obj.host = 'noselect' |
|
expected_host = 'thebesthost' |
|
expected_node = 'thebestnode' |
|
expected_limits = None |
|
fake_selection = objects.Selection(service_host=expected_host, |
|
nodename=expected_node, limits=None) |
|
rebuild_args, compute_args = self._prepare_rebuild_args( |
|
{'host': None, 'node': expected_node, 'limits': expected_limits}) |
|
fake_spec = objects.RequestSpec() |
|
rebuild_args['request_spec'] = fake_spec |
|
inst_uuids = [inst_obj.uuid] |
|
with test.nested( |
|
mock.patch.object(self.conductor_manager.compute_rpcapi, |
|
'rebuild_instance'), |
|
mock.patch.object(scheduler_utils, 'setup_instance_group', |
|
return_value=False), |
|
mock.patch.object(self.conductor_manager.query_client, |
|
'select_destinations', |
|
return_value=[[fake_selection]]) |
|
) as (rebuild_mock, sig_mock, select_dest_mock): |
|
self.conductor_manager.rebuild_instance(context=self.context, |
|
instance=inst_obj, |
|
**rebuild_args) |
|
self.ensure_network_metadata_mock.assert_called_once_with( |
|
inst_obj) |
|
self.heal_reqspec_is_bfv_mock.assert_called_once_with( |
|
self.context, fake_spec, inst_obj) |
|
select_dest_mock.assert_called_once_with(self.context, fake_spec, |
|
inst_uuids, return_objects=True, return_alternates=False) |
|
compute_args['host'] = expected_host |
|
compute_args['request_spec'] = fake_spec |
|
rebuild_mock.assert_called_once_with(self.context, |
|
instance=inst_obj, |
|
**compute_args) |
|
self.assertEqual(inst_obj.project_id, fake_spec.project_id) |
|
self.assertEqual('compute.instance.rebuild.scheduled', |
|
fake_notifier.NOTIFICATIONS[0].event_type) |
|
mock_notify.assert_called_once_with( |
|
self.context, inst_obj, 'thebesthost', action='rebuild_scheduled', |
|
source='nova-conductor') |
|
|
|
def test_rebuild_instance_with_scheduler_no_host(self): |
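        # NoValidHost from the scheduler should propagate, put the instance
        # into ERROR via set_vm_state_and_notify and never cast the rebuild
        # to compute.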
|
inst_obj = self._create_fake_instance_obj() |
|
inst_obj.host = 'noselect' |
|
rebuild_args, _ = self._prepare_rebuild_args({'host': None}) |
|
fake_spec = objects.RequestSpec() |
|
rebuild_args['request_spec'] = fake_spec |
|
|
|
with test.nested( |
|
mock.patch.object(self.conductor_manager.compute_rpcapi, |
|
'rebuild_instance'), |
|
mock.patch.object(scheduler_utils, 'setup_instance_group', |
|
return_value=False), |
|
mock.patch.object(self.conductor_manager.query_client, |
|
'select_destinations', |
|
side_effect=exc.NoValidHost(reason='')), |
|
mock.patch.object(scheduler_utils, 'set_vm_state_and_notify') |
|
) as (rebuild_mock, sig_mock, |
|
select_dest_mock, set_vm_state_and_notify_mock): |
|
self.assertRaises(exc.NoValidHost, |
|
self.conductor_manager.rebuild_instance, |
|
context=self.context, instance=inst_obj, |
|
**rebuild_args) |
|
select_dest_mock.assert_called_once_with(self.context, fake_spec, |
|
[inst_obj.uuid], return_objects=True, |
|
return_alternates=False) |
|
self.assertEqual( |
|
set_vm_state_and_notify_mock.call_args[0][4]['vm_state'], |
|
vm_states.ERROR) |
|
self.assertFalse(rebuild_mock.called) |
|
|
|
@mock.patch.object(conductor_manager.compute_rpcapi.ComputeAPI, |
|
'rebuild_instance') |
|
@mock.patch.object(scheduler_utils, 'setup_instance_group') |
|
@mock.patch.object(conductor_manager.query.SchedulerQueryClient, |
|
'select_destinations') |
|
@mock.patch.object(conductor_manager.ComputeTaskManager, |
|
'_set_vm_state_and_notify') |
|
def test_rebuild_instance_with_scheduler_group_failure(self, |
|
state_mock, |
|
select_dest_mock, |
|
sig_mock, |
|
rebuild_mock): |
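        # A violated server group policy aborts the rebuild before
        # scheduling: the instance goes to ERROR and the evacuation
        # migration record ends up with an 'error' status.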
|
inst_obj = self._create_fake_instance_obj() |
|
rebuild_args, _ = self._prepare_rebuild_args({'host': None}) |
|
rebuild_args['request_spec'] = self.request_spec |
|
|
|
exception = exc.UnsupportedPolicyException(reason='') |
|
sig_mock.side_effect = exception |
|
|
|
        # rebuild_instance() is a cast, so we need to wait for it to complete
|
self.useFixture(cast_as_call.CastAsCall(self)) |
|
|
|
# Create the migration record (normally created by the compute API). |
|
migration = objects.Migration(self.context, |
|
source_compute=inst_obj.host, |
|
source_node=inst_obj.node, |
|
instance_uuid=inst_obj.uuid, |
|
status='accepted', |
|
migration_type='evacuation') |
|
migration.create() |
|
|
|
self.assertRaises(exc.UnsupportedPolicyException, |
|
self.conductor.rebuild_instance, |
|
self.context, |
|
inst_obj, |
|
**rebuild_args) |
|
updates = {'vm_state': vm_states.ERROR, 'task_state': None} |
|
state_mock.assert_called_once_with(self.context, inst_obj.uuid, |
|
'rebuild_server', updates, |
|
exception, mock.ANY) |
|
self.assertFalse(select_dest_mock.called) |
|
self.assertFalse(rebuild_mock.called) |
|
|
|
# Assert the migration status was updated. |
|
migration = objects.Migration.get_by_id(self.context, migration.id) |
|
self.assertEqual('error', migration.status) |
|
|
|
def test_rebuild_instance_evacuate_migration_record(self): |
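        # Evacuate to an explicitly requested host: no scheduling happens and
        # the pre-created 'accepted' migration record is handed to compute.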
|
inst_obj = self._create_fake_instance_obj() |
|
migration = objects.Migration(context=self.context, |
|
source_compute=inst_obj.host, |
|
source_node=inst_obj.node, |
|
instance_uuid=inst_obj.uuid, |
|
status='accepted', |
|
migration_type='evacuation') |
|
rebuild_args, compute_args = self._prepare_rebuild_args( |
|
{'host': inst_obj.host, 'migration': migration}) |
|
|
|
with test.nested( |
|
mock.patch.object(self.conductor_manager.compute_rpcapi, |
|
'rebuild_instance'), |
|
mock.patch.object(self.conductor_manager.query_client, |
|
'select_destinations'), |
|
mock.patch.object(objects.Migration, 'get_by_instance_and_status', |
|
return_value=migration) |
|
) as (rebuild_mock, select_dest_mock, get_migration_mock): |
|
self.conductor_manager.rebuild_instance(context=self.context, |
|
instance=inst_obj, |
|
**rebuild_args) |
|
self.assertFalse(select_dest_mock.called) |
|
rebuild_mock.assert_called_once_with(self.context, |
|
instance=inst_obj, |
|
**compute_args) |
|
|
|
@mock.patch('nova.compute.utils.notify_about_instance_rebuild') |
|
def test_rebuild_instance_with_request_spec(self, mock_notify): |
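        # A caller-supplied RequestSpec is reused for scheduling; for
        # evacuate (recreate) any forced destinations are reset first.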
|
inst_obj = self._create_fake_instance_obj() |
|
inst_obj.host = 'noselect' |
|
expected_host = 'thebesthost' |
|
expected_node = 'thebestnode' |
|
expected_limits = None |
|
fake_selection = objects.Selection(service_host=expected_host, |
|
nodename=expected_node, limits=None) |
|
fake_spec = objects.RequestSpec(ignore_hosts=[]) |
|
rebuild_args, compute_args = self._prepare_rebuild_args( |
|
{'host': None, 'node': expected_node, 'limits': expected_limits, |
|
'request_spec': fake_spec}) |
|
with test.nested( |
|
mock.patch.object(self.conductor_manager.compute_rpcapi, |
|
'rebuild_instance'), |
|
mock.patch.object(scheduler_utils, 'setup_instance_group', |
|
return_value=False), |
|
mock.patch.object(self.conductor_manager.query_client, |
|
'select_destinations', |
|
return_value=[[fake_selection]]), |
|
mock.patch.object(fake_spec, 'reset_forced_destinations'), |
|
) as (rebuild_mock, sig_mock, select_dest_mock, reset_fd): |
|
self.conductor_manager.rebuild_instance(context=self.context, |
|
instance=inst_obj, |
|
**rebuild_args) |
|
if rebuild_args['recreate']: |
|
reset_fd.assert_called_once_with() |
|
else: |
|
reset_fd.assert_not_called() |
|
select_dest_mock.assert_called_once_with(self.context, |
|
fake_spec, [inst_obj.uuid], return_objects=True, |
|
return_alternates=False) |
|
compute_args['host'] = expected_host |
|
compute_args['request_spec'] = fake_spec |
|
rebuild_mock.assert_called_once_with(self.context, |
|
instance=inst_obj, |
|
**compute_args) |
|
self.assertEqual('compute.instance.rebuild.scheduled', |
|
fake_notifier.NOTIFICATIONS[0].event_type) |
|
mock_notify.assert_called_once_with( |
|
self.context, inst_obj, 'thebesthost', action='rebuild_scheduled', |
|
source='nova-conductor') |
|
|
|
|
|
class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase): |
|
"""ComputeTaskManager Tests.""" |
|
|
|
NUMBER_OF_CELLS = 2 |
|
|
|
def setUp(self): |
|
super(ConductorTaskTestCase, self).setUp() |
|
self.conductor = conductor_manager.ComputeTaskManager() |
|
self.conductor_manager = self.conductor |
|
|
|
params = {} |
|
self.ctxt = params['context'] = context.RequestContext( |
|
'fake-user', 'fake-project').elevated() |
|
build_request = fake_build_request.fake_req_obj(self.ctxt) |
|
del build_request.instance.id |
|
build_request.create() |
|
params['build_requests'] = objects.BuildRequestList( |
|
objects=[build_request]) |
|
im = objects.InstanceMapping( |
|
self.ctxt, instance_uuid=build_request.instance.uuid, |
|
cell_mapping=None, project_id=self.ctxt.project_id) |
|
im.create() |
|
rs = fake_request_spec.fake_spec_obj(remove_id=True) |
|
rs._context = self.ctxt |
|
rs.instance_uuid = build_request.instance_uuid |
|
rs.instance_group = None |
|
rs.retry = None |
|
rs.limits = None |
|
rs.create() |
|
params['request_specs'] = [rs] |
|
params['image'] = {'fake_data': 'should_pass_silently'} |
|
        params['admin_password'] = 'admin_password'
|
params['injected_files'] = 'injected_files' |
|
params['requested_networks'] = None |
|
bdm = objects.BlockDeviceMapping(self.ctxt, **dict( |
|
source_type='blank', destination_type='local', |
|
guest_format='foo', device_type='disk', disk_bus='', |
|
boot_index=1, device_name='xvda', delete_on_termination=False, |
|
snapshot_id=None, volume_id=None, volume_size=1, |
|
image_id='bar', no_device=False, connection_info=None, |
|
tag='')) |
|
params['block_device_mapping'] = objects.BlockDeviceMappingList( |
|
objects=[bdm]) |
|
tag = objects.Tag(self.ctxt, tag='tag1') |
|
params['tags'] = objects.TagList(objects=[tag]) |
|
self.params = params |
|
|
|
@mock.patch('nova.availability_zones.get_host_availability_zone') |
|
@mock.patch('nova.compute.rpcapi.ComputeAPI.build_and_run_instance') |
|
@mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations') |
|
def _do_schedule_and_build_instances_test( |
|
self, params, select_destinations, build_and_run_instance, |
|
get_az, host_list=None): |
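        # Common helper: mock out the scheduler and compute RPC, run
        # schedule_and_build_instances with the given params and return the
        # uuid of the instance that was handed to build_and_run_instance.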
|
if not host_list: |
|
host_list = copy.deepcopy(fake_host_lists1) |
|
select_destinations.return_value = host_list |
|
get_az.return_value = 'myaz' |
|
details = {} |
|
|
|
def _build_and_run_instance(ctxt, *args, **kwargs): |
|
details['instance'] = kwargs['instance'] |
|
self.assertTrue(kwargs['instance'].id) |
|
self.assertTrue(kwargs['filter_properties'].get('retry')) |
|
self.assertEqual(1, len(kwargs['block_device_mapping'])) |
|
# FIXME(danms): How to validate the db connection here? |
|
|
|
self.start_service('compute', host='host1') |
|
build_and_run_instance.side_effect = _build_and_run_instance |
|
self.conductor.schedule_and_build_instances(**params) |
|
self.assertTrue(build_and_run_instance.called) |
|
get_az.assert_called_once_with(mock.ANY, 'host1') |
|
|
|
instance_uuid = details['instance'].uuid |
|
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( |
|
self.ctxt, instance_uuid) |
|
ephemeral = list(filter(block_device.new_format_is_ephemeral, bdms)) |
|
self.assertEqual(1, len(ephemeral)) |
|
swap = list(filter(block_device.new_format_is_swap, bdms)) |
|
self.assertEqual(0, len(swap)) |
|
|
|
self.assertEqual(1, ephemeral[0].volume_size) |
|
return instance_uuid |
|
|
|
@mock.patch('nova.notifications.send_update_with_states') |
|
def test_schedule_and_build_instances(self, mock_notify): |
|
# NOTE(melwitt): This won't work with call_args because the call |
|
# arguments are recorded as references and not as copies of objects. |
|
# So even though the notify method was called with Instance._context |
|
# targeted, by the time we assert with call_args, the target_cell |
|
# context manager has already exited and the referenced Instance |
|
# object's _context.db_connection has been restored to None. |
|
def fake_notify(ctxt, instance, *args, **kwargs): |
|
# Assert the instance object is targeted when going through the |
|
# notification code. |
|
self.assertIsNotNone(ctxt.db_connection) |
|
self.assertIsNotNone(instance._context.db_connection) |
|
|
|
mock_notify.side_effect = fake_notify |
|
|
|
instance_uuid = self._do_schedule_and_build_instances_test( |
|
self.params) |
|
cells = objects.CellMappingList.get_all(self.ctxt) |
|
|
|
# NOTE(danms): Assert that we created the InstanceAction in the |
|
# correct cell |
|
# NOTE(Kevin Zheng): Also assert tags in the correct cell |
|
for cell in cells: |
|
with context.target_cell(self.ctxt, cell) as cctxt: |
|
actions = objects.InstanceActionList.get_by_instance_uuid( |
|
cctxt, instance_uuid) |
|
if cell.name == 'cell1': |
|
self.assertEqual(1, len(actions)) |
|
tags = objects.TagList.get_by_resource_id( |
|
cctxt, instance_uuid) |
|
self.assertEqual(1, len(tags)) |
|
else: |
|
self.assertEqual(0, len(actions)) |
|
|
|
def test_schedule_and_build_instances_no_tags_provided(self): |
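        # Same flow as above but with no tags in the params: the instance
        # action is still created in cell1 while no tags are stored there.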
|
params = copy.deepcopy(self.params) |
|
del params['tags'] |
|
instance_uuid = self._do_schedule_and_build_instances_test(params) |
|
cells = objects.CellMappingList.get_all(self.ctxt) |
|
|
|
# NOTE(danms): Assert that we created the InstanceAction in the |
|
# correct cell |
|
# NOTE(Kevin Zheng): Also assert tags in the correct cell |
|
for cell in cells: |
|
with context.target_cell(self.ctxt, cell) as cctxt: |
|
actions = objects.InstanceActionList.get_by_instance_uuid( |
|
cctxt, instance_uuid) |
|
if cell.name == 'cell1': |
|
self.assertEqual(1, len(actions)) |
|
tags = objects.TagList.get_by_resource_id( |
|
cctxt, instance_uuid) |
|
self.assertEqual(0, len(tags)) |
|
else: |
|
self.assertEqual(0, len(actions)) |
|
|
|
@mock.patch('nova.compute.rpcapi.ComputeAPI.build_and_run_instance') |
|
@mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations') |
|
@mock.patch('nova.objects.HostMapping.get_by_host') |
|
@mock.patch('nova.availability_zones.get_host_availability_zone', |
|
return_value='nova') |
|
def test_schedule_and_build_multiple_instances(self, mock_get_az, |
|
get_hostmapping, |
|
select_destinations, |
|
build_and_run_instance): |
|
# This list needs to match the number of build_requests and the number |
|
# of request_specs in params. |
|
select_destinations.return_value = [[fake_selection1], |
|
[fake_selection2], [fake_selection1], [fake_selection2]] |
|
params = self.params |
|
|
|
self.start_service('compute', host='host1') |
|
self.start_service('compute', host='host2') |
|
|
|
# Because of the cache, this should only be called twice, |
|
# once for the first and once for the third request. |
|
get_hostmapping.side_effect = self.host_mappings.values() |
|
|
|
# create three additional build requests for a total of four |
|
for x in range(3): |
|
build_request = fake_build_request.fake_req_obj(self.ctxt) |
|
del build_request.instance.id |
|
build_request.create() |
|
params['build_requests'].objects.append(build_request) |
|
im2 = objects.InstanceMapping( |
|
self.ctxt, instance_uuid=build_request.instance.uuid, |
|
cell_mapping=None, project_id=self.ctxt.project_id) |
|
im2.create() |
|
params['request_specs'].append(objects.RequestSpec( |
|
instance_uuid=build_request.instance_uuid, |
|
instance_group=None)) |
|
|
|
        # Now delete the third build request before passing the object on to
        # schedule_and_build_instances. The instance is still created for
        # that build request, but when BuildRequest.destroy() raises
        # BuildRequestNotFound we clean up the instance instead of passing
        # it to build_and_run_instance, and the fourth build request still
        # gets processed.
|
deleted_build_request = params['build_requests'][2] |
|
deleted_build_request.destroy() |
|
|
|
def _build_and_run_instance(ctxt, *args, **kwargs): |
|
# Make sure the instance wasn't the one that was deleted. |
|
instance = kwargs['instance'] |
|
self.assertNotEqual(deleted_build_request.instance_uuid, |
|
instance.uuid) |
|
# This just makes sure that the instance was created in the DB. |
|
self.assertTrue(kwargs['instance'].id) |
|
self.assertEqual(1, len(kwargs['block_device_mapping'])) |
|
# FIXME(danms): How to validate the db connection here? |
|
|
|
build_and_run_instance.side_effect = _build_and_run_instance |
|
self.conductor.schedule_and_build_instances(**params) |
|
self.assertEqual(3, build_and_run_instance.call_count) |
|
        # We're processing 4 instances over 2 hosts, so we should only look
        # up the AZ once per host.
|
mock_get_az.assert_has_calls([ |
|
mock.call(self.ctxt, 'host1'), mock.call(self.ctxt, 'host2')], |
|
any_order=True) |
|
|
|
@mock.patch('nova.compute.rpcapi.ComputeAPI.build_and_run_instance') |
|
@mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations') |
|
@mock.patch('nova.objects.HostMapping.get_by_host') |
|
def test_schedule_and_build_multiple_cells( |
|
self, get_hostmapping, select_destinations, |
|
build_and_run_instance): |
|
"""Test that creates two instances in separate cells.""" |
|
# This list needs to match the number of build_requests and the number |
|
# of request_specs in params. |
|
select_destinations.return_value = [[fake_selection1], |
|
[fake_selection2]] |
|
|
|
params = self.params |
|
|
|
# The cells are created in the base TestCase setup. |
|
self.start_service('compute', host='host1', cell='cell1') |
|
self.start_service('compute', host='host2', cell='cell2') |
|
|
|
get_hostmapping.side_effect = self.host_mappings.values() |
|
|
|
# create an additional build request and request spec |
|
build_request = fake_build_request.fake_req_obj(self.ctxt) |
|
del build_request.instance.id |
|
build_request.create() |
|
params['build_requests'].objects.append(build_request) |
|
im2 = objects.InstanceMapping( |
|
self.ctxt, instance_uuid=build_request.instance.uuid, |
|
cell_mapping=None, project_id=self.ctxt.project_id) |
|
im2.create() |
|
params['request_specs'].append(objects.RequestSpec( |
|
instance_uuid=build_request.instance_uuid, |
|
instance_group=None)) |
|
|
|
instance_cells = set() |
|
|
|
def _build_and_run_instance(ctxt, *args, **kwargs): |
|
instance = kwargs['instance'] |
|
# Keep track of the cells that the instances were created in. |
|
inst_mapping = objects.InstanceMapping.get_by_instance_uuid( |
|
ctxt, instance.uuid) |
|
instance_cells.add(inst_mapping.cell_mapping.uuid) |
|
|
|
build_and_run_instance.side_effect = _build_and_run_instance |
|
self.conductor.schedule_and_build_instances(**params) |
|
self.assertEqual(2, build_and_run_instance.call_count) |
|
self.assertEqual(2, len(instance_cells)) |
|
|
|
@mock.patch('nova.compute.utils.notify_about_compute_task_error') |
|
@mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations') |
|
def test_schedule_and_build_scheduler_failure(self, select_destinations, |
|
mock_notify): |
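        # Any scheduler exception should bury the instance in cell0 in ERROR
        # state and emit a compute_task error notification carrying the
        # traceback.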
|
select_destinations.side_effect = Exception |
|
self.start_service('compute', host='fake-host') |
|
self.conductor.schedule_and_build_instances(**self.params) |
|
with conductor_manager.try_target_cell(self.ctxt, |
|
self.cell_mappings['cell0']): |
|
instance = objects.Instance.get_by_uuid( |
|
self.ctxt, self.params['build_requests'][0].instance_uuid) |
|
self.assertEqual('error', instance.vm_state) |
|
self.assertIsNone(instance.task_state) |
|
|
|
mock_notify.assert_called_once_with( |
|
test.MatchType(context.RequestContext), 'build_instances', |
|
instance.uuid, test.MatchType(dict), 'error', |
|
test.MatchType(Exception), test.MatchType(str)) |
|
request_spec_dict = mock_notify.call_args_list[0][0][3] |
|
for key in ('instance_type', 'num_instances', 'instance_properties', |
|
'image'): |
|
self.assertIn(key, request_spec_dict) |
|
tb = mock_notify.call_args_list[0][0][6] |
|
self.assertIn('Traceback (most recent call last):', tb) |
|
|
|
@mock.patch('nova.objects.TagList.destroy') |
|
@mock.patch('nova.objects.TagList.create') |
|
@mock.patch('nova.compute.utils.notify_about_instance_action') |
|
@mock.patch('nova.compute.utils.notify_about_instance_usage') |
|
@mock.patch('nova.compute.rpcapi.ComputeAPI.build_and_run_instance') |
|
@mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations') |
|
@mock.patch('nova.objects.BuildRequest.destroy') |
|
@mock.patch('nova.conductor.manager.ComputeTaskManager._bury_in_cell0') |
|
def test_schedule_and_build_delete_during_scheduling(self, |
|
bury, |
|
br_destroy, |
|
select_destinations, |
|
build_and_run, |
|
legacy_notify, |
|
notify, |
|
taglist_create, |
|
taglist_destroy): |
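        # The server is deleted while scheduling is in flight:
        # BuildRequest.destroy() raises BuildRequestNotFound, so the
        # conductor cleans up (tags included) and sends delete notifications
        # instead of building the instance.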
|
|
|
br_destroy.side_effect = exc.BuildRequestNotFound(uuid='foo') |
|
self.start_service('compute', host='host1') |
|
select_destinations.return_value = [[fake_selection1]] |
|
taglist_create.return_value = self.params['tags'] |
|
self.conductor.schedule_and_build_instances(**self.params) |
|
self.assertFalse(build_and_run.called) |
|
self.assertFalse(bury.called) |
|
self.assertTrue(br_destroy.called) |
|
taglist_destroy.assert_called_once_with( |
|
test.MatchType(context.RequestContext), |
|
self.params['build_requests'][0].instance_uuid) |
|
# Make sure TagList.destroy was called with the targeted context. |
|
self.assertIsNotNone(taglist_destroy.call_args[0][0].db_connection) |
|
|
|
test_utils.assert_instance_delete_notification_by_uuid( |
|
legacy_notify, notify, |
|
self.params['build_requests'][0].instance_uuid, |
|
self.conductor.notifier, test.MatchType(context.RequestContext), |
|
expect_targeted_context=True, expected_source='nova-conductor', |
|
expected_host='host1') |
|
|
|
@mock.patch('nova.compute.utils.notify_about_instance_action') |
|
@mock.patch('nova.objects.Instance.destroy') |
|
@mock.patch('nova.compute.utils.notify_about_instance_usage') |
|
@mock.patch('nova.compute.rpcapi.ComputeAPI.build_and_run_instance') |
|
@mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations') |
|
@mock.patch('nova.objects.BuildRequest.destroy') |
|
@mock.patch('nova.conductor.manager.ComputeTaskManager._bury_in_cell0') |
|
def test_schedule_and_build_delete_during_scheduling_host_changed( |
|
self, bury, br_destroy, select_destinations, |
|
build_and_run, legacy_notify, instance_destroy, notify): |
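        # Same delete-during-scheduling race, but the first
        # Instance.destroy() fails because the host changed underneath us;
        # the destroy should be retried (two calls in total).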
|
|
|
br_destroy.side_effect = exc.BuildRequestNotFound(uuid='foo') |
|
instance_destroy.side_effect = [ |
|
exc.ObjectActionError(action='destroy', |
|
reason='host changed'), |
|
None, |
|
] |
|
|
|
self.start_service('compute', host='host1') |
|
select_destinations.return_value = [[fake_selection1]] |
|
self.conductor.schedule_and_build_instances(**self.params) |
|
self.assertFalse(build_and_run.called) |
|
self.assertFalse(bury.called) |
|
self.assertTrue(br_destroy.called) |
|
self.assertEqual(2, instance_destroy.call_count) |
|
|
|
test_utils.assert_instance_delete_notification_by_uuid( |
|
legacy_notify, notify, |
|
self.params['build_requests'][0].instance_uuid, |
|
self.conductor.notifier, test.MatchType(context.RequestContext), |
|
expect_targeted_context=True, expected_source='nova-conductor', |
|
expected_host='host1') |
|
|
|
@mock.patch('nova.compute.utils.notify_about_instance_action') |
|
@mock.patch('nova.objects.Instance.destroy') |
|
@mock.patch('nova.compute.utils.notify_about_instance_usage') |
|
@mock.patch('nova.compute.rpcapi.ComputeAPI.build_and_run_instance') |
|
@mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations') |
|
@mock.patch('nova.objects.BuildRequest.destroy') |
|
@mock.patch('nova.conductor.manager.ComputeTaskManager._bury_in_cell0') |
|
def test_schedule_and_build_delete_during_scheduling_instance_not_found( |
|
self, bury, br_destroy, select_destinations, |
|
build_and_run, legacy_notify, instance_destroy, notify): |
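        # If the instance is already gone (InstanceNotFound) there is nothing
        # to retry: destroy is only attempted once and the delete
        # notifications are still sent.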
|
|
|
br_destroy.side_effect = exc.BuildRequestNotFound(uuid='foo') |
|
instance_destroy.side_effect = [ |
|
exc.InstanceNotFound(instance_id='fake'), |
|
None, |
|
] |
|
|
|
self.start_service('compute', host='host1') |
|
select_destinations.return_value = [[fake_selection1]] |
|
self.conductor.schedule_and_build_instances(**self.params) |
|
self.assertFalse(build_and_run.called) |
|
self.assertFalse(bury.called) |
|
self.assertTrue(br_destroy.called) |
|
self.assertEqual(1, instance_destroy.call_count) |
|
test_utils.assert_instance_delete_notification_by_uuid( |
|
legacy_notify, notify, |
|
self.params['build_requests'][0].instance_uuid, |
|
self.conductor.notifier, test.MatchType(context.RequestContext), |
|
expect_targeted_context=True, expected_source='nova-conductor', |
|
expected_host='host1') |
|
|
|
@mock.patch('nova.compute.rpcapi.ComputeAPI.build_and_run_instance') |
|
@mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations') |
|
@mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') |
|
@mock.patch('nova.objects.BuildRequest.destroy') |
|
@mock.patch('nova.conductor.manager.ComputeTaskManager._bury_in_cell0') |
|
@mock.patch('nova.objects.Instance.create') |
|
def test_schedule_and_build_delete_before_scheduling(self, inst_create, |
|
bury, br_destroy, |
|
br_get_by_inst, |
|
select_destinations, |
|
build_and_run): |
|
"""Tests the case that the build request is deleted before the instance |
|
is created, so we do not create the instance. |
|
""" |
|
inst_uuid = self.params['build_requests'][0].instance.uuid |
|
br_get_by_inst.side_effect = exc.BuildRequestNotFound(uuid=inst_uuid) |
|
self.start_service('compute', host='host1') |
|
select_destinations.return_value = [[fake_selection1]] |
|
self.conductor.schedule_and_build_instances(**self.params) |
|
# we don't create the instance since the build request is gone |
|
self.assertFalse(inst_create.called) |
|
# we don't build the instance since we didn't create it |
|
self.assertFalse(build_and_run.called) |
|
# we don't bury the instance in cell0 since it's already deleted |
|
self.assertFalse(bury.called) |
|
        # we don't destroy the build request since it's already gone
|
self.assertFalse(br_destroy.called) |
|
|
|
@mock.patch('nova.compute.rpcapi.ComputeAPI.build_and_run_instance') |
|
@mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations') |
|
@mock.patch('nova.objects.BuildRequest.destroy') |
|
@mock.patch('nova.conductor.manager.ComputeTaskManager._bury_in_cell0') |
|
def test_schedule_and_build_unmapped_host_ends_up_in_cell0(self, |
|
bury, |
|
br_destroy, |
|
select_dest, |
|
build_and_run): |
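        # The scheduler picks a host that has no HostMapping, so the build
        # should be buried in cell0.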
|
def _fake_bury(ctxt, request_spec, exc, |
|
build_requests=None, instances=None, |
|
block_device_mapping=None, |
|
tags=None): |
|