# OpenStack Compute (Nova)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for ComputeManager()."""
import contextlib
import copy
import datetime
import fixtures as std_fixtures
import time
from cinderclient import exceptions as cinder_exception
from cursive import exception as cursive_exception
import ddt
from eventlet import event as eventlet_event
from eventlet import timeout as eventlet_timeout
from keystoneauth1 import exceptions as keystone_exception
import mock
import netaddr
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import fixture as service_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_utils import uuidutils
import testtools
import nova
from nova.compute import build_results
from nova.compute import manager
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import api as conductor_api
import nova.conf
from nova import context
from nova.db import api as db
from nova import exception
from nova.network import model as network_model
from nova.network import neutron as neutronv2_api
from nova import objects
from nova.objects import base as base_obj
from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import instance as instance_obj
from nova.objects import migrate_data as migrate_data_obj
from nova.objects import network_request as net_req_obj
from nova.pci import request as pci_request
from nova.scheduler.client import report
from nova import test
from nova.tests import fixtures
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.compute import fake_resource_tracker
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
from nova.tests.unit import fake_network_cache_model
from nova.tests.unit import fake_notifier
from nova.tests.unit.objects import test_instance_fault
from nova.tests.unit.objects import test_instance_info_cache
from nova.tests.unit.objects import test_instance_numa
from nova.virt.block_device import DriverVolumeBlockDevice as driver_bdm_volume
from nova.virt import driver as virt_driver
from nova.virt import event as virtevent
from nova.virt import fake as fake_driver
from nova.virt import hardware
from nova.volume import cinder
# Global nova configuration object; tests override options via self.flags().
CONF = nova.conf.CONF

# Sentinel host list shared by migration-related tests below.
fake_host_list = [mock.sentinel.host1]
@ddt.ddt
class ComputeManagerUnitTestCase(test.NoDBTestCase,
fake_resource_tracker.RTMockMixin):
def setUp(self):
    """Build a ComputeManager plus the fixtures shared by every test."""
    super(ComputeManagerUnitTestCase, self).setUp()
    self.compute = manager.ComputeManager()
    self.context = context.RequestContext(fakes.FAKE_USER_ID,
                                          fakes.FAKE_PROJECT_ID)
    # Make greenthread spawns synchronous and stub out event reporting so
    # tests run deterministically.
    self.useFixture(fixtures.SpawnIsSynchronousFixture())
    self.useFixture(fixtures.EventReporterStub())
    # Canned placement allocations: a single provider with CPU and RAM.
    self.allocations = {
        uuids.provider1: {
            "generation": 0,
            "resources": {"VCPU": 1, "MEMORY_MB": 512},
        },
    }
@mock.patch.object(manager.ComputeManager, '_get_power_state')
@mock.patch.object(manager.ComputeManager, '_sync_instance_power_state')
@mock.patch.object(objects.Instance, 'get_by_uuid')
@mock.patch.object(objects.Migration, 'get_by_instance_and_status')
@mock.patch.object(neutronv2_api.API, 'migrate_instance_start')
def _test_handle_lifecycle_event(self, migrate_instance_start,
                                 mock_get_migration, mock_get,
                                 mock_sync, mock_get_power_state,
                                 transition, event_pwr_state,
                                 current_pwr_state):
    """Drive handle_lifecycle_event() for one transition/power-state pair.

    Checks the instance lookup attrs, the conditional power-state sync,
    and the migration lookup + migrate_instance_start calls that should
    only happen for post-copy/migration-completed transitions.
    """
    event = mock.Mock()
    mock_get.return_value = fake_instance.fake_instance_obj(self.context,
        task_state=task_states.MIGRATING)
    event.get_transition.return_value = transition
    mock_get_power_state.return_value = current_pwr_state

    self.compute.handle_lifecycle_event(event)

    # info_cache is only loaded for the network-related migration events.
    expected_attrs = []
    if transition in [virtevent.EVENT_LIFECYCLE_POSTCOPY_STARTED,
                      virtevent.EVENT_LIFECYCLE_MIGRATION_COMPLETED]:
        expected_attrs.append('info_cache')

    mock_get.assert_called_once_with(
        test.MatchType(context.RequestContext),
        event.get_instance_uuid.return_value,
        expected_attrs=expected_attrs)

    # The power state is synced only when the event's state agrees with
    # what the hypervisor currently reports.
    if event_pwr_state == current_pwr_state:
        mock_sync.assert_called_with(mock.ANY, mock_get.return_value,
                                     event_pwr_state)
    else:
        self.assertFalse(mock_sync.called)

    # Transitions that finish a live migration trigger network migration.
    migrate_finish_statuses = {
        virtevent.EVENT_LIFECYCLE_POSTCOPY_STARTED: 'running (post-copy)',
        virtevent.EVENT_LIFECYCLE_MIGRATION_COMPLETED: 'running'
    }
    if transition in migrate_finish_statuses:
        mock_get_migration.assert_called_with(
            test.MatchType(context.RequestContext),
            mock_get.return_value.uuid,
            migrate_finish_statuses[transition])
        migrate_instance_start.assert_called_once_with(
            test.MatchType(context.RequestContext),
            mock_get.return_value,
            mock_get_migration.return_value)
    else:
        mock_get_migration.assert_not_called()
        migrate_instance_start.assert_not_called()
def test_handle_lifecycle_event(self):
    """Each lifecycle transition maps to its matching power state."""
    cases = [
        (virtevent.EVENT_LIFECYCLE_STOPPED, power_state.SHUTDOWN),
        (virtevent.EVENT_LIFECYCLE_STARTED, power_state.RUNNING),
        (virtevent.EVENT_LIFECYCLE_PAUSED, power_state.PAUSED),
        (virtevent.EVENT_LIFECYCLE_RESUMED, power_state.RUNNING),
        (virtevent.EVENT_LIFECYCLE_SUSPENDED, power_state.SUSPENDED),
        (virtevent.EVENT_LIFECYCLE_POSTCOPY_STARTED, power_state.PAUSED),
        (virtevent.EVENT_LIFECYCLE_MIGRATION_COMPLETED,
         power_state.PAUSED),
    ]
    for transition, pwr_state in cases:
        self._test_handle_lifecycle_event(transition=transition,
                                          event_pwr_state=pwr_state,
                                          current_pwr_state=pwr_state)
def test_handle_lifecycle_event_state_mismatch(self):
    """A stopped event while the guest is running must not sync state."""
    self._test_handle_lifecycle_event(
        transition=virtevent.EVENT_LIFECYCLE_STOPPED,
        current_pwr_state=power_state.RUNNING,
        event_pwr_state=power_state.SHUTDOWN)
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.compute.manager.ComputeManager.'
            '_sync_instance_power_state')
@mock.patch('nova.objects.Migration.get_by_instance_and_status',
            side_effect=exception.MigrationNotFoundByStatus(
                instance_id=uuids.instance, status='running (post-copy)'))
def test_handle_lifecycle_event_postcopy_migration_not_found(
        self, mock_get_migration, mock_sync, mock_get_instance):
    """Tests a EVENT_LIFECYCLE_POSTCOPY_STARTED scenario where the
    migration record is not found by the expected status.
    """
    inst = fake_instance.fake_instance_obj(
        self.context, uuid=uuids.instance,
        task_state=task_states.MIGRATING)
    mock_get_instance.return_value = inst
    event = virtevent.LifecycleEvent(
        uuids.instance, virtevent.EVENT_LIFECYCLE_POSTCOPY_STARTED)
    with mock.patch.object(self.compute, '_get_power_state',
                           return_value=power_state.PAUSED):
        with mock.patch.object(self.compute.network_api,
                               'migrate_instance_start') as mig_start:
            self.compute.handle_lifecycle_event(event)
    # Since we failed to find the migration record, we shouldn't call
    # migrate_instance_start.
    mig_start.assert_not_called()
    # The lookup itself must still have been attempted with the
    # post-copy status string.
    mock_get_migration.assert_called_once_with(
        test.MatchType(context.RequestContext), uuids.instance,
        'running (post-copy)')
@mock.patch('nova.compute.utils.notify_about_instance_action')
def test_delete_instance_info_cache_delete_ordering(self, mock_notify):
    """_delete_instance must clear pending instance events first, then
    emit delete.start, shut the instance down, and emit delete.end —
    in exactly that order.
    """
    call_tracker = mock.Mock()
    call_tracker.clear_events_for_instance.return_value = None
    mgr_class = self.compute.__class__
    orig_delete = mgr_class._delete_instance
    specd_compute = mock.create_autospec(mgr_class)
    # spec out everything except for the method we really want
    # to test, then use call_tracker to verify call sequence
    specd_compute._delete_instance = orig_delete
    specd_compute.host = 'compute'

    mock_inst = mock.Mock()
    mock_inst.uuid = uuids.instance
    mock_inst.save = mock.Mock()
    mock_inst.destroy = mock.Mock()
    mock_inst.system_metadata = mock.Mock()

    # Funnel the interesting collaborator calls through call_tracker so
    # their relative order can be asserted below.
    def _mark_notify(*args, **kwargs):
        call_tracker._notify_about_instance_usage(*args, **kwargs)

    def _mark_shutdown(*args, **kwargs):
        call_tracker._shutdown_instance(*args, **kwargs)

    specd_compute.instance_events = call_tracker
    specd_compute._notify_about_instance_usage = _mark_notify
    specd_compute._shutdown_instance = _mark_shutdown
    mock_bdms = mock.Mock()

    # Call the real (unspecced) method on the autospecced manager.
    specd_compute._delete_instance(specd_compute,
                                   self.context,
                                   mock_inst,
                                   mock_bdms)

    methods_called = [n for n, a, k in call_tracker.mock_calls]
    self.assertEqual(['clear_events_for_instance',
                      '_notify_about_instance_usage',
                      '_shutdown_instance',
                      '_notify_about_instance_usage'],
                     methods_called)
    # Versioned notifications: delete.start before, delete.end after.
    mock_notify.assert_has_calls([
        mock.call(self.context,
                  mock_inst,
                  specd_compute.host,
                  action='delete',
                  phase='start',
                  bdms=mock_bdms),
        mock.call(self.context,
                  mock_inst,
                  specd_compute.host,
                  action='delete',
                  phase='end',
                  bdms=mock_bdms)])
@mock.patch.object(objects.Instance, 'destroy')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(manager.ComputeManager, '_complete_deletion')
@mock.patch.object(manager.ComputeManager, '_cleanup_volumes')
@mock.patch.object(manager.ComputeManager, '_shutdown_instance')
@mock.patch.object(compute_utils, 'notify_about_instance_action')
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
def _test_delete_instance_with_accels(self, instance, mock_inst_usage,
        mock_inst_action, mock_shutdown, mock_cleanup_vols,
        mock_complete_del, mock_inst_save, mock_inst_destroy):
    """Run _delete_instance with all side-effecting collaborators mocked
    out, so callers can assert purely on the Cyborg ARQ cleanup path.
    """
    self.compute._delete_instance(self.context, instance, bdms=None)
@mock.patch('nova.accelerator.cyborg._CyborgClient.'
            'delete_arqs_for_instance')
def test_delete_instance_with_accels_ok(self, mock_del_arqs):
    """Deletion asks Cyborg to remove ARQs when the flavor extra specs
    carry a device profile name."""
    inst = fake_instance.fake_instance_obj(self.context)
    inst.flavor.extra_specs = {'accel:device_profile': 'mydp'}
    self._test_delete_instance_with_accels(inst)
    mock_del_arqs.assert_called_once_with(inst.uuid)
@mock.patch('nova.accelerator.cyborg._CyborgClient.'
            'delete_arqs_for_instance')
def test_delete_instance_with_accels_no_dp(self, mock_del_arqs):
    """No device profile in the flavor means Cyborg is never contacted."""
    inst = fake_instance.fake_instance_obj(self.context)
    self._test_delete_instance_with_accels(inst)
    mock_del_arqs.assert_not_called()
def _make_compute_node(self, hyp_hostname, cn_id):
    """Return a minimal ComputeNode stand-in with the given id/hostname."""
    node = mock.Mock(
        spec_set=['hypervisor_hostname', 'id', 'uuid', 'destroy'])
    node.id = cn_id
    node.hypervisor_hostname = hyp_hostname
    return node
def test_update_available_resource_for_node(self):
    """The per-node update simply delegates to the resource tracker."""
    tracker = self._mock_rt(spec_set=['update_available_resource'])
    self.compute._update_available_resource_for_node(
        self.context, mock.sentinel.node)
    tracker.update_available_resource.assert_called_once_with(
        self.context, mock.sentinel.node, startup=False)
@mock.patch('nova.compute.manager.LOG')
def test_update_available_resource_for_node_reshape_failed(self, log_mock):
    """ReshapeFailed is logged as critical and re-raised to the caller."""
    tracker = self._mock_rt(spec_set=['update_available_resource'])
    tracker.update_available_resource.side_effect = (
        exception.ReshapeFailed(error='error'))
    # The startup kwarg is exercised at the same time.
    self.assertRaises(exception.ReshapeFailed,
                      self.compute._update_available_resource_for_node,
                      self.context, mock.sentinel.node, startup=True)
    tracker.update_available_resource.assert_called_once_with(
        self.context, mock.sentinel.node, startup=True)
    log_mock.critical.assert_called_once()
@mock.patch('nova.compute.manager.LOG')
def test_update_available_resource_for_node_reshape_needed(self, log_mock):
    """ReshapeNeeded is logged via exception() and re-raised."""
    tracker = self._mock_rt(spec_set=['update_available_resource'])
    tracker.update_available_resource.side_effect = (
        exception.ReshapeNeeded())
    # The startup kwarg is exercised at the same time.
    self.assertRaises(exception.ReshapeNeeded,
                      self.compute._update_available_resource_for_node,
                      self.context, mock.sentinel.node, startup=True)
    tracker.update_available_resource.assert_called_once_with(
        self.context, mock.sentinel.node, startup=True)
    log_mock.exception.assert_called_once()
@mock.patch.object(manager, 'LOG')
@mock.patch.object(manager.ComputeManager,
                   '_update_available_resource_for_node')
@mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
@mock.patch.object(manager.ComputeManager, '_get_compute_nodes_in_db')
def test_update_available_resource(self, get_db_nodes, get_avail_nodes,
                                   update_mock, mock_log):
    """DB nodes no longer reported by the driver are destroyed and their
    resource providers deleted; a provider-delete failure (here
    EndpointNotFound) is logged as an error but is not fatal.
    """
    mock_rt = self._mock_rt()
    rc_mock = self.useFixture(fixtures.fixtures.MockPatchObject(
        self.compute, 'reportclient')).mock
    rc_mock.delete_resource_provider.side_effect = (
        keystone_exception.EndpointNotFound)
    # node1..node4 exist in the DB; the driver reports node2..node5,
    # so node1 is the orphan that must be cleaned up.
    db_nodes = [self._make_compute_node('node%s' % i, i)
                for i in range(1, 5)]
    avail_nodes = set(['node2', 'node3', 'node4', 'node5'])
    avail_nodes_l = list(avail_nodes)

    get_db_nodes.return_value = db_nodes
    get_avail_nodes.return_value = avail_nodes
    self.compute.update_available_resource(self.context, startup=True)
    get_db_nodes.assert_called_once_with(self.context, avail_nodes,
                                         use_slave=True, startup=True)
    # Each driver-reported node gets its own update call.
    self.assertEqual(len(avail_nodes_l), update_mock.call_count)
    update_mock.assert_has_calls(
        [mock.call(self.context, node, startup=True)
         for node in avail_nodes_l]
    )
    # First node in set should have been removed from DB
    for db_node in db_nodes:
        if db_node.hypervisor_hostname == 'node1':
            db_node.destroy.assert_called_once_with()
            rc_mock.delete_resource_provider.assert_called_once_with(
                self.context, db_node, cascade=True)
            mock_rt.remove_node.assert_called_once_with(
                'node1')
            mock_log.error.assert_called_once_with(
                "Failed to delete compute node resource provider for "
                "compute node %s: %s", db_node.uuid, mock.ANY)
        else:
            self.assertFalse(db_node.destroy.called)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
            'delete_resource_provider')
@mock.patch.object(manager.ComputeManager,
                   '_update_available_resource_for_node')
@mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
@mock.patch.object(manager.ComputeManager, '_get_compute_nodes_in_db')
def test_update_available_resource_not_ready(self, get_db_nodes,
                                             get_avail_nodes,
                                             update_mock,
                                             del_rp_mock):
    """A not-yet-ready virt driver aborts the update with no side effects."""
    get_db_nodes.return_value = [self._make_compute_node('node1', 1)]
    get_avail_nodes.side_effect = exception.VirtDriverNotReady
    self.compute.update_available_resource(self.context)
    # Neither node updates nor provider deletions may happen.
    update_mock.assert_not_called()
    del_rp_mock.assert_not_called()
@mock.patch('nova.context.get_admin_context')
def test_pre_start_hook(self, get_admin_context):
    """pre_start_hook just triggers update_available_resource(startup)."""
    with mock.patch.object(
            self.compute, 'update_available_resource') as update_res:
        self.compute.pre_start_hook()
    update_res.assert_called_once_with(
        get_admin_context.return_value, startup=True)
@mock.patch.object(objects.ComputeNodeList, 'get_all_by_host',
                   side_effect=exception.NotFound)
@mock.patch('nova.compute.manager.LOG')
def test_get_compute_nodes_in_db_on_startup(self, mock_log,
                                            get_all_by_host):
    """A missing compute node at startup is only a warning, not an error,
    since a brand-new host is expected to have no record yet."""
    result = self.compute._get_compute_nodes_in_db(
        self.context, {'fake-node'}, startup=True)
    self.assertEqual([], result)
    get_all_by_host.assert_called_once_with(
        self.context, self.compute.host, use_slave=False)
    self.assertTrue(mock_log.warning.called)
    self.assertFalse(mock_log.error.called)
@mock.patch.object(objects.ComputeNodeList, 'get_all_by_host',
                   side_effect=exception.NotFound)
@mock.patch('nova.compute.manager.LOG')
def test_get_compute_nodes_in_db_not_found_no_nodenames(
        self, mock_log, get_all_by_host):
    """NotFound with no driver-reported nodenames logs nothing at all."""
    result = self.compute._get_compute_nodes_in_db(self.context, set())
    self.assertEqual([], result)
    get_all_by_host.assert_called_once_with(
        self.context, self.compute.host, use_slave=False)
    mock_log.assert_not_called()
def _trusted_certs_setup_instance(self, include_trusted_certs=True):
    """Build a fake instance, optionally carrying trusted cert ids."""
    inst = fake_instance.fake_instance_obj(self.context)
    if not include_trusted_certs:
        inst.trusted_certs = None
    else:
        inst.trusted_certs = objects.trusted_certs.TrustedCerts(
            ids=['fake-trusted-cert-1', 'fake-trusted-cert-2'])
    return inst
def test_check_trusted_certs_provided_no_support(self):
    """Certs requested but the driver cannot verify them -> build abort."""
    inst = self._trusted_certs_setup_instance()
    with mock.patch.dict(self.compute.driver.capabilities,
                         supports_trusted_certs=False):
        self.assertRaises(exception.BuildAbortException,
                          self.compute._check_trusted_certs, inst)
def test_check_trusted_certs_not_provided_no_support(self):
    """No certs requested -> lack of driver support is acceptable."""
    inst = self._trusted_certs_setup_instance(include_trusted_certs=False)
    with mock.patch.dict(self.compute.driver.capabilities,
                         supports_trusted_certs=False):
        self.compute._check_trusted_certs(inst)
def test_check_trusted_certs_provided_support(self):
    """Certs requested and the driver supports them -> no exception."""
    inst = self._trusted_certs_setup_instance()
    with mock.patch.dict(self.compute.driver.capabilities,
                         supports_trusted_certs=True):
        self.compute._check_trusted_certs(inst)
def test_check_device_tagging_no_tagging(self):
    """Untagged BDMs and network requests pass without driver support."""
    bdm_list = objects.BlockDeviceMappingList(objects=[
        objects.BlockDeviceMapping(source_type='volume',
                                   destination_type='volume',
                                   instance_uuid=uuids.instance)])
    requests = net_req_obj.NetworkRequestList(
        objects=[net_req_obj.NetworkRequest(tag=None)])
    with mock.patch.dict(self.compute.driver.capabilities,
                         supports_device_tagging=False):
        self.compute._check_device_tagging(requests, bdm_list)
def test_check_device_tagging_no_networks(self):
    """A None network-request list is fine without tagging support."""
    bdm_list = objects.BlockDeviceMappingList(objects=[
        objects.BlockDeviceMapping(source_type='volume',
                                   destination_type='volume',
                                   instance_uuid=uuids.instance)])
    with mock.patch.dict(self.compute.driver.capabilities,
                         supports_device_tagging=False):
        self.compute._check_device_tagging(None, bdm_list)
def test_check_device_tagging_tagged_net_req_no_virt_support(self):
    """A tagged port request aborts the build when tagging is unsupported."""
    bdm_list = objects.BlockDeviceMappingList(objects=[
        objects.BlockDeviceMapping(source_type='volume',
                                   destination_type='volume',
                                   instance_uuid=uuids.instance)])
    requests = net_req_obj.NetworkRequestList(objects=[
        net_req_obj.NetworkRequest(port_id=uuids.bar, tag='foo')])
    with mock.patch.dict(self.compute.driver.capabilities,
                         supports_device_tagging=False):
        self.assertRaises(exception.BuildAbortException,
                          self.compute._check_device_tagging,
                          requests, bdm_list)
def test_check_device_tagging_tagged_bdm_no_driver_support(self):
    """A tagged BDM aborts the build when tagging is unsupported."""
    bdm_list = objects.BlockDeviceMappingList(objects=[
        objects.BlockDeviceMapping(source_type='volume',
                                   destination_type='volume',
                                   tag='foo',
                                   instance_uuid=uuids.instance)])
    with mock.patch.dict(self.compute.driver.capabilities,
                         supports_device_tagging=False):
        self.assertRaises(exception.BuildAbortException,
                          self.compute._check_device_tagging,
                          None, bdm_list)
def test_check_device_tagging_tagged_bdm_no_driver_support_declared(self):
    """A missing capability key is treated the same as no support."""
    bdm_list = objects.BlockDeviceMappingList(objects=[
        objects.BlockDeviceMapping(source_type='volume',
                                   destination_type='volume',
                                   tag='foo',
                                   instance_uuid=uuids.instance)])
    with mock.patch.dict(self.compute.driver.capabilities):
        # Drop the key entirely; the check must not assume it exists.
        self.compute.driver.capabilities.pop('supports_device_tagging',
                                             None)
        self.assertRaises(exception.BuildAbortException,
                          self.compute._check_device_tagging,
                          None, bdm_list)
def test_check_device_tagging_tagged_bdm_with_driver_support(self):
    """A tagged BDM passes when the driver supports device tagging."""
    bdm_list = objects.BlockDeviceMappingList(objects=[
        objects.BlockDeviceMapping(source_type='volume',
                                   destination_type='volume',
                                   tag='foo',
                                   instance_uuid=uuids.instance)])
    requests = net_req_obj.NetworkRequestList(objects=[
        net_req_obj.NetworkRequest(network_id=uuids.bar)])
    with mock.patch.dict(self.compute.driver.capabilities,
                         supports_device_tagging=True):
        self.compute._check_device_tagging(requests, bdm_list)
def test_check_device_tagging_tagged_net_req_with_driver_support(self):
    """A tagged network request passes when the driver supports tagging."""
    bdm_list = objects.BlockDeviceMappingList(objects=[
        objects.BlockDeviceMapping(source_type='volume',
                                   destination_type='volume',
                                   instance_uuid=uuids.instance)])
    requests = net_req_obj.NetworkRequestList(objects=[
        net_req_obj.NetworkRequest(network_id=uuids.bar, tag='foo')])
    with mock.patch.dict(self.compute.driver.capabilities,
                         supports_device_tagging=True):
        self.compute._check_device_tagging(requests, bdm_list)
@mock.patch.object(objects.BlockDeviceMapping, 'create')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid',
                   return_value=objects.BlockDeviceMappingList())
def test_reserve_block_device_name_with_tag(self, mock_get, mock_create):
    """A device tag passed to reserve_block_device_name ends up on the
    created BDM when the driver supports tagged volume attach.
    """
    instance = fake_instance.fake_instance_obj(self.context)
    with test.nested(
            mock.patch.object(self.compute,
                              '_get_device_name_for_instance',
                              return_value='/dev/vda'),
            mock.patch.dict(self.compute.driver.capabilities,
                            supports_tagged_attach_volume=True)):
        # Positional args: device, volume_id, disk_bus, device_type,
        # tag='foo', multiattach=False.
        bdm = self.compute.reserve_block_device_name(
            self.context, instance, None, None, None, None, 'foo',
            False)
        self.assertEqual('foo', bdm.tag)
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
def test_reserve_block_device_name_raises(self, _):
    """Tagged volume attach without driver support raises
    VolumeTaggedAttachNotSupported.
    """
    with mock.patch.dict(self.compute.driver.capabilities,
                         supports_tagged_attach_volume=False):
        self.assertRaises(exception.VolumeTaggedAttachNotSupported,
                          self.compute.reserve_block_device_name,
                          self.context,
                          fake_instance.fake_instance_obj(self.context),
                          'fake_device', 'fake_volume_id', 'fake_disk_bus',
                          'fake_device_type', 'foo', False)
@mock.patch.object(objects.BlockDeviceMapping, 'create')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid',
                   return_value=objects.BlockDeviceMappingList())
def test_reserve_block_device_name_multiattach(self, mock_get,
                                               mock_create):
    """Tests the case that multiattach=True and the driver supports it."""
    instance = fake_instance.fake_instance_obj(self.context)
    with test.nested(
            mock.patch.object(self.compute,
                              '_get_device_name_for_instance',
                              return_value='/dev/vda'),
            mock.patch.dict(self.compute.driver.capabilities,
                            supports_multiattach=True)):
        # No exception expected; success is the assertion.
        self.compute.reserve_block_device_name(
            self.context, instance, device=None, volume_id=uuids.volume_id,
            disk_bus=None, device_type=None, tag=None, multiattach=True)
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
def test_reserve_block_device_name_multiattach_raises(self, _):
    """multiattach=True without driver support raises
    MultiattachNotSupportedByVirtDriver.
    """
    with mock.patch.dict(self.compute.driver.capabilities,
                         supports_multiattach=False):
        self.assertRaises(exception.MultiattachNotSupportedByVirtDriver,
                          self.compute.reserve_block_device_name,
                          self.context,
                          fake_instance.fake_instance_obj(self.context),
                          'fake_device', 'fake_volume_id', 'fake_disk_bus',
                          'fake_device_type', tag=None, multiattach=True)
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(time, 'sleep')
def test_allocate_network_succeeds_after_retries(
        self, mock_sleep, mock_save):
    """Allocation retries with capped exponential backoff then succeeds.

    Seven failures produce sleeps of 1, 2, 4, 8, 16, 30, 30 seconds
    before the eighth attempt returns the final result.
    """
    self.flags(network_allocate_retries=8)
    instance = fake_instance.fake_instance_obj(
        self.context, expected_attrs=['system_metadata'])
    req_networks = objects.NetworkRequestList(
        objects=[objects.NetworkRequest(network_id='fake')])
    sec_groups = 'fake-sec-groups'
    final_result = 'meow'
    rp_mapping = {}
    expected_sleep_times = [mock.call(t) for t in
                            (1, 2, 4, 8, 16, 30, 30)]
    with mock.patch.object(
            self.compute.network_api, 'allocate_for_instance',
            side_effect=[test.TestingException()] * 7 + [final_result]):
        res = self.compute._allocate_network_async(self.context, instance,
                                                   req_networks,
                                                   sec_groups,
                                                   rp_mapping)
    self.assertEqual(7, mock_sleep.call_count)
    mock_sleep.assert_has_calls(expected_sleep_times)
    self.assertEqual(final_result, res)
    # Ensure save is not called while allocating networks; the instance
    # is saved after the allocation.
    self.assertFalse(mock_save.called)
    self.assertEqual('True', instance.system_metadata['network_allocated'])
def test_allocate_network_fails(self):
    """With zero retries the first allocation failure propagates."""
    self.flags(network_allocate_retries=0)
    instance = {}
    req_networks = objects.NetworkRequestList(
        objects=[objects.NetworkRequest(network_id='fake')])
    sec_groups = 'fake-sec-groups'
    rp_mapping = {}
    with mock.patch.object(
            self.compute.network_api, 'allocate_for_instance',
            side_effect=test.TestingException) as mock_allocate:
        self.assertRaises(test.TestingException,
                          self.compute._allocate_network_async,
                          self.context, instance, req_networks,
                          sec_groups, rp_mapping)
    # Exactly one attempt, with everything forwarded as keyword args.
    mock_allocate.assert_called_once_with(
        self.context, instance,
        requested_networks=req_networks,
        security_groups=sec_groups,
        bind_host_id=instance.get('host'),
        resource_provider_mapping=rp_mapping)
@mock.patch.object(manager.ComputeManager, '_instance_update')
@mock.patch.object(time, 'sleep')
def test_allocate_network_with_conf_value_is_one(
        self, sleep, _instance_update):
    """With a single retry allowed, exactly one failure is tolerated."""
    self.flags(network_allocate_retries=1)
    instance = fake_instance.fake_instance_obj(
        self.context, expected_attrs=['system_metadata'])
    req_networks = objects.NetworkRequestList(
        objects=[objects.NetworkRequest(network_id='fake')])
    final_result = 'zhangtralon'
    with mock.patch.object(self.compute.network_api,
                           'allocate_for_instance',
                           side_effect=[test.TestingException(),
                                        final_result]):
        res = self.compute._allocate_network_async(
            self.context, instance, req_networks, 'fake-sec-groups', {})
    self.assertEqual(final_result, res)
    # One failure -> one backoff sleep before the successful attempt.
    self.assertEqual(1, sleep.call_count)
def test_allocate_network_skip_for_no_allocate(self):
    """network_id='none' means: allocate nothing and return empty info."""
    requests = objects.NetworkRequestList(
        objects=[objects.NetworkRequest(network_id='none')])
    nwinfo = self.compute._allocate_network_async(
        self.context, mock.sentinel.instance, requests,
        security_groups=['default'], resource_provider_mapping={})
    self.assertEqual(0, len(nwinfo))
@mock.patch('nova.compute.manager.ComputeManager.'
            '_do_build_and_run_instance')
def _test_max_concurrent_builds(self, mock_dbari):
    """Every build_and_run_instance call must enter the build semaphore,
    whatever limit is currently configured.
    """
    with mock.patch.object(self.compute,
                           '_build_semaphore') as mock_sem:
        instance = objects.Instance(uuid=uuidutils.generate_uuid())
        for i in (1, 2, 3):
            self.compute.build_and_run_instance(self.context, instance,
                                                mock.sentinel.image,
                                                mock.sentinel.request_spec,
                                                {})
        self.assertEqual(3, mock_sem.__enter__.call_count)
def test_max_concurrent_builds_limited(self):
    """A finite build limit still lets every request through the gate."""
    self.flags(max_concurrent_builds=2)
    self._test_max_concurrent_builds()
def test_max_concurrent_builds_unlimited(self):
    """max_concurrent_builds=0 (unlimited) behaves like the limited case."""
    self.flags(max_concurrent_builds=0)
    self._test_max_concurrent_builds()
def test_max_concurrent_builds_semaphore_limited(self):
    """The build semaphore balance matches the configured limit."""
    self.flags(max_concurrent_builds=123)
    mgr = manager.ComputeManager()
    self.assertEqual(123, mgr._build_semaphore.balance)
def test_max_concurrent_builds_semaphore_unlimited(self):
    """A zero limit yields an UnlimitedSemaphore with balance 0."""
    self.flags(max_concurrent_builds=0)
    mgr = manager.ComputeManager()
    self.assertEqual(0, mgr._build_semaphore.balance)
    self.assertIsInstance(mgr._build_semaphore,
                          compute_utils.UnlimitedSemaphore)
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.compute.manager.ComputeManager.'
            '_snapshot_instance')
def _test_max_concurrent_snapshots(self, mock_si, mock_inst_save):
    """Every snapshot_instance call must enter the snapshot semaphore,
    whatever limit is currently configured.
    """
    with mock.patch.object(self.compute,
                           '_snapshot_semaphore') as mock_sem:
        instance = objects.Instance(uuid=uuidutils.generate_uuid())
        for i in (1, 2, 3):
            self.compute.snapshot_instance(self.context,
                                           mock.sentinel.image,
                                           instance)
        self.assertEqual(3, mock_sem.__enter__.call_count)
def test_max_concurrent_snapshots_limited(self):
    """A finite snapshot limit still lets every request through."""
    self.flags(max_concurrent_snapshots=2)
    self._test_max_concurrent_snapshots()
def test_max_concurrent_snapshots_unlimited(self):
    """max_concurrent_snapshots=0 (unlimited) behaves like the limited case."""
    self.flags(max_concurrent_snapshots=0)
    self._test_max_concurrent_snapshots()
def test_max_concurrent_snapshots_semaphore_limited(self):
    """The snapshot semaphore balance matches the configured limit."""
    self.flags(max_concurrent_snapshots=123)
    mgr = manager.ComputeManager()
    self.assertEqual(123, mgr._snapshot_semaphore.balance)
def test_max_concurrent_snapshots_semaphore_unlimited(self):
    """A zero limit yields an UnlimitedSemaphore with balance 0."""
    self.flags(max_concurrent_snapshots=0)
    mgr = manager.ComputeManager()
    self.assertEqual(0, mgr._snapshot_semaphore.balance)
    self.assertIsInstance(mgr._snapshot_semaphore,
                          compute_utils.UnlimitedSemaphore)
def test_nil_out_inst_obj_host_and_node_sets_nil(self):
    """_nil_out_instance_obj_host_and_node clears host/node/launched_on."""
    inst = fake_instance.fake_instance_obj(self.context,
                                           uuid=uuids.instance,
                                           host='foo-host',
                                           node='foo-node',
                                           launched_on='foo-host')
    for attr in ('host', 'node', 'launched_on'):
        self.assertIsNotNone(getattr(inst, attr))
    self.compute._nil_out_instance_obj_host_and_node(inst)
    for attr in ('host', 'node', 'launched_on'):
        self.assertIsNone(getattr(inst, attr))
def test_init_host(self):
    """init_host validates pinning/vTPM configuration, destroys evacuated
    instances, initializes each instance on this host and pushes the
    instance list to the scheduler.
    """
    our_host = self.compute.host
    inst = fake_instance.fake_db_instance(
        vm_state=vm_states.ACTIVE,
        info_cache=dict(test_instance_info_cache.fake_info_cache,
                        network_info=None),
        security_groups=None)
    startup_instances = [inst, inst, inst]

    def _make_instance_list(db_list):
        return instance_obj._make_instance_list(
            self.context, objects.InstanceList(), db_list, None)

    # The patches are stacked on an inner function because one of them
    # (the _update_scheduler_instance_info patch) needs self.compute,
    # which only exists at call time.
    @mock.patch.object(manager.ComputeManager, '_get_nodes')
    @mock.patch.object(manager.ComputeManager,
                       '_error_out_instances_whose_build_was_interrupted')
    @mock.patch.object(fake_driver.FakeDriver, 'init_host')
    @mock.patch.object(objects.InstanceList, 'get_by_host')
    @mock.patch.object(context, 'get_admin_context')
    @mock.patch.object(manager.ComputeManager,
                       '_destroy_evacuated_instances')
    @mock.patch.object(manager.ComputeManager,
                       '_validate_pinning_configuration')
    @mock.patch.object(manager.ComputeManager,
                       '_validate_vtpm_configuration')
    @mock.patch.object(manager.ComputeManager, '_init_instance')
    @mock.patch.object(self.compute, '_update_scheduler_instance_info')
    def _do_mock_calls(mock_update_scheduler, mock_inst_init,
                       mock_validate_vtpm, mock_validate_pinning,
                       mock_destroy, mock_admin_ctxt, mock_host_get,
                       mock_init_host,
                       mock_error_interrupted, mock_get_nodes):
        mock_admin_ctxt.return_value = self.context
        inst_list = _make_instance_list(startup_instances)
        mock_host_get.return_value = inst_list
        our_node = objects.ComputeNode(
            host='fake-host', uuid=uuids.our_node_uuid,
            hypervisor_hostname='fake-node')
        mock_get_nodes.return_value = {uuids.our_node_uuid: our_node}

        self.compute.init_host()

        mock_validate_pinning.assert_called_once_with(inst_list)
        mock_validate_vtpm.assert_called_once_with(inst_list)
        mock_destroy.assert_called_once_with(
            self.context, {uuids.our_node_uuid: our_node})
        # Each instance on the host is initialized individually.
        mock_inst_init.assert_has_calls(
            [mock.call(self.context, inst_list[0]),
             mock.call(self.context, inst_list[1]),
             mock.call(self.context, inst_list[2])])
        mock_init_host.assert_called_once_with(host=our_host)
        mock_host_get.assert_called_once_with(self.context, our_host,
            expected_attrs=['info_cache', 'metadata', 'numa_topology'])
        mock_update_scheduler.assert_called_once_with(
            self.context, inst_list)
        mock_error_interrupted.assert_called_once_with(
            self.context, {inst.uuid for inst in inst_list},
            mock_get_nodes.return_value.keys())

    _do_mock_calls()
@mock.patch('nova.compute.manager.ComputeManager._get_nodes')
@mock.patch('nova.compute.manager.ComputeManager.'
            '_error_out_instances_whose_build_was_interrupted')
@mock.patch('nova.objects.InstanceList.get_by_host',
            return_value=objects.InstanceList())
@mock.patch('nova.compute.manager.ComputeManager.'
            '_destroy_evacuated_instances')
@mock.patch('nova.compute.manager.ComputeManager._init_instance',
            mock.NonCallableMock())
@mock.patch('nova.compute.manager.ComputeManager.'
            '_update_scheduler_instance_info', mock.NonCallableMock())
def test_init_host_no_instances(
        self, mock_destroy_evac_instances, mock_get_by_host,
        mock_error_interrupted, mock_get_nodes):
    """Tests the case that init_host runs and there are no instances
    on this host yet (it's brand new). Uses NonCallableMock for the
    methods we assert should not be called.
    """
    mock_get_nodes.return_value = {
        uuids.cn_uuid1: objects.ComputeNode(
            uuid=uuids.cn_uuid1, hypervisor_hostname='node1')}
    self.compute.init_host()

    # With no instances, the interrupted-build check gets an empty set.
    mock_error_interrupted.assert_called_once_with(
        test.MatchType(nova.context.RequestContext), set(),
        mock_get_nodes.return_value.keys())
    mock_get_nodes.assert_called_once_with(
        test.MatchType(nova.context.RequestContext))
@mock.patch('nova.objects.InstanceList')
@mock.patch('nova.objects.MigrationList.get_by_filters')
def test_cleanup_host(self, mock_migrations_get, mock_inst_list):
    """cleanup_host delegates to the virt driver's cleanup_host and
    deregisters the lifecycle event listener.
    """
    mock_migrations_get.return_value = []
    mock_inst_list.get_by_host.return_value = []
    with mock.patch.object(self.compute, 'driver') as driver_mock:
        self.compute.init_host()
        driver_mock.init_host.assert_called_once_with(host='fake-mini')

        self.compute.cleanup_host()

        # The event listener is registered during init_host and cleared
        # (set to None) during cleanup_host.
        driver_mock.register_event_listener.assert_has_calls([
            mock.call(self.compute.handle_events), mock.call(None)])
        driver_mock.cleanup_host.assert_called_once_with(
            host='fake-mini')
def test_cleanup_live_migrations_in_pool_with_record(self):
    """Shutting down the live migration pool cancels queued migrations
    and marks their Migration records as 'cancelled'.
    """
    future = mock.MagicMock()
    inst_uuid = uuids.instance
    migration = objects.Migration(
        uuid=uuids.migration, instance_uuid=inst_uuid)
    migration.save = mock.MagicMock()
    self.compute._waiting_live_migrations[inst_uuid] = (migration, future)
    with mock.patch.object(
            self.compute, '_live_migration_executor') as mock_pool:
        self.compute._cleanup_live_migrations_in_pool()

        mock_pool.shutdown.assert_called_once_with(wait=False)
        self.assertEqual('cancelled', migration.status)
        future.cancel.assert_called_once_with()
        self.assertEqual({}, self.compute._waiting_live_migrations)

        # A queued entry without a Future must be tolerated as well.
        self.compute._waiting_live_migrations[inst_uuid] = (None, None)
        self.compute._cleanup_live_migrations_in_pool()

        mock_pool.shutdown.assert_called_with(wait=False)
        self.assertEqual(2, mock_pool.shutdown.call_count)
        self.assertEqual({}, self.compute._waiting_live_migrations)
def test_init_virt_events_disabled(self):
    """No lifecycle event listener is registered when the workaround
    flag disables handling of virt lifecycle events.
    """
    self.flags(handle_virt_lifecycle_events=False, group='workarounds')
    with mock.patch.object(
            self.compute.driver, 'register_event_listener') as register:
        self.compute.init_virt_events()
    register.assert_not_called()
@mock.patch('nova.compute.manager.ComputeManager._get_nodes')
@mock.patch.object(manager.ComputeManager,
                   '_error_out_instances_whose_build_was_interrupted')
@mock.patch('nova.scheduler.utils.resources_from_flavor')
@mock.patch.object(manager.ComputeManager, '_get_instances_on_driver')
@mock.patch.object(manager.ComputeManager, 'init_virt_events')
@mock.patch.object(context, 'get_admin_context')
@mock.patch.object(objects.InstanceList, 'get_by_host')
@mock.patch.object(fake_driver.FakeDriver, 'destroy')
@mock.patch.object(fake_driver.FakeDriver, 'init_host')
@mock.patch('nova.utils.temporary_mutation')
@mock.patch('nova.objects.MigrationList.get_by_filters')
@mock.patch('nova.objects.Migration.save')
def test_init_host_with_evacuated_instance(self, mock_save, mock_mig_get,
        mock_temp_mut, mock_init_host, mock_destroy, mock_host_get,
        mock_admin_ctxt, mock_init_virt, mock_get_inst, mock_resources,
        mock_error_interrupted, mock_get_nodes):
    """init_host destroys an instance that was evacuated away from this
    host while the service was down, and removes its allocation from
    this host's resource provider.
    """
    our_host = self.compute.host
    not_our_host = 'not-' + our_host

    # The instance now lives on another host but is still present on
    # this host's hypervisor (see mock_get_inst below).
    deleted_instance = fake_instance.fake_instance_obj(
        self.context, host=not_our_host, uuid=uuids.deleted_instance)
    migration = objects.Migration(instance_uuid=deleted_instance.uuid)
    migration.source_node = 'fake-node'
    mock_mig_get.return_value = [migration]
    mock_admin_ctxt.return_value = self.context
    # No instances are assigned to this host in the database.
    mock_host_get.return_value = objects.InstanceList()
    our_node = objects.ComputeNode(
        host=our_host, uuid=uuids.our_node_uuid,
        hypervisor_hostname='fake-node')
    mock_get_nodes.return_value = {uuids.our_node_uuid: our_node}
    mock_resources.return_value = mock.sentinel.my_resources
    # simulate failed instance
    mock_get_inst.return_value = [deleted_instance]
    with test.nested(
        mock.patch.object(
            self.compute.network_api, 'get_instance_nw_info',
            side_effect = exception.InstanceNotFound(
                instance_id=deleted_instance['uuid'])),
        mock.patch.object(
            self.compute.reportclient,
            'remove_provider_tree_from_instance_allocation')
    ) as (mock_get_net, mock_remove_allocation):
        self.compute.init_host()

        # The evacuated instance's allocation against this node's
        # resource provider is cleaned up.
        mock_remove_allocation.assert_called_once_with(
            self.context, deleted_instance.uuid, uuids.our_node_uuid)

    mock_init_host.assert_called_once_with(host=our_host)
    mock_host_get.assert_called_once_with(self.context, our_host,
        expected_attrs=['info_cache', 'metadata', 'numa_topology'])
    mock_init_virt.assert_called_once_with()
    # Deleted instances must be visible while looking for evacuations.
    mock_temp_mut.assert_called_once_with(self.context, read_deleted='yes')
    mock_get_inst.assert_called_once_with(self.context)
    mock_get_net.assert_called_once_with(self.context, deleted_instance)

    # ensure driver.destroy is called so that driver may
    # clean up any dangling files
    mock_destroy.assert_called_once_with(self.context, deleted_instance,
                                         mock.ANY, mock.ANY, mock.ANY)
    mock_save.assert_called_once_with()

    mock_error_interrupted.assert_called_once_with(
        self.context, {deleted_instance.uuid},
        mock_get_nodes.return_value.keys())
@mock.patch('nova.compute.manager.ComputeManager._get_nodes')
@mock.patch.object(manager.ComputeManager,
                   '_error_out_instances_whose_build_was_interrupted')
@mock.patch.object(context, 'get_admin_context')
@mock.patch.object(objects.InstanceList, 'get_by_host')
@mock.patch.object(fake_driver.FakeDriver, 'init_host')
@mock.patch('nova.compute.manager.ComputeManager._init_instance')
@mock.patch('nova.compute.manager.ComputeManager.'
            '_destroy_evacuated_instances')
def test_init_host_with_in_progress_evacuations(self, mock_destroy_evac,
        mock_init_instance, mock_init_host, mock_host_get,
        mock_admin_ctxt, mock_error_interrupted, mock_get_nodes):
    """Assert that init_instance is not called for instances that are
    evacuating from the host during init_host.
    """
    active_instance = fake_instance.fake_instance_obj(
        self.context, host=self.compute.host, uuid=uuids.active_instance)
    active_instance.system_metadata = {}
    evacuating_instance = fake_instance.fake_instance_obj(
        self.context, host=self.compute.host, uuid=uuids.evac_instance)
    evacuating_instance.system_metadata = {}
    instance_list = objects.InstanceList(self.context,
        objects=[active_instance, evacuating_instance])
    mock_host_get.return_value = instance_list
    mock_admin_ctxt.return_value = self.context
    # _destroy_evacuated_instances reports the in-progress evacuation so
    # init_host knows to skip _init_instance for that instance.
    mock_destroy_evac.return_value = {
        uuids.evac_instance: evacuating_instance
    }
    our_node = objects.ComputeNode(
        host='fake-host', uuid=uuids.our_node_uuid,
        hypervisor_hostname='fake-node')
    mock_get_nodes.return_value = {uuids.our_node_uuid: our_node}

    self.compute.init_host()

    # Only the non-evacuating instance is initialized.
    mock_init_instance.assert_called_once_with(
        self.context, active_instance)
    # Both instance uuids are still checked for interrupted builds.
    mock_error_interrupted.assert_called_once_with(
        self.context, {active_instance.uuid, evacuating_instance.uuid},
        mock_get_nodes.return_value.keys())
@mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
@mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
def test_get_nodes(self, mock_available, mock_get_node):
    """_get_nodes maps compute node uuids to their ComputeNode objects
    for every node name reported by the virt driver.
    """
    mock_available.return_value = ['fake-node1', 'fake-node2']
    node1 = objects.ComputeNode(uuid=uuids.cn1)
    node2 = objects.ComputeNode(uuid=uuids.cn2)
    mock_get_node.side_effect = [node1, node2]

    result = self.compute._get_nodes(self.context)

    self.assertEqual({uuids.cn1: node1, uuids.cn2: node2}, result)
    mock_available.assert_called_once_with()
    mock_get_node.assert_has_calls([
        mock.call(self.context, self.compute.host, 'fake-node1'),
        mock.call(self.context, self.compute.host, 'fake-node2'),
    ])
@mock.patch.object(manager.LOG, 'warning')
@mock.patch.object(
    objects.ComputeNode, 'get_by_host_and_nodename',
    new_callable=mock.NonCallableMock)
@mock.patch.object(
    fake_driver.FakeDriver, 'get_available_nodes',
    side_effect=exception.VirtDriverNotReady)
def test_get_nodes_driver_not_ready(
        self, mock_driver_get_nodes, mock_get_by_host_and_node,
        mock_log_warning):
    """_get_nodes returns an empty dict and only logs a warning when the
    virt driver is not ready yet; the database is never queried (the
    NonCallableMock on get_by_host_and_nodename enforces that).
    """
    # NOTE: no return_value is configured for mock_driver_get_nodes
    # because its side_effect unconditionally raises VirtDriverNotReady;
    # a return_value would be dead configuration.
    nodes = self.compute._get_nodes(self.context)

    self.assertEqual({}, nodes)
    mock_log_warning.assert_called_once_with(
        "Virt driver is not ready. If this is the first time this service "
        "is starting on this host, then you can ignore this warning.")
@mock.patch.object(manager.LOG, 'warning')
@mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
@mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
def test_get_nodes_node_not_found(
        self, mock_driver_get_nodes, mock_get_node, mock_log_warning):
    """A node the driver reports but the database does not know yet is
    skipped with a warning; the remaining nodes are still returned.
    """
    mock_driver_get_nodes.return_value = ['fake-node1', 'fake-node2']
    node2 = objects.ComputeNode(uuid=uuids.cn2)
    mock_get_node.side_effect = [
        exception.ComputeHostNotFound(host='fake-node1'), node2]

    nodes = self.compute._get_nodes(self.context)

    self.assertEqual({uuids.cn2: node2}, nodes)
    mock_driver_get_nodes.assert_called_once_with()
    mock_get_node.assert_has_calls([
        mock.call(self.context, self.compute.host, 'fake-node1'),
        mock.call(self.context, self.compute.host, 'fake-node2'),
    ])
    mock_log_warning.assert_called_once_with(
        "Compute node %s not found in the database. If this is the first "
        "time this service is starting on this host, then you can ignore "
        "this warning.", 'fake-node1')
@mock.patch.object(objects.InstanceList, 'get_by_host', new=mock.Mock())
@mock.patch('nova.compute.manager.ComputeManager.'
            '_validate_pinning_configuration')
def test_init_host_pinning_configuration_validation_failure(
        self, mock_validate_pinning):
    """init_host propagates InvalidConfiguration raised by the CPU
    pinning configuration check.
    """
    mock_validate_pinning.side_effect = exception.InvalidConfiguration
    self.assertRaises(
        exception.InvalidConfiguration, self.compute.init_host)
@mock.patch.object(objects.InstanceList, 'get_by_host', new=mock.Mock())
@mock.patch('nova.compute.manager.ComputeManager.'
            '_validate_pinning_configuration', new=mock.Mock())
@mock.patch('nova.compute.manager.ComputeManager.'
            '_validate_vtpm_configuration')
def test_init_host_vtpm_configuration_validation_failure(
        self, mock_validate_vtpm):
    """init_host propagates InvalidConfiguration raised by the vTPM
    configuration check.
    """
    mock_validate_vtpm.side_effect = exception.InvalidConfiguration
    self.assertRaises(
        exception.InvalidConfiguration, self.compute.init_host)
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.InstanceList, 'get_by_filters')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
            'get_allocations_for_resource_provider')
def test_init_host_with_interrupted_instance_build(
        self, mock_get_allocations, mock_get_instances,
        mock_instance_save):
    """Instances holding placement allocations against this host that
    are still in the BUILDING vm_state (their build was interrupted by
    a service restart) are flipped to ERROR by
    _error_out_instances_whose_build_was_interrupted.
    """
    active_instance = fake_instance.fake_instance_obj(
        self.context, host=self.compute.host, uuid=uuids.active_instance)
    evacuating_instance = fake_instance.fake_instance_obj(
        self.context, host=self.compute.host, uuid=uuids.evac_instance)
    interrupted_instance = fake_instance.fake_instance_obj(
        self.context, host=None, uuid=uuids.interrupted_instance,
        vm_state=vm_states.BUILDING)

    # we have 3 different instances. We need consumers for each instance
    # in placement and an extra consumer that is not an instance
    allocations = {
        uuids.active_instance: "fake-resources-active",
        uuids.evac_instance: "fake-resources-evacuating",
        uuids.interrupted_instance: "fake-resources-interrupted",
        uuids.not_an_instance: "fake-resources-not-an-instance",
    }
    mock_get_allocations.return_value = report.ProviderAllocInfo(
        allocations=allocations)

    # get is called with a uuid filter containing interrupted_instance,
    # error_instance, and not_an_instance but it will only return the
    # interrupted_instance as the error_instance is not in building state
    # and not_an_instance does not match with any instance in the db.
    mock_get_instances.return_value = objects.InstanceList(
        self.context, objects=[interrupted_instance])

    # interrupted_instance and error_instance is not in the list passed in
    # because it is not assigned to the compute and therefore not processed
    # by init_host and init_instance
    self.compute._error_out_instances_whose_build_was_interrupted(
        self.context,
        {inst.uuid for inst in [active_instance, evacuating_instance]},
        [uuids.cn_uuid])

    mock_get_instances.assert_called_once_with(
        self.context,
        {'vm_state': 'building',
         'uuid': {uuids.interrupted_instance, uuids.not_an_instance}
         },
        expected_attrs=[])

    # this is expected to be called only once for interrupted_instance
    mock_instance_save.assert_called_once_with()
    self.assertEqual(vm_states.ERROR, interrupted_instance.vm_state)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
            'get_allocations_for_resource_provider')
def test_init_host_with_interrupted_instance_build_compute_rp_not_found(
        self, mock_get_allocations):
    """A compute node whose resource provider is missing from placement
    is skipped and the remaining nodes are still processed.
    """
    mock_get_allocations.side_effect = [
        exception.ResourceProviderAllocationRetrievalFailed(
            rp_uuid=uuids.cn1_uuid, error='error'),
        report.ProviderAllocInfo(allocations={}),
    ]

    self.compute._error_out_instances_whose_build_was_interrupted(
        self.context, set(), [uuids.cn1_uuid, uuids.cn2_uuid])

    # Both providers were queried despite the first one failing.
    mock_get_allocations.assert_has_calls([
        mock.call(self.context, uuids.cn1_uuid),
        mock.call(self.context, uuids.cn2_uuid),
    ])
def test_init_instance_with_binding_failed_vif_type(self):
    """An instance whose vifs fail to plug during _init_instance is put
    into the ERROR state.
    """
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        info_cache=None,
        power_state=power_state.RUNNING,
        vm_state=vm_states.ACTIVE,
        task_state=None,
        host=self.compute.host,
        expected_attrs=['info_cache'])

    plug_exc = exception.VirtualInterfacePlugException(
        "Unexpected vif_type=binding_failed")
    with test.nested(
        mock.patch.object(context, 'get_admin_context',
                          return_value=self.context),
        mock.patch.object(objects.Instance, 'get_network_info',
                          return_value=network_model.NetworkInfo()),
        mock.patch.object(self.compute.driver, 'plug_vifs',
                          side_effect=plug_exc),
        mock.patch.object(self.compute, '_set_instance_obj_error_state')
    ) as (_get_ctxt, _get_nw_info, _plug_vifs, mock_set_error):
        self.compute._init_instance(self.context, instance)
        mock_set_error.assert_called_once_with(instance)
def _test__validate_pinning_configuration(self, supports_pcpus=True):
    """Run _validate_pinning_configuration against a mix of instances.

    The instance list covers the interesting cases: no NUMA topology at
    all, an unpinned NUMA topology, a fully pinned topology, a deleted
    instance (which must be ignored by the check), and a 'mixed'
    topology with both shared and pinned CPUs. The driver's
    'supports_pcpus' capability is forced to the given value for the
    duration of the call.
    """
    instance_1 = fake_instance.fake_instance_obj(
        self.context, uuid=uuids.instance_1)
    instance_2 = fake_instance.fake_instance_obj(
        self.context, uuid=uuids.instance_2)
    instance_3 = fake_instance.fake_instance_obj(
        self.context, uuid=uuids.instance_3)
    instance_4 = fake_instance.fake_instance_obj(
        self.context, uuid=uuids.instance_4)
    instance_5 = fake_instance.fake_instance_obj(
        self.context, uuid=uuids.instance_5)

    # instance_1 has no NUMA topology
    instance_1.numa_topology = None

    # instance_2 has a NUMA topology but no pinned (dedicated) CPUs
    numa_wo_pinning = test_instance_numa.get_fake_obj_numa_topology(
        self.context)
    numa_wo_pinning.cells[0].pcpuset = set()
    numa_wo_pinning.cells[1].pcpuset = set()
    instance_2.numa_topology = numa_wo_pinning

    # instance_3 is fully pinned (dedicated CPU policy in every cell)
    numa_w_pinning = test_instance_numa.get_fake_obj_numa_topology(
        self.context)
    numa_w_pinning.cells[0].pin_vcpus((1, 10), (2, 11))
    numa_w_pinning.cells[0].cpuset = set()
    numa_w_pinning.cells[0].cpu_policy = (
        fields.CPUAllocationPolicy.DEDICATED)
    numa_w_pinning.cells[1].pin_vcpus((3, 0), (4, 1))
    numa_w_pinning.cells[1].cpuset = set()
    numa_w_pinning.cells[1].cpu_policy = (
        fields.CPUAllocationPolicy.DEDICATED)
    instance_3.numa_topology = numa_w_pinning

    # instance_4 is deleted and must be skipped by the validation
    instance_4.deleted = True

    # instance_5 uses the MIXED policy: shared and pinned CPUs together
    numa_mixed_pinning = test_instance_numa.get_fake_obj_numa_topology(
        self.context)
    numa_mixed_pinning.cells[0].cpuset = set([5, 6])
    numa_mixed_pinning.cells[0].pin_vcpus((1, 8), (2, 9))
    numa_mixed_pinning.cells[0].cpu_policy = (
        fields.CPUAllocationPolicy.MIXED)
    numa_mixed_pinning.cells[1].cpuset = set([7, 8])
    numa_mixed_pinning.cells[1].pin_vcpus((3, 10), (4, 11))
    numa_mixed_pinning.cells[1].cpu_policy = (
        fields.CPUAllocationPolicy.MIXED)
    instance_5.numa_topology = numa_mixed_pinning
    instance_5.vcpus = 8

    instances = objects.InstanceList(objects=[
        instance_1, instance_2, instance_3, instance_4, instance_5])

    with mock.patch.dict(self.compute.driver.capabilities,
                         supports_pcpus=supports_pcpus):
        self.compute._validate_pinning_configuration(instances)
def test__validate_pinning_configuration_valid_config(self):
    """With both 'cpu_shared_set' and 'cpu_dedicated_set' properly
    configured, validation passes for every instance type.
    """
    self.flags(cpu_shared_set='2-7', cpu_dedicated_set='0-1,8-15',
               group='compute')

    self._test__validate_pinning_configuration()
def test__validate_pinning_configuration_invalid_unpinned_config(self):
    """Configuring only 'cpu_dedicated_set' while unpinned instances
    exist on the host must raise InvalidConfiguration.
    """
    self.flags(cpu_dedicated_set='0-7', group='compute')

    exc = self.assertRaises(
        exception.InvalidConfiguration,
        self._test__validate_pinning_configuration)
    self.assertIn(
        'This host has unpinned instances but has no CPUs '
        'set aside for this purpose;', str(exc))
def test__validate_pinning_configuration_invalid_pinned_config(self):
    """Configuring only 'cpu_shared_set' while pinned instances exist
    on the host must raise InvalidConfiguration.
    """
    self.flags(cpu_shared_set='0-7', group='compute')

    exc = self.assertRaises(
        exception.InvalidConfiguration,
        self._test__validate_pinning_configuration)
    self.assertIn(
        'This host has pinned instances but has no CPUs '
        'set aside for this purpose;', str(exc))
@mock.patch.object(manager.LOG, 'warning')
def test__validate_pinning_configuration_warning(self, mock_log):
    """Pinned instance CPUs that fall outside 'cpu_dedicated_set' only
    produce a warning rather than an error.
    """
    self.flags(cpu_shared_set='0-7', cpu_dedicated_set='8-15',
               group='compute')

    self._test__validate_pinning_configuration()

    self.assertEqual(1, mock_log.call_count)
    self.assertIn(
        'Instance is pinned to host CPUs %(cpus)s '
        'but one or more of these CPUs are not included in ',
        str(mock_log.call_args[0]))
def test__validate_pinning_configuration_no_config(self):
    """With neither 'cpu_dedicated_set' nor 'cpu_shared_set' configured,
    hosting mixed-policy instances must raise InvalidConfiguration.
    """
    exc = self.assertRaises(
        exception.InvalidConfiguration,
        self._test__validate_pinning_configuration)
    self.assertIn(
        "This host has mixed instance requesting both pinned "
        "and unpinned CPUs but hasn't set aside unpinned CPUs "
        "for this purpose;", str(exc))
def test__validate_pinning_configuration_not_supported(self):
    """The whole check is a no-op when the driver lacks PCPU support."""
    self._test__validate_pinning_configuration(supports_pcpus=False)
def _test__validate_vtpm_configuration(self, supports_vtpm):
    """Run _validate_vtpm_configuration over one plain instance, one
    instance requesting vTPM and one deleted instance, with the driver's
    'supports_vtpm' capability forced to the given value.
    """
    plain = fake_instance.fake_instance_obj(
        self.context, uuid=uuids.instance_1)
    with_vtpm = fake_instance.fake_instance_obj(
        self.context, uuid=uuids.instance_2)
    deleted = fake_instance.fake_instance_obj(
        self.context, uuid=uuids.instance_3)
    with_vtpm.flavor.extra_specs = {'hw:tpm_version': '2.0'}
    deleted.deleted = True
    instances = objects.InstanceList(
        objects=[plain, with_vtpm, deleted])

    image_meta = objects.ImageMeta.from_dict({})
    with test.nested(
        mock.patch.dict(
            self.compute.driver.capabilities,
            supports_vtpm=supports_vtpm),
        mock.patch.object(
            objects.ImageMeta, 'from_instance', return_value=image_meta),
    ):
        self.compute._validate_vtpm_configuration(instances)
def test__validate_vtpm_configuration_unsupported(self):
    """Instances requesting vTPM on a driver without vTPM support must
    fail the configuration check.
    """
    exc = self.assertRaises(
        exception.InvalidConfiguration,
        self._test__validate_vtpm_configuration,
        supports_vtpm=False)
    self.assertIn(
        'This host has instances with the vTPM feature enabled, but the '
        'host is not correctly configured; ', str(exc))
def test__validate_vtpm_configuration_supported(self):
    """The check passes outright when the driver supports vTPM."""
    self._test__validate_vtpm_configuration(supports_vtpm=True)
def test__get_power_state_InstanceNotFound(self):
    """_get_power_state translates InstanceNotFound from the driver
    into power_state.NOSTATE.
    """
    instance = fake_instance.fake_instance_obj(
        self.context, power_state=power_state.RUNNING)
    not_found = exception.InstanceNotFound(instance_id=1)
    with mock.patch.object(
            self.compute.driver, 'get_info', side_effect=not_found):
        self.assertEqual(
            power_state.NOSTATE,
            self.compute._get_power_state(instance))
def test__get_power_state_NotFound(self):
    """A generic NotFound from the driver is re-raised by
    _get_power_state (unlike the more specific InstanceNotFound).
    """
    instance = fake_instance.fake_instance_obj(
        self.context, power_state=power_state.RUNNING)
    with mock.patch.object(
            self.compute.driver, 'get_info',
            side_effect=exception.NotFound()):
        self.assertRaises(exception.NotFound,
                          self.compute._get_power_state, instance)
@mock.patch.object(manager.ComputeManager, '_get_power_state')
@mock.patch.object(fake_driver.FakeDriver, 'plug_vifs')
@mock.patch.object(fake_driver.FakeDriver, 'resume_state_on_host_boot')
@mock.patch.object(manager.ComputeManager,
                   '_get_instance_block_device_info')
@mock.patch.object(manager.ComputeManager, '_set_instance_obj_error_state')
def test_init_instance_failed_resume_sets_error(self, mock_set_inst,
        mock_get_inst, mock_resume, mock_plug, mock_get_power):
    """If resuming guest state on host boot fails, _init_instance puts
    the instance into the ERROR state.
    """
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        info_cache=None,
        power_state=power_state.RUNNING,
        vm_state=vm_states.ACTIVE,
        task_state=None,
        host=self.compute.host,
        expected_attrs=['info_cache'])
    # Resuming on boot is what triggers resume_state_on_host_boot.
    self.flags(resume_guests_state_on_host_boot=True)
    # The guest is reported SHUTDOWN by the driver (queried twice).
    mock_get_power.side_effect = (power_state.SHUTDOWN,
                                  power_state.SHUTDOWN)
    mock_get_inst.return_value = 'fake-bdm'
    # Simulate the resume blowing up.
    mock_resume.side_effect = test.TestingException
    self.compute._init_instance('fake-context', instance)
    mock_get_power.assert_has_calls([mock.call(instance),
                                     mock.call(instance)])
    mock_plug.assert_called_once_with(instance, mock.ANY)
    mock_get_inst.assert_called_once_with(mock.ANY, instance)
    mock_resume.assert_called_once_with(mock.ANY, instance, mock.ANY,
                                        'fake-bdm')
    # The failed resume must error out the instance.
    mock_set_inst.assert_called_once_with(instance)
@mock.patch.object(objects.BlockDeviceMapping, 'destroy')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(objects.Instance, 'destroy')
@mock.patch.object(objects.Instance, 'obj_load_attr')
@mock.patch.object(objects.quotas, 'ids_from_instance')
def test_init_instance_complete_partial_deletion(
        self, mock_ids_from_instance,
        mock_inst_destroy, mock_obj_load_attr, mock_get_by_instance_uuid,
        mock_bdm_destroy):
    """Test to complete deletion for instances in DELETED status but not
    marked as deleted in the DB
    """
    instance = fake_instance.fake_instance_obj(
        self.context,
        project_id=fakes.FAKE_PROJECT_ID,
        uuid=uuids.instance,
        vcpus=1,
        memory_mb=64,
        power_state=power_state.SHUTDOWN,
        vm_state=vm_states.DELETED,
        host=self.compute.host,
        task_state=None,
        deleted=False,
        deleted_at=None,
        metadata={},
        system_metadata={},
        expected_attrs=['metadata', 'system_metadata'])

    # Make sure instance vm_state is marked as 'DELETED' but instance is
    # not destroyed from db.
    self.assertEqual(vm_states.DELETED, instance.vm_state)
    self.assertFalse(instance.deleted)

    def fake_inst_destroy():
        instance.deleted = True
        instance.deleted_at = timeutils.utcnow()

    mock_ids_from_instance.return_value = (instance.project_id,
                                           instance.user_id)
    # Assign the function itself (not its return value) so the instance
    # is only marked deleted when Instance.destroy is actually invoked
    # by _init_instance. Previously 'fake_inst_destroy()' was called
    # eagerly here, which flagged the instance as deleted up front and
    # made the final assertion pass trivially.
    mock_inst_destroy.side_effect = fake_inst_destroy

    self.compute._init_instance(self.context, instance)

    # Make sure that instance.destroy method was called and
    # instance was deleted from db.
    self.assertTrue(mock_inst_destroy.called)
    self.assertNotEqual(0, instance.deleted)
@mock.patch('nova.compute.manager.LOG')
def test_init_instance_complete_partial_deletion_raises_exception(
        self, mock_log):
    """If completing a partial deletion fails, _init_instance logs the
    error and swallows the exception.
    """
    instance = fake_instance.fake_instance_obj(
        self.context,
        project_id=fakes.FAKE_PROJECT_ID,
        uuid=uuids.instance,
        vcpus=1,
        memory_mb=64,
        power_state=power_state.SHUTDOWN,
        vm_state=vm_states.DELETED,
        host=self.compute.host,
        task_state=None,
        deleted=False,
        deleted_at=None,
        metadata={},
        system_metadata={},
        expected_attrs=['metadata', 'system_metadata'])

    with mock.patch.object(self.compute,
                           '_complete_partial_deletion') as mock_deletion:
        mock_deletion.side_effect = test.TestingException()
        # Pass a real request context; previously the test case object
        # itself was passed as the context argument by mistake.
        self.compute._init_instance(self.context, instance)
        msg = u'Failed to complete a deletion'
        mock_log.exception.assert_called_once_with(msg, instance=instance)
def test_init_instance_stuck_in_deleting(self):
    """An instance found in the DELETING task state on startup has its
    deletion completed by _init_instance.
    """
    instance = fake_instance.fake_instance_obj(
        self.context,
        project_id=fakes.FAKE_PROJECT_ID,
        uuid=uuids.instance,
        vcpus=1,
        memory_mb=64,
        power_state=power_state.RUNNING,
        vm_state=vm_states.ACTIVE,
        host=self.compute.host,
        task_state=task_states.DELETING)
    bdms = []
    with test.nested(
        mock.patch.object(objects.BlockDeviceMappingList,
                          'get_by_instance_uuid', return_value=bdms),
        mock.patch.object(self.compute, '_delete_instance'),
        mock.patch.object(instance, 'obj_load_attr'),
    ) as (mock_get_bdms, mock_delete, _mock_load):
        self.compute._init_instance(self.context, instance)

        mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
        mock_delete.assert_called_once_with(self.context, instance, bdms)
@mock.patch.object(objects.Instance, 'get_by_uuid')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
def test_init_instance_stuck_in_deleting_raises_exception(
        self, mock_get_by_instance_uuid, mock_get_by_uuid):
    """If finishing an interrupted deletion blows up, the instance is
    put into the ERROR state instead of the exception escaping.
    """
    instance = fake_instance.fake_instance_obj(
        self.context,
        project_id=fakes.FAKE_PROJECT_ID,
        uuid=uuids.instance,
        vcpus=1,
        memory_mb=64,
        metadata={},
        system_metadata={},
        host=self.compute.host,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.DELETING,
        expected_attrs=['metadata', 'system_metadata'])
    bdms = []
    mock_get_by_instance_uuid.return_value = bdms
    mock_get_by_uuid.return_value = instance

    with test.nested(
        mock.patch.object(self.compute, '_delete_instance',
                          side_effect=test.TestingException('test')),
        mock.patch.object(self.compute, '_set_instance_obj_error_state'),
    ) as (_mock_delete, mock_set_error_state):
        self.compute._init_instance(self.context, instance)
        mock_set_error_state.assert_called_once_with(instance)
def _test_init_instance_reverts_crashed_migrations(self,
                                                   old_vm_state=None):
    """An instance caught mid-resize (RESIZE_MIGRATING) when the service
    restarts has its migration reverted; the guest is powered back on
    only if it was previously running.
    """
    # The guest is powered on when there is no recorded previous state
    # or it was ACTIVE before the crashed migration. (The expression
    # already yields a bool; the old 'True if ... else False' wrapper
    # was redundant.)
    power_on = not old_vm_state or old_vm_state == vm_states.ACTIVE
    sys_meta = {
        'old_vm_state': old_vm_state
    }
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ERROR,
        task_state=task_states.RESIZE_MIGRATING,
        power_state=power_state.SHUTDOWN,
        system_metadata=sys_meta,
        host=self.compute.host,
        expected_attrs=['system_metadata'])
    instance.migration_context = objects.MigrationContext(migration_id=42)
    migration = objects.Migration(source_compute='fake-host1',
                                  dest_compute='fake-host2')
    with test.nested(
        mock.patch.object(objects.Instance, 'get_network_info',
                          return_value=network_model.NetworkInfo()),
        mock.patch.object(self.compute.driver, 'plug_vifs'),
        mock.patch.object(self.compute.driver, 'finish_revert_migration'),
        mock.patch.object(self.compute, '_get_instance_block_device_info',
                          return_value=[]),
        mock.patch.object(self.compute.driver, 'get_info'),
        mock.patch.object(instance, 'save'),
        mock.patch.object(self.compute, '_retry_reboot',
                          return_value=(False, None)),
        mock.patch.object(objects.Migration, 'get_by_id_and_instance',
                          return_value=migration)
    ) as (mock_get_nw, mock_plug, mock_finish, mock_get_inst,
          mock_get_info, mock_save, mock_retry, mock_get_mig):
        mock_get_info.side_effect = (
            hardware.InstanceInfo(state=power_state.SHUTDOWN),
            hardware.InstanceInfo(state=power_state.SHUTDOWN))

        self.compute._init_instance(self.context, instance)

        mock_get_mig.assert_called_with(self.context, 42, instance.uuid)
        mock_retry.assert_called_once_with(instance, power_state.SHUTDOWN)
        mock_get_nw.assert_called_once_with()
        mock_plug.assert_called_once_with(instance, [])
        mock_get_inst.assert_called_once_with(self.context, instance)
        # The revert passes power_on so the guest comes back in the
        # right power state.
        mock_finish.assert_called_once_with(self.context, instance,
                                            [], migration, [], power_on)
        mock_save.assert_called_once_with()
        mock_get_info.assert_has_calls(
            [mock.call(instance, use_cache=False),
             mock.call(instance, use_cache=False)])
    self.assertIsNone(instance.task_state)
def test_init_instance_reverts_crashed_migration_from_active(self):
    """A crashed resize of a previously ACTIVE instance is reverted
    with power-on.
    """
    self._test_init_instance_reverts_crashed_migrations(
        old_vm_state=vm_states.ACTIVE)
def test_init_instance_reverts_crashed_migration_from_stopped(self):
    """A crashed resize of a previously STOPPED instance is reverted
    without power-on.
    """
    self._test_init_instance_reverts_crashed_migrations(
        old_vm_state=vm_states.STOPPED)
def test_init_instance_reverts_crashed_migration_no_old_state(self):
    """With no recorded previous vm_state the revert powers the guest
    back on.
    """
    self._test_init_instance_reverts_crashed_migrations(old_vm_state=None)
def test_init_instance_resets_crashed_live_migration(self):
    """An instance stuck in the MIGRATING task state on startup has its
    crashed live migration aborted and errored out, and the instance is
    reset back to ACTIVE.
    """
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        host=self.compute.host,
        task_state=task_states.MIGRATING)
    migration = objects.Migration(source_compute='fake-host1', id=39,
                                  dest_compute='fake-host2')
    with test.nested(
        mock.patch.object(instance, 'save'),
        mock.patch('nova.objects.Instance.get_network_info',
                   return_value=network_model.NetworkInfo()),
        mock.patch.object(objects.Migration, 'get_by_instance_and_status',
                          return_value=migration),
        mock.patch.object(self.compute, 'live_migration_abort'),
        mock.patch.object(self.compute, '_set_migration_status')
    ) as (save, get_nw_info, mock_get_status, mock_abort, mock_set_migr):
        self.compute._init_instance(self.context, instance)

        save.assert_called_once_with(expected_task_state=['migrating'])
        get_nw_info.assert_called_once_with()
        # The 'running' migration record is looked up, aborted and then
        # flipped to the 'error' status.
        mock_get_status.assert_called_with(self.context, instance.uuid,
                                           'running')
        mock_abort.assert_called_with(self.context, instance,
                                      migration.id)
        mock_set_migr.assert_called_with(migration, 'error')
    self.assertIsNone(instance.task_state)
    self.assertEqual(vm_states.ACTIVE, instance.vm_state)
def _test_init_instance_sets_building_error(self, vm_state,
                                            task_state=None):
    """Run _init_instance on an instance in the given build-related
    state and assert it ends up in ERROR with no task state.
    """
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_state,
        host=self.compute.host,
        task_state=task_state)
    with mock.patch.object(instance, 'save') as mock_save:
        self.compute._init_instance(self.context, instance)
        mock_save.assert_called_once_with()
    self.assertEqual(vm_states.ERROR, instance.vm_state)
    self.assertIsNone(instance.task_state)
def test_init_instance_sets_building_error(self):
    """An instance stuck in the BUILDING vm_state ends up in ERROR."""
    self._test_init_instance_sets_building_error(vm_states.BUILDING)
def test_init_instance_sets_rebuilding_errors(self):
    """Every rebuild-related task state, whether the instance was
    ACTIVE or STOPPED, must result in an ERROR instance.
    """
    rebuild_tasks = (task_states.REBUILDING,
                     task_states.REBUILD_BLOCK_DEVICE_MAPPING,
                     task_states.REBUILD_SPAWNING)
    for vm_state in (vm_states.ACTIVE, vm_states.STOPPED):
        for task_state in rebuild_tasks:
            self._test_init_instance_sets_building_error(
                vm_state, task_state)
def _test_init_instance_sets_building_tasks_error(self, instance):
    """Run _init_instance on an instance stuck in a build task state
    and assert it is errored out.
    """
    instance.host = self.compute.host
    with mock.patch.object(instance, 'save') as mock_save:
        self.compute._init_instance(self.context, instance)
        mock_save.assert_called_once_with()
    self.assertEqual(vm_states.ERROR, instance.vm_state)
    self.assertIsNone(instance.task_state)
def test_init_instance_sets_building_tasks_error_scheduling(self):
    """A leftover SCHEDULING task state is treated as a failed build."""
    instance = fake_instance.fake_instance_obj(
        self.context, uuid=uuids.instance, vm_state=None,
        task_state=task_states.SCHEDULING)
    self._test_init_instance_sets_building_tasks_error(instance)
def test_init_instance_sets_building_tasks_error_block_device(self):
    """A leftover BLOCK_DEVICE_MAPPING task state is treated as a
    failed build.
    """
    instance = objects.Instance(
        self.context, uuid=uuids.instance, vm_state=None,
        task_state=task_states.BLOCK_DEVICE_MAPPING)
    self._test_init_instance_sets_building_tasks_error(instance)
def test_init_instance_sets_building_tasks_error_networking(self):
    """A leftover NETWORKING task state is treated as a failed build."""
    instance = objects.Instance(
        self.context, uuid=uuids.instance, vm_state=None,
        task_state=task_states.NETWORKING)
    self._test_init_instance_sets_building_tasks_error(instance)
def test_init_instance_sets_building_tasks_error_spawning(self):
instance = objects.Instance(self.context)
instance.uuid = uuids.instance
instance.vm_state = None
instance.task_state = task_states.SPAWNING
self._test_init_instance_sets_building_tasks_error(instance)
def _test_init_instance_cleans_image_states(self, instance):
with mock.patch.object(instance, 'save') as save:
self.compute._get_power_state = mock.Mock()
instance.info_cache = None
instance.power_state = power_state.RUNNING
instance.host = self.compute.host
self.compute._init_instance(self.context, instance)
save.assert_called_once_with()
self.assertIsNone(instance.task_state)
@mock.patch('nova.compute.manager.ComputeManager._get_power_state',
return_value=power_state.RUNNING)
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
def _test_init_instance_cleans_task_states(self, powerstate, state,
mock_get_uuid, mock_get_power_state):
instance = objects.Instance(self.context)
instance.uuid = uuids.instance
instance.info_cache = None
instance.power_state = power_state.RUNNING
instance.vm_state = vm_states.ACTIVE
instance.task_state = state
instance.host = self.compute.host
mock_get_power_state.return_value = powerstate
self.compute._init_instance(self.context, instance)
return instance
def test_init_instance_cleans_image_state_pending_upload(self):
instance = objects.Instance(self.context)
instance.uuid = uuids.instance
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_PENDING_UPLOAD
self._test_init_instance_cleans_image_states(instance)
def test_init_instance_cleans_image_state_uploading(self):
instance = objects.Instance(self.context)
instance.uuid = uuids.instance
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_UPLOADING
self._test_init_instance_cleans_image_states(instance)
def test_init_instance_cleans_image_state_snapshot(self):
instance = objects.Instance(self.context)
instance.uuid = uuids.instance
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_SNAPSHOT
self._test_init_instance_cleans_image_states(instance)
def test_init_instance_cleans_image_state_snapshot_pending(self):
instance = objects.Instance(self.context)
instance.uuid = uuids.instance
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
self._test_init_instance_cleans_image_states(instance)
@mock.patch.object(objects.Instance, 'save')
def test_init_instance_cleans_running_pausing(self, mock_save):
instance = self._test_init_instance_cleans_task_states(
power_state.RUNNING, task_states.PAUSING)
mock_save.assert_called_once_with()
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
@mock.patch.object(objects.Instance, 'save')
def test_init_instance_cleans_running_unpausing(self, mock_save):
instance = self._test_init_instance_cleans_task_states(
power_state.RUNNING, task_states.UNPAUSING)
mock_save.assert_called_once_with()
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
@mock.patch('nova.compute.manager.ComputeManager.unpause_instance')
def test_init_instance_cleans_paused_unpausing(self, mock_unpause):
def fake_unpause(context, instance):
instance.task_state = None
mock_unpause.side_effect = fake_unpause
instance = self._test_init_instance_cleans_task_states(
power_state.PAUSED, task_states.UNPAUSING)
mock_unpause.assert_called_once_with(self.context, instance)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
def test_init_instance_deletes_error_deleting_instance(self):
instance = fake_instance.fake_instance_obj(
self.context,
project_id=fakes.FAKE_PROJECT_ID,
uuid=uuids.instance,
vcpus=1,
memory_mb=64,
vm_state=vm_states.ERROR,
host=self.compute.host,
task_state=task_states.DELETING)
bdms = []
with test.nested(
mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid',
return_value=bdms),
mock.patch.object(self.compute, '_delete_instance'),
mock.patch.object(instance, 'obj_load_attr')
) as (mock_get, mock_delete, mock_load):
self.compute._init_instance(self.context, instance)
mock_get.assert_called_once_with(self.context, instance.uuid)
mock_delete.assert_called_once_with(self.context, instance,
bdms)
def test_init_instance_resize_prep(self):
instance = fake_instance.fake_instance_obj(
self.context,
uuid=uuids.instance,
vm_state=vm_states.ACTIVE,
host=self.compute.host,
task_state=task_states.RESIZE_PREP,
power_state=power_state.RUNNING)
with test.nested(
mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.RUNNING),
mock.patch.object(objects.Instance, 'get_network_info'),
mock.patch.object(instance, 'save', autospec=True)
) as (mock_get_power_state, mock_nw_info, mock_instance_save):
self.compute._init_instance(self.context, instance)
mock_instance_save.assert_called_once_with()
self.assertIsNone(instance.task_state)
@mock.patch('nova.virt.fake.FakeDriver.power_off')
@mock.patch.object(compute_utils, 'get_value_from_system_metadata',
return_value=CONF.shutdown_timeout)
def test_power_off_values(self, mock_get_metadata, mock_power_off):
self.flags(shutdown_retry_interval=20, group='compute')
instance = fake_instance.fake_instance_obj(
self.context,
uuid=uuids.instance,
vm_state=vm_states.ACTIVE,
task_state=task_states.POWERING_OFF)
self.compute._power_off_instance(instance, clean_shutdown=True)
mock_power_off.assert_called_once_with(
instance,
CONF.shutdown_timeout,
20)
@mock.patch('nova.context.RequestContext.elevated')
@mock.patch('nova.objects.Instance.get_network_info')
@mock.patch(
'nova.compute.manager.ComputeManager._get_instance_block_device_info')
@mock.patch('nova.virt.driver.ComputeDriver.destroy')
@mock.patch('nova.virt.fake.FakeDriver.get_volume_connector')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch(
'nova.compute.manager.ComputeManager._notify_about_instance_usage')
def test_shutdown_instance_versioned_notifications(self,
mock_notify_unversioned, mock_notify, mock_connector,
mock_destroy, mock_blk_device_info, mock_nw_info, mock_elevated):
mock_elevated.return_value = self.context
instance = fake_instance.fake_instance_obj(
self.context,
uuid=uuids.instance,
vm_state=vm_states.ERROR,
task_state=task_states.DELETING)
bdms = [mock.Mock(id=1, is_volume=True)]
self.compute._shutdown_instance(self.context, instance, bdms,
notify=True, try_deallocate_networks=False)
mock_notify.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='shutdown', phase='start', bdms=bdms),
mock.call(self.context, instance, 'fake-mini',
action='shutdown', phase='end', bdms=bdms)])
@mock.patch('nova.context.RequestContext.elevated')
@mock.patch('nova.objects.Instance.get_network_info')
@mock.patch(
'nova.compute.manager.ComputeManager._get_instance_block_device_info')
@mock.patch('nova.virt.driver.ComputeDriver.destroy')
@mock.patch('nova.virt.fake.FakeDriver.get_volume_connector')
def _test_shutdown_instance_exception(self, exc, mock_connector,
mock_destroy, mock_blk_device_info, mock_nw_info, mock_elevated):
mock_connector.side_effect = exc
mock_elevated.return_value = self.context
instance = fake_instance.fake_instance_obj(
self.context,
uuid=uuids.instance,
vm_state=vm_states.ERROR,
task_state=task_states.DELETING)
bdms = [mock.Mock(id=1, is_volume=True, attachment_id=None)]
self.compute._shutdown_instance(self.context, instance, bdms,
notify=False, try_deallocate_networks=False)
mock_connector.assert_called_once_with(instance)
def test_shutdown_instance_endpoint_not_found(self):
exc = cinder_exception.EndpointNotFound
self._test_shutdown_instance_exception(exc)
def test_shutdown_instance_client_exception(self):
exc = cinder_exception.ClientException(code=9001)
self._test_shutdown_instance_exception(exc)
def test_shutdown_instance_volume_not_found(self):
exc = exception.VolumeNotFound(volume_id=42)
self._test_shutdown_instance_exception(exc)
def test_shutdown_instance_disk_not_found(self):
exc = exception.DiskNotFound(location="not\\here")
self._test_shutdown_instance_exception(exc)
def test_shutdown_instance_other_exception(self):
exc = Exception('some other exception')
self._test_shutdown_instance_exception(exc)
def _test_init_instance_retries_reboot(self, instance, reboot_type,
return_power_state):
instance.host = self.compute.host
with test.nested(
mock.patch.object(self.compute, '_get_power_state',
return_value=return_power_state),
mock.patch.object(self.compute, 'reboot_instance'),
mock.patch.object(objects.Instance, 'get_network_info')
) as (
_get_power_state,
reboot_instance,
get_network_info
):
self.compute._init_instance(self.context, instance)
call = mock.call(self.context, instance, block_device_info=None,
reboot_type=reboot_type)
reboot_instance.assert_has_calls([call])
def test_init_instance_retries_reboot_pending(self):
instance = objects.Instance(self.context)
instance.uuid = uuids.instance
instance.task_state = task_states.REBOOT_PENDING
for state in vm_states.ALLOW_SOFT_REBOOT:
instance.vm_state = state
self._test_init_instance_retries_reboot(instance, 'SOFT',
power_state.RUNNING)
def test_init_instance_retries_reboot_pending_hard(self):
instance = objects.Instance(self.context)
instance.uuid = uuids.instance
instance.task_state = task_states.REBOOT_PENDING_HARD
for state in vm_states.ALLOW_HARD_REBOOT:
# NOTE(dave-mcnally) while a reboot of a vm in error state is
# possible we don't attempt to recover an error during init
if state == vm_states.ERROR:
continue
instance.vm_state = state
self._test_init_instance_retries_reboot(instance, 'HARD',
power_state.RUNNING)
def test_init_instance_retries_reboot_pending_soft_became_hard(self):
instance = objects.Instance(self.context)
instance.uuid = uuids.instance
instance.task_state = task_states.REBOOT_PENDING
for state in vm_states.ALLOW_HARD_REBOOT:
# NOTE(dave-mcnally) while a reboot of a vm in error state is
# possible we don't attempt to recover an error during init
if state == vm_states.ERROR:
continue
instance.vm_state = state
with mock.patch.object(instance, 'save'):
self._test_init_instance_retries_reboot(instance, 'HARD',
power_state.SHUTDOWN)
self.assertEqual(task_states.REBOOT_PENDING_HARD,
instance.task_state)
def test_init_instance_retries_reboot_started(self):
instance = objects.Instance(self.context)
instance.uuid = uuids.instance
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.REBOOT_STARTED
with mock.patch.object(instance, 'save'):
self._test_init_instance_retries_reboot(instance, 'HARD',
power_state.NOSTATE)
def test_init_instance_retries_reboot_started_hard(self):
instance = objects.Instance(self.context)
instance.uuid = uuids.instance
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.REBOOT_STARTED_HARD
self._test_init_instance_retries_reboot(instance, 'HARD',
power_state.NOSTATE)
def _test_init_instance_cleans_reboot_state(self, instance):
instance.host = self.compute.host
with test.nested(
mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.RUNNING),
mock.patch.object(instance, 'save', autospec=True),
mock.patch.object(objects.Instance, 'get_network_info')
) as (
_get_power_state,
instance_save,
get_network_info
):
self.compute._init_instance(self.context, instance)
instance_save.assert_called_once_with()
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
def test_init_instance_cleans_image_state_reboot_started(self):
instance = objects.Instance(self.context)
instance.uuid = uuids.instance
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.REBOOT_STARTED
instance.power_state = power_state.RUNNING
self._test_init_instance_cleans_reboot_state(instance)
def test_init_instance_cleans_image_state_reboot_started_hard(self):
instance = objects.Instance(self.context)
instance.uuid = uuids.instance
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.REBOOT_STARTED_HARD
instance.power_state = power_state.RUNNING
self._test_init_instance_cleans_reboot_state(instance)
def test_init_instance_retries_power_off(self):
instance = objects.Instance(self.context)
instance.uuid = uuids.instance
instance.id = 1
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_OFF
instance.host = self.compute.host
with mock.patch.object(self.compute, 'stop_instance'):
self.compute._init_instance(self.context, instance)
call = mock.call(self.context, instance, True)
self.compute.stop_instance.assert_has_calls([call])
def test_init_instance_retries_power_on(self):
instance = objects.Instance(self.context)
instance.uuid = uuids.instance
instance.id = 1
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_ON
instance.host = self.compute.host
with mock.patch.object(self.compute, 'start_instance'):
self.compute._init_instance(self.context, instance)
call = mock.call(self.context, instance)
self.compute.start_instance.assert_has_calls([call])
def test_init_instance_retries_power_on_silent_exception(self):
instance = objects.Instance(self.context)
instance.uuid = uuids.instance
instance.id = 1
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_ON
instance.host = self.compute.host
with mock.patch.object(self.compute, 'start_instance',
return_value=Exception):
init_return = self.compute._init_instance(self.context, instance)
call = mock.call(self.context, instance)
self.compute.start_instance.assert_has_calls([call])
self.assertIsNone(init_return)
def test_init_instance_retries_power_off_silent_exception(self):
instance = objects.Instance(self.context)
instance.uuid = uuids.instance
instance.id = 1
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_OFF
instance.host = self.compute.host
with mock.patch.object(self.compute, 'stop_instance',
return_value=Exception):
init_return = self.compute._init_instance(self.context, instance)
call = mock.call(self.context, instance, True)
self.compute.stop_instance.assert_has_calls([call])
self.assertIsNone(init_return)
def test_get_power_state(self):
instance = objects.Instance(self.context)
instance.uuid = uuids.instance
instance.id = 1
instance.vm_state = vm_states.STOPPED
instance.task_state = None
instance.host = self.compute.host
with mock.patch.object(self.compute.driver, 'get_info') as mock_info:
mock_info.return_value = hardware.InstanceInfo(
state=power_state.SHUTDOWN)
res = self.compute._get_power_state(instance)
mock_info.assert_called_once_with(instance, use_cache=False)
self.assertEqual(res, power_state.SHUTDOWN)
@mock.patch('nova.objects.InstanceList.get_by_filters')
def test_get_instances_on_driver(self, mock_instance_list):
driver_instances = []
for x in range(10):
driver_instances.append(fake_instance.fake_db_instance())
def _make_instance_list(db_list):
return instance_obj._make_instance_list(
self.context, objects.InstanceList(), db_list, None)
driver_uuids = [inst['uuid'] for inst in driver_instances]
mock_instance_list.return_value = _make_instance_list(driver_instances)
with mock.patch.object(self.compute.driver,
'list_instance_uuids') as mock_driver_uuids:
mock_driver_uuids.return_value = driver_uuids
result = self.compute._get_instances_on_driver(self.context)
self.assertEqual([x['uuid'] for x in driver_instances],
[x['uuid'] for x in result])
expected_filters = {'uuid': driver_uuids}
mock_instance_list.assert_called_with(self.context, expected_filters,
use_slave=True)
@mock.patch('nova.objects.InstanceList.get_by_filters')
def test_get_instances_on_driver_empty(self, mock_instance_list):
with mock.patch.object(self.compute.driver,
'list_instance_uuids') as mock_driver_uuids:
mock_driver_uuids.return_value = []
result = self.compute._get_instances_on_driver(self.context)
# Short circuit DB call, get_by_filters should not be called
self.assertEqual(0, mock_instance_list.call_count)
self.assertEqual(1, mock_driver_uuids.call_count)
self.assertEqual([], [x['uuid'] for x in result])
@mock.patch('nova.objects.InstanceList.get_by_filters')
def test_get_instances_on_driver_fallback(self, mock_instance_list):
# Test getting instances when driver doesn't support
# 'list_instance_uuids'
self.compute.host = 'host'
filters = {}
self.flags(instance_name_template='inst-%i')
all_instances = []
driver_instances = []
for x in range(10):
instance = fake_instance.fake_db_instance(name='inst-%i' % x,
id=x)
if x % 2:
driver_instances.append(instance)
all_instances.append(instance)
def _make_instance_list(db_list):
return instance_obj._make_instance_list(
self.context, objects.InstanceList(), db_list, None)
driver_instance_names = [inst['name'] for inst in driver_instances]
mock_instance_list.return_value = _make_instance_list(all_instances)
with test.nested(
mock.patch.object(self.compute.driver, 'list_instance_uuids'),
mock.patch.object(self.compute.driver, 'list_instances')
) as (
mock_driver_uuids,
mock_driver_instances
):
mock_driver_uuids.side_effect = NotImplementedError()
mock_driver_instances.return_value = driver_instance_names
result = self.compute._get_instances_on_driver(self.context,
filters)
self.assertEqual([x['uuid'] for x in driver_instances],
[x['uuid'] for x in result])
expected_filters = {'host': self.compute.host}
mock_instance_list.assert_called_with(self.context, expected_filters,
use_slave=True)
@mock.patch.object(compute_utils, 'notify_usage_exists')
@mock.patch.object(objects.TaskLog, 'end_task')
@mock.patch.object(objects.TaskLog, 'begin_task')
@mock.patch.object(objects.InstanceList, 'get_active_by_window_joined')
@mock.patch.object(objects.TaskLog, 'get')
def test_instance_usage_audit(self, mock_get, mock_get_active, mock_begin,
mock_end, mock_notify):
instances = [objects.Instance(uuid=uuids.instance)]
def fake_task_log(*a, **k):