# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute service."""
import datetime
import fixtures as std_fixtures
from itertools import chain
import operator
import sys
from castellan import key_manager
import ddt
import mock
from neutronclient.common import exceptions as neutron_exceptions
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import base64
from oslo_serialization import jsonutils
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
import testtools
from testtools import matchers as testtools_matchers
import nova
from nova import availability_zones
from nova import block_device
from nova.compute import api as compute
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import manager as compute_manager
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova.console import type as ctype
from nova import context
from nova.db import api as db
from nova import exception
from nova.image import glance as image_api
from nova.network import model as network_model
from nova import objects
from nova.objects import block_device as block_device_obj
from nova.objects import fields as obj_fields
from nova.objects import instance as instance_obj
from nova.objects import migrate_data as migrate_data_obj
from nova.policies import base as base_policy
from nova.policies import servers as servers_policy
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests import fixtures
from nova.tests.fixtures import cyborg as cyborg_fixture
from nova.tests.unit.compute import eventlet_utils
from nova.tests.unit.compute import fake_resource_tracker
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_diagnostics
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
from nova.tests.unit import fake_network_cache_model
from nova.tests.unit import fake_server_actions
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_diagnostics
from nova.tests.unit.objects import test_flavor
from nova.tests.unit.objects import test_instance_numa
from nova.tests.unit.objects import test_migration
from nova.tests.unit import utils as test_utils
from nova import utils
from nova.virt import block_device as driver_block_device
from nova.virt.block_device import DriverVolumeBlockDevice as driver_bdm_volume
from nova.virt import event
from nova.virt import fake
from nova.virt import hardware
from nova.volume import cinder
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
FAKE_IMAGE_REF = uuids.image_ref
NODENAME = 'fakenode1'
NODENAME2 = 'fakenode2'


def fake_not_implemented(*args, **kwargs):
    raise NotImplementedError()


def get_primitive_instance_by_uuid(context, instance_uuid):
    """Helper method to get an instance and then convert it to
    a primitive form using jsonutils.
    """
    instance = db.instance_get_by_uuid(context, instance_uuid)
    return jsonutils.to_primitive(instance)


def unify_instance(instance):
    """Return a dict-like instance for both object-initiated and
    model-initiated sources that can reasonably be compared.
    """
    newdict = dict()
    for k, v in instance.items():
        if isinstance(v, datetime.datetime):
            # NOTE(danms): DB models and Instance objects have different
            # timezone expectations
            v = v.replace(tzinfo=None)
        elif k == 'fault':
            # NOTE(danms): DB models don't have 'fault'
            continue
        elif k == 'pci_devices':
            # NOTE(yonlig.he) pci devices need lazy loading
            # fake db does not support it yet.
            continue
        newdict[k] = v
    return newdict
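
# Illustrative sketch (not part of the original tests): unify_instance()
# strips timezone info, so a tz-aware DB-model timestamp and a naive
# Instance-object timestamp compare equal after normalization, e.g.:
#
#     aware = {'updated_at': datetime.datetime(
#         2013, 4, 3, tzinfo=datetime.timezone.utc)}
#     naive = {'updated_at': datetime.datetime(2013, 4, 3)}
#     assert unify_instance(aware) == unify_instance(naive)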


class FakeComputeTaskAPI(object):

    def resize_instance(self, ctxt, instance, scheduler_hint, flavor,
                        reservations=None, clean_shutdown=True,
                        request_spec=None, host_list=None):
        pass


class BaseTestCase(test.TestCase):

    def setUp(self):
        super(BaseTestCase, self).setUp()
        self.notifier = self.useFixture(fixtures.NotificationFixture(self))

        self.compute = compute_manager.ComputeManager()
        # NOTE(gibi): this is a hack to make the fake virt driver use the
        # nodes needed for these tests.
        self.compute.driver._set_nodes([NODENAME, NODENAME2])

        # execute power syncing synchronously for testing:
        self.compute._sync_power_pool = eventlet_utils.SyncPool()

        # override tracker with a version that doesn't need the database:
        fake_rt = fake_resource_tracker.FakeResourceTracker(
            self.compute.host, self.compute.driver)
        self.compute.rt = fake_rt

        def fake_get_compute_nodes_in_db(self, context, *args, **kwargs):
            fake_compute_nodes = [{'local_gb': 259,
                                   'uuid': uuids.fake_compute_node,
                                   'vcpus_used': 0,
                                   'deleted': 0,
                                   'hypervisor_type': 'powervm',
                                   'created_at': '2013-04-01T00:27:06.000000',
                                   'local_gb_used': 0,
                                   'updated_at': '2013-04-03T00:35:41.000000',
                                   'hypervisor_hostname': 'fake_phyp1',
                                   'memory_mb_used': 512,
                                   'memory_mb': 131072,
                                   'current_workload': 0,
                                   'vcpus': 16,
                                   'mapped': 1,
                                   'cpu_info': 'ppc64,powervm,3940',
                                   'running_vms': 0,
                                   'free_disk_gb': 259,
                                   'service_id': 7,
                                   'hypervisor_version': 7,
                                   'disk_available_least': 265856,
                                   'deleted_at': None,
                                   'free_ram_mb': 130560,
                                   'metrics': '',
                                   'stats': '',
                                   'numa_topology': '',
                                   'id': 2,
                                   'host': 'fake_phyp1',
                                   'cpu_allocation_ratio': 16.0,
                                   'ram_allocation_ratio': 1.5,
                                   'disk_allocation_ratio': 1.0,
                                   'host_ip': '127.0.0.1'}]
            return [objects.ComputeNode._from_db_object(
                        context, objects.ComputeNode(), cn)
                    for cn in fake_compute_nodes]

        def fake_compute_node_delete(context, compute_node_id):
            self.assertEqual(2, compute_node_id)

        self.stub_out(
            'nova.compute.manager.ComputeManager._get_compute_nodes_in_db',
            fake_get_compute_nodes_in_db)
        self.stub_out('nova.db.api.compute_node_delete',
                      fake_compute_node_delete)

        self.compute.update_available_resource(
            context.get_admin_context())

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id)

        def fake_show(meh, context, id, **kwargs):
            if id:
                return {'id': id,
                        'name': 'fake_name',
                        'status': 'active',
                        'properties': {'kernel_id': uuids.kernel_id,
                                       'ramdisk_id': uuids.ramdisk_id,
                                       'something_else': 'meow'}}
            else:
                raise exception.ImageNotFound(image_id=id)

        self.useFixture(fixtures.GlanceFixture(self))
        self.stub_out('nova.tests.fixtures.GlanceFixture.show', fake_show)

        fake_network.set_stub_network_methods(self)
        fake_server_actions.stub_out_action_events(self)

        def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs):
            return network_model.NetworkInfo()

        self.stub_out('nova.network.neutron.API.get_instance_nw_info',
                      fake_get_nw_info)
        self.stub_out('nova.network.neutron.API.migrate_instance_start',
                      lambda *args, **kwargs: None)
        self.useFixture(fixtures.NeutronFixture(self))

        self.compute_api = compute.API()

        # Just to make long lines short
        self.rt = self.compute.rt

        self.mock_get_allocations = self.useFixture(
            fixtures.fixtures.MockPatch(
                'nova.scheduler.client.report.SchedulerReportClient.'
                'get_allocations_for_consumer')).mock
        self.mock_get_allocations.return_value = {}

        self.mock_get_allocs = self.useFixture(
            fixtures.fixtures.MockPatch(
                'nova.scheduler.client.report.SchedulerReportClient.'
                'get_allocs_for_consumer')).mock
        self.mock_get_allocs.return_value = {'allocations': {}}

    def tearDown(self):
        ctxt = context.get_admin_context()
        instances = db.instance_get_all(ctxt)
        for instance in instances:
            db.instance_destroy(ctxt, instance['uuid'])
        super(BaseTestCase, self).tearDown()

    def _fake_instance(self, updates):
        return fake_instance.fake_instance_obj(None, **updates)

    def _create_fake_instance_obj(self, params=None, type_name='m1.tiny',
                                  services=False, ctxt=None):
        ctxt = ctxt or self.context
        flavor = objects.Flavor.get_by_name(ctxt, type_name)
        inst = objects.Instance(context=ctxt)
        inst.vm_state = vm_states.ACTIVE
        inst.task_state = None
        inst.power_state = power_state.RUNNING
        inst.image_ref = FAKE_IMAGE_REF
        inst.reservation_id = 'r-fakeres'
        inst.user_id = self.user_id
        inst.project_id = self.project_id
        inst.host = self.compute.host
        inst.node = NODENAME
        inst.instance_type_id = flavor.id
        inst.ami_launch_index = 0
        inst.memory_mb = 0
        inst.vcpus = 0
        inst.root_gb = 0
        inst.ephemeral_gb = 0
        inst.architecture = obj_fields.Architecture.X86_64
        inst.os_type = 'Linux'
        inst.system_metadata = (
            params and params.get('system_metadata', {}) or {})
        inst.info_cache = objects.InstanceInfoCache()
        inst.info_cache.network_info = network_model.NetworkInfo()
        inst.locked = False
        inst.created_at = timeutils.utcnow()
        inst.updated_at = timeutils.utcnow()
        inst.launched_at = timeutils.utcnow()
        inst.security_groups = objects.SecurityGroupList(objects=[])
        inst.keypairs = objects.KeyPairList(objects=[])
        inst.flavor = flavor
        inst.old_flavor = None
        inst.new_flavor = None
        if params:
            inst.flavor.update(params.pop('flavor', {}))
            inst.update(params)
        if services:
            _create_service_entries(self.context.elevated(),
                                    [['fake_zone', [inst.host]]])
        cell1 = self.cell_mappings[test.CELL1_NAME]
        with context.target_cell(ctxt, cell1) as cctxt:
            inst._context = cctxt
            inst.create()

        # Create an instance mapping in cell1 so the API can get the
        # instance.
        inst_map = objects.InstanceMapping(
            ctxt,
            instance_uuid=inst.uuid,
            project_id=inst.project_id,
            cell_mapping=cell1)
        inst_map.create()

        return inst

    def _init_aggregate_with_host(self, aggr, aggr_name, zone, host):
        if not aggr:
            aggr = self.api.create_aggregate(self.context, aggr_name, zone)

        aggr = self.api.add_host_to_aggregate(self.context, aggr.id, host)

        return aggr


class ComputeVolumeTestCase(BaseTestCase):

    def setUp(self):
        super(ComputeVolumeTestCase, self).setUp()
        self.fetched_attempts = 0
        self.instance = {
            'id': 'fake',
            'uuid': uuids.instance,
            'name': 'fake',
            'root_device_name': '/dev/vda',
        }
        self.fake_volume = fake_block_device.FakeDbBlockDeviceDict(
            {'source_type': 'volume', 'destination_type': 'volume',
             'volume_id': uuids.volume_id, 'device_name': '/dev/vdb',
             'connection_info': jsonutils.dumps({})})
        self.instance_object = objects.Instance._from_db_object(
            self.context, objects.Instance(),
            fake_instance.fake_db_instance())
        self.stub_out('nova.volume.cinder.API.get',
                      lambda *a, **kw: {'id': uuids.volume_id, 'size': 4,
                                        'attach_status': 'detached'})
        self.stub_out('nova.virt.fake.FakeDriver.get_volume_connector',
                      lambda *a, **kw: None)
        self.stub_out('nova.volume.cinder.API.initialize_connection',
                      lambda *a, **kw: {})
        self.stub_out('nova.volume.cinder.API.terminate_connection',
                      lambda *a, **kw: None)
        self.stub_out('nova.volume.cinder.API.attach',
                      lambda *a, **kw: None)
        self.stub_out('nova.volume.cinder.API.detach',
                      lambda *a, **kw: None)
        self.stub_out('nova.volume.cinder.API.check_availability_zone',
                      lambda *a, **kw: None)
        self.stub_out('eventlet.greenthread.sleep',
                      lambda *a, **kw: None)

        def store_cinfo(context, *args, **kwargs):
            self.cinfo = jsonutils.loads(args[-1].get('connection_info'))
            return self.fake_volume

        self.stub_out('nova.db.api.block_device_mapping_create', store_cinfo)
        self.stub_out('nova.db.api.block_device_mapping_update', store_cinfo)

    @mock.patch.object(compute_utils, 'EventReporter')
    def test_attach_volume_serial(self, mock_event):
        fake_bdm = objects.BlockDeviceMapping(context=self.context,
                                              **self.fake_volume)
        with (mock.patch.object(cinder.API, 'get_volume_encryption_metadata',
                                return_value={})):
            instance = self._create_fake_instance_obj()
            self.compute.attach_volume(self.context, instance, bdm=fake_bdm)
            self.assertEqual(self.cinfo.get('serial'), uuids.volume_id)
            mock_event.assert_called_once_with(
                self.context, 'compute_attach_volume', CONF.host,
                instance.uuid, graceful_exit=False)

    @mock.patch.object(compute_utils, 'EventReporter')
    @mock.patch('nova.context.RequestContext.elevated')
    @mock.patch('nova.compute.utils.notify_about_volume_attach_detach')
    def test_attach_volume_raises(self, mock_notify, mock_elevate,
                                  mock_event):
        mock_elevate.return_value = self.context
        fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
        instance = self._create_fake_instance_obj()
        expected_exception = test.TestingException()

        def fake_attach(*args, **kwargs):
            raise expected_exception

        with test.nested(
                mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
                                  'attach'),
                mock.patch.object(cinder.API, 'unreserve_volume'),
                mock.patch.object(objects.BlockDeviceMapping,
                                  'destroy')
        ) as (mock_attach, mock_unreserve, mock_destroy):
            mock_attach.side_effect = fake_attach
            self.assertRaises(
                test.TestingException, self.compute.attach_volume,
                self.context, instance, fake_bdm)
            self.assertTrue(mock_unreserve.called)
            self.assertTrue(mock_destroy.called)
            mock_notify.assert_has_calls([
                mock.call(self.context, instance, 'fake-mini',
                          action='volume_attach', phase='start',
                          volume_id=uuids.volume_id),
                mock.call(self.context, instance, 'fake-mini',
                          action='volume_attach', phase='error',
                          volume_id=uuids.volume_id,
                          exception=expected_exception),
            ])
            mock_event.assert_called_once_with(
                self.context, 'compute_attach_volume', CONF.host,
                instance.uuid, graceful_exit=False)

    @mock.patch('nova.context.RequestContext.elevated')
    @mock.patch('nova.compute.utils.notify_about_volume_attach_detach')
    def test_attach_volume_raises_new_flow(self, mock_notify, mock_elevate):
        mock_elevate.return_value = self.context
        attachment_id = uuids.attachment_id
        fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
        fake_bdm.attachment_id = attachment_id
        instance = self._create_fake_instance_obj()
        expected_exception = test.TestingException()

        def fake_attach(*args, **kwargs):
            raise expected_exception

        with test.nested(
                mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
                                  'attach'),
                mock.patch.object(cinder.API, 'attachment_delete'),
                mock.patch.object(objects.BlockDeviceMapping,
                                  'destroy')
        ) as (mock_attach, mock_attachment_delete, mock_destroy):
            mock_attach.side_effect = fake_attach
            self.assertRaises(
                test.TestingException, self.compute.attach_volume,
                self.context, instance, fake_bdm)
            self.assertTrue(mock_attachment_delete.called)
            self.assertTrue(mock_destroy.called)
            mock_notify.assert_has_calls([
                mock.call(self.context, instance, 'fake-mini',
                          action='volume_attach', phase='start',
                          volume_id=uuids.volume_id),
                mock.call(self.context, instance, 'fake-mini',
                          action='volume_attach', phase='error',
                          volume_id=uuids.volume_id,
                          exception=expected_exception),
            ])

    @mock.patch.object(compute_manager.LOG, 'debug')
    @mock.patch.object(compute_utils, 'EventReporter')
    @mock.patch('nova.context.RequestContext.elevated')
    @mock.patch('nova.compute.utils.notify_about_volume_attach_detach')
    def test_attach_volume_ignore_VolumeAttachmentNotFound(
            self, mock_notify, mock_elevate, mock_event, mock_debug_log):
        """Tests the scenario that the DriverVolumeBlockDevice.attach flow
        already deleted the volume attachment before the
        ComputeManager.attach_volume flow tries to rollback the attachment
        record and delete it, which raises VolumeAttachmentNotFound and is
        ignored.
        """
        mock_elevate.return_value = self.context
        attachment_id = uuids.attachment_id
        fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
        fake_bdm.attachment_id = attachment_id
        instance = self._create_fake_instance_obj()
        expected_exception = test.TestingException()

        def fake_attach(*args, **kwargs):
            raise expected_exception

        with test.nested(
                mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
                                  'attach'),
                mock.patch.object(cinder.API, 'attachment_delete'),
                mock.patch.object(objects.BlockDeviceMapping,
                                  'destroy')
        ) as (mock_attach, mock_attach_delete, mock_destroy):
            mock_attach.side_effect = fake_attach
            mock_attach_delete.side_effect = \
                exception.VolumeAttachmentNotFound(
                    attachment_id=attachment_id)
            self.assertRaises(
                test.TestingException, self.compute.attach_volume,
                self.context, instance, fake_bdm)
            mock_destroy.assert_called_once_with()
            mock_notify.assert_has_calls([
                mock.call(self.context, instance, 'fake-mini',
                          action='volume_attach', phase='start',
                          volume_id=uuids.volume_id),
                mock.call(self.context, instance, 'fake-mini',
                          action='volume_attach', phase='error',
                          volume_id=uuids.volume_id,
                          exception=expected_exception),
            ])
            mock_event.assert_called_once_with(
                self.context, 'compute_attach_volume', CONF.host,
                instance.uuid, graceful_exit=False)
            self.assertIsInstance(mock_debug_log.call_args[0][1],
                                  exception.VolumeAttachmentNotFound)

    @mock.patch.object(compute_utils, 'EventReporter')
    def test_detach_volume_api_raises(self, mock_event):
        fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
        instance = self._create_fake_instance_obj()

        with test.nested(
                mock.patch.object(driver_bdm_volume, 'driver_detach'),
                mock.patch.object(self.compute.volume_api, 'detach'),
                mock.patch.object(objects.BlockDeviceMapping,
                                  'get_by_volume_and_instance'),
                mock.patch.object(fake_bdm, 'destroy')
        ) as (mock_internal_detach, mock_detach, mock_get, mock_destroy):
            mock_detach.side_effect = test.TestingException
            mock_get.return_value = fake_bdm
            self.assertRaises(
                test.TestingException, self.compute.detach_volume,
                self.context, uuids.volume, instance, 'fake_id')
            self.assertFalse(mock_destroy.called)
            mock_event.assert_called_once_with(
                self.context, 'compute_detach_volume', CONF.host,
                instance.uuid, graceful_exit=False)

    @mock.patch.object(compute_utils, 'EventReporter')
    def test_detach_volume_bdm_destroyed(self, mock_event):
        # Assert that the BDM is destroyed given a successful call to detach
        # the volume from the instance in Cinder.
        fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
        instance = self._create_fake_instance_obj()

        with test.nested(
                mock.patch.object(driver_bdm_volume, 'driver_detach'),
                mock.patch.object(self.compute.volume_api, 'detach'),
                mock.patch.object(objects.BlockDeviceMapping,
                                  'get_by_volume_and_instance'),
                mock.patch.object(fake_bdm, 'destroy')
        ) as (mock_internal_detach, mock_detach, mock_get, mock_destroy):
            mock_get.return_value = fake_bdm
            self.compute.detach_volume(self.context, uuids.volume_id,
                                       instance, uuids.attachment_id)
            mock_detach.assert_called_once_with(mock.ANY, uuids.volume_id,
                                                instance.uuid,
                                                uuids.attachment_id)
            self.assertTrue(mock_destroy.called)
            mock_event.assert_called_once_with(
                self.context, 'compute_detach_volume', CONF.host,
                instance.uuid, graceful_exit=False)

    def test_await_block_device_created_too_slow(self):
        self.flags(block_device_allocate_retries=2)
        self.flags(block_device_allocate_retries_interval=0.1)

        def never_get(self, context, vol_id):
            return {
                'status': 'creating',
                'id': 'blah',
            }

        self.stub_out('nova.volume.cinder.API.get', never_get)
        self.assertRaises(exception.VolumeNotCreated,
                          self.compute._await_block_device_map_created,
                          self.context, '1')

    def test_await_block_device_created_failed(self):
        c = self.compute

        fake_result = {'status': 'error', 'id': 'blah'}
        with mock.patch.object(c.volume_api, 'get',
                               return_value=fake_result) as fake_get:
            self.assertRaises(exception.VolumeNotCreated,
                              c._await_block_device_map_created,
                              self.context, '1')
            fake_get.assert_called_once_with(self.context, '1')

    def test_await_block_device_created_slow(self):
        c = self.compute
        self.flags(block_device_allocate_retries=4)
        self.flags(block_device_allocate_retries_interval=0.1)

        def slow_get(cls, context, vol_id):
            if self.fetched_attempts < 2:
                self.fetched_attempts += 1
                return {
                    'status': 'creating',
                    'id': 'blah',
                }
            return {
                'status': 'available',
                'id': 'blah',
            }

        self.stub_out('nova.volume.cinder.API.get', slow_get)
        attempts = c._await_block_device_map_created(self.context, '1')
        self.assertEqual(attempts, 3)

    def test_await_block_device_created_retries_zero(self):
        c = self.compute
        self.flags(block_device_allocate_retries=0)
        self.flags(block_device_allocate_retries_interval=0.1)

        def volume_get(self, context, vol_id):
            return {
                'status': 'available',
                'id': 'blah',
            }

        self.stub_out('nova.volume.cinder.API.get', volume_get)
        attempts = c._await_block_device_map_created(self.context, '1')
        self.assertEqual(1, attempts)
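
    # NOTE: a rough sketch of the retry semantics the four tests above
    # assume (inferred from the flags they set, not from the manager code
    # itself): with block_device_allocate_retries=N and
    # block_device_allocate_retries_interval=I,
    # _await_block_device_map_created polls cinder up to max(1, N) times,
    # sleeping I seconds between polls (sleep is stubbed out in setUp), and
    # raises VolumeNotCreated if the volume is still 'creating' after the
    # last attempt.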

    def test_boot_volume_serial(self):
        with (
            mock.patch.object(objects.BlockDeviceMapping, 'save')
        ) as mock_save:
            block_device_mapping = [
                block_device.BlockDeviceDict({
                    'id': 1,
                    'no_device': None,
                    'source_type': 'volume',
                    'destination_type': 'volume',
                    'snapshot_id': None,
                    'volume_id': uuids.volume_id,
                    'device_name': '/dev/vdb',
                    'volume_size': 55,
                    'delete_on_termination': False,
                    'attachment_id': None,
                })]
            bdms = block_device_obj.block_device_make_list_from_dicts(
                self.context, block_device_mapping)
            prepped_bdm = self.compute._prep_block_device(
                self.context, self.instance_object, bdms)
            self.assertEqual(2, mock_save.call_count)
            volume_driver_bdm = prepped_bdm['block_device_mapping'][0]
            self.assertEqual(volume_driver_bdm['connection_info']['serial'],
                             uuids.volume_id)

    def test_boot_volume_metadata(self, metadata=True):
        def volume_api_get(*args, **kwargs):
            if metadata:
                return {
                    'size': 1,
                    'volume_image_metadata': {'vol_test_key': 'vol_test_value',
                                              'min_ram': u'128',
                                              'min_disk': u'256',
                                              'size': u'536870912'
                                              },
                }
            else:
                return {}

        self.stub_out('nova.volume.cinder.API.get', volume_api_get)

        expected_no_metadata = {'min_disk': 0, 'min_ram': 0, 'properties': {},
                                'size': 0, 'status': 'active'}

        block_device_mapping = [{
            'id': 1,
            'device_name': 'vda',
            'no_device': None,
            'virtual_name': None,
            'snapshot_id': None,
            'volume_id': uuids.volume_id,
            'delete_on_termination': False,
        }]

        image_meta = block_device.get_bdm_image_metadata(
            self.context, self.compute_api.image_api,
            self.compute_api.volume_api, block_device_mapping)
        if metadata:
            self.assertEqual(image_meta['properties']['vol_test_key'],
                             'vol_test_value')
            self.assertEqual(128, image_meta['min_ram'])
            self.assertEqual(256, image_meta['min_disk'])
            self.assertEqual(units.Gi, image_meta['size'])
        else:
            self.assertEqual(expected_no_metadata, image_meta)

        # Test it with new-style BDMs
        block_device_mapping = [{
            'boot_index': 0,
            'source_type': 'volume',
            'destination_type': 'volume',
            'volume_id': uuids.volume_id,
            'delete_on_termination': False,
        }]

        image_meta = block_device.get_bdm_image_metadata(
            self.context, self.compute_api.image_api,
            self.compute_api.volume_api, block_device_mapping,
            legacy_bdm=False)
        if metadata:
            self.assertEqual(image_meta['properties']['vol_test_key'],
                             'vol_test_value')
            self.assertEqual(128, image_meta['min_ram'])
            self.assertEqual(256, image_meta['min_disk'])
            self.assertEqual(units.Gi, image_meta['size'])
        else:
            self.assertEqual(expected_no_metadata, image_meta)

    def test_boot_volume_no_metadata(self):
        self.test_boot_volume_metadata(metadata=False)

    def test_boot_image_metadata(self, metadata=True):
        def image_api_get(*args, **kwargs):
            if metadata:
                return {
                    'properties': {'img_test_key': 'img_test_value'}
                }
            else:
                return {}

        self.stub_out('nova.image.glance.API.get', image_api_get)

        block_device_mapping = [{
            'boot_index': 0,
            'source_type': 'image',
            'destination_type': 'local',
            'image_id': uuids.image,
            'delete_on_termination': True,
        }]

        image_meta = block_device.get_bdm_image_metadata(
            self.context, self.compute_api.image_api,
            self.compute_api.volume_api, block_device_mapping,
            legacy_bdm=False)

        if metadata:
            self.assertEqual('img_test_value',
                             image_meta['properties']['img_test_key'])
        else:
            self.assertEqual(image_meta, {})

    def test_boot_image_no_metadata(self):
        self.test_boot_image_metadata(metadata=False)

    @mock.patch.object(objects.InstanceList, 'get_by_host')
    @mock.patch.object(objects.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    def test_get_host_volume_bdms(self, mock_get_by_inst, mock_get_by_host):
        fake_instance = mock.Mock(uuid=uuids.volume_instance)
        mock_get_by_host.return_value = [fake_instance]

        volume_bdm = mock.Mock(id=1, is_volume=True)
        not_volume_bdm = mock.Mock(id=2, is_volume=False)
        mock_get_by_inst.return_value = [volume_bdm, not_volume_bdm]

        expected_host_bdms = [{'instance': fake_instance,
                               'instance_bdms': [volume_bdm]}]

        got_host_bdms = self.compute._get_host_volume_bdms('fake-context')
        mock_get_by_host.assert_called_once_with('fake-context',
                                                 self.compute.host,
                                                 use_slave=False)
        mock_get_by_inst.assert_called_once_with('fake-context',
                                                 uuids.volume_instance,
                                                 use_slave=False)
        self.assertEqual(expected_host_bdms, got_host_bdms)

    @mock.patch.object(utils, 'last_completed_audit_period')
    @mock.patch.object(compute_manager.ComputeManager,
                       '_get_host_volume_bdms')
    def test_poll_volume_usage_disabled(self, mock_get, mock_last):
        # None of the mocks should be called.
        ctxt = 'MockContext'

        self.flags(volume_usage_poll_interval=0)
        self.compute._poll_volume_usage(ctxt)

        self.assertFalse(mock_get.called)
        self.assertFalse(mock_last.called)

    @mock.patch.object(compute_manager.ComputeManager,
                       '_get_host_volume_bdms')
    @mock.patch.object(fake.FakeDriver, 'get_all_volume_usage')
    def test_poll_volume_usage_returns_no_vols(self, mock_get_usage,
                                               mock_get_bdms):
        ctxt = 'MockContext'
        # Following methods are called.
        mock_get_bdms.return_value = []

        self.flags(volume_usage_poll_interval=10)
        self.compute._poll_volume_usage(ctxt)

        mock_get_bdms.assert_called_once_with(ctxt, use_slave=True)

    @mock.patch.object(compute_utils, 'notify_about_volume_usage')
    @mock.patch.object(compute_manager.ComputeManager,
                       '_get_host_volume_bdms')
    @mock.patch.object(fake.FakeDriver, 'get_all_volume_usage')
    def test_poll_volume_usage_with_data(self, mock_get_usage, mock_get_bdms,
                                         mock_notify):
        # All the mocks are called
        mock_get_bdms.return_value = [1, 2]
        mock_get_usage.return_value = [
            {'volume': uuids.volume,
             'instance': self.instance_object,
             'rd_req': 100,
             'rd_bytes': 500,
             'wr_req': 25,
             'wr_bytes': 75}]

        self.flags(volume_usage_poll_interval=10)
        self.compute._poll_volume_usage(self.context)

        mock_get_bdms.assert_called_once_with(self.context, use_slave=True)
        mock_notify.assert_called_once_with(
            self.context, test.MatchType(objects.VolumeUsage),
            self.compute.host)

    @mock.patch.object(compute_utils, 'notify_about_volume_usage')
    @mock.patch('nova.context.RequestContext.elevated')
    @mock.patch('nova.compute.utils.notify_about_volume_attach_detach')
    @mock.patch.object(objects.BlockDeviceMapping,
                       'get_by_volume_and_instance')
    @mock.patch.object(fake.FakeDriver, 'block_stats')
    @mock.patch.object(compute_manager.ComputeManager,
                       '_get_host_volume_bdms')
    @mock.patch.object(fake.FakeDriver, 'get_all_volume_usage')
    @mock.patch.object(fake.FakeDriver, 'instance_exists')
    def test_detach_volume_usage(self, mock_exists, mock_get_all,
                                 mock_get_bdms, mock_stats, mock_get,
                                 mock_notify, mock_elevate,
                                 mock_notify_usage):
        mock_elevate.return_value = self.context
        # Test that detach volume updates the volume usage cache table
        # correctly.
        instance = self._create_fake_instance_obj()
        bdm = objects.BlockDeviceMapping(context=self.context,
                                         id=1, device_name='/dev/vdb',
                                         connection_info='{}',
                                         instance_uuid=instance['uuid'],
                                         source_type='volume',
                                         destination_type='volume',
                                         no_device=False,
                                         disk_bus='foo',
                                         device_type='disk',
                                         volume_size=1,
                                         volume_id=uuids.volume_id,
                                         attachment_id=None)
        host_volume_bdms = {'id': 1, 'device_name': '/dev/vdb',
                            'connection_info': '{}',
                            'instance_uuid': instance['uuid'],
                            'volume_id': uuids.volume_id}
        mock_get.return_value = bdm.obj_clone()
        mock_stats.return_value = [1, 30, 1, 20, None]
        mock_get_bdms.return_value = host_volume_bdms
        mock_get_all.return_value = [{'volume': uuids.volume_id,
                                      'rd_req': 1,
                                      'rd_bytes': 10,
                                      'wr_req': 1,
                                      'wr_bytes': 5,
                                      'instance': instance}]
        mock_exists.return_value = True

        def fake_get_volume_encryption_metadata(self, context, volume_id):
            return {}

        self.stub_out('nova.volume.cinder.API.get_volume_encryption_metadata',
                      fake_get_volume_encryption_metadata)

        self.compute.attach_volume(self.context, instance, bdm)

        # Poll volume usage & then detach the volume. This will update the
        # total fields in the volume usage cache.
        self.flags(volume_usage_poll_interval=10)
        self.compute._poll_volume_usage(self.context)
        # Check that a volume.usage and volume.attach notification was sent
        self.assertEqual(2, len(self.notifier.notifications))

        self.compute.detach_volume(self.context, uuids.volume_id, instance,
                                   'attach-id')

        # Check that volume.attach, 2 volume.usage, and volume.detach
        # notifications were sent
        self.assertEqual(4, len(self.notifier.notifications))
        msg = self.notifier.notifications[0]
        self.assertEqual('compute.instance.volume.attach', msg.event_type)
        msg = self.notifier.notifications[2]
        self.assertEqual('volume.usage', msg.event_type)
        payload = msg.payload
        self.assertEqual(instance['uuid'], payload['instance_id'])
        self.assertEqual('fake', payload['user_id'])
        self.assertEqual('fake', payload['tenant_id'])
        self.assertEqual(1, payload['reads'])
        self.assertEqual(30, payload['read_bytes'])
        self.assertEqual(1, payload['writes'])
        self.assertEqual(20, payload['write_bytes'])
        self.assertIsNone(payload['availability_zone'])
        msg = self.notifier.notifications[3]
        self.assertEqual('compute.instance.volume.detach', msg.event_type)

        mock_notify_usage.assert_has_calls([
            mock.call(self.context, test.MatchType(objects.VolumeUsage),
                      self.compute.host),
            mock.call(self.context, test.MatchType(objects.VolumeUsage),
                      self.compute.host)])
        self.assertEqual(2, mock_notify_usage.call_count)
        # Check the database for the accumulated volume usage totals.
        volume_usages = db.vol_get_usage_by_time(self.context, 0)
        self.assertEqual(1, len(volume_usages))
        volume_usage = volume_usages[0]
        self.assertEqual(0, volume_usage['curr_reads'])
        self.assertEqual(0, volume_usage['curr_read_bytes'])
        self.assertEqual(0, volume_usage['curr_writes'])
        self.assertEqual(0, volume_usage['curr_write_bytes'])
        self.assertEqual(1, volume_usage['tot_reads'])
        self.assertEqual(30, volume_usage['tot_read_bytes'])
        self.assertEqual(1, volume_usage['tot_writes'])
        self.assertEqual(20, volume_usage['tot_write_bytes'])

        mock_notify.assert_has_calls([
            mock.call(self.context, instance, 'fake-mini',
                      action='volume_attach', phase='start',
                      volume_id=uuids.volume_id),
            mock.call(self.context, instance, 'fake-mini',
                      action='volume_attach', phase='end',
                      volume_id=uuids.volume_id),
            mock.call(self.context, instance, 'fake-mini',
                      action='volume_detach', phase='start',
                      volume_id=uuids.volume_id),
            mock.call(self.context, instance, 'fake-mini',
                      action='volume_detach', phase='end',
                      volume_id=uuids.volume_id),
        ])
        mock_get.assert_called_once_with(self.context, uuids.volume_id,
                                         instance.uuid)
        mock_stats.assert_called_once_with(instance, 'vdb')
        mock_get_bdms.assert_called_once_with(self.context, use_slave=True)
        mock_get_all.assert_called_once_with(self.context, host_volume_bdms)
        mock_exists.assert_called_once_with(mock.ANY)

    def test_prepare_image_mapping(self):
        swap_size = 1
        ephemeral_size = 1
        instance_type = {'swap': swap_size,
                         'ephemeral_gb': ephemeral_size}
        mappings = [
            {'virtual': 'ami', 'device': 'sda1'},
            {'virtual': 'root', 'device': '/dev/sda1'},
            {'virtual': 'swap', 'device': 'sdb4'},
            {'virtual': 'ephemeral0', 'device': 'sdc1'},
            {'virtual': 'ephemeral1', 'device': 'sdc2'},
        ]

        preped_bdm = self.compute_api._prepare_image_mapping(
            instance_type, mappings)

        expected_result = [
            {
                'device_name': '/dev/sdb4',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': 'swap',
                'boot_index': -1,
                'volume_size': swap_size
            },
            {
                'device_name': '/dev/sdc1',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': CONF.default_ephemeral_format,
                'boot_index': -1,
                'volume_size': ephemeral_size
            },
            {
                'device_name': '/dev/sdc2',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': CONF.default_ephemeral_format,
                'boot_index': -1,
                'volume_size': ephemeral_size
            }
        ]

        for expected, got in zip(expected_result, preped_bdm):
            self.assertThat(expected, matchers.IsSubDictOf(got))

    def test_validate_bdm(self):
        def fake_get(self, context, res_id):
            return {'id': res_id, 'size': 4, 'multiattach': False}

        def fake_attachment_create(_self, ctxt, vol_id, *args, **kwargs):
            # Return a unique attachment id per volume.
            return {'id': getattr(uuids, vol_id)}

        self.stub_out('nova.volume.cinder.API.get', fake_get)
        self.stub_out('nova.volume.cinder.API.get_snapshot', fake_get)
        self.stub_out('nova.volume.cinder.API.attachment_create',
                      fake_attachment_create)

        volume_id = '55555555-aaaa-bbbb-cccc-555555555555'
        snapshot_id = '66666666-aaaa-bbbb-cccc-555555555555'
        image_id = '77777777-aaaa-bbbb-cccc-555555555555'

        instance = self._create_fake_instance_obj()
        instance_type = {'swap': 1, 'ephemeral_gb': 2}
        mappings = [
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/sdb4',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': 'swap',
                'boot_index': -1,
                'volume_size': 1
            }, anon=True),
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/sda1',
                'source_type': 'volume',
                'destination_type': 'volume',
                'device_type': 'disk',
                'volume_id': volume_id,
                'guest_format': None,
                'boot_index': 1,
            }, anon=True),
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/sda2',
                'source_type': 'snapshot',
                'destination_type': 'volume',
                'snapshot_id': snapshot_id,
                'device_type': 'disk',
                'guest_format': None,
                'volume_size': 6,
                'boot_index': 0,
            }, anon=True),
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/sda3',
                'source_type': 'image',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': 2,
                'volume_size': 1
            }, anon=True)
        ]
        mappings = block_device_obj.block_device_make_list_from_dicts(
            self.context, mappings)

        # Make sure it passes at first
        volumes = {
            volume_id: fake_get(None, None, volume_id)
        }
        self.compute_api._validate_bdm(self.context, instance,
                                       instance_type, mappings, {},
                                       volumes)
        self.assertEqual(4, mappings[1].volume_size)
        self.assertEqual(6, mappings[2].volume_size)

        # Boot sequence
        mappings[2].boot_index = 2
        self.assertRaises(exception.InvalidBDMBootSequence,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings, {}, volumes)
        mappings[2].boot_index = 0

        # number of local block_devices
        self.flags(max_local_block_devices=1)
        self.assertRaises(exception.InvalidBDMLocalsLimit,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings, {}, volumes)

        ephemerals = [
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/vdb',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': -1,
                'volume_size': 1
            }, anon=True),
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/vdc',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': -1,
                'volume_size': 1
            }, anon=True)
        ]
        ephemerals = block_device_obj.block_device_make_list_from_dicts(
            self.context, ephemerals)

        self.flags(max_local_block_devices=4)
        # More ephemerals are OK as long as they are not over the size limit
        mappings_ = mappings[:]
        mappings_.objects.extend(ephemerals)
        self.compute_api._validate_bdm(self.context, instance,
                                       instance_type, mappings_, {},
                                       volumes)

        # Ephemerals over the size limit
        ephemerals[0].volume_size = 3
        mappings_ = mappings[:]
        mappings_.objects.extend(ephemerals)
        self.assertRaises(exception.InvalidBDMEphemeralSize,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings_, {}, volumes)

        # Swap over the size limit
        mappings[0].volume_size = 3
        self.assertRaises(exception.InvalidBDMSwapSize,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings, {}, volumes)
        mappings[0].volume_size = 1

        additional_swap = [
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/vdb',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': 'swap',
                'boot_index': -1,
                'volume_size': 1
            }, anon=True)
        ]
        additional_swap = block_device_obj.block_device_make_list_from_dicts(
            self.context, additional_swap)

        # More than one swap
        mappings_ = mappings[:]
        mappings_.objects.extend(additional_swap)
        self.assertRaises(exception.InvalidBDMFormat,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings_, {}, volumes)

        image_no_size = [
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/sda4',
                'source_type': 'image',
                'image_id': image_id,
                'destination_type': 'volume',
                'boot_index': -1,
                'volume_size': None,
            }, anon=True)
        ]
        image_no_size = block_device_obj.block_device_make_list_from_dicts(
            self.context, image_no_size)
        mappings_ = mappings[:]
        mappings_.objects.extend(image_no_size)
        self.assertRaises(exception.InvalidBDM,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings_, {}, volumes)

        # blank device without a specified size fails
        blank_no_size = [
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/sda4',
                'source_type': 'blank',
                'destination_type': 'volume',
                'boot_index': -1,
                'volume_size': None,
            }, anon=True)
        ]
        blank_no_size = block_device_obj.block_device_make_list_from_dicts(
            self.context, blank_no_size)
        mappings_ = mappings[:]
        mappings_.objects.extend(blank_no_size)
        self.assertRaises(exception.InvalidBDM,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings_, {}, volumes)

    def test_validate_bdm_with_more_than_one_default(self):
        instance_type = {'swap': 1, 'ephemeral_gb': 1}
        all_mappings = [fake_block_device.FakeDbBlockDeviceDict({
                            'id': 1,
                            'no_device': None,
                            'source_type': 'volume',
                            'destination_type': 'volume',
                            'snapshot_id': None,
                            'volume_size': 1,
                            'device_name': 'vda',
                            'boot_index': 0,
                            'delete_on_termination': False}, anon=True),
                        fake_block_device.FakeDbBlockDeviceDict({
                            'device_name': '/dev/vdb',
                            'source_type': 'blank',
                            'destination_type': 'local',
                            'device_type': 'disk',
                            'volume_size': None,
                            'boot_index': -1}, anon=True),
                        fake_block_device.FakeDbBlockDeviceDict({
                            'device_name': '/dev/vdc',
                            'source_type': 'blank',
                            'destination_type': 'local',
                            'device_type': 'disk',
                            'volume_size': None,
                            'boot_index': -1}, anon=True)]
        all_mappings = block_device_obj.block_device_make_list_from_dicts(
            self.context, all_mappings)
        image_cache = volumes = {}

        self.assertRaises(exception.InvalidBDMEphemeralSize,
                          self.compute_api._validate_bdm,
                          self.context, self.instance,
                          instance_type, all_mappings, image_cache, volumes)

    @mock.patch.object(cinder.API, 'attachment_create',
                       side_effect=exception.InvalidVolume(reason='error'))
    def test_validate_bdm_media_service_invalid_volume(self, mock_att_create):
        volume_id = uuids.volume_id
        instance_type = {'swap': 1, 'ephemeral_gb': 1}
        bdms = [fake_block_device.FakeDbBlockDeviceDict({
                    'id': 1,
                    'no_device': None,
                    'source_type': 'volume',
                    'destination_type': 'volume',
                    'snapshot_id': None,
                    'volume_id': volume_id,
                    'device_name': 'vda',
                    'boot_index': 0,
                    'delete_on_termination': False}, anon=True)]
        bdms = block_device_obj.block_device_make_list_from_dicts(
            self.context, bdms)

        # We test a list of invalid status values that should result
        # in an InvalidVolume exception being raised.
        status_values = (
            # First two check that the status is 'available'.
            ('creating', 'detached'),
            ('error', 'detached'),
            # Checks that the attach_status is 'detached'.
            ('available', 'attached')
        )

        for status, attach_status in status_values:
            if attach_status == 'attached':
                volumes = {volume_id: {'id': volume_id,
                                       'status': status,
                                       'attach_status': attach_status,
                                       'multiattach': False,
                                       'attachments': {}}}
            else:
                volumes = {volume_id: {'id': volume_id,
                                       'status': status,
                                       'attach_status': attach_status,
                                       'multiattach': False}}

            self.assertRaises(exception.InvalidVolume,
                              self.compute_api._validate_bdm,
                              self.context, self.instance_object,
                              instance_type, bdms, {}, volumes)

    @mock.patch.object(cinder.API, 'check_availability_zone')
    @mock.patch.object(cinder.API, 'attachment_create',
                       return_value={'id': uuids.attachment_id})
    def test_validate_bdm_media_service_valid(self, mock_att_create,
                                              mock_check_av_zone):
        volume_id = uuids.volume_id
        instance_type = {'swap': 1, 'ephemeral_gb': 1}
        bdms = [fake_block_device.FakeDbBlockDeviceDict({
                    'id': 1,
                    'no_device': None,
                    'source_type': 'volume',
                    'destination_type': 'volume',
                    'snapshot_id': None,
                    'volume_id': volume_id,
                    'device_name': 'vda',
                    'boot_index': 0,
                    'delete_on_termination': False}, anon=True)]
        bdms = block_device_obj.block_device_make_list_from_dicts(
            self.context, bdms)
        volume = {'id': volume_id,
                  'status': 'available',
                  'attach_status': 'detached',
                  'multiattach': False}
        image_cache = {}
        volumes = {volume_id: volume}

        self.compute_api._validate_bdm(self.context, self.instance_object,
                                       instance_type, bdms, image_cache,
                                       volumes)
        mock_check_av_zone.assert_not_called()
        mock_att_create.assert_called_once_with(
            self.context, volume_id, self.instance_object.uuid)

    def test_volume_snapshot_create(self):
        self.assertRaises(messaging.ExpectedException,
                          self.compute.volume_snapshot_create, self.context,
                          self.instance_object, 'fake_id', {})

        self.compute = utils.ExceptionHelper(self.compute)

        self.assertRaises(NotImplementedError,
                          self.compute.volume_snapshot_create, self.context,
                          self.instance_object, 'fake_id', {})
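
    # NOTE: in the test above, the first call goes through the RPC
    # expected-exception wrapper, so the driver's NotImplementedError
    # surfaces as messaging.ExpectedException; wrapping the manager in
    # utils.ExceptionHelper (assumed behavior, mirroring how these tests use
    # it) re-raises the original exception instead, e.g.:
    #
    #     compute = utils.ExceptionHelper(self.compute)
    #     compute.volume_snapshot_create(...)  # raises NotImplementedError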

    @mock.patch.object(compute_manager.LOG, 'debug')
    def test_volume_snapshot_create_instance_not_running(self, mock_debug):
        with mock.patch.object(self.compute.driver,
                               'volume_snapshot_create') as mock_create:
            exc = exception.InstanceNotRunning(instance_id=uuids.instance)
            mock_create.side_effect = exc
            self.compute.volume_snapshot_create(self.context,
                                                self.instance_object,
                                                uuids.volume, {})
            mock_debug.assert_called_once_with('Instance disappeared during '
                                               'volume snapshot create',
                                               instance=self.instance_object)

    def test_volume_snapshot_delete(self):
        self.assertRaises(messaging.ExpectedException,
                          self.compute.volume_snapshot_delete, self.context,
                          self.instance_object, uuids.volume,
                          uuids.snapshot, {})

        self.compute = utils.ExceptionHelper(self.compute)

        self.assertRaises(NotImplementedError,
                          self.compute.volume_snapshot_delete, self.context,
                          self.instance_object, uuids.volume,
                          uuids.snapshot, {})

    @mock.patch.object(compute_manager.LOG, 'debug')
    def test_volume_snapshot_delete_instance_not_running(self, mock_debug):
        with mock.patch.object(self.compute.driver,
                               'volume_snapshot_delete') as mock_delete:
            exc = exception.InstanceNotRunning(instance_id=uuids.instance)
            mock_delete.side_effect = exc
            self.compute.volume_snapshot_delete(self.context,
                                                self.instance_object,
                                                uuids.volume,
                                                uuids.snapshot, {})
            mock_debug.assert_called_once_with('Instance disappeared during '
                                               'volume snapshot delete',
                                               instance=self.instance_object)

    @mock.patch.object(cinder.API, 'create',
                       side_effect=exception.OverQuota(overs='something'))
    def test_prep_block_device_over_quota_failure(self, mock_create):
        instance = self._create_fake_instance_obj()
        bdms = [
            block_device.BlockDeviceDict({
                'boot_index': 0,
                'guest_format': None,
                'connection_info': None,
                'device_type': u'disk',
                'source_type': 'image',
                'destination_type': 'volume',
                'volume_size': 1,
                'image_id': 1,
                'device_name': '/dev/vdb',
            })]
        bdms = block_device_obj.block_device_make_list_from_dicts(
            self.context, bdms)
        self.assertRaises(exception.OverQuota,
                          compute_manager.ComputeManager()._prep_block_device,
                          self.context, instance, bdms)
        self.assertTrue(mock_create.called)

    @mock.patch.object(nova.virt.block_device, 'get_swap')
    @mock.patch.object(nova.virt.block_device, 'convert_blanks')
    @mock.patch.object(nova.virt.block_device, 'convert_images')
    @mock.patch.object(nova.virt.block_device, 'convert_snapshots')
    @mock.patch.object(nova.virt.block_device, 'convert_volumes')
    @mock.patch.object(nova.virt.block_device, 'convert_ephemerals')
    @mock.patch.object(nova.virt.block_device, 'convert_swap')
    @mock.patch.object(nova.virt.block_device, 'attach_block_devices')
    def test_prep_block_device_with_blanks(self, attach_block_devices,
                                           convert_swap, convert_ephemerals,
                                           convert_volumes, convert_snapshots,
                                           convert_images, convert_blanks,
                                           get_swap):
        instance = self._create_fake_instance_obj()
        instance['root_device_name'] = '/dev/vda'
        root_volume = objects.BlockDeviceMapping(
            **fake_block_device.FakeDbBlockDeviceDict({
                'instance_uuid': uuids.block_device_instance,
                'source_type': 'image',
                'destination_type': 'volume',
                'image_id': uuids.image,
                'volume_size': 1,
                'boot_index': 0}))
        blank_volume1 = objects.BlockDeviceMapping(
            **fake_block_device.FakeDbBlockDeviceDict({
                'instance_uuid': uuids.block_device_instance,
                'source_type': 'blank',
                'destination_type': 'volume',
                'volume_size': 1,
                'boot_index': 1}))
        blank_volume2 = objects.BlockDeviceMapping(
            **fake_block_device.FakeDbBlockDeviceDict({
                'instance_uuid': uuids.block_device_instance,
                'source_type': 'blank',
                'destination_type': 'volume',
                'volume_size': 1,
                'boot_index': 2}))
        bdms = [blank_volume1, blank_volume2, root_volume]

        def fake_attach_block_devices(bdm, *args, **kwargs):
            return bdm

        convert_swap.return_value = []
        convert_ephemerals.return_value = []
        convert_volumes.return_value = [blank_volume1, blank_volume2]
        convert_snapshots.return_value = []
        convert_images.return_value = [root_volume]
        convert_blanks.return_value = []
        attach_block_devices.side_effect = fake_attach_block_devices
        get_swap.return_value = []

        expected_block_device_info = {
            'root_device_name': '/dev/vda',
            'swap': [],
            'ephemerals': [],
            'block_device_mapping': bdms
        }

        manager = compute_manager.ComputeManager()
        manager.use_legacy_block_device_info = False
        mock_bdm_saves = [mock.patch.object(bdm, 'save') for bdm in bdms]
        with test.nested(*mock_bdm_saves):
            block_device_info = manager._prep_block_device(self.context,
                                                           instance, bdms)

            for bdm in bdms:
                bdm.save.assert_called_once_with()
                self.assertIsNotNone(bdm.device_name)

        convert_swap.assert_called_once_with(bdms)
        convert_ephemerals.assert_called_once_with(bdms)
        bdm_args = tuple(bdms)
        convert_volumes.assert_called_once_with(bdm_args)
        convert_snapshots.assert_called_once_with(bdm_args)
        convert_images.assert_called_once_with(bdm_args)
        convert_blanks.assert_called_once_with(bdm_args)
        self.assertEqual(expected_block_device_info, block_device_info)
        self.assertEqual(1, attach_block_devices.call_count)
        get_swap.assert_called_once_with([])


class ComputeTestCase(BaseTestCase,
                      test_diagnostics.DiagnosticsComparisonMixin,
                      fake_resource_tracker.RTMockMixin):

    def setUp(self):
        # This needs to go before we call setUp because the thread pool
        # executor is created in ComputeManager.__init__, which is called
        # during setUp.
        self.useFixture(fixtures.SynchronousThreadPoolExecutorFixture())
        super(ComputeTestCase, self).setUp()

        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        self.image_api = image_api.API()

        self.default_flavor = objects.Flavor.get_by_name(self.context,
                                                         'm1.small')
        self.tiny_flavor = objects.Flavor.get_by_name(self.context,
                                                      'm1.tiny')

    def test_wrap_instance_fault(self):
        inst = {"uuid": uuids.instance}

        called = {'fault_added': False}

        def did_it_add_fault(*args):
            called['fault_added'] = True

        self.stub_out('nova.compute.utils.add_instance_fault_from_exc',
                      did_it_add_fault)

        @compute_manager.wrap_instance_fault
        def failer(self2, context, instance):
            raise NotImplementedError()

        self.assertRaises(NotImplementedError, failer,
                          self.compute, self.context, instance=inst)

        self.assertTrue(called['fault_added'])

    def test_wrap_instance_fault_instance_in_args(self):
        inst = {"uuid": uuids.instance}

        called = {'fault_added': False}

        def did_it_add_fault(*args):
            called['fault_added'] = True

        self.stub_out('nova.compute.utils.add_instance_fault_from_exc',
                      did_it_add_fault)

        @compute_manager.wrap_instance_fault
        def failer(self2, context, instance):
            raise NotImplementedError()

        self.assertRaises(NotImplementedError, failer,
                          self.compute, self.context, inst)

        self.assertTrue(called['fault_added'])

    def test_wrap_instance_fault_no_instance(self):
        inst = {"uuid": uuids.instance}

        called = {'fault_added': False}

        def did_it_add_fault(*args):
            called['fault_added'] = True

        self.stub_out('nova.utils.add_instance_fault_from_exc',
                      did_it_add_fault)

        @compute_manager.wrap_instance_fault
        def failer(self2, context, instance):
            raise exception.InstanceNotFound(instance_id=instance['uuid'])

        self.assertRaises(exception.InstanceNotFound, failer,
                          self.compute, self.context, inst)

        self.assertFalse(called['fault_added'])

    def test_create_instance_with_img_ref_associates_config_drive(self):
        # Make sure create associates a config drive.
        instance = self._create_fake_instance_obj(
            params={'config_drive': '1234', })

        try:
            self.compute.build_and_run_instance(self.context, instance, {},
                                                {}, {}, [],
                                                block_device_mapping=[])
            instances = db.instance_get_all(self.context)
            instance = instances[0]

            self.assertTrue(instance['config_drive'])
        finally:
            db.instance_destroy(self.context, instance['uuid'])

    def test_create_instance_associates_config_drive(self):
        # Make sure create associates a config drive.
        instance = self._create_fake_instance_obj(
            params={'config_drive': '1234', })

        try:
            self.compute.build_and_run_instance(self.context, instance, {},
                                                {}, {}, [],
                                                block_device_mapping=[])
            instances = db.instance_get_all(self.context)
            instance = instances[0]

            self.assertTrue(instance['config_drive'])
        finally:
            db.instance_destroy(self.context, instance['uuid'])

    def test_create_instance_unlimited_memory(self):
        # Default of memory limit=None is unlimited.
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated(), NODENAME)
        params = {"flavor": {"memory_mb": 999999999999}}
        filter_properties = {'limits': {'memory_mb': None}}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            filter_properties, [],
                                            block_device_mapping=[])
        cn = self.rt.compute_nodes[NODENAME]
        self.assertEqual(999999999999, cn.memory_mb_used)

    def test_create_instance_unlimited_disk(self):
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated(), NODENAME)
        params = {"root_gb": 999999999999,
                  "ephemeral_gb": 99999999999}
        filter_properties = {'limits': {'disk_gb': None}}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            filter_properties, [],
                                            block_device_mapping=[])

    def test_create_multiple_instances_then_starve(self):
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated(), NODENAME)
        limits = {'memory_mb': 4096, 'disk_gb': 1000}
        params = {"flavor": {"memory_mb": 1024, "root_gb": 128,
                             "ephemeral_gb": 128}}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, [], block_device_mapping=[],
                                            limits=limits)
        cn = self.rt.compute_nodes[NODENAME]
        self.assertEqual(1024, cn.memory_mb_used)
        self.assertEqual(256, cn.local_gb_used)

        params = {"flavor": {"memory_mb": 2048, "root_gb": 256,
                             "ephemeral_gb": 256}}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, [], block_device_mapping=[],
                                            limits=limits)
        self.assertEqual(3072, cn.memory_mb_used)
        self.assertEqual(768, cn.local_gb_used)

        params = {"flavor": {"memory_mb": 8192, "root_gb": 8192,
                             "ephemeral_gb": 8192}}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance,
                                            {}, {}, {}, [],
                                            block_device_mapping=[],
                                            limits=limits)
        # NOTE(danms): Since we no longer claim memory and disk, this should
        # complete normally. In reality, this would have been rejected by
        # placement/scheduler before the instance got here.
        self.assertEqual(11264, cn.memory_mb_used)
        self.assertEqual(17152, cn.local_gb_used)

    def test_create_multiple_instance_with_neutron_port(self):
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(port_id=uuids.port_instance)])
        self.assertRaises(exception.MultiplePortsNotApplicable,
                          self.compute_api.create,
                          self.context,
                          instance_type=self.default_flavor,
                          image_href=None,
                          max_count=2,
                          requested_networks=requested_networks)

    def test_create_instance_with_oversubscribed_ram(self):
        # Test passing of oversubscribed ram policy from the scheduler.

        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated(), NODENAME)

        # get total memory as reported by virt driver:
        resources = self.compute.driver.get_available_resource(NODENAME)
        total_mem_mb = resources['memory_mb']

        oversub_limit_mb = total_mem_mb * 1.5
        instance_mb = int(total_mem_mb * 1.45)

        # build an instance, specifying an amount of memory that exceeds
        # total_mem_mb, but is less than the oversubscribed limit:
        params = {"flavor": {"memory_mb": instance_mb, "root_gb": 128,
                             "ephemeral_gb": 128}}
        instance = self._create_fake_instance_obj(params)

        limits = {'memory_mb': oversub_limit_mb}
        filter_properties = {'limits': limits}
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            filter_properties, [],
                                            block_device_mapping=[])

        cn = self.rt.compute_nodes[NODENAME]
        self.assertEqual(instance_mb, cn.memory_mb_used)

    def test_create_instance_with_oversubscribed_ram_fail(self):
        """Test passing of oversubscribed ram policy from the scheduler, but
        with insufficient memory.
        """
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated(), NODENAME)

        # get total memory as reported by virt driver:
        resources = self.compute.driver.get_available_resource(NODENAME)
        total_mem_mb = resources['memory_mb']

        oversub_limit_mb = total_mem_mb * 1.5
        instance_mb = int(total_mem_mb * 1.55)

        # build an instance, specifying an amount of memory that exceeds
        # both total_mem_mb and the oversubscribed limit:
        params = {"flavor": {"memory_mb": instance_mb, "root_gb": 128,
                             "ephemeral_gb": 128}}
        instance = self._create_fake_instance_obj(params)

        filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}

        self.compute.build_and_run_instance(self.context, instance,
                                            {}, {}, filter_properties, [],
                                            block_device_mapping=[])

    def test_create_instance_with_oversubscribed_disk(self):
        # Test passing of oversubscribed disk policy from the scheduler.

        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated(), NODENAME)
        # get total disk as reported by virt driver:
resources = self.compute.driver.get_available_resource(NODENAME)
total_disk_gb = resources['local_gb']
oversub_limit_gb = total_disk_gb * 1.5
instance_gb = int(total_disk_gb * 1.45)
# build an instance, specifying an amount of disk that exceeds
# total_disk_gb, but is less than the oversubscribed limit:
params = {"flavor": {"root_gb": instance_gb, "memory_mb": 10}}
instance = self._create_fake_instance_obj(params)
limits = {'disk_gb': oversub_limit_gb}
filter_properties = {'limits': limits}
self.compute.build_and_run_instance(self.context, instance, {}, {},
filter_properties, [], block_device_mapping=[])
cn = self.rt.compute_nodes[NODENAME]
self.assertEqual(instance_gb, cn.local_gb_used)
def test_create_instance_without_node_param(self):
instance = self._create_fake_instance_obj({'node': None})
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
[], block_device_mapping=[])
instances = db.instance_get_all(self.context)
instance = instances[0]
self.assertEqual(NODENAME, instance['node'])
def test_create_instance_no_image(self):
# Create instance with no image provided.
params = {'image_ref': ''}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
[], block_device_mapping=[])
self._assert_state({'vm_state': vm_states.ACTIVE,
'task_state': None})
@testtools.skipIf(test_utils.is_osx(),
'IPv6 pretty-printing broken on OSX, see bug 1409135')
def test_default_access_ip(self):
self.flags(default_access_ip_network_name='test1')
fake_network.unset_stub_network_methods(self)
instance = self._create_fake_instance_obj()
orig_update = self.compute._instance_update
# Stub out allocate_for_instance to return a fake network_info list of
# VIFs
ipv4_ip = network_model.IP(version=4, address='192.168.1.100')
ipv4_subnet = network_model.Subnet(ips=[ipv4_ip])
ipv6_ip = network_model.IP(version=6, address='2001:db8:0:1::1')
ipv6_subnet = network_model.Subnet(ips=[ipv6_ip])
network = network_model.Network(
label='test1', subnets=[ipv4_subnet, ipv6_subnet])
network_info = [network_model.VIF(network=network)]
allocate_for_inst_mock = mock.Mock(return_value=network_info)
self.compute.network_api.allocate_for_instance = allocate_for_inst_mock
# mock out deallocate_for_instance since we don't need it now
self.compute.network_api.deallocate_for_instance = mock.Mock()

        # Make sure the access_ip_* updates happen in the same DB
# update as the set to ACTIVE.
def _instance_update(self, ctxt, instance_uuid, **kwargs):
if kwargs.get('vm_state', None) == vm_states.ACTIVE:
self.assertEqual(kwargs['access_ip_v4'], '192.168.1.100')
self.assertEqual(kwargs['access_ip_v6'], '2001:db8:0:1::1')
return orig_update(ctxt, instance_uuid, **kwargs)
self.stub_out('nova.compute.manager.ComputeManager._instance_update',
_instance_update)
try:
self.compute.build_and_run_instance(self.context, instance, {},
{}, {}, [], block_device_mapping=[])
instances = db.instance_get_all(self.context)
instance = instances[0]
self.assertEqual(instance['access_ip_v4'], '192.168.1.100')
self.assertEqual(instance['access_ip_v6'], '2001:db8:0:1::1')
finally:
db.instance_destroy(self.context, instance['uuid'])

    def test_no_default_access_ip(self):
instance = self._create_fake_instance_obj()
try:
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, [], block_device_mapping=[])
instances = db.instance_get_all(self.context)
instance = instances[0]
self.assertFalse(instance['access_ip_v4'])
self.assertFalse(instance['access_ip_v6'])
finally:
db.instance_destroy(self.context, instance['uuid'])

    def test_fail_to_schedule_persists(self):
# check the persistence of the ERROR(scheduling) state.
params = {'vm_state': vm_states.ERROR,
'task_state': task_states.SCHEDULING}
self._create_fake_instance_obj(params=params)
# check state is failed even after the periodic poll
self.compute.periodic_tasks(context.get_admin_context())
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': task_states.SCHEDULING})

    def test_run_instance_setup_block_device_mapping_fail(self):
        """block device mapping failure test.

        Make sure that when there is a block device mapping problem,
        the instance goes to ERROR state, clearing the task state.
        """

def fake(*args, **kwargs):
raise exception.InvalidBDM()
self.stub_out('nova.compute.manager.ComputeManager'
'._prep_block_device', fake)
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(
self.context, instance=instance, image={},
request_spec={}, accel_uuids=[],
block_device_mapping=[],
filter_properties={}, requested_networks=[],
injected_files=None, admin_password=None,
node=None)
# check state is failed even after the periodic poll
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
self.compute.periodic_tasks(context.get_admin_context())
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})

    @mock.patch('nova.compute.manager.ComputeManager._prep_block_device',
                side_effect=exception.OverQuota(overs='volumes'))
    def test_setup_block_device_over_quota_fail(self, mock_prep_block_dev):
        """block device mapping over quota failure test.

        Make sure that when we are over the volume quota according to the
        Cinder client, the appropriate exception is raised and the instance
        goes to ERROR state, clearing the task state.
        """
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(
self.context, instance=instance, request_spec={},
filter_properties={}, accel_uuids=[],
requested_networks=[],
injected_files=None, admin_password=None,
node=None, block_device_mapping=[], image={})
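
        # The OverQuota raised by the mocked _prep_block_device should be
        # handled inside build_and_run_instance, putting the instance into
        # ERROR rather than propagating to the caller.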
# check state is failed even after the periodic poll
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
self.compute.periodic_tasks(context.get_admin_context())
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
self.assertTrue(mock_prep_block_dev.called)

    def test_run_instance_spawn_fail(self):
        """spawn failure test.

        Make sure that when there is a spawning problem,
        the instance goes to ERROR state, clearing the task state.
        """

def fake(*args, **kwargs):
raise test.TestingException()
self.stub_out('nova.virt.fake.FakeDriver.spawn', fake)
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(
self.context, instance=instance, request_spec={},
filter_properties={}, accel_uuids=[],
requested_networks=[],
injected_files=None, admin_password=None,
block_device_mapping=[], image={}, node=None)
# check state is failed even after the periodic poll
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
self.compute.periodic_tasks(context.get_admin_context())
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})

    def test_run_instance_dealloc_network_instance_not_found(self):
        """spawn network deallocate test.

        Make sure that when an instance is not found during spawn,
        the network is deallocated.
        """
instance = self._create_fake_instance_obj()

        def fake(*args, **kwargs):
raise exception.InstanceNotFound(instance_id="fake")
with test.nested(
mock.patch.object(self.compute, '_deallocate_network'),
mock.patch.object(self.compute.driver, 'spawn')
) as (mock_deallocate, mock_spawn):
mock_spawn.side_effect = fake
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, [],
block_device_mapping=[])
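
            # _deallocate_network takes requested_networks as its third
            # positional argument; None is expected here.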
mock_deallocate.assert_called_with(mock.ANY, mock.ANY, None)
self.assertTrue(mock_spawn.called)

    def test_run_instance_bails_on_missing_instance(self):
        # Make sure that build_and_run_instance() will quickly bail out if
        # the instance has already been deleted.
instance = self._create_fake_instance_obj()
with mock.patch.object(instance, 'save') as mock_save:
mock_save.side_effect = exception.InstanceNotFound(instance_id=1)
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, [],
block_device_mapping=[])
self.assertTrue(mock_save.called)

    def test_run_instance_bails_on_deleting_instance(self):
        # Make sure that build_and_run_instance() will quickly bail out if
        # the instance is being deleted.
instance = self._create_fake_instance_obj()
with mock.patch.object(instance, 'save') as mock_save:
mock_save.side_effect = exception.UnexpectedDeletingTaskStateError(
instance_uuid=instance['uuid'],
expected={'task_state': 'bar'},
actual={'task_state': 'foo'})
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, [],
block_device_mapping=[])
self.assertTrue(mock_save.called)

    def test_can_terminate_on_error_state(self):
# Make sure that the instance can be terminated in ERROR state.
# check failed to schedule --> terminate
params = {'vm_state': vm_states.ERROR}
instance = self._create_fake_instance_obj(params=params)
self.compute.terminate_instance(self.context, instance, [])
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
self.context, instance['uuid'])
# Double check it's not there for admins, either.
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
self.context.elevated(), instance['uuid'])

    def test_run_terminate(self):
        # Make sure it is possible to run and terminate an instance.
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
[], block_device_mapping=[])
instances = db.instance_get_all(self.context)
LOG.info("Running instances: %s", instances)
self.assertEqual(len(instances), 1)
self.compute.terminate_instance(self.context, instance, [])
instances = db.instance_get_all(self.context)
LOG.info("After terminating instances: %s", instances)
self.assertEqual(len(instances), 0)
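
        # The instance row is soft-deleted, so a read_deleted="only" admin
        # context is needed to fetch it and check its final state.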
admin_deleted_context = context.get_admin_context(
read_deleted="only")
instance = db.instance_get_by_uuid(admin_deleted_context,
instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.DELETED)
self.assertIsNone(instance['task_state'])

    def test_run_terminate_with_vol_attached(self):
        """Make sure it is possible to run and terminate an instance with a
        volume attached.
        """
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
[], block_device_mapping=[])
instances = db.instance_get_all(self.context)
LOG.info("Running instances: %s", instances)
self.assertEqual(len(instances), 1)

        def fake_check_availability_zone(*args, **kwargs):
pass

        def fake_attachment_create(*args, **kwargs):
return {'id': uuids.attachment_id}

        def fake_volume_get(self, context, volume_id):
return {'id': volume_id,
'attach_status': 'attached',
'attachments': {instance.uuid: {
'attachment_id': uuids.attachment_id
}
},
'multiattach': False
}

        def fake_terminate_connection(self, context, volume_id, connector):
pass

        def fake_detach(self, context, volume_id, instance_uuid):
pass

        bdms = []

        def fake_rpc_reserve_block_device_name(self, context, instance, device,
volume_id, **kwargs):
bdm = objects.BlockDeviceMapping(
**{'context': context,
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': uuids.volume_id,
'instance_uuid': instance['uuid'],
'device_name': '/dev/vdc'})
bdm.create()
bdms.append(bdm)
return bdm
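
        # Stub out the Cinder volume API and the compute RPC API so that
        # attach_volume can run without a real volume service; the fake
        # reserve_block_device_name records the BDM it creates in bdms so
        # it can be handed to terminate_instance below.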
self.stub_out('nova.volume.cinder.API.get', fake_volume_get)
self.stub_out('nova.volume.cinder.API.check_availability_zone',
fake_check_availability_zone)
self.stub_out('nova.volume.cinder.API.attachment_create',
fake_attachment_create)
self.stub_out('nova.volume.cinder.API.terminate_connection',
fake_terminate_connection)
self.stub_out('nova.volume.cinder.API.detach', fake_detach)
self.stub_out('nova.compute.rpcapi.ComputeAPI.'
'reserve_block_device_name',
fake_rpc_reserve_block_device_name)
self.compute_api.attach_volume(self.context, instance, 1,
'/dev/vdc')
self.compute.terminate_instance(self.context,
instance, bdms)
instances = db.instance_get_all(self.context)
LOG.info("After terminating instances: %s", instances)
self.assertEqual(len(instances), 0)
bdms = db.block_device_mapping_get_all_by_instance(self.context,
instance['uuid'])
self.assertEqual(len(bdms), 0)

    def test_run_terminate_no_image(self):
        """Make sure an instance started without an image (booted from
        volume) can be terminated without issues.
        """
params = {'image_ref': ''}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
[], block_device_mapping=[])
self._assert_state({'vm_state': vm_states.ACTIVE,
'task_state': None})
self.compute.terminate_instance(self.context, instance, [])
instances = db.instance_get_all(self.context)
self.assertEqual(len(instances), 0)

    def test_terminate_no_network(self):
# This is as reported in LP bug 1008875
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
[], block_device_mapping=[])
instances = db.instance_get_all(self.context)
LOG.info("Running instances: %s", instances)
self.assertEqual(len(instances), 1)
self.compute.terminate_instance(self.context, instance, [])
instances = db.instance_get_all(self.context)
LOG.info("After terminating instances: %s", instances)
self.assertEqual(len(instances), 0)

    def test_run_terminate_timestamps(self):
# Make sure timestamps are set for launched and destroyed.
instance = self._create_fake_instance_obj()
instance['launched_at'] = None
self.assertIsNone(instance['launched_at'])
self.assertIsNone(instance['deleted_at'])
launch = timeutils.utcnow()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
[], block_device_mapping=[])
instance.refresh()
self.assertGreater(instance['launched_at'].replace(tzinfo=None),
launch)
self.assertIsNone(instance['deleted_at'])
terminate = timeutils.utcnow()
self.compute.terminate_instance(self.context, instance, [])
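
        # The instance row is soft-deleted after termination, so temporarily
        # read deleted rows to verify that launched_at and deleted_at
        # straddle the terminate timestamp.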
with utils.temporary_mutation(self.context, read_deleted='only'):
instance = db.instance_get_by_uuid(self.context,
instance['uuid'])
            self.assertLess(instance['launched_at'].replace(tzinfo=None),
                            terminate)
self.assertGreater(instance['deleted_at'].replace(
tzinfo=None), terminate)

    def test_run_terminate_deallocate_net_failure_sets_error_state(self):
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
[], block_device_mapping=[])
instances = db.instance_get_all(self.context)
LOG.info("Running instances: %s", instances)
self.assertEqual(len(instances), 1)

        def _fake_deallocate_network(*args, **kwargs):
raise test.TestingException()
self.stub_out('nova.compute.manager.ComputeManager.'
'_deallocate_network', _fake_deallocate_network)
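
        # The deallocation failure should propagate out of
        # terminate_instance and leave the instance in ERROR so the delete
        # can be retried.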
self.assertRaises(test.TestingException,
self.compute.terminate_instance,
self.context, instance, [])
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ERROR)

    @mock.patch('nova.compute.utils.notify_about_instance_action')
    def test_stop(self, mock_notify):
        # Ensure an instance can be stopped.
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, [], block_device_mapping=[])
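
        # stop_instance expects the task_state to already be POWERING_OFF,
        # as the compute API layer would normally have set it before
        # casting to the manager.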
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_OFF})
inst_uuid = instance['uuid']
extra = ['system_metadata', 'metadata']
inst_obj = objects.Instance.get_by_uuid(self.context,
inst_uuid,
expected_attrs=extra)
self.compute.stop_instance(self.context, instance=inst_obj,
clean_shutdown=True)
mock_notify.assert_has_calls([
mock.call(self.context, inst_obj, 'fake-mini', action='power_off',
phase='start'),
mock.call(self.context, inst_obj, 'fake-mini', action='power_off',
phase='end')])
self.compute.terminate_instance(self.context, instance, [])

    @mock.patch('nova.compute.utils.notify_about_instance_action')
    def test_start(self, mock_notify):
        # Ensure an instance can be started.
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
[], block_device_mapping=[])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_OFF})
extra = ['system_metadata', 'metadata']
inst_uuid = instance['uuid']
inst_obj = objects.Instance.get_by_uuid(self.context,
inst_uuid,
expected_attrs=extra)
self.compute.stop_instance(self.context, instance=inst_obj,
clean_shutdown=True)
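
        # start_instance similarly expects the POWERING_ON task state to
        # have been set ahead of the call, so set it here.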
inst_obj.task_state = task_states.POWERING_ON
inst_obj.save()
self.compute.start_instance(self.context, instance=inst_obj)
mock_notify.assert_has_calls([
mock.call(self.context, inst_obj, 'fake-mini', action='power_on',
phase='start'),
mock.call(self.context, inst_obj, 'fake-mini', action='power_on',
phase='end')])
self.compute.terminate_instance(self.context, instance, [])

    def test_start_shelved_instance(self):
        # Ensure a shelved instance can be started.
self.deleted_image_id = None

        def fake_delete(self_, ctxt, image_id):
self.deleted_image_id = image_id
self.useFixture(fixtures.GlanceFixture(self))
self.stub_out('nova.tests.fixtures.GlanceFixture.delete', fake_delete)
instance = self._create_fake_instance_obj()
image = {'id': uuids.image}
# Adding shelved information to instance system metadata.
shelved_time = timeutils.utcnow().isoformat()
instance.system_metadata['shelved_at'] = shelved_time
instance.system_metadata['shelved_image_id'] = image['id']
instance.system_metadata['shelved_host'] = 'fake-mini'
instance.save()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
[], block_device_mapping=[])
db.instance_update(self.context, instance['uuid'],