# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute service."""
import base64
import contextlib
import datetime
import operator
import sys
import time
import traceback
import uuid
from eventlet import greenthread
import mock
from mox3 import mox
from neutronclient.common import exceptions as neutron_exceptions
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
import six
import testtools
from testtools import matchers as testtools_matchers
import nova
from nova import availability_zones
from nova import block_device
from nova import compute
from nova.compute import api as compute_api
from nova.compute import arch
from nova.compute import flavors
from nova.compute import manager as compute_manager
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import manager as conductor_manager
from nova.console import type as ctype
from nova import context
from nova import db
from nova import exception
from nova.image import api as image_api
from nova.image import glance
from nova.network import api as network_api
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova import objects
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
from nova import policy
from nova import quota
from nova.scheduler import client as scheduler_client
from nova import test
from nova.tests import fixtures
from nova.tests.unit.compute import eventlet_utils
from nova.tests.unit.compute import fake_resource_tracker
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
from nova.tests.unit import fake_network_cache_model
from nova.tests.unit import fake_notifier
from nova.tests.unit import fake_server_actions
from nova.tests.unit.image import fake as fake_image
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_flavor
from nova.tests.unit.objects import test_instance_numa_topology
from nova.tests.unit.objects import test_migration
from nova.tests.unit import utils as test_utils
from nova import utils
from nova.virt import block_device as driver_block_device
from nova.virt import event
from nova.virt import fake
from nova.virt import hardware
from nova.volume import cinder
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
FAKE_IMAGE_REF = 'fake-image-ref'
NODENAME = 'fakenode1'
def fake_not_implemented(*args, **kwargs):
raise NotImplementedError()
def get_primitive_instance_by_uuid(context, instance_uuid):
"""Helper method to get an instance and then convert it to
a primitive form using jsonutils.
"""
instance = db.instance_get_by_uuid(context, instance_uuid)
return jsonutils.to_primitive(instance)
def unify_instance(instance):
"""Return a dict-like instance for both object-initiated and
model-initiated sources that can reasonably be compared.
"""
newdict = dict()
for k, v in six.iteritems(instance):
if isinstance(v, datetime.datetime):
# NOTE(danms): DB models and Instance objects have different
# timezone expectations
v = v.replace(tzinfo=None)
elif k == 'fault':
# NOTE(danms): DB models don't have 'fault'
continue
elif k == 'pci_devices':
# NOTE(yonlig.he) pci devices need lazy loading
# fake db does not support it yet.
continue
newdict[k] = v
return newdict
class FakeComputeTaskAPI(object):
def resize_instance(self, context, instance, extra_instance_updates,
scheduler_hint, flavor, reservations):
pass
class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(network_manager='nova.network.manager.FlatManager')
fake.set_nodes([NODENAME])
self.flags(use_local=True, group='conductor')
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
self.compute = importutils.import_object(CONF.compute_manager)
# execute power syncing synchronously for testing:
self.compute._sync_power_pool = eventlet_utils.SyncPool()
# override tracker with a version that doesn't need the database:
fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
self.compute.driver, NODENAME)
self.compute._resource_tracker_dict[NODENAME] = fake_rt
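# Stub out the compute-node lookup so update_available_resource() can
# run without touching the database; a single fake PowerVM node is
# returned.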
def fake_get_compute_nodes_in_db(context, use_slave=False):
fake_compute_nodes = [{'local_gb': 259,
'vcpus_used': 0,
'deleted': 0,
'hypervisor_type': 'powervm',
'created_at': '2013-04-01T00:27:06.000000',
'local_gb_used': 0,
'updated_at': '2013-04-03T00:35:41.000000',
'hypervisor_hostname': 'fake_phyp1',
'memory_mb_used': 512,
'memory_mb': 131072,
'current_workload': 0,
'vcpus': 16,
'cpu_info': 'ppc64,powervm,3940',
'running_vms': 0,
'free_disk_gb': 259,
'service_id': 7,
'hypervisor_version': 7,
'disk_available_least': 265856,
'deleted_at': None,
'free_ram_mb': 130560,
'metrics': '',
'stats': '',
'numa_topology': '',
'id': 2,
'host': 'fake_phyp1',
'cpu_allocation_ratio': 16.0,
'ram_allocation_ratio': 1.5,
'host_ip': '127.0.0.1'}]
return [objects.ComputeNode._from_db_object(
context, objects.ComputeNode(), cn)
for cn in fake_compute_nodes]
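# The fake node (id 2, host fake_phyp1) does not match NODENAME, so
# update_available_resource() is expected to delete it from the db.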
def fake_compute_node_delete(context, compute_node_id):
self.assertEqual(2, compute_node_id)
self.stubs.Set(self.compute, '_get_compute_nodes_in_db',
fake_get_compute_nodes_in_db)
self.stubs.Set(db, 'compute_node_delete',
fake_compute_node_delete)
self.compute.update_available_resource(
context.get_admin_context())
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id)
self.none_quotas = objects.Quotas.from_reservations(
self.context, None)
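# Replace the image service 'show' so any truthy image id yields a
# minimal active image with kernel/ramdisk properties, while a missing
# id raises ImageNotFound.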
def fake_show(meh, context, id, **kwargs):
if id:
return {'id': id,
'name': 'fake_name',
'status': 'active',
'properties': {'kernel_id': 'fake_kernel_id',
'ramdisk_id': 'fake_ramdisk_id',
'something_else': 'meow'}}
else:
raise exception.ImageNotFound(image_id=id)
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
fake_taskapi = FakeComputeTaskAPI()
self.stubs.Set(self.compute, 'compute_task_api', fake_taskapi)
fake_network.set_stub_network_methods(self.stubs)
fake_server_actions.stub_out_action_events(self.stubs)
def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs):
return network_model.NetworkInfo()
self.stubs.Set(network_api.API, 'get_instance_nw_info',
fake_get_nw_info)
def fake_allocate_for_instance(cls, ctxt, instance, *args, **kwargs):
self.assertFalse(ctxt.is_admin)
return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
self.stubs.Set(network_api.API, 'allocate_for_instance',
fake_allocate_for_instance)
self.compute_api = compute.API()
# Just to make long lines short
self.rt = self.compute._get_resource_tracker(NODENAME)
def tearDown(self):
timeutils.clear_time_override()
ctxt = context.get_admin_context()
fake_image.FakeImageService_reset()
instances = db.instance_get_all(ctxt)
for instance in instances:
db.instance_destroy(ctxt, instance['uuid'])
fake.restore_nodes()
super(BaseTestCase, self).tearDown()
def _fake_instance(self, updates):
return fake_instance.fake_instance_obj(None, **updates)
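# Build and persist an Instance object with sane defaults; 'params'
# overrides individual fields and 'services' also creates service
# entries for the instance's host.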
def _create_fake_instance_obj(self, params=None, type_name='m1.tiny',
services=False, context=None):
flavor = flavors.get_flavor_by_name(type_name)
inst = objects.Instance(context=context or self.context)
inst.vm_state = vm_states.ACTIVE
inst.task_state = None
inst.power_state = power_state.RUNNING
inst.image_ref = FAKE_IMAGE_REF
inst.reservation_id = 'r-fakeres'
inst.user_id = self.user_id
inst.project_id = self.project_id
inst.host = 'fake_host'
inst.node = NODENAME
inst.instance_type_id = flavor.id
inst.ami_launch_index = 0
inst.memory_mb = 0
inst.vcpus = 0
inst.root_gb = 0
inst.ephemeral_gb = 0
inst.architecture = arch.X86_64
inst.os_type = 'Linux'
inst.system_metadata = (
params and params.get('system_metadata', {}) or {})
inst.locked = False
inst.created_at = timeutils.utcnow()
inst.updated_at = timeutils.utcnow()
inst.launched_at = timeutils.utcnow()
inst.security_groups = objects.SecurityGroupList(objects=[])
inst.flavor = flavor
inst.old_flavor = None
inst.new_flavor = None
if params:
inst.update(params)
if services:
_create_service_entries(self.context.elevated(),
[['fake_zone', [inst.host]]])
inst.create()
return inst
def _create_instance_type(self, params=None):
"""Create a test instance type."""
if not params:
params = {}
context = self.context.elevated()
inst = {}
inst['name'] = 'm1.small'
inst['memory_mb'] = 1024
inst['vcpus'] = 1
inst['root_gb'] = 20
inst['ephemeral_gb'] = 10
inst['flavorid'] = '1'
inst['swap'] = 2048
inst['rxtx_factor'] = 1
inst.update(params)
return db.flavor_create(context, inst)['id']
def _create_group(self):
values = {'name': 'testgroup',
'description': 'testgroup',
'user_id': self.user_id,
'project_id': self.project_id}
return db.security_group_create(self.context, values)
def _stub_migrate_server(self):
def _fake_migrate_server(*args, **kwargs):
pass
self.stubs.Set(conductor_manager.ComputeTaskManager,
'migrate_server', _fake_migrate_server)
def _init_aggregate_with_host(self, aggr, aggr_name, zone, host):
if not aggr:
aggr = self.api.create_aggregate(self.context, aggr_name, zone)
aggr = self.api.add_host_to_aggregate(self.context, aggr['id'], host)
return aggr
class ComputeVolumeTestCase(BaseTestCase):
def setUp(self):
super(ComputeVolumeTestCase, self).setUp()
self.volume_id = 'fake'
self.fetched_attempts = 0
self.instance = {
'id': 'fake',
'uuid': 'fake',
'name': 'fake',
'root_device_name': '/dev/vda',
}
self.fake_volume = fake_block_device.FakeDbBlockDeviceDict(
{'source_type': 'volume', 'destination_type': 'volume',
'volume_id': self.volume_id, 'device_name': '/dev/vdb'})
self.instance_object = objects.Instance._from_db_object(
self.context, objects.Instance(),
fake_instance.fake_db_instance())
self.stubs.Set(self.compute.volume_api, 'get', lambda *a, **kw:
{'id': self.volume_id, 'size': 4,
'attach_status': 'detached'})
self.stubs.Set(self.compute.driver, 'get_volume_connector',
lambda *a, **kw: None)
self.stubs.Set(self.compute.volume_api, 'initialize_connection',
lambda *a, **kw: {})
self.stubs.Set(self.compute.volume_api, 'terminate_connection',
lambda *a, **kw: None)
self.stubs.Set(self.compute.volume_api, 'attach',
lambda *a, **kw: None)
self.stubs.Set(self.compute.volume_api, 'detach',
lambda *a, **kw: None)
self.stubs.Set(self.compute.volume_api, 'check_attach',
lambda *a, **kw: None)
self.stubs.Set(greenthread, 'sleep',
lambda *a, **kw: None)
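# Capture the connection_info passed to the BDM create/update calls so
# tests can assert on the stored serial.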
def store_cinfo(context, *args, **kwargs):
self.cinfo = jsonutils.loads(args[-1].get('connection_info'))
return self.fake_volume
self.stubs.Set(db, 'block_device_mapping_create', store_cinfo)
self.stubs.Set(db, 'block_device_mapping_update', store_cinfo)
def test_attach_volume_serial(self):
fake_bdm = objects.BlockDeviceMapping(context=self.context,
**self.fake_volume)
with (mock.patch.object(cinder.API, 'get_volume_encryption_metadata',
return_value={})):
instance = self._create_fake_instance_obj()
self.compute.attach_volume(self.context, instance, bdm=fake_bdm)
self.assertEqual(self.cinfo.get('serial'), self.volume_id)
def test_attach_volume_raises(self):
fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
instance = self._create_fake_instance_obj()
def fake_attach(*args, **kwargs):
raise test.TestingException
with contextlib.nested(
mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
'attach'),
mock.patch.object(cinder.API, 'unreserve_volume'),
mock.patch.object(objects.BlockDeviceMapping,
'destroy')
) as (mock_attach, mock_unreserve, mock_destroy):
mock_attach.side_effect = fake_attach
self.assertRaises(
test.TestingException, self.compute.attach_volume,
self.context, instance, fake_bdm)
self.assertTrue(mock_unreserve.called)
self.assertTrue(mock_destroy.called)
def test_detach_volume_api_raises(self):
fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
instance = self._create_fake_instance_obj()
with contextlib.nested(
mock.patch.object(self.compute, '_driver_detach_volume'),
mock.patch.object(self.compute.volume_api, 'detach'),
mock.patch.object(objects.BlockDeviceMapping,
'get_by_volume_id'),
mock.patch.object(fake_bdm, 'destroy')
) as (mock_internal_detach, mock_detach, mock_get, mock_destroy):
mock_detach.side_effect = test.TestingException
mock_get.return_value = fake_bdm
self.assertRaises(
test.TestingException, self.compute.detach_volume,
self.context, 'fake', instance)
mock_internal_detach.assert_called_once_with(self.context,
instance,
fake_bdm)
self.assertTrue(mock_destroy.called)
def test_await_block_device_created_too_slow(self):
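# With only two retries and a volume stuck in 'creating',
# _await_block_device_map_created should give up and raise
# VolumeNotCreated.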
self.flags(block_device_allocate_retries=2)
self.flags(block_device_allocate_retries_interval=0.1)
def never_get(context, vol_id):
return {
'status': 'creating',
'id': 'blah',
}
self.stubs.Set(self.compute.volume_api, 'get', never_get)
self.assertRaises(exception.VolumeNotCreated,
self.compute._await_block_device_map_created,
self.context, '1')
def test_await_block_device_created_failed(self):
c = self.compute
fake_result = {'status': 'error', 'id': 'blah'}
with mock.patch.object(c.volume_api, 'get',
return_value=fake_result) as fake_get:
self.assertRaises(exception.VolumeNotCreated,
c._await_block_device_map_created,
self.context, '1')
fake_get.assert_called_once_with(self.context, '1')
def test_await_block_device_created_slow(self):
c = self.compute
self.flags(block_device_allocate_retries=4)
self.flags(block_device_allocate_retries_interval=0.1)
def slow_get(context, vol_id):
if self.fetched_attempts < 2:
self.fetched_attempts += 1
return {
'status': 'creating',
'id': 'blah',
}
return {
'status': 'available',
'id': 'blah',
}
self.stubs.Set(c.volume_api, 'get', slow_get)
attempts = c._await_block_device_map_created(self.context, '1')
self.assertEqual(attempts, 3)
def test_await_block_device_created_retries_negative(self):
c = self.compute
self.flags(block_device_allocate_retries=-1)
self.flags(block_device_allocate_retries_interval=0.1)
def volume_get(context, vol_id):
return {
'status': 'available',
'id': 'blah',
}
self.stubs.Set(c.volume_api, 'get', volume_get)
attempts = c._await_block_device_map_created(self.context, '1')
self.assertEqual(1, attempts)
def test_await_block_device_created_retries_zero(self):
c = self.compute
self.flags(block_device_allocate_retries=0)
self.flags(block_device_allocate_retries_interval=0.1)
def volume_get(context, vol_id):
return {
'status': 'available',
'id': 'blah',
}
self.stubs.Set(c.volume_api, 'get', volume_get)
attempts = c._await_block_device_map_created(self.context, '1')
self.assertEqual(1, attempts)
def test_boot_volume_serial(self):
with (
mock.patch.object(objects.BlockDeviceMapping, 'save')
) as mock_save:
block_device_mapping = [
block_device.BlockDeviceDict({
'id': 1,
'no_device': None,
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': self.volume_id,
'device_name': '/dev/vdb',
'volume_size': 55,
'delete_on_termination': False,
})]
prepped_bdm = self.compute._prep_block_device(
self.context, self.instance_object, block_device_mapping)
self.assertEqual(2, mock_save.call_count)
volume_driver_bdm = prepped_bdm['block_device_mapping'][0]
self.assertEqual(volume_driver_bdm['connection_info']['serial'],
self.volume_id)
def test_boot_volume_metadata(self, metadata=True):
def volume_api_get(*args, **kwargs):
if metadata:
return {
'size': 1,
'volume_image_metadata': {'vol_test_key': 'vol_test_value',
'min_ram': u'128',
'min_disk': u'256',
'size': u'536870912'
},
}
else:
return {}
self.stubs.Set(self.compute_api.volume_api, 'get', volume_api_get)
expected_no_metadata = {'min_disk': 0, 'min_ram': 0, 'properties': {},
'size': 0, 'status': 'active'}
block_device_mapping = [{
'id': 1,
'device_name': 'vda',
'no_device': None,
'virtual_name': None,
'snapshot_id': None,
'volume_id': self.volume_id,
'delete_on_termination': False,
}]
image_meta = self.compute_api._get_bdm_image_metadata(
self.context, block_device_mapping)
if metadata:
self.assertEqual(image_meta['properties']['vol_test_key'],
'vol_test_value')
self.assertEqual(128, image_meta['min_ram'])
self.assertEqual(256, image_meta['min_disk'])
self.assertEqual(units.Gi, image_meta['size'])
else:
self.assertEqual(expected_no_metadata, image_meta)
# Test it with new-style BDMs
block_device_mapping = [{
'boot_index': 0,
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': self.volume_id,
'delete_on_termination': False,
}]
image_meta = self.compute_api._get_bdm_image_metadata(
self.context, block_device_mapping, legacy_bdm=False)
if metadata:
self.assertEqual(image_meta['properties']['vol_test_key'],
'vol_test_value')
self.assertEqual(128, image_meta['min_ram'])
self.assertEqual(256, image_meta['min_disk'])
self.assertEqual(units.Gi, image_meta['size'])
else:
self.assertEqual(expected_no_metadata, image_meta)
def test_boot_volume_no_metadata(self):
self.test_boot_volume_metadata(metadata=False)
def test_boot_image_metadata(self, metadata=True):
def image_api_get(*args, **kwargs):
if metadata:
return {
'properties': {'img_test_key': 'img_test_value'}
}
else:
return {}
self.stubs.Set(self.compute_api.image_api, 'get', image_api_get)
block_device_mapping = [{
'boot_index': 0,
'source_type': 'image',
'destination_type': 'local',
'image_id': "fake-image",
'delete_on_termination': True,
}]
image_meta = self.compute_api._get_bdm_image_metadata(
self.context, block_device_mapping, legacy_bdm=False)
if metadata:
self.assertEqual('img_test_value',
image_meta['properties']['img_test_key'])
else:
self.assertEqual(image_meta, {})
def test_boot_image_no_metadata(self):
self.test_boot_image_metadata(metadata=False)
def test_poll_bandwidth_usage_not_implemented(self):
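# If the driver does not implement bandwidth counters, the first poll
# should disable bandwidth polling entirely.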
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(self.compute.driver, 'get_all_bw_counters')
self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
self.mox.StubOutWithMock(time, 'time')
self.mox.StubOutWithMock(objects.InstanceList, 'get_by_host')
# The following methods will be called
utils.last_completed_audit_period().AndReturn((0, 0))
time.time().AndReturn(10)
# Note - time.time() is called two more times by logging
time.time().AndReturn(20)
time.time().AndReturn(21)
objects.InstanceList.get_by_host(ctxt, 'fake-mini',
use_slave=True).AndReturn([])
self.compute.driver.get_all_bw_counters([]).AndRaise(
NotImplementedError)
self.mox.ReplayAll()
self.flags(bandwidth_poll_interval=1)
self.compute._poll_bandwidth_usage(ctxt)
# A second call won't call the stubs again as the bandwidth
# poll is now disabled
self.compute._poll_bandwidth_usage(ctxt)
self.mox.UnsetStubs()
@mock.patch.object(objects.InstanceList, 'get_by_host')
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
def test_get_host_volume_bdms(self, mock_get_by_inst, mock_get_by_host):
fake_instance = mock.Mock(uuid='fake-instance-uuid')
mock_get_by_host.return_value = [fake_instance]
volume_bdm = mock.Mock(id=1, is_volume=True)
not_volume_bdm = mock.Mock(id=2, is_volume=False)
mock_get_by_inst.return_value = [volume_bdm, not_volume_bdm]
expected_host_bdms = [{'instance': fake_instance,
'instance_bdms': [volume_bdm]}]
got_host_bdms = self.compute._get_host_volume_bdms('fake-context')
mock_get_by_host.assert_called_once_with('fake-context',
self.compute.host,
use_slave=False)
mock_get_by_inst.assert_called_once_with('fake-context',
'fake-instance-uuid',
use_slave=False)
self.assertEqual(expected_host_bdms, got_host_bdms)
def test_poll_volume_usage_disabled(self):
ctxt = 'MockContext'
self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
# None of the mocks should be called.
self.mox.ReplayAll()
self.flags(volume_usage_poll_interval=0)
self.compute._poll_volume_usage(ctxt)
self.mox.UnsetStubs()
def test_poll_volume_usage_returns_no_vols(self):
ctxt = 'MockContext'
self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
# The following methods are called.
utils.last_completed_audit_period().AndReturn((0, 0))
self.compute._get_host_volume_bdms(ctxt, use_slave=True).AndReturn([])
self.mox.ReplayAll()
self.flags(volume_usage_poll_interval=10)
self.compute._poll_volume_usage(ctxt)
self.mox.UnsetStubs()
def test_poll_volume_usage_with_data(self):
ctxt = 'MockContext'
self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
self.mox.StubOutWithMock(self.compute, '_update_volume_usage_cache')
self.stubs.Set(self.compute.driver, 'get_all_volume_usage',
lambda x, y: [3, 4])
# All the mocks are called
utils.last_completed_audit_period().AndReturn((10, 20))
self.compute._get_host_volume_bdms(ctxt,
use_slave=True).AndReturn([1, 2])
self.compute._update_volume_usage_cache(ctxt, [3, 4])
self.mox.ReplayAll()
self.flags(volume_usage_poll_interval=10)
self.compute._poll_volume_usage(ctxt)
self.mox.UnsetStubs()
def test_detach_volume_usage(self):
# Test that detaching a volume updates the volume usage cache table.
instance = self._create_fake_instance_obj()
bdm = objects.BlockDeviceMapping(context=self.context,
id=1, device_name='/dev/vdb',
connection_info='{}',
instance_uuid=instance['uuid'],
source_type='volume',
destination_type='volume',
no_device=False,
disk_bus='foo',
device_type='disk',
volume_size=1,
volume_id=1)
host_volume_bdms = {'id': 1, 'device_name': '/dev/vdb',
'connection_info': '{}', 'instance_uuid': instance['uuid'],
'volume_id': 1}
self.mox.StubOutWithMock(objects.BlockDeviceMapping,
'get_by_volume_id')
self.mox.StubOutWithMock(self.compute.driver, 'block_stats')
self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
# The following methods will be called
objects.BlockDeviceMapping.get_by_volume_id(self.context, 1).AndReturn(
bdm.obj_clone())
self.compute.driver.block_stats(instance, 'vdb').\
AndReturn([1, 30, 1, 20, None])
self.compute._get_host_volume_bdms(self.context,
use_slave=True).AndReturn(
host_volume_bdms)
self.compute.driver.get_all_volume_usage(
self.context, host_volume_bdms).AndReturn(
[{'volume': 1,
'rd_req': 1,
'rd_bytes': 10,
'wr_req': 1,
'wr_bytes': 5,
'instance': instance}])
self.mox.ReplayAll()
def fake_get_volume_encryption_metadata(self, context, volume_id):
return {}
self.stubs.Set(cinder.API, 'get_volume_encryption_metadata',
fake_get_volume_encryption_metadata)
self.compute.attach_volume(self.context, instance, bdm)
# Poll volume usage & then detach the volume. This will update the
# total fields in the volume usage cache.
self.flags(volume_usage_poll_interval=10)
self.compute._poll_volume_usage(self.context)
# Check that a volume.attach and a volume.usage notification were sent
self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
self.compute.detach_volume(self.context, 1, instance)
# Check that volume.attach, 2 volume.usage, and volume.detach
# notifications were sent
self.assertEqual(4, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('compute.instance.volume.attach', msg.event_type)
msg = fake_notifier.NOTIFICATIONS[2]
self.assertEqual('volume.usage', msg.event_type)
payload = msg.payload
self.assertEqual(instance['uuid'], payload['instance_id'])
self.assertEqual('fake', payload['user_id'])
self.assertEqual('fake', payload['tenant_id'])
self.assertEqual(1, payload['reads'])
self.assertEqual(30, payload['read_bytes'])
self.assertEqual(1, payload['writes'])
self.assertEqual(20, payload['write_bytes'])
self.assertIsNone(payload['availability_zone'])
msg = fake_notifier.NOTIFICATIONS[3]
self.assertEqual('compute.instance.volume.detach', msg.event_type)
# Check the database for the volume usage record
volume_usages = db.vol_get_usage_by_time(self.context, 0)
self.assertEqual(1, len(volume_usages))
volume_usage = volume_usages[0]
self.assertEqual(0, volume_usage['curr_reads'])
self.assertEqual(0, volume_usage['curr_read_bytes'])
self.assertEqual(0, volume_usage['curr_writes'])
self.assertEqual(0, volume_usage['curr_write_bytes'])
self.assertEqual(1, volume_usage['tot_reads'])
self.assertEqual(30, volume_usage['tot_read_bytes'])
self.assertEqual(1, volume_usage['tot_writes'])
self.assertEqual(20, volume_usage['tot_write_bytes'])
def test_prepare_image_mapping(self):
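# Legacy EC2-style virtual device mappings should be converted into
# new-style BDMs, with swap and ephemeral sizes taken from the
# instance type.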
swap_size = 1
ephemeral_size = 1
instance_type = {'swap': swap_size,
'ephemeral_gb': ephemeral_size}
mappings = [
{'virtual': 'ami', 'device': 'sda1'},
{'virtual': 'root', 'device': '/dev/sda1'},
{'virtual': 'swap', 'device': 'sdb4'},
{'virtual': 'ephemeral0', 'device': 'sdc1'},
{'virtual': 'ephemeral1', 'device': 'sdc2'},
]
preped_bdm = self.compute_api._prepare_image_mapping(
instance_type, mappings)
expected_result = [
{
'device_name': '/dev/sdb4',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': 'swap',
'boot_index': -1,
'volume_size': swap_size
},
{
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': CONF.default_ephemeral_format,
'boot_index': -1,
'volume_size': ephemeral_size
},
{
'device_name': '/dev/sdc2',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': CONF.default_ephemeral_format,
'boot_index': -1,
'volume_size': ephemeral_size
}
]
for expected, got in zip(expected_result, preped_bdm):
self.assertThat(expected, matchers.IsSubDictOf(got))
def test_validate_bdm(self):
def fake_get(self, context, res_id):
return {'id': res_id, 'size': 4}
def fake_check_attach(*args, **kwargs):
pass
self.stubs.Set(cinder.API, 'get', fake_get)
self.stubs.Set(cinder.API, 'get_snapshot', fake_get)
self.stubs.Set(cinder.API, 'check_attach',
fake_check_attach)
volume_id = '55555555-aaaa-bbbb-cccc-555555555555'
snapshot_id = '66666666-aaaa-bbbb-cccc-555555555555'
image_id = '77777777-aaaa-bbbb-cccc-555555555555'
instance = self._create_fake_instance_obj()
instance_type = {'swap': 1, 'ephemeral_gb': 2}
mappings = [
fake_block_device.FakeDbBlockDeviceDict({
'device_name': '/dev/sdb4',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': 'swap',
'boot_index': -1,
'volume_size': 1
}, anon=True),
fake_block_device.FakeDbBlockDeviceDict({
'device_name': '/dev/sda1',
'source_type': 'volume',
'destination_type': 'volume',
'device_type': 'disk',
'volume_id': volume_id,
'guest_format': None,
'boot_index': 1,
}, anon=True),
fake_block_device.FakeDbBlockDeviceDict({
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'snapshot_id': snapshot_id,
'device_type': 'disk',
'guest_format': None,
'volume_size': 6,
'boot_index': 0,
}, anon=True),
fake_block_device.FakeDbBlockDeviceDict({
'device_name': '/dev/sda3',
'source_type': 'image',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': None,
'boot_index': 2,
'volume_size': 1
}, anon=True)
]
mappings = block_device_obj.block_device_make_list_from_dicts(
self.context, mappings)
# Make sure it passes at first
self.compute_api._validate_bdm(self.context, instance,
instance_type, mappings)
self.assertEqual(4, mappings[1].volume_size)
self.assertEqual(6, mappings[2].volume_size)
# Boot sequence
mappings[2].boot_index = 2
self.assertRaises(exception.InvalidBDMBootSequence,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings)
mappings[2].boot_index = 0
# number of local block_devices
self.flags(max_local_block_devices=1)
self.assertRaises(exception.InvalidBDMLocalsLimit,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings)
ephemerals = [
fake_block_device.FakeDbBlockDeviceDict({
'device_name': '/dev/vdb',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1,
'volume_size': 1
}, anon=True),
fake_block_device.FakeDbBlockDeviceDict({
'device_name': '/dev/vdc',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1,
'volume_size': 1
}, anon=True)
]
ephemerals = block_device_obj.block_device_make_list_from_dicts(
self.context, ephemerals)
self.flags(max_local_block_devices=4)
# More ephemerals are OK as long as they are not over the size limit
mappings_ = mappings[:]
mappings_.objects.extend(ephemerals)
self.compute_api._validate_bdm(self.context, instance,
instance_type, mappings_)
# Ephemerals over the size limit
ephemerals[0].volume_size = 3
mappings_ = mappings[:]
mappings_.objects.extend(ephemerals)
self.assertRaises(exception.InvalidBDMEphemeralSize,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings_)
# Swap over the size limit
mappings[0].volume_size = 3
self.assertRaises(exception.InvalidBDMSwapSize,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings)
mappings[0].volume_size = 1
additional_swap = [
fake_block_device.FakeDbBlockDeviceDict({
'device_name': '/dev/vdb',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': 'swap',
'boot_index': -1,
'volume_size': 1
}, anon=True)
]
additional_swap = block_device_obj.block_device_make_list_from_dicts(
self.context, additional_swap)
# More than one swap
mappings_ = mappings[:]
mappings_.objects.extend(additional_swap)
self.assertRaises(exception.InvalidBDMFormat,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings_)
image_no_size = [
fake_block_device.FakeDbBlockDeviceDict({
'device_name': '/dev/sda4',
'source_type': 'image',
'image_id': image_id,
'destination_type': 'volume',
'boot_index': -1,
'volume_size': None,
}, anon=True)
]
image_no_size = block_device_obj.block_device_make_list_from_dicts(
self.context, image_no_size)
mappings_ = mappings[:]
mappings_.objects.extend(image_no_size)
self.assertRaises(exception.InvalidBDM,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings_)
# blank device without a specified size fails
blank_no_size = [
fake_block_device.FakeDbBlockDeviceDict({
'device_name': '/dev/sda4',
'source_type': 'blank',
'destination_type': 'volume',
'boot_index': -1,
'volume_size': None,
}, anon=True)
]
blank_no_size = block_device_obj.block_device_make_list_from_dicts(
self.context, blank_no_size)
mappings_ = mappings[:]
mappings_.objects.extend(blank_no_size)
self.assertRaises(exception.InvalidBDM,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings_)
def test_validate_bdm_media_service_exceptions(self):
instance_type = {'swap': 1, 'ephemeral_gb': 1}
all_mappings = [fake_block_device.FakeDbBlockDeviceDict({
'id': 1,
'no_device': None,
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': self.volume_id,
'device_name': 'vda',
'boot_index': 0,
'delete_on_termination': False}, anon=True)]
all_mappings = block_device_obj.block_device_make_list_from_dicts(
self.context, all_mappings)
# First we test a list of invalid status values that should result
# in an InvalidVolume exception being raised.
status_values = (
# First two check that the status is 'available'.
('creating', 'detached'),
('error', 'detached'),
# Checks that the attach_status is 'detached'.
('available', 'attached')
)
for status, attach_status in status_values:
def fake_volume_get(self, ctxt, volume_id):
return {'id': volume_id,
'status': status,
'attach_status': attach_status}
self.stubs.Set(cinder.API, 'get', fake_volume_get)
self.assertRaises(exception.InvalidVolume,
self.compute_api._validate_bdm,
self.context, self.instance,
instance_type, all_mappings)
# Now we test a 404 case that results in InvalidBDMVolume.
def fake_volume_get_not_found(self, context, volume_id):
raise exception.VolumeNotFound(volume_id)
self.stubs.Set(cinder.API, 'get', fake_volume_get_not_found)
self.assertRaises(exception.InvalidBDMVolume,
self.compute_api._validate_bdm,
self.context, self.instance,
instance_type, all_mappings)
# Check that the volume status is 'available' and attach_status is
# 'detached' and accept the request if so
def fake_volume_get_ok(self, context, volume_id):
return {'id': volume_id,
'status': 'available',
'attach_status': 'detached'}
self.stubs.Set(cinder.API, 'get', fake_volume_get_ok)
self.compute_api._validate_bdm(self.context, self.instance,
instance_type, all_mappings)
def test_volume_snapshot_create(self):
self.assertRaises(messaging.ExpectedException,
self.compute.volume_snapshot_create, self.context,
self.instance_object, 'fake_id', {})
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(NotImplementedError,
self.compute.volume_snapshot_create, self.context,
self.instance_object, 'fake_id', {})
def test_volume_snapshot_delete(self):
self.assertRaises(messaging.ExpectedException,
self.compute.volume_snapshot_delete, self.context,
self.instance_object, 'fake_id', 'fake_id2', {})
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(NotImplementedError,
self.compute.volume_snapshot_delete, self.context,
self.instance_object, 'fake_id', 'fake_id2', {})
@mock.patch.object(cinder.API, 'create',
side_effect=exception.OverQuota(overs='volumes'))
def test_prep_block_device_over_quota_failure(self, mock_create):
instance = self._create_fake_instance_obj()
bdms = [
block_device.BlockDeviceDict({
'boot_index': 0,
'guest_format': None,
'connection_info': None,
'device_type': u'disk',
'source_type': 'image',
'destination_type': 'volume',
'volume_size': 1,
'image_id': 1,
'device_name': '/dev/vdb',
})]
self.assertRaises(exception.VolumeLimitExceeded,
compute_manager.ComputeManager()._prep_block_device,
self.context, instance, bdms)
self.assertTrue(mock_create.called)
@mock.patch.object(nova.virt.block_device, 'get_swap')
@mock.patch.object(nova.virt.block_device, 'convert_blanks')
@mock.patch.object(nova.virt.block_device, 'convert_images')
@mock.patch.object(nova.virt.block_device, 'convert_snapshots')
@mock.patch.object(nova.virt.block_device, 'convert_volumes')
@mock.patch.object(nova.virt.block_device, 'convert_ephemerals')
@mock.patch.object(nova.virt.block_device, 'convert_swap')
@mock.patch.object(nova.virt.block_device, 'attach_block_devices')
def test_prep_block_device_with_blanks(self, attach_block_devices,
convert_swap, convert_ephemerals,
convert_volumes, convert_snapshots,
convert_images, convert_blanks,
get_swap):
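# All the convert_* helpers are mocked so _prep_block_device simply
# routes the bdms through attach_block_devices; the resulting
# block_device_info should list every bdm plus the root device name.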
instance = self._create_fake_instance_obj()
instance['root_device_name'] = '/dev/vda'
root_volume = objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict({
'instance_uuid': 'fake-instance',
'source_type': 'image',
'destination_type': 'volume',
'image_id': 'fake-image-id-1',
'volume_size': 1,
'boot_index': 0}))
blank_volume1 = objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict({
'instance_uuid': 'fake-instance',
'source_type': 'blank',
'destination_type': 'volume',
'volume_size': 1,
'boot_index': 1}))
blank_volume2 = objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict({
'instance_uuid': 'fake-instance',
'source_type': 'blank',
'destination_type': 'volume',
'volume_size': 1,
'boot_index': 2}))
bdms = [blank_volume1, blank_volume2, root_volume]
def fake_attach_block_devices(bdm, *args, **kwargs):
return bdm
convert_swap.return_value = []
convert_ephemerals.return_value = []
convert_volumes.return_value = [blank_volume1, blank_volume2]
convert_snapshots.return_value = []
convert_images.return_value = [root_volume]
convert_blanks.return_value = []
attach_block_devices.side_effect = fake_attach_block_devices
get_swap.return_value = []
expected_block_device_info = {
'root_device_name': '/dev/vda',
'swap': [],
'ephemerals': [],
'block_device_mapping': bdms
}
manager = compute_manager.ComputeManager()
manager.use_legacy_block_device_info = False
block_device_info = manager._prep_block_device(self.context, instance,
bdms)
convert_swap.assert_called_once_with(bdms)
convert_ephemerals.assert_called_once_with(bdms)
bdm_args = tuple(bdms)
convert_volumes.assert_called_once_with(bdm_args)
convert_snapshots.assert_called_once_with(bdm_args)
convert_images.assert_called_once_with(bdm_args)
convert_blanks.assert_called_once_with(bdm_args)
self.assertEqual(expected_block_device_info, block_device_info)
self.assertEqual(1, attach_block_devices.call_count)
get_swap.assert_called_once_with([])
class ComputeTestCase(BaseTestCase):
def setUp(self):
super(ComputeTestCase, self).setUp()
self.useFixture(fixtures.SpawnIsSynchronousFixture())
def test_wrap_instance_fault(self):
inst = {"uuid": "fake_uuid"}
called = {'fault_added': False}
def did_it_add_fault(*args):
called['fault_added'] = True
self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
did_it_add_fault)
@compute_manager.wrap_instance_fault
def failer(self2, context, instance):
raise NotImplementedError()
self.assertRaises(NotImplementedError, failer,
self.compute, self.context, instance=inst)
self.assertTrue(called['fault_added'])
def test_wrap_instance_fault_instance_in_args(self):
inst = {"uuid": "fake_uuid"}
called = {'fault_added': False}
def did_it_add_fault(*args):
called['fault_added'] = True
self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
did_it_add_fault)
@compute_manager.wrap_instance_fault
def failer(self2, context, instance):
raise NotImplementedError()
self.assertRaises(NotImplementedError, failer,
self.compute, self.context, inst)
self.assertTrue(called['fault_added'])
def test_wrap_instance_fault_no_instance(self):
inst = {"uuid": "fake_uuid"}
called = {'fault_added': False}
def did_it_add_fault(*args):
called['fault_added'] = True
self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
did_it_add_fault)
@compute_manager.wrap_instance_fault
def failer(self2, context, instance):
raise exception.InstanceNotFound(instance_id=instance['uuid'])
self.assertRaises(exception.InstanceNotFound, failer,
self.compute, self.context, inst)
self.assertFalse(called['fault_added'])
@mock.patch.object(objects.InstanceActionEvent, 'event_start')
@mock.patch.object(objects.InstanceActionEvent,
'event_finish_with_failure')
def test_wrap_instance_event(self, mock_finish, mock_start):
inst = {"uuid": "fake_uuid"}
@compute_manager.wrap_instance_event
def fake_event(self, context, instance):
pass
fake_event(self.compute, self.context, instance=inst)
self.assertTrue(mock_start.called)
self.assertTrue(mock_finish.called)
@mock.patch.object(objects.InstanceActionEvent, 'event_start')
@mock.patch.object(objects.InstanceActionEvent,
'event_finish_with_failure')
def test_wrap_instance_event_return(self, mock_finish, mock_start):
inst = {"uuid": "fake_uuid"}
@compute_manager.wrap_instance_event
def fake_event(self, context, instance):
return True
retval = fake_event(self.compute, self.context, instance=inst)
self.assertTrue(retval)
self.assertTrue(mock_start.called)
self.assertTrue(mock_finish.called)
@mock.patch.object(objects.InstanceActionEvent, 'event_start')
@mock.patch.object(objects.InstanceActionEvent,
'event_finish_with_failure')
def test_wrap_instance_event_log_exception(self, mock_finish, mock_start):
inst = {"uuid": "fake_uuid"}
@compute_manager.wrap_instance_event
def fake_event(self2, context, instance):
raise exception.NovaException()
self.assertRaises(exception.NovaException, fake_event,
self.compute, self.context, instance=inst)
self.assertTrue(mock_start.called)
self.assertTrue(mock_finish.called)
args, kwargs = mock_finish.call_args
self.assertIsInstance(kwargs['exc_val'], exception.NovaException)
def test_object_compat(self):
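# The object_compat decorator should turn a db instance dict into an
# objects.Instance before calling the wrapped method.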
db_inst = fake_instance.fake_db_instance()
@compute_manager.object_compat
def test_fn(_self, context, instance):
self.assertIsInstance(instance, objects.Instance)
self.assertEqual(instance.uuid, db_inst['uuid'])
self.assertEqual(instance.metadata, db_inst['metadata'])
self.assertEqual(instance.system_metadata,
db_inst['system_metadata'])
test_fn(None, self.context, instance=db_inst)
def test_object_compat_no_metas(self):
# Tests that we don't try to set metadata/system_metadata on the
# instance object using fields that aren't in the db object.
db_inst = fake_instance.fake_db_instance()
db_inst.pop('metadata', None)
db_inst.pop('system_metadata', None)
@compute_manager.object_compat
def test_fn(_self, context, instance):
self.assertIsInstance(instance, objects.Instance)
self.assertEqual(instance.uuid, db_inst['uuid'])
self.assertNotIn('metadata', instance)
self.assertNotIn('system_metadata', instance)
test_fn(None, self.context, instance=db_inst)
def test_object_compat_more_positional_args(self):
db_inst = fake_instance.fake_db_instance()
@compute_manager.object_compat
def test_fn(_self, context, instance, pos_arg_1, pos_arg_2):
self.assertIsInstance(instance, objects.Instance)
self.assertEqual(instance.uuid, db_inst['uuid'])
self.assertEqual(instance.metadata, db_inst['metadata'])
self.assertEqual(instance.system_metadata,
db_inst['system_metadata'])
self.assertEqual(pos_arg_1, 'fake_pos_arg1')
self.assertEqual(pos_arg_2, 'fake_pos_arg2')
test_fn(None, self.context, db_inst, 'fake_pos_arg1', 'fake_pos_arg2')
def test_create_instance_with_img_ref_associates_config_drive(self):
# Make sure create associates a config drive.
instance = self._create_fake_instance_obj(
params={'config_drive': '1234', })
try:
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[])
instances = db.instance_get_all(self.context)
instance = instances[0]
self.assertTrue(instance['config_drive'])
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_create_instance_associates_config_drive(self):
# Make sure create associates a config drive.
instance = self._create_fake_instance_obj(
params={'config_drive': '1234', })
try:
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[])
instances = db.instance_get_all(self.context)
instance = instances[0]
self.assertTrue(instance['config_drive'])
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_create_instance_unlimited_memory(self):
# A memory limit of None (the default) means unlimited.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
params = {"memory_mb": 999999999999}
filter_properties = {'limits': {'memory_mb': None}}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {},
filter_properties,
block_device_mapping=[])
self.assertEqual(999999999999, self.rt.compute_node['memory_mb_used'])
def test_create_instance_unlimited_disk(self):
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
params = {"root_gb": 999999999999,
"ephemeral_gb": 99999999999}
filter_properties = {'limits': {'disk_gb': None}}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {},
filter_properties, block_device_mapping=[])
def test_create_multiple_instances_then_starve(self):
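# Build instances until the memory/disk limits are exhausted; the
# third request exceeds the limits, so resource usage should remain
# unchanged.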
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
limits = {'memory_mb': 4096, 'disk_gb': 1000}
params = {"memory_mb": 1024, "root_gb": 128, "ephemeral_gb": 128}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[], limits=limits)
self.assertEqual(1024, self.rt.compute_node['memory_mb_used'])
self.assertEqual(256, self.rt.compute_node['local_gb_used'])
params = {"memory_mb": 2048, "root_gb": 256, "ephemeral_gb": 256}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[], limits=limits)
self.assertEqual(3072, self.rt.compute_node['memory_mb_used'])
self.assertEqual(768, self.rt.compute_node['local_gb_used'])
params = {"memory_mb": 8192, "root_gb": 8192, "ephemeral_gb": 8192}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance,
{}, {}, {}, block_device_mapping=[], limits=limits)
self.assertEqual(3072, self.rt.compute_node['memory_mb_used'])
self.assertEqual(768, self.rt.compute_node['local_gb_used'])
def test_create_multiple_instance_with_neutron_port(self):
instance_type = flavors.get_default_flavor()
def fake_is_neutron():
return True
self.stubs.Set(utils, 'is_neutron', fake_is_neutron)
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id='adadds')])
self.assertRaises(exception.MultiplePortsNotApplicable,
self.compute_api.create,
self.context,
instance_type=instance_type,
image_href=None,
max_count=2,
requested_networks=requested_networks)
def test_create_instance_with_oversubscribed_ram(self):
# Test passing of oversubscribed ram policy from the scheduler.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource(NODENAME)
total_mem_mb = resources['memory_mb']
oversub_limit_mb = total_mem_mb * 1.5
instance_mb = int(total_mem_mb * 1.45)
# build an instance, specifying an amount of memory that exceeds
# total_mem_mb, but is less than the oversubscribed limit:
params = {"memory_mb": instance_mb, "root_gb": 128,
"ephemeral_gb": 128}
instance = self._create_fake_instance_obj(params)
limits = {'memory_mb': oversub_limit_mb}
filter_properties = {'limits': limits}
self.compute.build_and_run_instance(self.context, instance, {}, {},
filter_properties, block_device_mapping=[])
self.assertEqual(instance_mb, self.rt.compute_node['memory_mb_used'])
def test_create_instance_with_oversubscribed_ram_fail(self):
"""Test passing of oversubscribed ram policy from the scheduler, but
with insufficient memory.
"""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource(NODENAME)
total_mem_mb = resources['memory_mb']
oversub_limit_mb = total_mem_mb * 1.5
instance_mb = int(total_mem_mb * 1.55)
# build an instance, specifying an amount of memory that exceeds
# both total_mem_mb and the oversubscribed limit:
params = {"memory_mb": instance_mb, "root_gb": 128,
"ephemeral_gb": 128}
instance = self._create_fake_instance_obj(params)
filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}
self.compute.build_and_run_instance(self.context, instance,
{}, {}, filter_properties, block_device_mapping=[])
def test_create_instance_with_oversubscribed_cpu(self):
# Test passing of oversubscribed cpu policy from the scheduler.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
limits = {'vcpu': 3}
filter_properties = {'limits': limits}
# get total vcpus as reported by virt driver:
resources = self.compute.driver.get_available_resource(NODENAME)
self.assertEqual(1, resources['vcpus'])
# build an instance, specifying more vcpus than the host has
# available, but less than the oversubscribed vcpu limit:
params = {"memory_mb": 10, "root_gb": 1,
"ephemeral_gb": 1, "vcpus": 2}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {},
filter_properties, block_device_mapping=[])
self.assertEqual(2, self.rt.compute_node['vcpus_used'])
# create one more instance:
params = {"memory_mb": 10, "root_gb": 1,
"ephemeral_gb": 1, "vcpus": 1}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {},
filter_properties, block_device_mapping=[])
self.assertEqual(3, self.rt.compute_node['vcpus_used'])
# delete the instance:
instance['vm_state'] = vm_states.DELETED
self.rt.update_usage(self.context,
instance=instance)
self.assertEqual(2, self.rt.compute_node['vcpus_used'])
# now oversubscribe vcpus and fail:
params = {"memory_mb": 10, "root_gb": 1,
"ephemeral_gb": 1, "vcpus": 2}
instance = self._create_fake_instance_obj(params)
limits = {'vcpu': 3}
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[], limits=limits)
self.assertEqual(vm_states.ERROR, instance.vm_state)
def test_create_instance_with_oversubscribed_disk(self):
# Test passing of oversubscribed disk policy from the scheduler.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
# get total disk as reported by virt driver:
resources = self.compute.driver.get_available_resource(NODENAME)
total_disk_gb = resources['local_gb']
oversub_limit_gb = total_disk_gb * 1.5
instance_gb = int(total_disk_gb * 1.45)
# build an instance, specifying an amount of disk that exceeds
# total_disk_gb, but is less than the oversubscribed limit:
params = {"root_gb": instance_gb, "memory_mb": 10}
instance = self._create_fake_instance_obj(params)
limits = {'disk_gb': oversub_limit_gb}
filter_properties = {'limits': limits}
self.compute.build_and_run_instance(self.context, instance, {}, {},
filter_properties, block_device_mapping=[])
self.assertEqual(instance_gb, self.rt.compute_node['local_gb_used'])
def test_create_instance_with_oversubscribed_disk_fail(self):
"""Test passing of oversubscribed disk policy from the scheduler, but
with insufficient disk.
"""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
# get total disk as reported by virt driver:
resources = self.compute.driver.get_available_resource(NODENAME)
total_disk_gb = resources['local_gb']
oversub_limit_gb = total_disk_gb * 1.5
instance_gb = int(total_disk_gb * 1.55)
# build an instance, specifying an amount of disk that exceeds
# both total_disk_gb and the oversubscribed limit:
params = {"root_gb": instance_gb, "memory_mb": 10}
instance = self._create_fake_instance_obj(params)
limits = {'disk_gb': oversub_limit_gb}
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[], limits=limits)
self.assertEqual(vm_states.ERROR, instance.vm_state)
def test_create_instance_without_node_param(self):
instance = self._create_fake_instance_obj({'node': None})
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
instances = db.instance_get_all(self.context)
instance = instances[0]
self.assertEqual(NODENAME, instance['node'])
def test_create_instance_no_image(self):
# Create instance with no image provided.
params = {'image_ref': ''}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
self._assert_state({'vm_state': vm_states.ACTIVE,
'task_state': None})
@testtools.skipIf(test_utils.is_osx(),
'IPv6 pretty-printing broken on OSX, see bug 1409135')
def test_default_access_ip(self):
self.flags(default_access_ip_network_name='test1')
fake_network.unset_stub_network_methods(self.stubs)
instance = self._create_fake_instance_obj()
orig_update = self.compute._instance_update
# Make sure the access_ip_* updates happen in the same DB
# update as the set to ACTIVE.
def _instance_update(ctxt, instance_uuid, **kwargs):
if kwargs.get('vm_state', None) == vm_states.ACTIVE:
self.assertEqual(kwargs['access_ip_v4'], '192.168.1.100')
self.assertEqual(kwargs['access_ip_v6'], '2001:db8:0:1::1')
return orig_update(ctxt, instance_uuid, **kwargs)
self.stubs.Set(self.compute, '_instance_update', _instance_update)
try:
self.compute.build_and_run_instance(self.context, instance, {},
{}, {}, block_device_mapping=[])
instances = db.instance_get_all(self.context)
instance = instances[0]
self.assertEqual(instance['access_ip_v4'], '192.168.1.100')
self.assertEqual(instance['access_ip_v6'],
'2001:db8:0:1:dcad:beff:feef:1')
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_no_default_access_ip(self):
instance = self._create_fake_instance_obj()
try:
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[])
instances = db.instance_get_all(self.context)
instance = instances[0]
self.assertFalse(instance['access_ip_v4'])
self.assertFalse(instance['access_ip_v6'])
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_fail_to_schedule_persists(self):
# check the persistence of the ERROR(scheduling) state.
params = {'vm_state': vm_states.ERROR,
'task_state': task_states.SCHEDULING}
self._create_fake_instance_obj(params=params)
# check state is failed even after the periodic poll
self.compute.periodic_tasks(context.get_admin_context())
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': task_states.SCHEDULING})
def test_run_instance_setup_block_device_mapping_fail(self):
"""block device mapping failure test.
Make sure that when there is a block device mapping problem,
the instance goes to ERROR state, cleaning the task state.
"""
def fake(*args, **kwargs):
raise exception.InvalidBDM()
self.stubs.Set(nova.compute.manager.ComputeManager,
'_prep_block_device', fake)
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(
self.context, instance=instance, image={},
request_spec={}, block_device_mapping=[],
filter_properties={}, requested_networks=[],
injected_files=None, admin_password=None,
node=None)
# check state is failed even after the periodic poll
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
self.compute.periodic_tasks(context.get_admin_context())
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
@mock.patch('nova.compute.manager.ComputeManager._prep_block_device',
side_effect=exception.OverQuota(overs='volumes'))
def test_setup_block_device_over_quota_fail(self, mock_prep_block_dev):
"""block device mapping over quota failure test.
Make sure that when we are over the volume quota according to the
Cinder client, the appropriate exception is raised and the instance
goes to the ERROR state, cleaning the task state.
"""
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(
self.context, instance=instance, request_spec={},
filter_properties={}, requested_networks=[],
injected_files=None, admin_password=None,
node=None, block_device_mapping=[], image={})
# check state is failed even after the periodic poll
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
self.compute.periodic_tasks(context.get_admin_context())
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
self.assertTrue(mock_prep_block_dev.called)
def test_run_instance_spawn_fail(self):
"""spawn failure test.
Make sure that when there is a spawning problem,
the instance goes to ERROR state, cleaning the task state.
"""
def fake(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute.driver, 'spawn', fake)
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(
self.context, instance=instance, request_spec={},
filter_properties={}, requested_networks=[],
injected_files=None, admin_password=None,
block_device_mapping=[], image={}, node=None)
# check state is failed even after the periodic poll
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
self.compute.periodic_tasks(context.get_admin_context())
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
def test_run_instance_dealloc_network_instance_not_found(self):
"""spawn network deallocate test.
        Make sure that when an instance is not found during spawn,
        the network is deallocated.
"""
instance = self._create_fake_instance_obj()
def fake(*args, **kwargs):
raise exception.InstanceNotFound(instance_id="fake")
self.stubs.Set(self.compute.driver, 'spawn', fake)
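        # Record the expectation that _deallocate_network is called when
        # spawn raises InstanceNotFound; mox verifies the recorded call
        # once the test completes.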
self.mox.StubOutWithMock(self.compute, '_deallocate_network')
self.compute._deallocate_network(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[])
def test_run_instance_bails_on_missing_instance(self):
        # Make sure that build_and_run_instance() will quickly ignore a
        # deleted instance.
instance = self._create_fake_instance_obj()
with mock.patch.object(instance, 'save') as mock_save:
mock_save.side_effect = exception.InstanceNotFound(instance_id=1)
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[])
self.assertTrue(mock_save.called)
def test_run_instance_bails_on_deleting_instance(self):
        # Make sure that build_and_run_instance() will quickly ignore an
        # instance that is being deleted.
instance = self._create_fake_instance_obj()
with mock.patch.object(instance, 'save') as mock_save:
mock_save.side_effect = exception.UnexpectedDeletingTaskStateError(
instance_uuid=instance['uuid'],
expected={'task_state': 'bar'},
actual={'task_state': 'foo'})
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[])
self.assertTrue(mock_save.called)
def test_can_terminate_on_error_state(self):
# Make sure that the instance can be terminated in ERROR state.
# check failed to schedule --> terminate
params = {'vm_state': vm_states.ERROR}
instance = self._create_fake_instance_obj(params=params)
self.compute.terminate_instance(self.context, instance, [], [])
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
self.context, instance['uuid'])
# Double check it's not there for admins, either.
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
self.context.elevated(), instance['uuid'])
def test_run_terminate(self):
        # Make sure it is possible to run and terminate an instance.
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
instances = db.instance_get_all(self.context)
LOG.info("Running instances: %s", instances)
self.assertEqual(len(instances), 1)
self.compute.terminate_instance(self.context, instance, [], [])
instances = db.instance_get_all(self.context)
LOG.info("After terminating instances: %s", instances)
self.assertEqual(len(instances), 0)
admin_deleted_context = context.get_admin_context(
read_deleted="only")
instance = db.instance_get_by_uuid(admin_deleted_context,
instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.DELETED)
self.assertIsNone(instance['task_state'])
def test_run_terminate_with_vol_attached(self):
"""Make sure it is possible to run and terminate instance with volume
attached
"""
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
instances = db.instance_get_all(self.context)
LOG.info("Running instances: %s", instances)
self.assertEqual(len(instances), 1)
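        # Stub out the Cinder API and compute RPC calls so attach_volume
        # does not need a real volume service behind it.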
def fake_check_attach(*args, **kwargs):
pass
def fake_reserve_volume(*args, **kwargs):
pass
def fake_volume_get(self, context, volume_id):
return {'id': volume_id}
def fake_terminate_connection(self, context, volume_id, connector):
pass
def fake_detach(self, context, volume_id):
pass
bdms = []
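        # Stand-in for the compute RPC call: create a real BDM record for
        # the fake volume and track it so terminate_instance can clean it up.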
def fake_rpc_reserve_block_device_name(self, context, instance, device,
volume_id, **kwargs):
bdm = objects.BlockDeviceMapping(
**{'context': context,
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 1,
'instance_uuid': instance['uuid'],
'device_name': '/dev/vdc'})
bdm.create()
bdms.append(bdm)
return bdm
self.stubs.Set(cinder.API, 'get', fake_volume_get)
self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
self.stubs.Set(cinder.API, 'reserve_volume',
fake_reserve_volume)
self.stubs.Set(cinder.API, 'terminate_connection',
fake_terminate_connection)
self.stubs.Set(cinder.API, 'detach', fake_detach)
self.stubs.Set(compute_rpcapi.ComputeAPI,
'reserve_block_device_name',
fake_rpc_reserve_block_device_name)
self.compute_api.attach_volume(self.context, instance, 1,
'/dev/vdc')
self.compute.terminate_instance(self.context,
instance, bdms, [])
instances = db.instance_get_all(self.context)
LOG.info("After terminating instances: %s", instances)
self.assertEqual(len(instances), 0)
bdms = db.block_device_mapping_get_all_by_instance(self.context,
instance['uuid'])
self.assertEqual(len(bdms), 0)
def test_run_terminate_no_image(self):
"""Make sure instance started without image (from volume)
can be termintad without issues
"""
params = {'image_ref': ''}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
self._assert_state({'vm_state': vm_states.ACTIVE,
'task_state': None})
self.compute.terminate_instance(self.context, instance, [], [])
instances = db.instance_get_all(self.context)
self.assertEqual(len(instances), 0)
def test_terminate_no_network(self):
# This is as reported in LP bug 1008875
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
instances = db.instance_get_all(self.context)
LOG.info("Running instances: %s", instances)
self.assertEqual(len(instances), 1)
self.mox.ReplayAll()
self.compute.terminate_instance(self.context, instance, [], [])
instances = db.instance_get_all(self.context)
LOG.info("After terminating instances: %s", instances)
self.assertEqual(len(instances), 0)
def test_run_terminate_timestamps(self):
# Make sure timestamps are set for launched and destroyed.
instance = self._create_fake_instance_obj()
instance['launched_at'] = None
self.assertIsNone(instance['launched_at'])
self.assertIsNone(instance['deleted_at'])
launch = timeutils.utcnow()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
instance.refresh()
self.assertTrue(instance['launched_at'].replace(tzinfo=None) > launch)
self.assertIsNone(instance['deleted_at'])
terminate = timeutils.utcnow()
self.compute.terminate_instance(self.context, instance, [], [])
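        # The instance row is soft-deleted at this point, so it is only
        # visible with a read_deleted='only' context.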
with utils.temporary_mutation(self.context, read_deleted='only'):
instance = db.instance_get_by_uuid(self.context,
instance['uuid'])
self.assertTrue(instance['launched_at'].replace(
tzinfo=None) < terminate)
self.assertTrue(instance['deleted_at'].replace(
tzinfo=None) > terminate)
def test_run_terminate_deallocate_net_failure_sets_error_state(self):
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
instances = db.instance_get_all(self.context)
LOG.info("Running instances: %s", instances)
self.assertEqual(len(instances), 1)
def _fake_deallocate_network(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute, '_deallocate_network',
_fake_deallocate_network)
try:
self.compute.terminate_instance(self.context, instance, [], [])
except test.TestingException:
pass
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ERROR)
def test_stop(self):
# Ensure instance can be stopped.
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[])
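        # Push the POWERING_OFF task state straight into the DB, then reload
        # a full Instance object (with metadata attrs) for the stop call.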
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_OFF})
inst_uuid = instance['uuid']
extra = ['system_metadata', 'metadata']
inst_obj = objects.Instance.get_by_uuid(self.context,
inst_uuid,
expected_attrs=extra)
self.compute.stop_instance(self.context, instance=inst_obj,
clean_shutdown=True)
self.compute.terminate_instance(self.context, instance, [], [])
def test_start(self):
# Ensure instance can be started.
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_OFF})
extra = ['system_metadata', 'metadata']
inst_uuid = instance['uuid']
inst_obj = objects.Instance.get_by_uuid(self.context,
inst_uuid,
expected_attrs=extra)
self.compute.stop_instance(self.context, instance=inst_obj,
clean_shutdown=True)
inst_obj.task_state = task_states.POWERING_ON
inst_obj.save()
self.compute.start_instance(self.context, instance=inst_obj)
self.compute.terminate_instance(self.context, instance, [], [])
def test_start_shelved_instance(self):
# Ensure shelved instance can be started.
self.deleted_image_id = None
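        # The fake image service records the deleted image id here so the
        # test can assert that the shelved snapshot is removed on start.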
def fake_delete(self_, ctxt, image_id):
self.deleted_image_id = image_id
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
instance = self._create_fake_instance_obj()
image = {'id': 'fake_id'}
# Adding shelved information to instance system metadata.
shelved_time = timeutils.strtime(at=timeutils.utcnow())
instance.system_metadata['shelved_at'] = shelved_time
instance.system_metadata['shelved_image_id'] = image['id']
instance.system_metadata['shelved_host'] = 'fake-mini'
instance.save()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_OFF,
"vm_state": vm_states.SHELVED})
extra = ['system_metadata', 'metadata']
inst_uuid = instance['uuid']
inst_obj = objects.Instance.get_by_uuid(self.context,
inst_uuid,
expected_attrs=extra)
self.compute.stop_instance(self.context, instance=inst_obj,
clean_shutdown=True)
inst_obj.task_state = task_states.POWERING_ON
inst_obj.save()
self.compute.start_instance(self.context, instance=inst_obj)
self.assertEqual(image['id'], self.deleted_image_id)
self.assertNotIn('shelved_at', inst_obj.system_metadata)
self.assertNotIn('shelved_image_id', inst_obj.system_metadata)
self.assertNotIn('shelved_host', inst_obj.system_metadata)
self.compute.terminate_instance(self.context, instance, [], [])
def test_stop_start_no_image(self):
params = {'image_ref': ''}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_OFF})
extra = ['system_metadata', 'metadata']
inst_uuid = instance['uuid']
inst_obj = objects.Instance.get_by_uuid(self.context,
inst_uuid,
expected_attrs=extra)
self.compute.stop_instance(self.context, instance=inst_obj,
clean_shutdown=True)
inst_obj.task_state = task_states.POWERING_ON
inst_obj.save()
self.compute.start_instance(self.context, instance=inst_obj)
self.compute.terminate_instance(self.context, instance, [], [])
def test_rescue(self):
# Ensure instance can be rescued and unrescued.
called = {'rescued': False,
'unrescued': False}
def fake_rescue(self, context, instance_ref, network_info, image_meta,
rescue_password):
called['rescued'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)
def fake_unrescue(self, instance_ref, network_info):
called['unrescued'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
fake_unrescue)
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
instance.task_state = task_states.RESCUING
instance.save()
self.compute.rescue_instance(self.context, instance, None, None, True)
self.assertTrue(called['rescued'])
instance.task_state = task_states.UNRESCUING
instance.save()
self.compute.unrescue_instance(self.context, instance)
self.assertTrue(called['unrescued'])
self.compute.terminate_instance(self.context, instance, [], [])
def test_rescue_notifications(self):
# Ensure notifications on instance rescue.
def fake_rescue(self, context, instance_ref, network_info, image_meta,
rescue_password):
pass
self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
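        # Drop the notifications emitted while building the instance so that
        # only the rescue notifications are asserted below.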
fake_notifier.NOTIFICATIONS = []
instance.task_state = task_states.RESCUING
instance.save()
self.compute.rescue_instance(self.context, instance, None, True, True)
expected_notifications = ['compute.instance.rescue.start',
'compute.instance.exists',
'compute.instance.rescue.end']
self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS],
expected_notifications)
for n, msg in enumerate(fake_notifier.NOTIFICATIONS):
self.assertEqual(msg.event_type, expected_notifications[n])
self.assertEqual(msg.priority, 'INFO')
payload = msg.payload
self.assertEqual(payload['tenant_id'], self.project_id)
self.assertEqual(payload['user_id'], self.user_id)
self.assertEqual(payload['instance_id'], instance.uuid)
self.assertEqual(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEqual(str(payload['instance_type_id']), str(type_id))
self.assertIn('display_name', payload)
self.assertIn('created_at', payload)
self.assertIn('launched_at', payload)
image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEqual(payload['image_ref_url'], image_ref_url)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertIn('rescue_image_name', msg.payload)
self.compute.terminate_instance(self.context, instance, [], [])
def test_unrescue_notifications(self):
        # Ensure notifications on instance unrescue.
def fake_unrescue(self, instance_ref, network_info):
pass
self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
fake_unrescue)
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
fake_notifier.NOTIFICATIONS = []
instance.task_state = task_states.UNRESCUING
instance.save()
self.compute.unrescue_instance(self.context, instance)
expected_notifications = ['compute.instance.unrescue.start',
'compute.instance.unrescue.end']
self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS],
expected_notifications)
for n, msg in enumerate(fake_notifier.NOTIFICATIONS):
self.assertEqual(msg.event_type, expected_notifications[n])
self.assertEqual(msg.priority, 'INFO')
payload = msg.payload
self.assertEqual(payload['tenant_id'], self.project_id)
self.assertEqual(payload['user_id'], self.user_id)
self.assertEqual(payload['instance_id'], instance.uuid)
self.assertEqual(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEqual(str(payload['instance_type_id']), str(type_id))
self.assertIn('display_name', payload)
self.assertIn('created_at', payload)
self.assertIn('launched_at', payload)
image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEqual(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context, instance, [], [])
def test_rescue_handle_err(self):
        # If the driver fails to rescue, the instance state should go to ERROR
# and the exception should be converted to InstanceNotRescuable
inst_obj = self._create_fake_instance_obj()
self.mox.StubOutWithMock(self.compute, '_get_rescue_image')
self.mox.StubOutWithMock(nova.virt.fake.FakeDriver, 'rescue')
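        # Record expectations: _get_rescue_image returns empty image meta and
        # the driver's rescue() raises, which should surface to the caller as
        # InstanceNotRescuable with the instance left in ERROR.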
self.compute._get_rescue_image(
mox.IgnoreArg(), inst_obj, mox.IgnoreArg()).AndReturn({})
nova.virt.fake.FakeDriver.rescue(
mox.IgnoreArg(), inst_obj, [], mox.IgnoreArg(), 'password'
).AndRaise(RuntimeError("Try again later"))
self.mox.ReplayAll()
expected_message = ('Instance %s cannot be rescued: '
'Driver Error: Try again later' % inst_obj.uuid)
with testtools.ExpectedException(
exception.InstanceNotRescuable, expected_message):
self.compute.rescue_instance(
self.context, instance=inst_obj,
rescue_password='password', rescue_image_ref=None,
clean_shutdown=True)
self.assertEqual(vm_states.ERROR, inst_obj.vm_state)
@mock.patch.object(image_api.API, "get")
@mock.patch.object(nova.virt.fake.FakeDriver, "rescue")
def test_rescue_with_image_specified(self, mock_rescue,
mock_image_get):
image_ref = "image-ref"
rescue_image_meta = {}
params = {"task_state": task_states.RESCUING}
instance = self._create_fake_instance_obj(params=params)
ctxt = context.get_admin_context()
mock_context = mock.Mock()
mock_context.elevated.return_value = ctxt
mock_image_get.return_value = rescue_image_meta
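        # The test expects the image lookup and the driver rescue call to use
        # the elevated context (see the assertions below).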
self.compute.rescue_instance(mock_context, instance=instance,
rescue_password="password", rescue_image_ref=image_ref,
clean_shutdown=True)
mock_image_get.assert_called_with(ctxt, image_ref)
mock_rescue.assert_called_with(ctxt, instance, [],
rescue_image_meta, 'password')
self.compute.terminate_instance(ctxt, instance, [], [])
@mock.patch.object(image_api.API, "get")
@mock.patch.object(nova.virt.fake.FakeDriver, "rescue")
def test_rescue_with_base_image_when_image_not_specified(self,
mock_rescue, mock_image_get):
image_ref = "image-ref"
system_meta = {"image_base_image_ref": image_ref}
rescue_image_meta = {}
params = {"task_state": task_states.RESCUING,