Move instance delete to new-world BDM objects
Move all the code paths related to instance deletion to new-world objects. This includes soft and hard deletes, instance init on compute startup, and the reclaim_queued_deletes and _cleanup_running_deleted_instances periodic tasks.

It introduces an additional database call in the _shutdown_instance method, as there are still paths that call this method without passing it BDM objects. This will be removed as soon as the _reschedule_or_error (basically meaning instance spawn) code paths are moved to BDM objects.

Part of blueprint: icehouse-objects
Part of blueprint: clean-up-legacy-block-device-mapping

Change-Id: Ic63a3afb77362f0797c546a9505fa4bd7a9b986e
parent 4897b18a40 · commit f5071bd1ac
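In practice, the migration swaps dict-style access on rows returned by the DB layer for attribute access on BlockDeviceMapping objects fetched through BlockDeviceMappingList. A minimal sketch of the pattern follows; the cleanup_volumes function and the volume_api parameter are hypothetical stand-ins, not code from this commit, but the object-side calls mirror those introduced in the diff below.

    # Minimal sketch (not from the commit): contrasts the legacy dict-style
    # BDM access this change removes with the object-style access it adds.
    from nova.objects import block_device as block_device_obj


    def cleanup_volumes(context, instance, volume_api):
        # New-world style: fetch a BlockDeviceMappingList and use attributes.
        bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        for bdm in bdms:
            if bdm.is_volume:                  # was: if bdm['volume_id']:
                volume_api.detach(context, bdm.volume_id)
                if bdm.delete_on_termination:  # was: bdm['delete_on_termination']
                    volume_api.delete(context, bdm.volume_id)
            # was: self.db.block_device_mapping_destroy(context, bdm['id'])
            bdm.destroy(context)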
@@ -1355,9 +1355,8 @@ class API(base.Base):
             return

         host = instance['host']
-        bdms = block_device.legacy_mapping(
-            self.db.block_device_mapping_get_all_by_instance(
-                context, instance.uuid))
+        bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
+            context, instance.uuid)
         reservations = None

         if context.is_admin and context.project_id != instance.project_id:
@@ -1593,7 +1592,7 @@ class API(base.Base):

         # cleanup volumes
         for bdm in bdms:
-            if bdm['volume_id']:
+            if bdm.is_volume:
                 # NOTE(vish): We don't have access to correct volume
                 #             connector info, so just pass a fake
                 #             connector. This can be improved when we
@@ -1601,15 +1600,15 @@ class API(base.Base):
                 connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
                 try:
                     self.volume_api.terminate_connection(context,
-                                                         bdm['volume_id'],
+                                                         bdm.volume_id,
                                                          connector)
-                    self.volume_api.detach(elevated, bdm['volume_id'])
-                    if bdm['delete_on_termination']:
-                        self.volume_api.delete(context, bdm['volume_id'])
+                    self.volume_api.detach(elevated, bdm.volume_id)
+                    if bdm.delete_on_termination:
+                        self.volume_api.delete(context, bdm.volume_id)
                 except Exception as exc:
                     err_str = _("Ignoring volume cleanup failure due to %s")
                     LOG.warn(err_str % exc, instance=instance)
-            self.db.block_device_mapping_destroy(context, bdm['id'])
+            bdm.destroy(context)
         cb(context, instance, bdms, local=True)
         sys_meta = instance.system_metadata
         instance.destroy()
@@ -411,7 +411,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
 class ComputeManager(manager.Manager):
     """Manages the running instances from creation to destruction."""

-    target = messaging.Target(version='3.21')
+    target = messaging.Target(version='3.22')

     def __init__(self, compute_driver=None, *args, **kwargs):
         """Load configuration options and connect to the hypervisor."""
@@ -589,7 +589,8 @@ class ComputeManager(manager.Manager):
            deleted in the DB
         """
         instance.destroy()
-        bdms = self._get_instance_volume_bdms(context, instance)
+        bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
+            context, instance.uuid)
         quotas = quotas_obj.Quotas()
         project_id, user_id = quotas_obj.ids_from_instance(context, instance)
         quotas.reserve(context, project_id=project_id, user_id=user_id,
@@ -603,21 +604,12 @@ class ComputeManager(manager.Manager):

     def _complete_deletion(self, context, instance, bdms,
                            quotas, system_meta):

         if quotas:
             quotas.commit()
-        # ensure block device mappings are not leaked
-        self.conductor_api.block_device_mapping_destroy(context, bdms)
-
-        # NOTE(ndipanov): Delete the dummy image BDM as well. This will not
-        #                 be needed once the manager code is using the image
-        if instance.image_ref:
-            # Do not convert to legacy here - we want them all
-            leftover_bdm = \
-                self.conductor_api.block_device_mapping_get_all_by_instance(
-                    context, obj_base.obj_to_primitive(instance), legacy=False)
-            self.conductor_api.block_device_mapping_destroy(context,
-                                                            leftover_bdm)
+        # ensure block device mappings are not leaked
+        for bdm in bdms:
+            bdm.destroy()

         self._notify_about_instance_usage(context, instance, "delete.end",
                 system_metadata=system_meta)
@@ -688,7 +680,8 @@ class ComputeManager(manager.Manager):
                           'the deletion now.'), instance=instance)
                 instance.obj_load_attr('metadata')
                 instance.obj_load_attr('system_metadata')
-                bdms = self._get_instance_volume_bdms(context, instance)
+                bdms = (block_device_obj.BlockDeviceMappingList.
+                        get_by_instance_uuid(context, instance.uuid))
                 self._delete_instance(context, instance, bdms)
             except Exception:
                 # we don't want that an exception blocks the init_host
@@ -1862,6 +1855,15 @@ class ComputeManager(manager.Manager):
     def _shutdown_instance(self, context, instance,
                            bdms, requested_networks=None, notify=True):
         """Shutdown an instance on this host."""
+        # TODO(ndipanov): Remove this once all code using this method is
+        #                 converted to BDM objects. Currently this means only
+        #                 _reschedule_or_error
+        if (bdms and
+            any(not isinstance(bdm, block_device_obj.BlockDeviceMapping)
+                for bdm in bdms)):
+            bdms = (block_device_obj.BlockDeviceMappingList.
+                    get_by_instance_uuid(context, instance.uuid))
+
         context = context.elevated()
         LOG.audit(_('%(action_str)s instance') % {'action_str': 'Terminating'},
                   context=context, instance=instance)
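The same guard is added to terminate_instance further down. Factored out, the pattern looks like the sketch below; the helper name _ensure_bdm_objects is hypothetical (in the commit the check is inlined). The re-fetch here is the extra database call the commit message mentions.

    # Sketch of the compat guard (hypothetical helper; inlined in the diff):
    # if any BDM arrived in the legacy dict format, discard the payload and
    # re-fetch object BDMs from the database.
    def _ensure_bdm_objects(context, instance, bdms):
        if (bdms and
                any(not isinstance(bdm, block_device_obj.BlockDeviceMapping)
                    for bdm in bdms)):
            bdms = (block_device_obj.BlockDeviceMappingList.
                    get_by_instance_uuid(context, instance.uuid))
        return bdms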
@@ -1878,9 +1880,9 @@ class ComputeManager(manager.Manager):
             network_info = network_model.NetworkInfo()

         # NOTE(vish) get bdms before destroying the instance
-        vol_bdms = self._get_volume_bdms(bdms)
+        vol_bdms = [bdm for bdm in bdms if bdm.is_volume]
         block_device_info = self._get_instance_volume_block_device_info(
-            context, instance)
+            context, instance, bdms=bdms)

         # NOTE(melwitt): attempt driver destroy before releasing ip, may
         #                want to keep ip allocated for certain failures
@@ -1903,12 +1905,12 @@ class ComputeManager(manager.Manager):
         for bdm in vol_bdms:
             try:
                 # NOTE(vish): actual driver detach done in driver.destroy, so
-                #             just tell nova-volume that we are done with it.
+                #             just tell cinder that we are done with it.
                 connector = self.driver.get_volume_connector(instance)
                 self.volume_api.terminate_connection(context,
-                                                     bdm['volume_id'],
+                                                     bdm.volume_id,
                                                      connector)
-                self.volume_api.detach(context, bdm['volume_id'])
+                self.volume_api.detach(context, bdm.volume_id)
             except exception.DiskNotFound as exc:
                 LOG.warn(_('Ignoring DiskNotFound: %s') % exc,
                          instance=instance)
@@ -2009,6 +2011,15 @@ class ComputeManager(manager.Manager):
     @wrap_instance_fault
     def terminate_instance(self, context, instance, bdms, reservations):
         """Terminate an instance on this host."""
+        # NOTE (ndipanov): If we get non-object BDMs, just get them from the
+        # db again, as this means they are sent in the old format and we want
+        # to avoid converting them back when we can just get them.
+        # Remove this when we bump the RPC major version to 4.0
+        if (bdms and
+            any(not isinstance(bdm, block_device_obj.BlockDeviceMapping)
+                for bdm in bdms)):
+            bdms = (block_device_obj.BlockDeviceMappingList.
+                    get_by_instance_uuid(context, instance.uuid))

         @utils.synchronized(instance['uuid'])
         def do_terminate_instance(instance, bdms):
@@ -5080,9 +5091,8 @@ class ComputeManager(manager.Manager):
                 use_slave=True)
         for instance in instances:
             if self._deleted_old_enough(instance, interval):
-                capi = self.conductor_api
-                bdms = capi.block_device_mapping_get_all_by_instance(
-                    context, instance)
+                bdms = (block_device_obj.BlockDeviceMappingList.
+                        get_by_instance_uuid(context, instance.uuid))
                 LOG.info(_('Reclaiming deleted instance'), instance=instance)
                 # NOTE(comstud): Quotas were already accounted for when
                 #                the instance was soft deleted, so there's no need to
@@ -5162,9 +5172,8 @@ class ComputeManager(manager.Manager):
         # NOTE(sirp): admin contexts don't ordinarily return deleted records
         with utils.temporary_mutation(context, read_deleted="yes"):
             for instance in self._running_deleted_instances(context):
-                capi = self.conductor_api
-                bdms = capi.block_device_mapping_get_all_by_instance(
-                    context, instance)
+                bdms = (block_device_obj.BlockDeviceMappingList.
+                        get_by_instance_uuid(context, instance.uuid))

                 if action == "log":
                     LOG.warning(_("Detected instance with name label "
@@ -239,6 +239,7 @@ class ComputeAPI(object):
         3.19 - Update pre_live_migration to take instance object
         3.20 - Make restore_instance take an instance object
         3.21 - Made rebuild take new-world BDM objects
+        3.22 - Made terminate_instance take new-world BDM objects
     '''

     VERSION_ALIASES = {
@@ -855,12 +856,16 @@ class ComputeAPI(object):

     def terminate_instance(self, ctxt, instance, bdms, reservations=None):
-        # NOTE(russellb) Havana compat
-        version = self._get_compat_version('3.0', '2.35')
-        bdms_p = jsonutils.to_primitive(bdms)
+        if self.client.can_send_version('3.22'):
+            version = '3.22'
+        else:
+            # NOTE(russellb) Havana compat
+            version = self._get_compat_version('3.0', '2.35')
+            bdms = block_device.legacy_mapping(bdms)
+        bdms = jsonutils.to_primitive(objects_base.obj_to_primitive(bdms))
         cctxt = self.client.prepare(server=_compute_host(None, instance),
                                     version=version)
         cctxt.cast(ctxt, 'terminate_instance',
-                   instance=instance, bdms=bdms_p,
+                   instance=instance, bdms=bdms,
                    reservations=reservations)

     def unpause_instance(self, ctxt, instance):
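The client side follows the usual can_send_version negotiation: send object BDMs to upgraded computes, down-convert to legacy dicts for Havana-era ones. A simplified sketch is below; the helper name is hypothetical, and '3.0' stands in for whatever _get_compat_version would resolve at runtime.

    # Sketch (assumed helper, not from the commit) of the version
    # negotiation used by terminate_instance above.
    def _negotiate_terminate(client, bdms):
        if client.can_send_version('3.22'):
            # New computes accept object BDMs directly.
            return '3.22', bdms
        # Havana compat: flatten object BDMs back to legacy dicts.
        return '3.0', block_device.legacy_mapping(bdms)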
@@ -1638,6 +1638,14 @@ class ComputeTestCase(BaseTestCase):
         def fake_volume_get(self, context, volume_id):
             return {'id': volume_id}

+        def fake_terminate_connection(self, context, volume_id, connector):
+            pass
+
+        def fake_detach(self, context, volume_id):
+            pass
+
+        bdms = []
+
         def fake_rpc_reserve_block_device_name(self, context, **kwargs):
             bdm = block_device_obj.BlockDeviceMapping(
                 **{'source_type': 'volume',
@@ -1646,11 +1654,15 @@ class ComputeTestCase(BaseTestCase):
                    'instance_uuid': instance['uuid'],
                    'device_name': '/dev/vdc'})
             bdm.create(context)
+            bdms.append(bdm)

         self.stubs.Set(cinder.API, 'get', fake_volume_get)
         self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
         self.stubs.Set(cinder.API, 'reserve_volume',
                        fake_reserve_volume)
+        self.stubs.Set(cinder.API, 'terminate_connection',
+                       fake_terminate_connection)
+        self.stubs.Set(cinder.API, 'detach', fake_detach)
         self.stubs.Set(compute_rpcapi.ComputeAPI,
                        'reserve_block_device_name',
                        fake_rpc_reserve_block_device_name)
@@ -1659,7 +1671,7 @@ class ComputeTestCase(BaseTestCase):
                          '/dev/vdc')

         self.compute.terminate_instance(self.context,
-                self._objectify(instance), [], [])
+                self._objectify(instance), bdms, [])

         instances = db.instance_get_all(self.context)
         LOG.info(_("After terminating instances: %s"), instances)
@@ -5456,14 +5468,19 @@ class ComputeTestCase(BaseTestCase):
         self.assertIn("Unrecognized value", six.text_type(e))

     def test_cleanup_running_deleted_instances_reap(self):
-        bdms = []
-
         ctxt, inst1, inst2 = self._test_cleanup_running('reap')
+        bdms = block_device_obj.block_device_make_list(ctxt, [])

         self.mox.StubOutWithMock(self.compute, "_shutdown_instance")
+        self.mox.StubOutWithMock(block_device_obj.BlockDeviceMappingList,
+                                 "get_by_instance_uuid")
         # Simulate an error and make sure cleanup proceeds with next instance.
+        block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(ctxt,
+            inst1.uuid).AndReturn(bdms)
         self.compute._shutdown_instance(ctxt, inst1, bdms, notify=False).\
                                         AndRaise(test.TestingException)
+        block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(ctxt,
+            inst2.uuid).AndReturn(bdms)
         self.compute._shutdown_instance(ctxt, inst2, bdms, notify=False).\
                                         AndReturn(None)

@@ -6061,6 +6078,7 @@ class ComputeTestCase(BaseTestCase):
         admin_context = context.get_admin_context()
         instance = instance_obj.Instance()
+        instance.id = 1
         instance.uuid = 'fake-uuid'
         instance.vm_state = vm_states.DELETED
         instance.task_state = None
         instance.system_metadata = {'fake_key': 'fake_value'}
@@ -6075,8 +6093,7 @@ class ComputeTestCase(BaseTestCase):

         self.stubs.Set(instance, 'destroy', fake_destroy)

-        self.stubs.Set(self.compute,
-                       '_get_instance_volume_bdms',
+        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                        lambda *a, **k: None)

         self.stubs.Set(self.compute,
@@ -6216,8 +6233,8 @@ class ComputeTestCase(BaseTestCase):
         self.mox.StubOutWithMock(instance_obj.InstanceList,
                                  'get_by_filters')
         self.mox.StubOutWithMock(self.compute, '_deleted_old_enough')
-        self.mox.StubOutWithMock(self.compute.conductor_api,
-                                 'block_device_mapping_get_all_by_instance')
+        self.mox.StubOutWithMock(block_device_obj.BlockDeviceMappingList,
+                                 'get_by_instance_uuid')
         self.mox.StubOutWithMock(self.compute, '_delete_instance')

         instance_obj.InstanceList.get_by_filters(
@@ -6228,15 +6245,15 @@ class ComputeTestCase(BaseTestCase):

         # The first instance delete fails.
         self.compute._deleted_old_enough(instance1, 3600).AndReturn(True)
-        self.compute.conductor_api.block_device_mapping_get_all_by_instance(
-            ctxt, instance1).AndReturn(None)
+        block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
+            ctxt, instance1.uuid).AndReturn([])
         self.compute._delete_instance(ctxt, instance1,
                                       None).AndRaise(test.TestingException)

         # The second instance delete that follows.
         self.compute._deleted_old_enough(instance2, 3600).AndReturn(True)
-        self.compute.conductor_api.block_device_mapping_get_all_by_instance(
-            ctxt, instance2).AndReturn(None)
+        block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
+            ctxt, instance2.uuid).AndReturn([])
         self.compute._delete_instance(ctxt, instance2,
                                       None)

@@ -8609,20 +8626,23 @@ class ComputeAPITestCase(BaseTestCase):
         instance = self._create_fake_instance_obj()

         img_bdm = {'instance_uuid': instance['uuid'],
-                     'device_name': '/dev/vda',
-                     'source_type': 'image',
-                     'destination_type': 'local',
-                     'delete_on_termination': False,
-                     'boot_index': 0,
-                     'image_id': 'fake_image'}
+                   'device_name': '/dev/vda',
+                   'source_type': 'image',
+                   'destination_type': 'local',
+                   'delete_on_termination': False,
+                   'boot_index': 0,
+                   'image_id': 'fake_image'}
         vol_bdm = {'instance_uuid': instance['uuid'],
-                     'device_name': '/dev/vdc',
-                     'source_type': 'volume',
-                     'destination_type': 'volume',
-                     'delete_on_termination': False,
-                     'volume_id': 'fake_vol'}
+                   'device_name': '/dev/vdc',
+                   'source_type': 'volume',
+                   'destination_type': 'volume',
+                   'delete_on_termination': False,
+                   'volume_id': 'fake_vol'}
+        bdms = []
         for bdm in img_bdm, vol_bdm:
-            db.block_device_mapping_create(admin, bdm, legacy=False)
+            bdm_obj = block_device_obj.BlockDeviceMapping(**bdm)
+            bdm_obj.create(admin)
+            bdms.append(bdm_obj)

         self.stubs.Set(self.compute, 'volume_api', mox.MockAnything())
         self.stubs.Set(self.compute, '_prep_block_device', mox.MockAnything())
@@ -8630,7 +8650,7 @@ class ComputeAPITestCase(BaseTestCase):
                 None, True, None, False)

         self.compute.terminate_instance(self.context,
-                self._objectify(instance), [], [])
+                self._objectify(instance), bdms, [])

         bdms = db.block_device_mapping_get_all_by_instance(admin,
                                                            instance['uuid'])
@@ -481,8 +481,8 @@ class _ComputeAPIUnitTestMixIn(object):
         if delete_type == 'soft_delete':
             updates['deleted_at'] = delete_time
         self.mox.StubOutWithMock(inst, 'save')
-        self.mox.StubOutWithMock(db,
-                                 'block_device_mapping_get_all_by_instance')
+        self.mox.StubOutWithMock(block_device_obj.BlockDeviceMappingList,
+                                 'get_by_instance_uuid')
         self.mox.StubOutWithMock(self.compute_api, '_create_reservations')
         self.mox.StubOutWithMock(self.context, 'elevated')
         self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
@@ -513,7 +513,7 @@ class _ComputeAPIUnitTestMixIn(object):
         self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
         self.mox.StubOutWithMock(rpcapi, 'soft_delete_instance')

-        db.block_device_mapping_get_all_by_instance(
+        block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
             self.context, inst.uuid).AndReturn([])
         inst.save()
         self.compute_api._create_reservations(
@@ -667,8 +667,10 @@ class _ComputeAPIUnitTestMixIn(object):
             ).AndReturn(None)

         if self.cell_type == 'api':
-            rpcapi.terminate_instance(self.context, inst, [],
-                                      reservations=None)
+            rpcapi.terminate_instance(
+                    self.context, inst,
+                    mox.IsA(block_device_obj.BlockDeviceMappingList),
+                    reservations=None)
         else:
             compute_utils.notify_about_instance_usage(
                 self.compute_api.notifier, self.context,
@@ -693,8 +695,11 @@ class _ComputeAPIUnitTestMixIn(object):
             self.assertEqual(inst[k], v)

     def test_local_delete_with_deleted_volume(self):
-        bdms = [{'id': 'bmd_id', 'volume_id': 'volume_id',
-                 'delete_on_termiantion': False}]
+        bdms = [block_device_obj.BlockDeviceMapping(
+                **fake_block_device.FakeDbBlockDeviceDict(
+                    {'id': 42, 'volume_id': 'volume_id',
+                     'source_type': 'volume', 'destination_type': 'volume',
+                     'delete_on_termination': False}))]

         def _fake_do_delete(context, instance, bdms,
                             rservations=None, local=False):
@@ -713,7 +718,8 @@ class _ComputeAPIUnitTestMixIn(object):
                                  'notify_about_instance_usage')
         self.mox.StubOutWithMock(self.compute_api.volume_api,
                                  'terminate_connection')
-        self.mox.StubOutWithMock(db, 'block_device_mapping_destroy')
+        self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
+                                 'destroy')

         inst.info_cache.delete()
         compute_utils.notify_about_instance_usage(
@@ -727,7 +733,7 @@ class _ComputeAPIUnitTestMixIn(object):
         self.compute_api.volume_api.terminate_connection(
             mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).\
             AndRaise(exception.VolumeNotFound('volume_id'))
-        db.block_device_mapping_destroy(self.context, mox.IgnoreArg())
+        bdms[0].destroy(self.context)

         inst.destroy()
         compute_utils.notify_about_instance_usage(
@@ -29,6 +29,7 @@ from nova import db
 from nova import exception
 from nova.network import model as network_model
 from nova.objects import base as obj_base
+from nova.objects import block_device as block_device_obj
 from nova.objects import instance as instance_obj
 from nova.openstack.common import importutils
 from nova.openstack.common import uuidutils
@@ -288,15 +289,16 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         instance.vm_state = vm_states.ACTIVE
         instance.task_state = task_states.DELETING

-        self.mox.StubOutWithMock(self.compute, '_get_instance_volume_bdms')
+        self.mox.StubOutWithMock(block_device_obj.BlockDeviceMappingList,
+                                 'get_by_instance_uuid')
         self.mox.StubOutWithMock(self.compute, '_delete_instance')
         self.mox.StubOutWithMock(instance, 'obj_load_attr')

         bdms = []
         instance.obj_load_attr('metadata')
         instance.obj_load_attr('system_metadata')
-        self.compute._get_instance_volume_bdms(self.context,
-                                               instance).AndReturn(bdms)
+        block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
+            self.context, instance.uuid).AndReturn(bdms)
         self.compute._delete_instance(self.context, instance, bdms)

         self.mox.ReplayAll()
@@ -730,7 +730,7 @@ class ComputeRpcAPITestCase(test.TestCase):
     def test_terminate_instance(self):
         self._test_compute_api('terminate_instance', 'cast',
                 instance=self.fake_instance, bdms=[],
-                reservations=['uuid1', 'uuid2'])
+                reservations=['uuid1', 'uuid2'], version='3.22')

         # NOTE(russellb) Havana compat
         self.flags(compute='havana', group='upgrade_levels')