Make volume attach use objects

This patch does three things. First, it adds disk_bus and device_type
parameters to the reserve_block_device_name compute RPC method, laying
the groundwork for specifying them when calling attach. Second, it
makes the volume attach code use new-world objects. These two changes
are done in a single commit to avoid unnecessary RPC version bumps.
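
As a rough caller-side illustration (not part of the patch): the helper
name and the 'ide'/'cdrom' values below are hypothetical, while the
attach_volume signature matches the one added in the compute API hunk
further down.

    # Hypothetical caller-side sketch of the extended compute API.
    from nova.compute import api as compute_api

    def attach_cdrom_volume(context, instance, volume_id):
        # disk_bus and device_type are the new optional parameters; the
        # compute API forwards them to the reserve_block_device_name RPC
        # call so the compute node can record them on the BDM it creates.
        return compute_api.API().attach_volume(
            context, instance, volume_id, device=None,
            disk_bus='ide', device_type='cdrom')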

In addition, the attach_volume method of the compute manager has been
refactored to use DriverBlockDevice objects and their attach method,
removing some code duplication.
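
A simplified sketch of the pattern the manager now follows (the wrapper
function is hypothetical and error handling and notifications are
omitted; the object calls are the ones used in the manager hunk below):

    # Sketch: wrap the BDM object in its driver counterpart and let it
    # drive the attach sequence (initialize_connection, driver attach,
    # saving connection_info) instead of duplicating that code inline.
    from nova.objects import block_device as block_device_obj
    from nova.virt import block_device as driver_block_device

    def attach_by_volume_id(context, instance, volume_id, volume_api, driver):
        bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
            context, volume_id)
        driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm)
        driver_bdm.attach(context, instance, volume_api, driver,
                          do_check_attach=False, do_driver_attach=True)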

Finally, the block_device module utility method instance_block_mapping
is moved to the v2 block device data format (and thus to objects), with
some cleanup of how it was used elsewhere in the code.
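
For context, illustrative data only (field values are made up) showing
the format difference the rewrite relies on: the legacy format tagged
swap and ephemeral disks via virtual_name, while the v2 format uses
source_type, destination_type and guest_format.

    # Legacy-format entry: swap identified by virtual_name.
    legacy_swap_bdm = {'device_name': '/dev/sdc', 'virtual_name': 'swap',
                       'volume_id': None, 'snapshot_id': None,
                       'no_device': None}

    # Equivalent v2-format entry, as consumed by the reworked
    # instance_block_mapping below.
    v2_swap_bdm = {'device_name': '/dev/sdc', 'source_type': 'blank',
                   'destination_type': 'local', 'guest_format': 'swap',
                   'delete_on_termination': None}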

Part of the blueprint: icehouse-objects
Part of the blueprint: clean-up-legacy-block-device-mapping
Part of the blueprint: use-new-bdm-format-in-attach

Change-Id: I3eb341efcd6397f1bfbb98a546e10e248a45460e
Nikola Dipanov 2013-12-19 16:36:05 +01:00
parent 6dab229484
commit 61e6f5b1e1
16 changed files with 316 additions and 192 deletions


@@ -860,7 +860,8 @@ class CloudController(object):
validate_ec2_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
instance = self.compute_api.get(context, instance_uuid)
instance = self.compute_api.get(context, instance_uuid,
want_objects=True)
LOG.audit(_('Attach volume %(volume_id)s to instance %(instance_id)s '
'at %(device)s'),
{'volume_id': volume_id,


@@ -31,6 +31,7 @@ from nova import conductor
from nova import context
from nova import network
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
from nova.objects import security_group as secgroup_obj
from nova.openstack.common.gettextutils import _
@@ -132,7 +133,7 @@ class InstanceMetadata():
self.security_groups = secgroup_obj.SecurityGroupList.get_by_instance(
ctxt, instance)
self.mappings = _format_instance_mapping(capi, ctxt, instance)
self.mappings = _format_instance_mapping(ctxt, instance)
if instance.get('user_data', None) is not None:
self.userdata_raw = base64.b64decode(instance['user_data'])
@@ -476,9 +477,9 @@ def get_metadata_by_instance_id(conductor_api, instance_id, address,
return InstanceMetadata(instance, address)
def _format_instance_mapping(conductor_api, ctxt, instance):
bdms = conductor_api.block_device_mapping_get_all_by_instance(
ctxt, instance)
def _format_instance_mapping(ctxt, instance):
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, instance.uuid)
return block_device.instance_block_mapping(instance, bdms)


@@ -405,7 +405,8 @@ class VolumeAttachmentController(wsgi.Controller):
context=context)
try:
instance = self.compute_api.get(context, server_id)
instance = self.compute_api.get(context, server_id,
want_objects=True)
device = self.compute_api.attach_volume(context, instance,
volume_id, device)
except exception.NotFound:


@@ -137,7 +137,8 @@ class ExtendedVolumesController(wsgi.Controller):
'server_id': server_id},
context=context)
instance = common.get_instance(self.compute_api, context, server_id)
instance = common.get_instance(self.compute_api, context, server_id,
want_objects=True)
try:
self.compute_api.attach_volume(context, instance,
volume_id, device)


@@ -472,33 +472,34 @@ def instance_block_mapping(instance, bdms):
if default_swap_device:
mappings['swap'] = default_swap_device
ebs_devices = []
blanks = []
# 'ephemeralN', 'swap' and ebs
for bdm in bdms:
if bdm['no_device']:
continue
# ebs volume case
if (bdm['volume_id'] or bdm['snapshot_id']):
ebs_devices.append(bdm['device_name'])
if bdm.destination_type == 'volume':
ebs_devices.append(bdm.device_name)
continue
virtual_name = bdm['virtual_name']
if not virtual_name:
continue
if is_swap_or_ephemeral(virtual_name):
mappings[virtual_name] = bdm['device_name']
if bdm.source_type == 'blank':
blanks.append(bdm)
# NOTE(yamahata): I'm not sure how ebs device should be numbered.
# Right now sort by device name for deterministic
# result.
if ebs_devices:
nebs = 0
ebs_devices.sort()
for ebs in ebs_devices:
for nebs, ebs in enumerate(ebs_devices):
mappings['ebs%d' % nebs] = ebs
nebs += 1
swap = [bdm for bdm in blanks if bdm.guest_format == 'swap']
if swap:
mappings['swap'] = swap.pop().device_name
ephemerals = [bdm for bdm in blanks if bdm.guest_format != 'swap']
if ephemerals:
for num, eph in enumerate(ephemerals):
mappings['ephemeral%d' % num] = eph.device_name
return mappings


@@ -2715,7 +2715,8 @@ class API(base.Base):
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def attach_volume(self, context, instance, volume_id, device=None):
def attach_volume(self, context, instance, volume_id, device=None,
disk_bus=None, device_type=None):
"""Attach an existing volume to an existing instance."""
# NOTE(vish): Fail fast if the device is not going to pass. This
# will need to be removed along with the test if we
@@ -2729,17 +2730,19 @@
# compute, the bdm will be created here and we will
# have to make sure that they are assigned atomically.
device = self.compute_rpcapi.reserve_block_device_name(
context, device=device, instance=instance, volume_id=volume_id)
context, device=device, instance=instance, volume_id=volume_id,
disk_bus=disk_bus, device_type=device_type)
volume_bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
context, volume_id)
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context, volume, instance=instance)
self.volume_api.reserve_volume(context, volume_id)
self.compute_rpcapi.attach_volume(context, instance=instance,
volume_id=volume_id, mountpoint=device)
volume_id=volume_id, mountpoint=device, bdm=volume_bdm)
except Exception:
with excutils.save_and_reraise_exception():
self.db.block_device_mapping_destroy_by_instance_and_device(
context, instance['uuid'], device)
volume_bdm.destroy(context)
return device


@ -27,7 +27,6 @@ from nova.compute import rpcapi as compute_rpcapi
from nova import exception
from nova.objects import base as obj_base
from nova.objects import service as service_obj
from nova.openstack.common import excutils
from nova import rpc
@@ -421,19 +420,17 @@ class ComputeCellsAPI(compute_api.API):
@wrap_check_policy
@check_instance_cell
def attach_volume(self, context, instance, volume_id, device=None):
def attach_volume(self, context, instance, volume_id, device=None,
disk_bus=None, device_type=None):
"""Attach an existing volume to an existing instance."""
if device and not block_device.match_device(device):
raise exception.InvalidDevicePath(path=device)
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context, volume, instance=instance)
except Exception:
with excutils.save_and_reraise_exception():
self.db.block_device_mapping_destroy_by_instance_and_device(
context, instance['uuid'], device)
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context, volume, instance=instance)
return self._call_to_cells(context, instance, 'attach_volume',
volume_id, device)
volume_id, device, disk_bus, device_type)
@check_instance_cell
def _detach_volume(self, context, instance, volume):


@@ -61,6 +61,7 @@ from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova.objects import aggregate as aggregate_obj
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
from nova.objects import migration as migration_obj
from nova.objects import quotas as quotas_obj
@@ -413,7 +414,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
target = messaging.Target(version='3.15')
target = messaging.Target(version='3.16')
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -3826,104 +3827,74 @@
return console_info['port'] == port
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def reserve_block_device_name(self, context, instance, device,
volume_id):
volume_id, disk_bus=None, device_type=None):
# NOTE(ndipanov): disk_bus and device_type will be set to None if not
# passed (by older clients) and defaulted by the virt driver. Remove
# default values on the next major RPC version bump.
@utils.synchronized(instance['uuid'])
def do_reserve():
bdms = self.conductor_api.block_device_mapping_get_all_by_instance(
context, instance)
bdms = (
block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid))
device_name = compute_utils.get_device_name_for_instance(
context, instance, bdms, device)
# NOTE(vish): create bdm here to avoid race condition
values = {'instance_uuid': instance['uuid'],
'volume_id': volume_id or 'reserved',
'device_name': device_name}
self.conductor_api.block_device_mapping_create(context, values)
bdm = block_device_obj.BlockDeviceMapping(
source_type='volume', destination_type='volume',
instance_uuid=instance.uuid,
volume_id=volume_id or 'reserved',
device_name=device_name,
disk_bus=disk_bus, device_type=device_type)
bdm.create(context)
return device_name
return do_reserve()
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def attach_volume(self, context, volume_id, mountpoint, instance):
def attach_volume(self, context, volume_id, mountpoint,
instance, bdm=None):
"""Attach a volume to an instance."""
if not bdm:
bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
context, volume_id)
driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm)
try:
return self._attach_volume(context, volume_id,
mountpoint, instance)
return self._attach_volume(context, instance, driver_bdm)
except Exception:
with excutils.save_and_reraise_exception():
capi = self.conductor_api
capi.block_device_mapping_destroy_by_instance_and_device(
context, instance, mountpoint)
bdm.destroy(context)
def _attach_volume(self, context, volume_id, mountpoint, instance):
def _attach_volume(self, context, instance, bdm):
context = context.elevated()
LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'),
{'volume_id': volume_id, 'mountpoint': mountpoint},
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
context=context, instance=instance)
try:
connector = self.driver.get_volume_connector(instance)
connection_info = self.volume_api.initialize_connection(context,
volume_id,
connector)
bdm.attach(context, instance, self.volume_api, self.driver,
do_check_attach=False, do_driver_attach=True)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to connect to volume %(volume_id)s "
"while attaching at %(mountpoint)s"),
{'volume_id': volume_id,
'mountpoint': mountpoint},
LOG.exception(_("Failed to attach %(volume_id)s "
"at %(mountpoint)s"),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
context=context, instance=instance)
self.volume_api.unreserve_volume(context, volume_id)
self.volume_api.unreserve_volume(context, bdm.volume_id)
if 'serial' not in connection_info:
connection_info['serial'] = volume_id
encryption = encryptors.get_encryption_metadata(
context, self.volume_api, volume_id, connection_info)
try:
self.driver.attach_volume(context,
connection_info,
instance,
mountpoint,
encryption=encryption)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to attach volume %(volume_id)s "
"at %(mountpoint)s") %
{'volume_id': volume_id,
'mountpoint': mountpoint},
context=context, instance=instance)
self.volume_api.terminate_connection(context,
volume_id,
connector)
self.volume_api.attach(context,
volume_id,
instance['uuid'],
mountpoint)
values = {
'instance_uuid': instance['uuid'],
'connection_info': jsonutils.dumps(connection_info),
'device_name': mountpoint,
'delete_on_termination': False,
'virtual_name': None,
'snapshot_id': None,
'volume_id': volume_id,
'volume_size': None,
'no_device': None}
self.conductor_api.block_device_mapping_update_or_create(context,
values)
info = dict(volume_id=volume_id)
info = {'volume_id': bdm.volume_id}
self._notify_about_instance_usage(
context, instance, "volume.attach", extra_usage_info=info)


@@ -229,6 +229,9 @@ class ComputeAPI(object):
3.13 - Update remove_fixed_ip_from_instance() to take an object
3.14 - Update post_live_migration_at_destination() to take an object
3.15 - Adds filter_properties and node to unshelve_instance()
3.16 - Make reserve_block_device_name and attach_volume use new-world
objects, and add disk_bus and device_type params to
reserve_block_device_name, and bdm param to attach_volume
'''
VERSION_ALIASES = {
@@ -306,15 +309,21 @@ class ComputeAPI(object):
instance=instance_p, network_id=network_id,
port_id=port_id, requested_ip=requested_ip)
def attach_volume(self, ctxt, instance, volume_id, mountpoint):
# NOTE(russellb) Havana compat
version = self._get_compat_version('3.0', '2.0')
instance_p = jsonutils.to_primitive(instance)
def attach_volume(self, ctxt, instance, volume_id, mountpoint, bdm=None):
# NOTE(ndipanov): Remove volume_id and mountpoint on the next major
# version bump - they are not needed when using bdm objects.
version = '3.16'
kw = {'instance': instance, 'volume_id': volume_id,
'mountpoint': mountpoint, 'bdm': bdm}
if not self.client.can_send_version(version):
# NOTE(russellb) Havana compat
version = self._get_compat_version('3.0', '2.0')
kw['instance'] = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
del kw['bdm']
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'attach_volume',
instance=instance_p, volume_id=volume_id,
mountpoint=mountpoint)
cctxt.cast(ctxt, 'attach_volume', **kw)
def change_instance_metadata(self, ctxt, instance, diff):
if self.client.can_send_version('3.7'):
@@ -769,15 +778,24 @@ class ComputeAPI(object):
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'get_host_uptime')
def reserve_block_device_name(self, ctxt, instance, device, volume_id):
# NOTE(russellb) Havana compat
version = self._get_compat_version('3.0', '2.3')
instance_p = jsonutils.to_primitive(instance)
def reserve_block_device_name(self, ctxt, instance, device, volume_id,
disk_bus=None, device_type=None):
version = '3.16'
kw = {'instance': instance, 'device': device,
'volume_id': volume_id, 'disk_bus': disk_bus,
'device_type': device_type}
if not self.client.can_send_version(version):
# NOTE(russellb) Havana compat
version = self._get_compat_version('3.0', '2.3')
kw['instance'] = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
del kw['disk_bus']
del kw['device_type']
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'reserve_block_device_name',
instance=instance_p, device=device,
volume_id=volume_id)
return cctxt.call(ctxt, 'reserve_block_device_name', **kw)
def backup_instance(self, ctxt, instance, image_id, backup_type,
rotation):


@@ -30,6 +30,7 @@ from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
from nova.objects import instance as instance_obj
from nova import test
from nova.tests import cast_as_call
from nova.tests import fake_network
@@ -857,9 +858,11 @@ class CinderCloudTestCase(test.TestCase):
vol = self.volume_api.get(self.context, vol2_uuid)
self._assert_volume_detached(vol)
inst_obj = instance_obj.Instance.get_by_uuid(self.context,
instance_uuid)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.cloud.compute_api.attach_volume(self.context,
instance,
inst_obj,
volume_id=vol2_uuid,
device='/dev/sdc')


@@ -81,6 +81,7 @@ from nova.tests.objects import test_flavor
from nova.tests.objects import test_migration
from nova.tests.objects import test_network
from nova import utils
from nova.virt import block_device as driver_block_device
from nova.virt import event
from nova.virt import fake
from nova.volume import cinder
@@ -354,6 +355,9 @@ class ComputeVolumeTestCase(BaseTestCase):
'name': 'fake',
'root_device_name': '/dev/vda',
}
self.fake_volume = fake_block_device.FakeDbBlockDeviceDict(
{'source_type': 'volume', 'destination_type': 'volume',
'volume_id': self.volume_id, 'device_name': '/dev/vdb'})
self.instance_object = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(),
fake_instance.fake_db_instance())
@@ -372,8 +376,9 @@
self.stubs.Set(self.compute.volume_api, 'check_attach',
lambda *a, **kw: None)
def store_cinfo(context, *args):
def store_cinfo(context, *args, **kwargs):
self.cinfo = jsonutils.loads(args[-1].get('connection_info'))
return self.fake_volume
self.stubs.Set(self.compute.conductor_api,
'block_device_mapping_update',
@@ -381,17 +386,54 @@
self.stubs.Set(self.compute.conductor_api,
'block_device_mapping_update_or_create',
store_cinfo)
self.stubs.Set(db, 'block_device_mapping_create', store_cinfo)
self.stubs.Set(db, 'block_device_mapping_update', store_cinfo)
def test_attach_volume_serial(self):
def fake_get_volume_encryption_metadata(self, context, volume_id):
return {}
self.stubs.Set(cinder.API, 'get_volume_encryption_metadata',
fake_get_volume_encryption_metadata)
fake_bdm = block_device_obj.BlockDeviceMapping(**self.fake_volume)
with (mock.patch.object(cinder.API, 'get_volume_encryption_metadata',
return_value={})
) as mock_get_vol_enc:
instance = self._create_fake_instance()
self.compute.attach_volume(self.context, self.volume_id,
'/dev/vdb', instance, bdm=fake_bdm)
self.assertEqual(self.cinfo.get('serial'), self.volume_id)
def test_attach_volume_raises(self):
fake_bdm = block_device_obj.BlockDeviceMapping(**self.fake_volume)
instance = self._create_fake_instance()
self.compute.attach_volume(self.context, self.volume_id,
'/dev/vdb', instance)
self.assertEqual(self.cinfo.get('serial'), self.volume_id)
def fake_attach(*args, **kwargs):
raise test.TestingException
with contextlib.nested(
mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
'attach'),
mock.patch.object(cinder.API, 'unreserve_volume'),
mock.patch.object(block_device_obj.BlockDeviceMapping,
'destroy')
) as (mock_attach, mock_unreserve, mock_destroy):
mock_attach.side_effect = fake_attach
self.assertRaises(
test.TestingException, self.compute.attach_volume,
self.context, 'fake', '/dev/vdb',
instance, bdm=fake_bdm)
self.assertTrue(mock_unreserve.called)
self.assertTrue(mock_destroy.called)
def test_attach_volume_no_bdm(self):
fake_bdm = block_device_obj.BlockDeviceMapping(**self.fake_volume)
instance = self._create_fake_instance()
with contextlib.nested(
mock.patch.object(block_device_obj.BlockDeviceMapping,
'get_by_volume_id', return_value=fake_bdm),
mock.patch.object(self.compute, '_attach_volume')
) as (mock_get_by_id, mock_attach):
self.compute.attach_volume(self.context, 'fake', '/dev/vdb',
instance, bdm=None)
mock_get_by_id.assert_called_once_with(self.context, 'fake')
self.assertTrue(mock_attach.called)
def test_await_block_device_created_to_slow(self):
@@ -631,31 +673,37 @@
def test_detach_volume_usage(self):
# Test that detach volume update the volume usage cache table correctly
instance = self._create_fake_instance()
bdm = {'id': 1,
'device_name': '/dev/vdb',
'connection_info': '{}',
'instance_uuid': instance['uuid'],
bdm = fake_block_device.FakeDbBlockDeviceDict(
{'id': 1, 'device_name': '/dev/vdb',
'connection_info': '{}', 'instance_uuid': instance['uuid'],
'source_type': 'volume', 'destination_type': 'volume',
'volume_id': 1})
legacy_bdm = {'id': 1, 'device_name': '/dev/vdb',
'connection_info': '{}', 'instance_uuid': instance['uuid'],
'volume_id': 1}
self.mox.StubOutWithMock(self.compute, '_get_instance_volume_bdm')
self.mox.StubOutWithMock(db, 'block_device_mapping_get_by_volume_id')
self.mox.StubOutWithMock(self.compute.driver, 'block_stats')
self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
self.mox.StubOutWithMock(self.compute, '_get_instance_volume_bdm')
# The following methods will be called
self.compute._get_instance_volume_bdm(self.context, instance, 1).\
db.block_device_mapping_get_by_volume_id(self.context, 1, []).\
AndReturn(bdm)
self.compute.driver.block_stats(instance['name'], 'vdb').\
AndReturn([1L, 30L, 1L, 20L, None])
self.compute._get_host_volume_bdms(self.context, 'fake-mini').\
AndReturn(bdm)
self.compute.driver.get_all_volume_usage(self.context, bdm).\
AndReturn(legacy_bdm)
self.compute.driver.get_all_volume_usage(self.context, legacy_bdm).\
AndReturn([{'volume': 1,
'rd_req': 1,
'rd_bytes': 10,
'wr_req': 1,
'wr_bytes': 5,
'instance': instance}])
self.compute._get_instance_volume_bdm(
self.context, instance, 1).AndReturn(legacy_bdm)
self.mox.ReplayAll()
@@ -1591,7 +1639,13 @@ class ComputeTestCase(BaseTestCase):
return {'id': volume_id}
def fake_rpc_reserve_block_device_name(self, context, **kwargs):
pass
bdm = block_device_obj.BlockDeviceMapping(
**{'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 1,
'instance_uuid': instance['uuid'],
'device_name': '/dev/vdc'})
bdm.create(context)
self.stubs.Set(cinder.API, 'get', fake_volume_get)
self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
@@ -3934,13 +3988,24 @@ class ComputeTestCase(BaseTestCase):
volume_id = 'fake'
volume = {'instance_uuid': None,
'device_name': None,
'volume_id': volume_id}
'id': volume_id}
bdm = block_device_obj.BlockDeviceMapping(
**{'source_type': 'volume',
'destination_type': 'volume',
'volume_id': volume_id,
'instance_uuid': instance['uuid'],
'device_name': '/dev/vdc'})
bdm.create(self.context)
# stub out volume attach
def fake_volume_get(self, context, volume):
def fake_volume_get(self, context, volume_id):
return volume
self.stubs.Set(cinder.API, "get", fake_volume_get)
def fake_volume_check_attach(self, context, volume_id, instance):
pass
self.stubs.Set(cinder.API, "check_attach", fake_volume_check_attach)
def fake_get_volume_encryption_metadata(self, context, volume_id):
return {}
self.stubs.Set(cinder.API, 'get_volume_encryption_metadata',
@@ -3957,7 +4022,7 @@ class ComputeTestCase(BaseTestCase):
'data': orig_connection_data,
}
def fake_init_conn(self, context, volume, session):
def fake_init_conn(self, context, volume_id, session):
return connection_info
self.stubs.Set(cinder.API, "initialize_connection", fake_init_conn)
@@ -3979,8 +4044,8 @@ class ComputeTestCase(BaseTestCase):
# attach volume to instance
instance_p = obj_base.obj_to_primitive(instance)
self.compute.attach_volume(self.context, volume['volume_id'],
'/dev/vdc', instance_p)
self.compute.attach_volume(self.context, volume['id'],
'/dev/vdc', instance_p, bdm=bdm)
# assert volume attached correctly
self.assertEqual(volume['device_name'], '/dev/vdc')
@@ -6359,6 +6424,32 @@ class ComputeTestCase(BaseTestCase):
{}, bdms)
self.assertEqual(bdms[1]['device_name'], '/dev/vdb')
def test_reserve_block_device_name(self):
instance = self._create_fake_instance_obj(
params={'root_device_name': '/dev/vda'})
bdm = block_device_obj.BlockDeviceMapping(
**{'source_type': 'image', 'destination_type': 'local',
'image_id': 'fake-image-id', 'device_name': '/dev/vda',
'instance_uuid': instance.uuid})
bdm.create(self.context)
dev = self.compute.reserve_block_device_name(
self.context, instance, '/dev/vdb',
'fake-volume-id', 'virtio', 'disk')
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
self.context, instance.uuid)
bdms = list(bdms)
self.assertEqual(len(bdms), 2)
bdms.sort(key=operator.attrgetter('device_name'))
vol_bdm = bdms[1]
self.assertEqual(vol_bdm.source_type, 'volume')
self.assertEqual(vol_bdm.destination_type, 'volume')
self.assertEqual(vol_bdm.device_name, '/dev/vdb')
self.assertEqual(vol_bdm.volume_id, 'fake-volume-id')
self.assertEqual(vol_bdm.disk_bus, 'virtio')
self.assertEqual(vol_bdm.device_type, 'disk')
class ComputeAPITestCase(BaseTestCase):
def setUp(self):
@@ -8289,43 +8380,46 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(self.compute.driver._interfaces, {})
def test_attach_volume(self):
# Ensure instance can be soft rebooted.
called = {}
def fake_check_attach(*args, **kwargs):
called['fake_check_attach'] = True
def fake_reserve_volume(*args, **kwargs):
called['fake_reserve_volume'] = True
def fake_volume_get(self, context, volume_id):
called['fake_volume_get'] = True
return {'id': volume_id}
def fake_rpc_attach_volume(self, context, **kwargs):
called['fake_rpc_attach_volume'] = True
def fake_rpc_reserve_block_device_name(self, context, **kwargs):
called['fake_rpc_reserve_block_device_name'] = True
self.stubs.Set(cinder.API, 'get', fake_volume_get)
self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
self.stubs.Set(cinder.API, 'reserve_volume',
fake_reserve_volume)
self.stubs.Set(compute_rpcapi.ComputeAPI,
'reserve_block_device_name',
fake_rpc_reserve_block_device_name)
self.stubs.Set(compute_rpcapi.ComputeAPI, 'attach_volume',
fake_rpc_attach_volume)
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
{'source_type': 'volume', 'destination_type': 'volume',
'volume_id': 'fake-volume-id', 'device_name': '/dev/vdb'})
instance = self._create_fake_instance()
self.compute_api.attach_volume(self.context, instance, 1, '/dev/vdb')
self.assertTrue(called.get('fake_check_attach'))
self.assertTrue(called.get('fake_reserve_volume'))
self.assertTrue(called.get('fake_reserve_volume'))
self.assertTrue(called.get('fake_rpc_reserve_block_device_name'))
self.assertTrue(called.get('fake_rpc_attach_volume'))
fake_volume = {'id': 'fake-volume-id'}
with contextlib.nested(
mock.patch.object(cinder.API, 'get', return_value=fake_volume),
mock.patch.object(cinder.API, 'check_attach'),
mock.patch.object(cinder.API, 'reserve_volume'),
mock.patch.object(compute_rpcapi.ComputeAPI,
'reserve_block_device_name', return_value='/dev/vdb'),
mock.patch.object(db, 'block_device_mapping_get_by_volume_id',
return_value=fake_bdm),
mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume')
) as (mock_get, mock_check_attach, mock_reserve_vol, mock_reserve_bdm,
mock_bdm_get, mock_attach):
self.compute_api.attach_volume(
self.context, instance, 'fake-volume-id',
'/dev/vdb', 'ide', 'cdrom')
mock_reserve_bdm.assert_called_once_with(
self.context, device='/dev/vdb', instance=instance,
volume_id='fake-volume-id', disk_bus='ide',
device_type='cdrom')
mock_bdm_get.assert_called_once_with(
self.context, 'fake-volume-id', [])
self.assertEqual(mock_get.call_args,
mock.call(self.context, 'fake-volume-id'))
self.assertEqual(mock_check_attach.call_args,
mock.call(
self.context, fake_volume, instance=instance))
mock_reserve_vol.assert_called_once_with(
self.context, 'fake-volume-id')
a, kw = mock_attach.call_args
self.assertEqual(kw['volume_id'], 'fake-volume-id')
self.assertEqual(kw['mountpoint'], '/dev/vdb')
self.assertEqual(kw['bdm'].device_name, '/dev/vdb')
self.assertEqual(kw['bdm'].volume_id, 'fake-volume-id')
def test_attach_volume_no_device(self):
@@ -8357,6 +8451,12 @@ class ComputeAPITestCase(BaseTestCase):
self.stubs.Set(compute_rpcapi.ComputeAPI, 'attach_volume',
fake_rpc_attach_volume)
self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
'get_by_volume_id')
block_device_obj.BlockDeviceMapping.get_by_volume_id(
self.context, mox.IgnoreArg()).AndReturn('fake-bdm')
self.mox.ReplayAll()
instance = self._create_fake_instance()
self.compute_api.attach_volume(self.context, instance, 1, device=None)
self.assertTrue(called.get('fake_check_attach'))


@@ -28,11 +28,13 @@ from nova import db
from nova import exception
from nova.image import glance
from nova.network import api as network_api
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova import rpc
from nova import test
from nova.tests import fake_block_device
from nova.tests import fake_instance
from nova.tests import fake_instance_actions
from nova.tests import fake_network
@@ -90,19 +92,22 @@ class ComputeValidateDeviceTestCase(test.TestCase):
self.flavor.items()]
def _validate_device(self, device=None):
bdms = db.block_device_mapping_get_all_by_instance(
self.context, self.instance['uuid'])
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
self.context, self.instance['uuid'])
return compute_utils.get_device_name_for_instance(
self.context, self.instance, bdms, device)
@staticmethod
def _fake_bdm(device):
return {
return fake_block_device.FakeDbBlockDeviceDict({
'source_type': 'volume',
'destination_type': 'volume',
'device_name': device,
'no_device': None,
'volume_id': 'fake',
'snapshot_id': None
}
'snapshot_id': None,
'guest_format': None
})
def test_wrap(self):
self.data = []


@@ -26,6 +26,7 @@ from nova import context
from nova import db
from nova.openstack.common import jsonutils
from nova import test
from nova.tests import fake_block_device
CONF = cfg.CONF
@@ -38,6 +39,11 @@ class ComputeRpcAPITestCase(test.TestCase):
inst = db.instance_create(self.context, {'host': 'fake_host',
'instance_type_id': 1})
self.fake_instance = jsonutils.to_primitive(inst)
self.fake_volume_bdm = jsonutils.to_primitive(
fake_block_device.FakeDbBlockDeviceDict(
{'source_type': 'volume', 'destination_type': 'volume',
'instance_uuid': self.fake_instance['uuid'],
'volume_id': 'fake-volume-id'}))
def test_serialized_instance_has_name(self):
self.assertIn('name', self.fake_instance)
@@ -124,7 +130,8 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_attach_volume(self):
self._test_compute_api('attach_volume', 'cast',
instance=self.fake_instance, volume_id='id', mountpoint='mp')
instance=self.fake_instance, volume_id='id', mountpoint='mp',
bdm=self.fake_volume_bdm, version='3.16')
# NOTE(russellb) Havana compat
self.flags(compute='havana', group='upgrade_levels')
@@ -497,7 +504,8 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_reserve_block_device_name(self):
self._test_compute_api('reserve_block_device_name', 'call',
instance=self.fake_instance, device='device', volume_id='id')
instance=self.fake_instance, device='device', volume_id='id',
disk_bus='ide', device_type='cdrom', version='3.16')
# NOTE(russellb) Havana compat
self.flags(compute='havana', group='upgrade_levels')


@@ -42,6 +42,7 @@ from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova.network import api as network_api
from nova.objects import block_device as block_device_obj
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
@@ -3815,6 +3816,8 @@ class VolumeAttachmentsSampleJsonTest(VolumeAttachmentsSampleBase):
self.stubs.Set(compute_manager.ComputeManager,
'attach_volume',
lambda *a, **k: None)
self.stubs.Set(block_device_obj.BlockDeviceMapping, 'get_by_volume_id',
classmethod(lambda *a, **k: None))
volume = fakes.stub_volume_get(None, context.get_admin_context(),
'a26887c6-c47b-4654-abb5-dfadf7d3f803')


@@ -17,6 +17,7 @@ from nova.compute import api as compute_api
from nova.compute import manager as compute_manager
from nova import context
from nova import db
from nova.objects import block_device as block_device_obj
from nova.tests.api.openstack import fakes
from nova.tests.integrated.v3 import test_servers
from nova.volume import cinder
@@ -78,6 +79,8 @@ class ExtendedVolumesSampleJsonTests(test_servers.ServersSampleBase):
self.stubs.Set(compute_manager.ComputeManager,
'attach_volume',
lambda *a, **k: None)
self.stubs.Set(block_device_obj.BlockDeviceMapping, 'get_by_volume_id',
classmethod(lambda *a, **k: None))
volume = fakes.stub_volume_get(None, context.get_admin_context(),
'a26887c6-c47b-4654-abb5-dfadf7d3f803')


@@ -44,6 +44,7 @@ from nova import exception
from nova.network import api as network_api
from nova.objects import instance as instance_obj
from nova import test
from nova.tests import fake_block_device
from nova.tests import fake_instance
from nova.tests import fake_network
from nova.tests.objects import test_security_group
@@ -189,21 +190,27 @@ class MetadataTestCase(test.TestCase):
def test_format_instance_mapping(self):
# Make sure that _format_instance_mappings works.
ctxt = None
instance_ref0 = {'id': 0,
instance_ref0 = instance_obj.Instance(**{'id': 0,
'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
'root_device_name': None}
instance_ref1 = {'id': 0,
'root_device_name': None,
'default_ephemeral_device': None,
'default_swap_device': None})
instance_ref1 = instance_obj.Instance(**{'id': 0,
'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2',
'root_device_name': '/dev/sda1'}
'root_device_name': '/dev/sda1',
'default_ephemeral_device': None,
'default_swap_device': None})
def fake_bdm_get(ctxt, uuid):
return [{'volume_id': 87654321,
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': 87654321,
'snapshot_id': None,
'no_device': None,
'source_type': 'volume',
'destination_type': 'volume',
'delete_on_termination': True,
'device_name': '/dev/sdh'},
'device_name': '/dev/sdh'}),
fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
@@ -211,7 +218,8 @@ class MetadataTestCase(test.TestCase):
'destination_type': 'local',
'guest_format': 'swap',
'delete_on_termination': None,
'device_name': '/dev/sdc'},
'device_name': '/dev/sdc'}),
fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
@@ -219,7 +227,7 @@ class MetadataTestCase(test.TestCase):
'destination_type': 'local',
'guest_format': None,
'delete_on_termination': None,
'device_name': '/dev/sdb'}]
'device_name': '/dev/sdb'})]
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_bdm_get)
@@ -232,9 +240,9 @@ class MetadataTestCase(test.TestCase):
capi = conductor_api.LocalAPI()
self.assertEqual(base._format_instance_mapping(capi, ctxt,
self.assertEqual(base._format_instance_mapping(ctxt,
instance_ref0), block_device._DEFAULT_MAPPINGS)
self.assertEqual(base._format_instance_mapping(capi, ctxt,
self.assertEqual(base._format_instance_mapping(ctxt,
instance_ref1), expected)
def test_pubkey(self):