[libvirt] Allow multiple volume attachments

Add multiattach support to the libvirt driver by updating the
XML configuration if multiattach is turned on for a volume, and
set the virt driver capability 'supports_multiattach' to true.
This capability is set to false for all other drivers.

Also update the attach function in nova/virt/block_device.py to
call out to Cinder for each attachment request on a multiattach
volume, which Cinder needs in order to track all attachments for
a volume and be able to detach them properly.

Co-Authored-By: Matt Riedemann <mriedem.os@gmail.com>

Partially-implements: blueprint multi-attach-volume
Change-Id: I947bf0ad34a48e9182a3dc016f47f0c9f71c9d7b
Author: Ildiko Vancsa
Date: 2017-12-17 17:38:11 -05:00
Parent: 21ad3402f1
Commit: baa8278ca7

14 changed files with 218 additions and 6 deletions
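In outline, the change threads a Cinder volume's multiattach flag from the
attach path down to the libvirt disk configuration. Below is a condensed,
illustrative sketch of that flow with simplified names; it is not the exact
Nova code, which lives in the diffs that follow:

# Condensed sketch of the flow this commit adds (simplified names; see the
# diffs below for the real implementation).

class MultiattachNotSupported(Exception):
    """Stand-in for nova.exception.MultiattachNotSupportedByVirtDriver."""

def attach(volume, capabilities, connection_info):
    # Fail fast if the volume is multiattach but the virt driver cannot
    # handle shareable disks; the API treats this as a 409 Conflict.
    if volume.get('multiattach') and not capabilities['supports_multiattach']:
        raise MultiattachNotSupported(volume['id'])
    if volume.get('multiattach'):
        # Stash the flag so the volume driver can pick the disk config.
        connection_info['multiattach'] = True
    return connection_info

def disk_config(connection_info):
    # The libvirt volume driver marks the disk shareable, and shareable
    # disks always get cache mode 'none'.
    shareable = connection_info.get('multiattach', False)
    return {'shareable': shareable,
            'driver_cache': 'none' if shareable else 'writethrough'}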


@@ -554,7 +554,9 @@ Possible cache modes:
   barriers), then data integrity can be ensured. However, because the host
   page cache is disabled, the read performance in the guest would not be as
   good as in the modes where the host page cache is enabled, such as
-  writethrough mode.
+  writethrough mode. Shareable disk devices, like for a multi-attachable block
+  storage volume, will have their cache mode set to 'none' regardless of
+  configuration.
 * writethrough: writethrough mode is the default caching mode. With
   caching set to writethrough mode, the host page cache is enabled, but the
   disk write cache is disabled for the guest. Consequently, this caching mode
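The rule added above reduces to a small pure function. This is only an
illustration of the documented behavior, not the driver's actual
_set_cache_mode (which appears later in this commit):

def effective_cache_mode(shareable, configured_mode):
    # Shareable (multi-attach) disks must bypass the host page cache and
    # disk write cache, so 'none' wins over any configured mode.
    return 'none' if shareable else configured_mode

assert effective_cache_mode(True, 'writethrough') == 'none'
assert effective_cache_mode(False, 'writethrough') == 'writethrough'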


@@ -250,6 +250,16 @@ class VolumeAttachFailed(Invalid):
                 "Reason: %(reason)s")
 
 
+class MultiattachNotSupportedByVirtDriver(NovaException):
+    # This exception indicates the compute hosting the instance does not
+    # support multiattach volumes. This should generally be considered a
+    # 409 HTTPConflict error in the API since we expect all virt drivers to
+    # eventually support multiattach volumes.
+    msg_fmt = _("Volume %(volume_id)s has 'multiattach' set, "
+                "which is not supported for this instance.")
+    code = 409
+
+
 class VolumeNotCreated(NovaException):
     msg_fmt = _("Volume %(volume_id)s did not finish being created"
                 " even after we waited %(seconds)s seconds or %(attempts)s"


@@ -14106,6 +14106,18 @@ class LibvirtConnTestCase(test.NoDBTestCase,
         drvr._set_cache_mode(fake_conf)
         self.assertEqual(fake_conf.driver_cache, 'directsync')
 
+    def test_set_cache_mode_shareable(self):
+        """Tests that when conf.shareable is True, the configuration is
+        ignored and the driver_cache is forced to 'none'.
+        """
+        self.flags(disk_cachemodes=['block=writethrough'], group='libvirt')
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        fake_conf = FakeConfigGuestDisk()
+        fake_conf.shareable = True
+        fake_conf.source_type = 'block'
+        drvr._set_cache_mode(fake_conf)
+        self.assertEqual('none', fake_conf.driver_cache)
+
     def test_set_cache_mode_invalid_mode(self):
         self.flags(disk_cachemodes=['file=FAKE'], group='libvirt')
         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -19694,3 +19706,51 @@ class LVMSnapshotTests(_BaseSnapshotTests):
     def test_qcow2(self):
         self.flags(snapshot_image_format='qcow2', group='libvirt')
         self._test_lvm_snapshot('qcow2')
+
+
+class TestLibvirtMultiattach(test.NoDBTestCase):
+    """Libvirt driver tests for volume multiattach support."""
+
+    def setUp(self):
+        super(TestLibvirtMultiattach, self).setUp()
+        self.useFixture(fakelibvirt.FakeLibvirtFixture())
+
+    @mock.patch('nova.virt.libvirt.host.Host.has_min_version',
+                return_value=True)
+    def test_init_host_supports_multiattach_new_enough_libvirt(self, min_ver):
+        """Tests that the driver supports multiattach because libvirt>=3.10.
+        """
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        drvr._set_multiattach_support()
+        self.assertTrue(drvr.capabilities['supports_multiattach'])
+        min_ver.assert_called_once_with(
+            lv_ver=libvirt_driver.MIN_LIBVIRT_MULTIATTACH)
+
+    @mock.patch('nova.virt.libvirt.host.Host.has_min_version',
+                side_effect=[False, False])
+    def test_init_host_supports_multiattach_old_enough_qemu(self, min_ver):
+        """Tests that the driver supports multiattach because qemu<2.10.
+        """
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        drvr._set_multiattach_support()
+        self.assertTrue(drvr.capabilities['supports_multiattach'])
+        calls = [mock.call(lv_ver=libvirt_driver.MIN_LIBVIRT_MULTIATTACH),
+                 mock.call(hv_ver=(2, 10, 0))]
+        min_ver.assert_has_calls(calls)
+
+    # FIXME(mriedem): This test intermittently fails when run at the same time
+    # as LibvirtConnTestCase, presumably because of shared global state on the
+    # version check.
+    # @mock.patch('nova.virt.libvirt.host.Host.has_min_version',
+    #             side_effect=[False, True])
+    # def test_init_host_supports_multiattach_no_support(self,
+    #                                                    has_min_version):
+    #     """Tests that the driver does not support multiattach because
+    #     qemu>=2.10 and libvirt<3.10.
+    #     """
+    #     drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+    #     drvr._set_multiattach_support()
+    #     self.assertFalse(drvr.capabilities['supports_multiattach'])
+    #     calls = [mock.call(lv_ver=libvirt_driver.MIN_LIBVIRT_MULTIATTACH),
+    #              mock.call(hv_ver=(2, 10, 0))]
+    #     has_min_version.assert_has_calls(calls)


@@ -268,6 +268,33 @@ class LibvirtVolumeTestCase(LibvirtISCSIVolumeBaseTestCase):
         readonly = tree.find('./readonly')
         self.assertIsNotNone(readonly)
 
+    def test_libvirt_volume_multiattach(self):
+        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_host)
+        connection_info = {
+            'driver_volume_type': 'fake',
+            'data': {
+                "device_path": "/foo",
+                'access_mode': 'rw',
+            },
+            'multiattach': True,
+        }
+        disk_info = {
+            "bus": "virtio",
+            "dev": "vde",
+            "type": "disk",
+        }
+        conf = libvirt_driver.get_config(connection_info, disk_info)
+        tree = conf.format_dom()
+        shareable = tree.find('./shareable')
+        self.assertIsNotNone(shareable)
+
+        connection_info['multiattach'] = False
+        conf = libvirt_driver.get_config(connection_info, disk_info)
+        tree = conf.format_dom()
+        shareable = tree.find('./shareable')
+        self.assertIsNone(shareable)
+
     @mock.patch('nova.virt.libvirt.host.Host.has_min_version')
     def test_libvirt_volume_driver_discard_true(self, mock_has_min_version):
         # Check the discard attrib is present in driver section


@@ -439,7 +439,13 @@ class TestDriverBlockDevice(test.NoDBTestCase):
                             driver_attach=False, fail_driver_attach=False,
                             volume_attach=True, fail_volume_attach=False,
                             access_mode='rw', availability_zone=None,
+                            multiattach=False, driver_multi_attach=False,
+                            fail_with_virt_driver=False,
                             include_shared_targets=False):
+        if driver_multi_attach:
+            self.virt_driver.capabilities['supports_multiattach'] = True
+        else:
+            self.virt_driver.capabilities['supports_multiattach'] = False
         elevated_context = self.context.elevated()
         self.stubs.Set(self.context, 'elevated',
                        lambda: elevated_context)
@@ -453,6 +459,8 @@ class TestDriverBlockDevice(test.NoDBTestCase):
         connection_info = {'data': {'access_mode': access_mode}}
         expected_conn_info = {'data': {'access_mode': access_mode},
                               'serial': fake_volume['id']}
+        if multiattach and driver_multi_attach:
+            expected_conn_info['multiattach'] = True
         enc_data = {'fake': 'enc_data'}
 
         if include_shared_targets:
@@ -484,6 +492,11 @@ class TestDriverBlockDevice(test.NoDBTestCase):
             return instance, expected_conn_info
 
         self.virt_driver.get_volume_connector(instance).AndReturn(connector)
+
+        if fail_with_virt_driver:
+            driver_bdm._bdm_obj.save().AndReturn(None)
+            return instance, expected_conn_info
+
         if self.attachment_id is None:
             self.volume_api.initialize_connection(
                 elevated_context, fake_volume['id'],
@@ -1220,3 +1233,44 @@ class TestDriverBlockDeviceNewFlow(TestDriverBlockDevice):
     where a volume BDM has an attachment_id.
     """
     attachment_id = uuids.attachment_id
+
+    def test_volume_attach_multiattach(self):
+        test_bdm = self.driver_classes['volume'](
+            self.volume_bdm)
+        volume = {'id': 'fake-volume-id-1',
+                  'multiattach': True,
+                  'attach_status': 'attached',
+                  'status': 'in-use',
+                  'attachments': {'fake_instance_2':
+                                  {'mountpoint': '/dev/vdc'}}}
+
+        instance, expected_conn_info = self._test_volume_attach(
+            test_bdm, self.volume_bdm, volume, multiattach=True,
+            driver_multi_attach=True)
+
+        self.mox.ReplayAll()
+
+        test_bdm.attach(self.context, instance,
+                        self.volume_api, self.virt_driver)
+        self.assertThat(test_bdm['connection_info'],
+                        matchers.DictMatches(expected_conn_info))
+
+    def test_volume_attach_multiattach_no_virt_driver_support(self):
+        test_bdm = self.driver_classes['volume'](
+            self.volume_bdm)
+        volume = {'id': 'fake-volume-id-1',
+                  'multiattach': True,
+                  'attach_status': 'attached',
+                  'status': 'in-use',
+                  'attachments': {'fake_instance_2':
+                                  {'mountpoint': '/dev/vdc'}}}
+
+        instance, _ = self._test_volume_attach(test_bdm, self.volume_bdm,
+                                               volume, multiattach=True,
+                                               fail_with_virt_driver=True)
+
+        self.mox.ReplayAll()
+
+        self.assertRaises(exception.MultiattachNotSupportedByVirtDriver,
+                          test_bdm.attach, self.context, instance,
+                          self.volume_api, self.virt_driver)


@@ -495,6 +495,13 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
 
         if self.volume_size is None:
             self.volume_size = volume.get('size')
+
+        vol_multiattach = volume.get('multiattach', False)
+        virt_multiattach = virt_driver.capabilities['supports_multiattach']
+        if vol_multiattach and not virt_multiattach:
+            raise exception.MultiattachNotSupportedByVirtDriver(
+                volume_id=volume_id)
+
         LOG.debug("Updating existing volume attachment record: %s",
                   attachment_id, instance=instance)
         connection_info = volume_api.attachment_update(
@@ -503,6 +510,18 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
         if 'serial' not in connection_info:
             connection_info['serial'] = self.volume_id
         self._preserve_multipath_id(connection_info)
+        if vol_multiattach:
+            # This will be used by the volume driver to determine the proper
+            # disk configuration.
+            # TODO(mriedem): Long-term we should stop stashing the multiattach
+            # flag in the bdm.connection_info since that should be an untouched
+            # set of values we can refresh from Cinder as needed. Putting the
+            # multiattach flag on the bdm directly will require schema and
+            # online data migrations, plus some refactoring to anything that
+            # needs to get a block device disk config, like spawn/migrate/swap
+            # and the LibvirtLiveMigrateBDMInfo would also need to store the
+            # value.
+            connection_info['multiattach'] = True
 
         if do_driver_attach:
             encryption = encryptors.get_encryption_metadata(
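After attach(), the stashed flag sits alongside the normal connection data.
An illustrative example of the resulting bdm.connection_info, mirroring the
expectations in the block device tests earlier in this commit (values are
fake):

connection_info = {
    'data': {'access_mode': 'rw'},
    'serial': 'fake-volume-id-1',  # the Cinder volume ID
    'multiattach': True,           # only set when volume and driver agree
}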


@@ -130,6 +130,7 @@ class ComputeDriver(object):
         "supports_tagged_attach_interface": False,
         "supports_tagged_attach_volume": False,
         "supports_extend_volume": False,
+        "supports_multiattach": False
     }
 
     requires_allocation_refresh = False


@@ -129,6 +129,7 @@ class FakeDriver(driver.ComputeDriver):
         "supports_tagged_attach_interface": True,
         "supports_tagged_attach_volume": True,
         "supports_extend_volume": True,
+        "supports_multiattach": True
     }
 
     # Since we don't have a real hypervisor, pretend we have lots of


@@ -98,6 +98,7 @@ class HyperVDriver(driver.ComputeDriver):
         "supports_migrate_to_same_host": False,
         "supports_attach_interface": True,
         "supports_device_tagging": True,
+        "supports_multiattach": False
     }
 
     def __init__(self, virtapi):


@@ -132,7 +132,8 @@ class IronicDriver(virt_driver.ComputeDriver):
     capabilities = {"has_imagecache": False,
                     "supports_recreate": False,
                     "supports_migrate_to_same_host": False,
-                    "supports_attach_interface": True
+                    "supports_attach_interface": True,
+                    "supports_multiattach": False
                     }
 
     # Needed for exiting instances to have allocations for custom resource


@@ -298,6 +298,11 @@ PERF_EVENTS_CPU_FLAG_MAPPING = {'cmt': 'cmt',
 # Mediated devices support
 MIN_LIBVIRT_MDEV_SUPPORT = (3, 4, 0)
 
+# libvirt>=3.10 is required for volume multiattach if qemu<2.10.
+# See https://bugzilla.redhat.com/show_bug.cgi?id=1378242
+# for details.
+MIN_LIBVIRT_MULTIATTACH = (3, 10, 0)
+
 
 class LibvirtDriver(driver.ComputeDriver):
     capabilities = {
@@ -309,6 +314,9 @@ class LibvirtDriver(driver.ComputeDriver):
         "supports_tagged_attach_interface": True,
         "supports_tagged_attach_volume": True,
         "supports_extend_volume": True,
+        # Multiattach support is conditional on qemu and libvirt versions
+        # determined in init_host.
+        "supports_multiattach": False
     }
 
     def __init__(self, virtapi, read_only=False):
@@ -424,9 +432,14 @@ class LibvirtDriver(driver.ComputeDriver):
         except AttributeError:
             return
 
-        cache_mode = self.disk_cachemodes.get(source_type,
-                                              driver_cache)
-        conf.driver_cache = cache_mode
+        # Shareable disks like for a multi-attach volume need to have the
+        # driver cache disabled.
+        if getattr(conf, 'shareable', False):
+            conf.driver_cache = 'none'
+        else:
+            cache_mode = self.disk_cachemodes.get(source_type,
+                                                  driver_cache)
+            conf.driver_cache = cache_mode
 
     def _do_quality_warnings(self):
         """Warn about untested driver configurations.
@@ -465,6 +478,8 @@ class LibvirtDriver(driver.ComputeDriver):
 
         self._supported_perf_events = self._get_supported_perf_events()
 
+        self._set_multiattach_support()
+
         if (CONF.libvirt.virt_type == 'lxc' and
                 not (CONF.libvirt.uid_maps and CONF.libvirt.gid_maps)):
             LOG.warning("Running libvirt-lxc without user namespaces is "
@@ -547,6 +562,20 @@
                                'libvirt_ver': self._version_to_string(
                                    MIN_LIBVIRT_OTHER_ARCH.get(kvm_arch))})
 
+    def _set_multiattach_support(self):
+        # Check to see if multiattach is supported. Based on bugzilla
+        # https://bugzilla.redhat.com/show_bug.cgi?id=1378242 and related
+        # clones, the shareable flag on a disk device will only work with
+        # qemu<2.10 or libvirt>=3.10. So check those versions here and set
+        # the capability appropriately.
+        if (self._host.has_min_version(lv_ver=MIN_LIBVIRT_MULTIATTACH) or
+                not self._host.has_min_version(hv_ver=(2, 10, 0))):
+            self.capabilities['supports_multiattach'] = True
+        else:
+            LOG.debug('Volume multiattach is not supported based on current '
+                      'versions of QEMU and libvirt. QEMU must be less than '
+                      '2.10 or libvirt must be greater than or equal to 3.10.')
+
     def _prepare_migration_flags(self):
         migration_flags = 0
 
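The version gate in _set_multiattach_support reduces to a simple predicate.
Here is a standalone sketch with example checks; the real method queries the
live host through has_min_version rather than taking version tuples:

def multiattach_supported(libvirt_version, qemu_version):
    # Shareable disks work with libvirt >= 3.10, or with qemu < 2.10,
    # per https://bugzilla.redhat.com/show_bug.cgi?id=1378242.
    return libvirt_version >= (3, 10, 0) or qemu_version < (2, 10, 0)

assert multiattach_supported((3, 10, 0), (2, 10, 0))      # new enough libvirt
assert multiattach_supported((3, 4, 0), (2, 9, 0))        # old enough qemu
assert not multiattach_supported((3, 4, 0), (2, 10, 0))   # neither holds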


@@ -104,6 +104,11 @@ class LibvirtBaseVolumeDriver(object):
             # specified.
             conf.device_addr.unit = disk_info['unit']
 
+        if connection_info.get('multiattach', False):
+            # Note that driver_cache should be disabled (none) when using
+            # a shareable disk.
+            conf.shareable = True
+
         return conf
 
     def connect_volume(self, connection_info, instance):


@@ -65,7 +65,8 @@ class VMwareVCDriver(driver.ComputeDriver):
         "has_imagecache": True,
         "supports_recreate": False,
         "supports_migrate_to_same_host": True,
-        "supports_attach_interface": True
+        "supports_attach_interface": True,
+        "supports_multiattach": False
     }
 
     # Legacy nodename is of the form: <mo id>(<cluster name>)


@@ -73,6 +73,7 @@ class XenAPIDriver(driver.ComputeDriver):
         "supports_migrate_to_same_host": False,
         "supports_attach_interface": True,
         "supports_device_tagging": True,
+        "supports_multiattach": False
     }
 
     def __init__(self, virtapi, read_only=False):