libvirt: Load and cache volume drivers on-demand

This change completes a long-standing TODO in the libvirt driver where
volume drivers had previously been loaded at startup. While quick, this
could result in drivers being dropped from the resulting list on hosts
that don't support the underlying connector protocols, as checked by
os-brick.

We should instead load and cache these drivers on demand, ensuring the
same instance is reused by repeat callers of _get_volume_driver.

An additional TODO is left in place to make this dict configurable in
the future, allowing drivers to be replaced by different volume driver
implementations; an example being the replacement of the os-brick
`iscsi` volume driver with a native QEMU `iscsi` volume driver.

Change-Id: If02a263055376442a116d00c18a1599b436f21d2
Lee Yarwood 2020-07-09 17:00:22 +01:00
parent 0f558749d1
commit 684780ac0d
3 changed files with 147 additions and 65 deletions
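The pattern introduced by this change, in minimal standalone form (the names get_volume_driver, _DRIVER_PATHS and the use of LookupError are illustrative only; the actual implementation is LibvirtDriver._get_volume_driver in the driver.py hunk below):

import threading

from oslo_utils import importutils

# Illustrative mapping of driver_volume_type to an importable class path.
_DRIVER_PATHS = {
    'iscsi': 'nova.virt.libvirt.volume.iscsi.LibvirtISCSIVolumeDriver',
}
_driver_cache = {}
_cache_lock = threading.Lock()

def get_volume_driver(connection_info, host):
    driver_type = connection_info.get('driver_volume_type')
    # Fail early for unknown or missing driver_volume_type values.
    if driver_type not in _DRIVER_PATHS:
        raise LookupError(driver_type)
    # Reuse the cached instance on repeat calls.
    if driver_type in _driver_cache:
        return _driver_cache[driver_type]
    with _cache_lock:
        # Another caller may have populated the cache while we waited.
        if driver_type not in _driver_cache:
            driver_class = importutils.import_class(_DRIVER_PATHS[driver_type])
            _driver_cache[driver_type] = driver_class(host)
    return _driver_cache[driver_type]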


@@ -694,6 +694,11 @@ class VolumeDriverNotFound(NotFound):
msg_fmt = _("Could not find a handler for %(driver_type)s volume.")
class VolumeDriverNotSupported(VolumeDriverNotFound):
msg_fmt = _("The %(volume_driver)s volume driver is not supported on this "
"platform.")
class InvalidImageRef(Invalid):
msg_fmt = _("Invalid image href %(image_href)s.")


@@ -24846,31 +24846,78 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.assertRaises(fakelibvirt.libvirtError,
self.drvr.trigger_crash_dump, instance)
@mock.patch.object(libvirt_driver.LOG, 'debug')
def test_get_volume_driver_invalid_connector_exception(self, mock_debug):
"""Tests that the driver doesn't fail to initialize if one of the
imported volume drivers raises InvalidConnectorProtocol from os-brick.
"""
# make a copy of the normal list and add a volume driver that raises
# the handled os-brick exception when imported.
libvirt_volume_drivers_copy = copy.copy(
libvirt_driver.libvirt_volume_drivers)
libvirt_volume_drivers_copy.append(
'invalid=nova.tests.unit.virt.libvirt.test_driver.'
'FakeInvalidVolumeDriver'
def test_get_volume_driver_invalid_type(self):
# Assert that VolumeDriverNotFound is raised when an invalid type is
# provided in the connection_info
self.assertRaises(
exception.VolumeDriverNotFound,
self.drvr._get_volume_driver,
{'driver_volume_type': 'foo'}
)
with mock.patch.object(libvirt_driver, 'libvirt_volume_drivers',
libvirt_volume_drivers_copy):
drvr = libvirt_driver.LibvirtDriver(
fake.FakeVirtAPI(), read_only=True
)
# make sure we didn't register the invalid volume driver
self.assertNotIn('invalid', drvr.volume_drivers)
# make sure we logged something
mock_debug.assert_called_with(
('Unable to load volume driver %s. '
'It is not supported on this host.'),
'nova.tests.unit.virt.libvirt.test_driver.FakeInvalidVolumeDriver'
# Assert that VolumeDriverNotFound is raised when driver_volume_type
# is missing in the provided connection_info.
self.assertRaises(
exception.VolumeDriverNotFound,
self.drvr._get_volume_driver,
{}
)
@mock.patch('oslo_utils.importutils.import_class')
def test_get_volume_driver(self, mock_import_class):
# Assert the behaviour of _get_volume_driver and the driver cache
# Use a valid VOLUME_DRIVERS key but mock out the import of the class
driver_volume_type = 'iscsi'
connection_info = {
'driver_volume_type': driver_volume_type
}
mock_driver = mock.Mock()
mock_class = mock.Mock(return_value=mock_driver)
mock_import_class.return_value = mock_class
# Assert that the cache is empty
self.assertEqual(0, len(self.drvr.volume_drivers))
# Assert that repeat calls return the same instance of mock_driver
self.assertEqual(
mock_driver,
self.drvr._get_volume_driver(connection_info)
)
self.assertEqual(
mock_driver,
self.drvr._get_volume_driver(connection_info)
)
# Assert that we only imported the class once
mock_import_class.assert_called_once_with(
libvirt_driver.VOLUME_DRIVERS.get('iscsi'))
# Assert that the cache only contains the mock_driver instance
self.assertEqual(1, len(self.drvr.volume_drivers))
self.assertEqual(
mock_driver,
self.drvr.volume_drivers.get(driver_volume_type)
)
@mock.patch('oslo_utils.importutils.import_class')
def test_get_volume_driver_unsupported_protocol(self, mock_import_class):
# Assert that VolumeDriverNotSupported is raised when os-brick raises
# InvalidConnectorProtocol while building the connector
connection_info = {
'driver_volume_type': 'iscsi'
}
mock_driver = mock.Mock(
side_effect=brick_exception.InvalidConnectorProtocol)
mock_class = mock.Mock(side_effect=mock_driver)
mock_import_class.return_value = mock_class
ex = self.assertRaises(
exception.VolumeDriverNotSupported,
self.drvr._get_volume_driver,
connection_info
)
self.assertIn('volume driver is not supported on this platform',
str(ex)
)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'


@@ -127,6 +127,7 @@ from nova.virt.libvirt import vif as libvirt_vif
from nova.virt.libvirt.volume import fs
from nova.virt.libvirt.volume import mount
from nova.virt.libvirt.volume import remotefs
from nova.virt.libvirt.volume import volume
from nova.virt import netutils
from nova.volume import cinder
@@ -172,25 +173,27 @@ class InjectionInfo(collections.namedtuple(
'admin_pass=<SANITIZED>)') % (self.network_info, self.files)
libvirt_volume_drivers = [
'iscsi=nova.virt.libvirt.volume.iscsi.LibvirtISCSIVolumeDriver',
'iser=nova.virt.libvirt.volume.iser.LibvirtISERVolumeDriver',
'local=nova.virt.libvirt.volume.volume.LibvirtVolumeDriver',
'fake=nova.virt.libvirt.volume.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.net.LibvirtNetVolumeDriver',
'nfs=nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver',
'smbfs=nova.virt.libvirt.volume.smbfs.LibvirtSMBFSVolumeDriver',
'fibre_channel='
'nova.virt.libvirt.volume.fibrechannel.'
'LibvirtFibreChannelVolumeDriver',
'gpfs=nova.virt.libvirt.volume.gpfs.LibvirtGPFSVolumeDriver',
'quobyte=nova.virt.libvirt.volume.quobyte.LibvirtQuobyteVolumeDriver',
'scaleio=nova.virt.libvirt.volume.scaleio.LibvirtScaleIOVolumeDriver',
'vzstorage='
'nova.virt.libvirt.volume.vzstorage.LibvirtVZStorageVolumeDriver',
'storpool=nova.virt.libvirt.volume.storpool.LibvirtStorPoolVolumeDriver',
'nvmeof=nova.virt.libvirt.volume.nvme.LibvirtNVMEVolumeDriver',
]
# NOTE(lyarwood): Dict of volume drivers supported by the libvirt driver, keyed
# by the connection_info['driver_volume_type'] returned by Cinder for each
# volume type it supports
# TODO(lyarwood): Add host configurables to allow this list to be changed.
# Allowing native iSCSI to be reintroduced etc.
VOLUME_DRIVERS = {
'iscsi': 'nova.virt.libvirt.volume.iscsi.LibvirtISCSIVolumeDriver',
'iser': 'nova.virt.libvirt.volume.iser.LibvirtISERVolumeDriver',
'local': 'nova.virt.libvirt.volume.volume.LibvirtVolumeDriver',
'fake': 'nova.virt.libvirt.volume.volume.LibvirtFakeVolumeDriver',
'rbd': 'nova.virt.libvirt.volume.net.LibvirtNetVolumeDriver',
'nfs': 'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver',
'smbfs': 'nova.virt.libvirt.volume.smbfs.LibvirtSMBFSVolumeDriver',
'fibre_channel': 'nova.virt.libvirt.volume.fibrechannel.LibvirtFibreChannelVolumeDriver', # noqa:E501
'gpfs': 'nova.virt.libvirt.volume.gpfs.LibvirtGPFSVolumeDriver',
'quobyte': 'nova.virt.libvirt.volume.quobyte.LibvirtQuobyteVolumeDriver',
'scaleio': 'nova.virt.libvirt.volume.scaleio.LibvirtScaleIOVolumeDriver',
'vzstorage': 'nova.virt.libvirt.volume.vzstorage.LibvirtVZStorageVolumeDriver', # noqa:E501
'storpool': 'nova.virt.libvirt.volume.storpool.LibvirtStorPoolVolumeDriver', # noqa:E501
'nvmeof': 'nova.virt.libvirt.volume.nvme.LibvirtNVMEVolumeDriver',
}
def patch_tpool_proxy():
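As the NOTE above describes, lookups into VOLUME_DRIVERS are keyed by the driver_volume_type that Cinder returns in connection_info. An abbreviated, illustrative example (the 'data' contents are made up):

connection_info = {
    'driver_volume_type': 'rbd',            # set by the Cinder backend
    'data': {'name': 'volumes/volume-1'},   # backend-specific details
}
# Resolves to 'nova.virt.libvirt.volume.net.LibvirtNetVolumeDriver'
driver_path = VOLUME_DRIVERS[connection_info['driver_volume_type']]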
@@ -319,11 +322,8 @@ class LibvirtDriver(driver.ComputeDriver):
self.vif_driver = libvirt_vif.LibvirtGenericVIFDriver()
# TODO(mriedem): Long-term we should load up the volume drivers on
# demand as needed rather than doing this on startup, as there might
# be unsupported volume drivers in this list based on the underlying
# platform.
self.volume_drivers = self._get_volume_drivers()
# NOTE(lyarwood): Volume drivers are loaded on-demand
self.volume_drivers: ty.Dict[str, volume.LibvirtBaseVolumeDriver] = {}
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
@@ -465,20 +465,6 @@ class LibvirtDriver(driver.ComputeDriver):
align=ns['daxregion']['align'])
return vpmems_host
def _get_volume_drivers(self):
driver_registry = dict()
for driver_str in libvirt_volume_drivers:
driver_type, _sep, driver = driver_str.partition('=')
driver_class = importutils.import_class(driver)
try:
driver_registry[driver_type] = driver_class(self._host)
except brick_exception.InvalidConnectorProtocol:
LOG.debug('Unable to load volume driver %s. It is not '
'supported on this host.', driver)
return driver_registry
@property
def disk_cachemode(self):
# It can be confusing to understand the QEMU cache mode
@@ -1580,11 +1566,55 @@ class LibvirtDriver(driver.ComputeDriver):
except exception.InternalError as e:
LOG.debug(e, instance=instance)
def _get_volume_driver(self, connection_info):
def _get_volume_driver(
self, connection_info: ty.Dict[str, ty.Any]
) -> 'volume.LibvirtBaseVolumeDriver':
"""Fetch the nova.virt.libvirt.volume driver
Based on the provided connection_info, return a nova.virt.libvirt.volume
driver. This will call out to os-brick to construct a connector and
check that the connector is valid on the underlying host.
:param connection_info: The connection_info associated with the volume
:raises: VolumeDriverNotFound if no driver is found or if the host
doesn't support the requested driver. This retains the legacy behaviour
from when only supported drivers were loaded on startup, which led to
VolumeDriverNotFound being raised later if an invalid driver was
requested.
"""
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
# If the driver_type isn't listed in the supported type list fail
if driver_type not in VOLUME_DRIVERS:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
return self.volume_drivers[driver_type]
# Return the cached driver
if driver_type in self.volume_drivers:
return self.volume_drivers.get(driver_type)
@utils.synchronized('cache_volume_driver')
def _cache_volume_driver(driver_type):
# Check if another request cached the driver while we waited
if driver_type in self.volume_drivers:
return self.volume_drivers.get(driver_type)
try:
driver_class = importutils.import_class(
VOLUME_DRIVERS.get(driver_type))
self.volume_drivers[driver_type] = driver_class(self._host)
return self.volume_drivers.get(driver_type)
except brick_exception.InvalidConnectorProtocol:
LOG.debug('Unable to load volume driver %s. It is not '
'supported on this host.', driver_type)
# NOTE(lyarwood): This exception is a subclass of
# VolumeDriverNotFound to ensure no callers have to change
# their error handling code after the move to on-demand loading
# of the volume drivers and associated os-brick connectors.
raise exception.VolumeDriverNotSupported(
volume_driver=VOLUME_DRIVERS.get(driver_type))
# Load and cache the volume driver if it hasn't been cached already
return _cache_volume_driver(driver_type)
def _connect_volume(self, context, connection_info, instance,
encryption=None):