Merge "libvirt: Ensure all volume drivers log the instance whenever possible"

This commit is contained in:
Zuul 2022-01-17 17:39:15 +00:00 committed by Gerrit Code Review
commit 58fda2ead5
6 changed files with 30 additions and 25 deletions

View File

@@ -50,9 +50,9 @@ class LibvirtFibreChannelVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
     def connect_volume(self, connection_info, instance):
         """Attach the volume to instance_name."""
-        LOG.debug("Calling os-brick to attach FC Volume")
+        LOG.debug("Calling os-brick to attach FC Volume", instance=instance)
         device_info = self.connector.connect_volume(connection_info['data'])
-        LOG.debug("Attached FC volume %s", device_info)
+        LOG.debug("Attached FC volume %s", device_info, instance=instance)
         connection_info['data']['device_path'] = device_info['path']
         if 'multipath_id' in device_info:

View File

@@ -60,9 +60,9 @@ class LibvirtISCSIVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
     def connect_volume(self, connection_info, instance):
         """Attach the volume to instance_name."""
-        LOG.debug("Calling os-brick to attach iSCSI Volume")
+        LOG.debug("Calling os-brick to attach iSCSI Volume", instance=instance)
         device_info = self.connector.connect_volume(connection_info['data'])
-        LOG.debug("Attached iSCSI volume %s", device_info)
+        LOG.debug("Attached iSCSI volume %s", device_info, instance=instance)
         connection_info['data']['device_path'] = device_info['path']

View File

@@ -290,15 +290,17 @@ class _HostMountState(object):
                   'options=%(options)s) generation %(gen)s',
                   {'fstype': fstype, 'export': export, 'vol_name': vol_name,
                    'mountpoint': mountpoint, 'options': options,
-                   'gen': self.generation})
+                   'gen': self.generation}, instance=instance)
         with self._get_locked(mountpoint) as mount:
             if os.path.ismount(mountpoint):
                 LOG.debug(('Mounting %(mountpoint)s generation %(gen)s, '
                            'mountpoint already mounted'),
-                          {'mountpoint': mountpoint, 'gen': self.generation})
+                          {'mountpoint': mountpoint, 'gen': self.generation},
+                          instance=instance)
             else:
                 LOG.debug('Mounting %(mountpoint)s generation %(gen)s',
-                          {'mountpoint': mountpoint, 'gen': self.generation})
+                          {'mountpoint': mountpoint, 'gen': self.generation},
+                          instance=instance)
                 fileutils.ensure_tree(mountpoint)
@@ -316,7 +318,7 @@ class _HostMountState(object):
                             '%(mountpoint)s. Continuing because mountpount is '
                             'mounted despite this.',
                             {'fstype': fstype, 'export': export,
-                             'mountpoint': mountpoint})
+                             'mountpoint': mountpoint}, instance=instance)
                 else:
                     # If the mount failed there's no reason for us to keep
                     # a record of it. It will be created again if the
@@ -331,7 +333,8 @@ class _HostMountState(object):
         LOG.debug('_HostMountState.mount() for %(mountpoint)s '
                   'generation %(gen)s completed successfully',
-                  {'mountpoint': mountpoint, 'gen': self.generation})
+                  {'mountpoint': mountpoint, 'gen': self.generation},
+                  instance=instance)

     def umount(self, vol_name, mountpoint, instance):
         """Mark an attachment as no longer in use, and unmount its mountpoint
@@ -345,16 +348,15 @@ class _HostMountState(object):
         LOG.debug('_HostMountState.umount(vol_name=%(vol_name)s, '
                   'mountpoint=%(mountpoint)s) generation %(gen)s',
                   {'vol_name': vol_name, 'mountpoint': mountpoint,
-                   'gen': self.generation})
+                   'gen': self.generation}, instance=instance)
         with self._get_locked(mountpoint) as mount:
             try:
                 mount.remove_attachment(vol_name, instance.uuid)
             except KeyError:
-                LOG.warning("Request to remove attachment "
-                            "(%(vol_name)s, %(instance)s) from "
+                LOG.warning("Request to remove attachment (%(vol_name)s from "
                             "%(mountpoint)s, but we don't think it's in use.",
-                            {'vol_name': vol_name, 'instance': instance.uuid,
-                             'mountpoint': mountpoint})
+                            {'vol_name': vol_name, 'mountpoint': mountpoint},
+                            instance=instance)

             if not mount.in_use():
                 mounted = os.path.ismount(mountpoint)
@@ -368,7 +370,8 @@ class _HostMountState(object):
         LOG.debug('_HostMountState.umount() for %(mountpoint)s '
                   'generation %(gen)s completed successfully',
-                  {'mountpoint': mountpoint, 'gen': self.generation})
+                  {'mountpoint': mountpoint, 'gen': self.generation},
+                  instance=instance)

     def _real_umount(self, mountpoint):
         # Unmount and delete a mountpoint.

View File

@@ -39,15 +39,14 @@ class LibvirtNVMEVolumeDriver(libvirt_volume.LibvirtVolumeDriver):
         device_info = self.connector.connect_volume(
             connection_info['data'])
-        LOG.debug(
-            "Connecting NVMe volume with device_info %s",
-            device_info)
+        LOG.debug("Connecting NVMe volume with device_info %s",
+                  device_info, instance=instance)

         connection_info['data']['device_path'] = device_info['path']

     def disconnect_volume(self, connection_info, instance):
         """Detach the volume from the instance."""
-        LOG.debug("Disconnecting NVMe disk")
+        LOG.debug("Disconnecting NVMe disk", instance=instance)
         self.connector.disconnect_volume(
             connection_info['data'], None)
         super(LibvirtNVMEVolumeDriver,

View File

@@ -158,9 +158,9 @@ class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
     def connect_volume(self, connection_info, instance):
         """Connect the volume."""
         if is_systemd():
-            LOG.debug("systemd detected.")
+            LOG.debug("systemd detected.", instance=instance)
         else:
-            LOG.debug("No systemd detected.")
+            LOG.debug("No systemd detected.", instance=instance)
         data = connection_info['data']
         quobyte_volume = self._normalize_export(data['export'])
@@ -171,7 +171,7 @@ class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
         except nova_exception.StaleVolumeMount:
             mounted = False
             LOG.info('Fixing previous mount %s which was not '
-                     'unmounted correctly.', mount_path)
+                     'unmounted correctly.', mount_path, instance=instance)
             umount_volume(mount_path)
         except nova_exception.InvalidVolume:
             mounted = False
@@ -185,7 +185,8 @@ class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
                 validate_volume(mount_path)
             except (nova_exception.InvalidVolume,
                     nova_exception.StaleVolumeMount) as nex:
-                LOG.error("Could not mount Quobyte volume: %s", nex)
+                LOG.error("Could not mount Quobyte volume: %s", nex,
+                          instance=instance)

     @utils.synchronized('connect_qb_volume')
     def disconnect_volume(self, connection_info, instance):
@@ -196,7 +197,8 @@ class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
             validate_volume(mount_path)
         except (nova_exception.InvalidVolume,
                 nova_exception.StaleVolumeMount) as exc:
-            LOG.warning("Could not disconnect Quobyte volume mount: %s", exc)
+            LOG.warning("Could not disconnect Quobyte volume mount: %s", exc,
+                        instance=instance)
         else:
             umount_volume(mount_path)

View File

@@ -53,7 +53,8 @@ class LibvirtScaleIOVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
     def connect_volume(self, connection_info, instance):
         device_info = self.connector.connect_volume(connection_info['data'])
-        LOG.debug("Attached ScaleIO volume %s.", device_info)
+        LOG.debug("Attached ScaleIO volume %s.", device_info,
+                  instance=instance)
         connection_info['data']['device_path'] = device_info['path']

     def disconnect_volume(self, connection_info, instance):