Browse Source

libvirt: Ensure all volume drivers log the instance whenever possible

This trivial change ensures the instance is logged within the volume
drivers whenever possible to ease debugging.

Change-Id: Ib61ba7266ad58b311adcac566a96149839cb688e
changes/60/780260/2
Lee Yarwood 1 year ago
parent
commit
fcbba7d2ae
  1. 4
      nova/virt/libvirt/volume/fibrechannel.py
  2. 4
      nova/virt/libvirt/volume/iscsi.py
  3. 25
      nova/virt/libvirt/volume/mount.py
  4. 7
      nova/virt/libvirt/volume/nvme.py
  5. 12
      nova/virt/libvirt/volume/quobyte.py
  6. 3
      nova/virt/libvirt/volume/scaleio.py

4
nova/virt/libvirt/volume/fibrechannel.py

@@ -50,9 +50,9 @@ class LibvirtFibreChannelVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
def connect_volume(self, connection_info, instance):
"""Attach the volume to instance_name."""
LOG.debug("Calling os-brick to attach FC Volume")
LOG.debug("Calling os-brick to attach FC Volume", instance=instance)
device_info = self.connector.connect_volume(connection_info['data'])
LOG.debug("Attached FC volume %s", device_info)
LOG.debug("Attached FC volume %s", device_info, instance=instance)
connection_info['data']['device_path'] = device_info['path']
if 'multipath_id' in device_info:

4
nova/virt/libvirt/volume/iscsi.py

@@ -60,9 +60,9 @@ class LibvirtISCSIVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
def connect_volume(self, connection_info, instance):
"""Attach the volume to instance_name."""
LOG.debug("Calling os-brick to attach iSCSI Volume")
LOG.debug("Calling os-brick to attach iSCSI Volume", instance=instance)
device_info = self.connector.connect_volume(connection_info['data'])
LOG.debug("Attached iSCSI volume %s", device_info)
LOG.debug("Attached iSCSI volume %s", device_info, instance=instance)
connection_info['data']['device_path'] = device_info['path']

25
nova/virt/libvirt/volume/mount.py

@@ -290,15 +290,17 @@ class _HostMountState(object):
'options=%(options)s) generation %(gen)s',
{'fstype': fstype, 'export': export, 'vol_name': vol_name,
'mountpoint': mountpoint, 'options': options,
'gen': self.generation})
'gen': self.generation}, instance=instance)
with self._get_locked(mountpoint) as mount:
if os.path.ismount(mountpoint):
LOG.debug(('Mounting %(mountpoint)s generation %(gen)s, '
'mountpoint already mounted'),
{'mountpoint': mountpoint, 'gen': self.generation})
{'mountpoint': mountpoint, 'gen': self.generation},
instance=instance)
else:
LOG.debug('Mounting %(mountpoint)s generation %(gen)s',
{'mountpoint': mountpoint, 'gen': self.generation})
{'mountpoint': mountpoint, 'gen': self.generation},
instance=instance)
fileutils.ensure_tree(mountpoint)
@@ -316,7 +318,7 @@ class _HostMountState(object):
'%(mountpoint)s. Continuing because mountpount is '
'mounted despite this.',
{'fstype': fstype, 'export': export,
'mountpoint': mountpoint})
'mountpoint': mountpoint}, instance=instance)
else:
# If the mount failed there's no reason for us to keep
# a record of it. It will be created again if the
@@ -331,7 +333,8 @@ class _HostMountState(object):
LOG.debug('_HostMountState.mount() for %(mountpoint)s '
'generation %(gen)s completed successfully',
{'mountpoint': mountpoint, 'gen': self.generation})
{'mountpoint': mountpoint, 'gen': self.generation},
instance=instance)
def umount(self, vol_name, mountpoint, instance):
"""Mark an attachment as no longer in use, and unmount its mountpoint
@@ -345,16 +348,15 @@ class _HostMountState(object):
LOG.debug('_HostMountState.umount(vol_name=%(vol_name)s, '
'mountpoint=%(mountpoint)s) generation %(gen)s',
{'vol_name': vol_name, 'mountpoint': mountpoint,
'gen': self.generation})
'gen': self.generation}, instance=instance)
with self._get_locked(mountpoint) as mount:
try:
mount.remove_attachment(vol_name, instance.uuid)
except KeyError:
LOG.warning("Request to remove attachment "
"(%(vol_name)s, %(instance)s) from "
LOG.warning("Request to remove attachment (%(vol_name)s from "
"%(mountpoint)s, but we don't think it's in use.",
{'vol_name': vol_name, 'instance': instance.uuid,
'mountpoint': mountpoint})
{'vol_name': vol_name, 'mountpoint': mountpoint},
instance=instance)
if not mount.in_use():
mounted = os.path.ismount(mountpoint)
@@ -368,7 +370,8 @@ class _HostMountState(object):
LOG.debug('_HostMountState.umount() for %(mountpoint)s '
'generation %(gen)s completed successfully',
{'mountpoint': mountpoint, 'gen': self.generation})
{'mountpoint': mountpoint, 'gen': self.generation},
instance=instance)
def _real_umount(self, mountpoint):
# Unmount and delete a mountpoint.

7
nova/virt/libvirt/volume/nvme.py

@@ -39,15 +39,14 @@ class LibvirtNVMEVolumeDriver(libvirt_volume.LibvirtVolumeDriver):
device_info = self.connector.connect_volume(
connection_info['data'])
LOG.debug(
"Connecting NVMe volume with device_info %s",
device_info)
LOG.debug("Connecting NVMe volume with device_info %s",
device_info, instance=instance)
connection_info['data']['device_path'] = device_info['path']
def disconnect_volume(self, connection_info, instance):
"""Detach the volume from the instance."""
LOG.debug("Disconnecting NVMe disk")
LOG.debug("Disconnecting NVMe disk", instance=instance)
self.connector.disconnect_volume(
connection_info['data'], None)
super(LibvirtNVMEVolumeDriver,

12
nova/virt/libvirt/volume/quobyte.py

@@ -158,9 +158,9 @@ class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
def connect_volume(self, connection_info, instance):
"""Connect the volume."""
if is_systemd():
LOG.debug("systemd detected.")
LOG.debug("systemd detected.", instance=instance)
else:
LOG.debug("No systemd detected.")
LOG.debug("No systemd detected.", instance=instance)
data = connection_info['data']
quobyte_volume = self._normalize_export(data['export'])
@@ -171,7 +171,7 @@ class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
except nova_exception.StaleVolumeMount:
mounted = False
LOG.info('Fixing previous mount %s which was not '
'unmounted correctly.', mount_path)
'unmounted correctly.', mount_path, instance=instance)
umount_volume(mount_path)
except nova_exception.InvalidVolume:
mounted = False
@@ -185,7 +185,8 @@ class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
validate_volume(mount_path)
except (nova_exception.InvalidVolume,
nova_exception.StaleVolumeMount) as nex:
LOG.error("Could not mount Quobyte volume: %s", nex)
LOG.error("Could not mount Quobyte volume: %s", nex,
instance=instance)
@utils.synchronized('connect_qb_volume')
def disconnect_volume(self, connection_info, instance):
@@ -196,7 +197,8 @@ class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
validate_volume(mount_path)
except (nova_exception.InvalidVolume,
nova_exception.StaleVolumeMount) as exc:
LOG.warning("Could not disconnect Quobyte volume mount: %s", exc)
LOG.warning("Could not disconnect Quobyte volume mount: %s", exc,
instance=instance)
else:
umount_volume(mount_path)

3
nova/virt/libvirt/volume/scaleio.py

@@ -53,7 +53,8 @@ class LibvirtScaleIOVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
def connect_volume(self, connection_info, instance):
device_info = self.connector.connect_volume(connection_info['data'])
LOG.debug("Attached ScaleIO volume %s.", device_info)
LOG.debug("Attached ScaleIO volume %s.", device_info,
instance=instance)
connection_info['data']['device_path'] = device_info['path']
def disconnect_volume(self, connection_info, instance):

Loading…
Cancel
Save