Conform to OpenStack log/exception I18N guidelines

Corrections to conform to guidelines here:
http://docs.openstack.org/developer/oslo.i18n/guidelines.html#
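
The pattern applied throughout the commit is the same in every file: translated log messages keep their _LI/_LW/_LE markers, but format arguments are no longer interpolated into the message with the % operator before the call. Instead they are passed to the logging method itself, which defers interpolation until the record is actually emitted (and skips it entirely when the level is disabled). A minimal before/after sketch; vol_name is illustrative and the message text is borrowed from the localdisk changes below:

    from oslo_log import log as logging
    from nova.i18n import _LI

    LOG = logging.getLogger(__name__)
    vol_name = 'boot_7d3aeb2f_img'

    # Before: the string is formatted eagerly, whether or not INFO is enabled.
    LOG.info(_LI('Extending disk: %s') % vol_name)

    # After: the format string and its argument are handed to the logger,
    # which performs the substitution only if the record is emitted.
    LOG.info(_LI('Extending disk: %s'), vol_name)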

Change-Id: I808c9547115dfa7b85e3a0915c9fcc56771cb577
Eric Fried 2015-05-14 17:45:07 -05:00
parent d9d511120a
commit f8ef71301a
9 changed files with 92 additions and 90 deletions

View File

@ -510,10 +510,14 @@ class TestPowerVMDriver(test.TestCase):
inst = objects.Instance(**powervm.TEST_INSTANCE)
drv._log_operation('fake_op', inst)
entry = ('Operation: fake_op. Virtual machine display '
'name: Fake Instance, name: instance-00000001, '
'UUID: 49629a5c-f4c4-4721-9511-9725786ff2e5')
mock_log.info.assert_called_with(entry)
entry = (r'Operation: %(op)s. Virtual machine display '
'name: %(display_name)s, name: %(name)s, '
'UUID: %(uuid)s')
msg_dict = {'uuid': '49629a5c-f4c4-4721-9511-9725786ff2e5',
'display_name': u'Fake Instance',
'name': 'instance-00000001',
'op': 'fake_op'}
mock_log.info.assert_called_with(entry, msg_dict)
def test_host_resources(self):
# Set the return value of None so we use the cached value in the drv
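
The assertion above changes accordingly: because _log_operation now hands the logger a format string plus a dict rather than a pre-built string, the mock records two positional arguments and the test must match both. A self-contained sketch of the same assertion style; the helper below is a simplified stand-in for the driver's _log_operation, and the instance values mirror the fixture used above:

    from unittest import mock

    def _log_operation(log, op, instance):
        # Simplified stand-in: format string plus dict, no % expression.
        log.info('Operation: %(op)s. Virtual machine display '
                 'name: %(display_name)s, name: %(name)s, '
                 'UUID: %(uuid)s',
                 {'op': op, 'display_name': instance['display_name'],
                  'name': instance['name'], 'uuid': instance['uuid']})

    mock_log = mock.Mock()
    inst = {'display_name': u'Fake Instance',
            'name': 'instance-00000001',
            'uuid': '49629a5c-f4c4-4721-9511-9725786ff2e5'}
    _log_operation(mock_log, 'fake_op', inst)

    # The call is recorded as (format_string, mapping), so the expected
    # message and the expected dict are asserted together.
    entry = ('Operation: %(op)s. Virtual machine display '
             'name: %(display_name)s, name: %(name)s, '
             'UUID: %(uuid)s')
    msg_dict = {'op': 'fake_op',
                'display_name': u'Fake Instance',
                'name': 'instance-00000001',
                'uuid': '49629a5c-f4c4-4721-9511-9725786ff2e5'}
    mock_log.info.assert_called_with(entry, msg_dict)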

View File

@ -20,7 +20,7 @@ from oslo_config import cfg
from oslo_log import log as logging
from nova import exception as nova_exc
from nova.i18n import _LI, _LE
from nova.i18n import _, _LI, _LE
from pypowervm import exceptions as pvm_exc
from pypowervm.tasks import scsi_mapper as tsk_map
from pypowervm.tasks import storage as tsk_stg
@ -54,8 +54,8 @@ CONF.register_opts(localdisk_opts)
class VGNotFound(disk.AbstractDiskException):
msg_fmt = _LE('Unable to locate the volume group \'%(vg_name)s\' '
'for this operation.')
msg_fmt = _("Unable to locate the volume group '%(vg_name)s' for this "
"operation.")
class LocalStorage(disk_dvr.DiskAdapter):
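
Exception format strings move in the opposite direction: msg_fmt text surfaces to the user through the raised exception, so it is marked with the plain _() function (hence the widened import, from nova.i18n import _, _LI, _LE), while the _LE/_LI/_LW markers stay reserved for the corresponding LOG.error/info/warn calls. A hedged sketch of the split; VGNotFound here derives from Exception rather than the project's AbstractDiskException, and assert_vg_exists and its error log line are illustrative, not taken from the change:

    from nova.i18n import _, _LE
    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    class VGNotFound(Exception):
        # User-facing text: plain _() marker with a named placeholder.
        msg_fmt = _("Unable to locate the volume group '%(vg_name)s' for this "
                    "operation.")

        def __init__(self, **kwargs):
            super(VGNotFound, self).__init__(self.msg_fmt % kwargs)

    def assert_vg_exists(vg_name, known_groups):
        if vg_name not in known_groups:
            # Operator-facing log line: _LE marker, interpolation deferred.
            LOG.error(_LE("Volume group '%s' was not found."), vg_name)
            raise VGNotFound(vg_name=vg_name)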
@ -67,8 +67,8 @@ class LocalStorage(disk_dvr.DiskAdapter):
# Query to get the Volume Group UUID
self.vg_name = CONF.volume_group_name
self.vios_uuid, self.vg_uuid = self._get_vg_uuid(self.vg_name)
LOG.info(_LI('Local Storage driver initialized: '
'volume group: \'%s\'') % self.vg_name)
LOG.info(_LI("Local Storage driver initialized: volume group: '%s'"),
self.vg_name)
@property
def capacity(self):
@ -102,8 +102,7 @@ class LocalStorage(disk_dvr.DiskAdapter):
# resides in the scsi map from the volume group.
existing_vds = vg_wrap.virtual_disks
for removal in storage_elems:
LOG.info(_LI('Deleting disk: %s') % removal.name,
instance=instance)
LOG.info(_LI('Deleting disk: %s'), removal.name, instance=instance)
# Can't just call direct on remove, because attribs are off.
# May want to evaluate change in pypowervm for this.
@ -204,7 +203,7 @@ class LocalStorage(disk_dvr.DiskAdapter):
break
if not disk_found:
LOG.error(_LE('Disk %s not found during resize.') % vol_name,
LOG.error(_LE('Disk %s not found during resize.'), vol_name,
instance=instance)
raise nova_exc.DiskNotFound(
location=self.vg_name + '/' + vol_name)
@ -217,7 +216,7 @@ class LocalStorage(disk_dvr.DiskAdapter):
# Get the disk name based on the instance and type
vol_name = self._get_disk_name(disk_info['type'], instance)
LOG.info(_LI('Extending disk: %s') % vol_name)
LOG.info(_LI('Extending disk: %s'), vol_name)
try:
_extend()
except pvm_exc.Error:
@ -252,7 +251,7 @@ class LocalStorage(disk_dvr.DiskAdapter):
child_type=pvm_stg.VG.schema_type)
vol_grps = pvm_stg.VG.wrap(resp)
for vol_grp in vol_grps:
LOG.debug('Volume group: %s' % vol_grp.name)
LOG.debug('Volume group: %s', vol_grp.name)
if name == vol_grp.name:
return vios_wrap.uuid, vol_grp.uuid
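
Two smaller points from this file are worth noting: debug-level messages are left untranslated (no marker at all) but still switch from % to deferred arguments, and keyword arguments such as Nova's instance= hint are unaffected, simply following the positional format arguments. A short sketch; log_disk_cleanup is a hypothetical wrapper around the calls shown above:

    from oslo_log import log as logging
    from nova.i18n import _LI

    LOG = logging.getLogger(__name__)

    def log_disk_cleanup(vg_name, disk_name, instance):
        # Debug text carries no translation marker; only the interpolation
        # style changes.
        LOG.debug('Volume group: %s', vg_name)
        # Keyword arguments like instance= ride along after the positional
        # format arguments.
        LOG.info(_LI('Deleting disk: %s'), disk_name, instance=instance)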

View File

@ -20,7 +20,7 @@ from oslo_config import cfg
import oslo_log.log as logging
from nova import image
from nova.i18n import _LI, _LE
from nova.i18n import _, _LI, _LE
import nova_powervm.virt.powervm.disk as disk
from nova_powervm.virt.powervm.disk import driver as disk_drv
from nova_powervm.virt.powervm import vm
@ -46,22 +46,22 @@ CONF.register_opts(ssp_opts)
class ClusterNotFoundByName(disk.AbstractDiskException):
msg_fmt = _LE("Unable to locate the Cluster '%(clust_name)s' for this "
"operation.")
msg_fmt = _("Unable to locate the Cluster '%(clust_name)s' for this "
"operation.")
class NoConfigNoClusterFound(disk.AbstractDiskException):
msg_fmt = _LE('Unable to locate any Cluster for this operation.')
msg_fmt = _('Unable to locate any Cluster for this operation.')
class TooManyClustersFound(disk.AbstractDiskException):
msg_fmt = _LE("Unexpectedly found %(clust_count)d Clusters "
"matching name '%(clust_name)s'.")
msg_fmt = _("Unexpectedly found %(clust_count)d Clusters "
"matching name '%(clust_name)s'.")
class NoConfigTooManyClusters(disk.AbstractDiskException):
msg_fmt = _LE("No cluster_name specified. Refusing to select one of the "
"%(clust_count)d Clusters found.")
msg_fmt = _("No cluster_name specified. Refusing to select one of the "
"%(clust_count)d Clusters found.")
class SSPDiskAdapter(disk_drv.DiskAdapter):
@ -92,8 +92,8 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
self.image_api = image.API()
LOG.info(_LI("SSP Storage driver initialized. "
"Cluster '%(clust_name)s'; SSP '%(ssp_name)s'")
% {'clust_name': self.clust_name, 'ssp_name': self.ssp_name})
"Cluster '%(clust_name)s'; SSP '%(ssp_name)s'"),
{'clust_name': self.clust_name, 'ssp_name': self.ssp_name})
@property
def capacity(self):
@ -168,7 +168,7 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
:returns: The backing pypowervm LU storage object that was created.
"""
LOG.info(_LI('SSP: Create %(image_type)s disk from image %(image_id)s '
'for instance %(instance_uuid)s.') %
'for instance %(instance_uuid)s.'),
dict(image_type=image_type, image_id=img_meta['id'],
instance_uuid=instance.uuid))
@ -204,14 +204,14 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
ssp = self._ssp
for lu in ssp.logical_units:
if lu.lu_type == pvm_stg.LUType.IMAGE and lu.name == luname:
LOG.info(_LI('SSP: Using already-uploaded image LU %s.') %
LOG.info(_LI('SSP: Using already-uploaded image LU %s.'),
luname)
return lu
# We don't have it yet. Create it and upload the glance image to it.
# Make the image LU only as big as the image.
stream = self._get_image_upload(context, img_meta)
LOG.info(_LI('SSP: Uploading new image LU %s.') % luname)
LOG.info(_LI('SSP: Uploading new image LU %s.'), luname)
lu, f_wrap = tsk_stg.upload_new_lu(self._any_vios_uuid(), ssp, stream,
luname, img_meta['size'])
return lu
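
When a message carries several values, the guideline form keeps the named placeholders and passes a single mapping as the next positional argument, so the trailing % {...} simply becomes , {...} (or , dict(...)). A minimal sketch with illustrative cluster and SSP names:

    from oslo_log import log as logging
    from nova.i18n import _LI

    LOG = logging.getLogger(__name__)

    clust_name = 'cluster_a'
    ssp_name = 'ssp_a'

    # One format string, one mapping of named arguments, no % operator.
    LOG.info(_LI("SSP Storage driver initialized. "
                 "Cluster '%(clust_name)s'; SSP '%(ssp_name)s'"),
             {'clust_name': clust_name, 'ssp_name': ssp_name})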

View File

@ -116,14 +116,14 @@ class PowerVMDriver(driver.ComputeDriver):
_("Expected exactly one host; found %d"), len(syswraps))
self.host_wrapper = syswraps[0]
self.host_uuid = self.host_wrapper.uuid
LOG.info(_LI("Host UUID is:%s") % self.host_uuid)
LOG.info(_LI("Host UUID is:%s"), self.host_uuid)
@staticmethod
def _log_operation(op, instance):
"""Log entry point of driver operations
"""
LOG.info(_LI('Operation: %(op)s. Virtual machine display name: '
'%(display_name)s, name: %(name)s, UUID: %(uuid)s') %
'%(display_name)s, name: %(name)s, UUID: %(uuid)s'),
{'op': op, 'display_name': instance.display_name,
'name': instance.name, 'uuid': instance.uuid})
@ -560,8 +560,8 @@ class PowerVMDriver(driver.ComputeDriver):
# If the crt_cna flag is true, then actually kick off the create
if crt_cna:
LOG.info(_LI('Creating VIF with mac %(mac)s for instance '
'%(inst)s') % {'mac': vif['address'],
'inst': instance.name},
'%(inst)s'), {'mac': vif['address'],
'inst': instance.name},
instance=instance)
vm.crt_vif(self.adapter, instance, self.host_uuid, vif)
@ -693,7 +693,7 @@ class PowerVMDriver(driver.ComputeDriver):
@pvm_retry.retry(delay_func=_delay)
def _update_vm():
LOG.debug('Resizing instance %s.' % instance.name,
LOG.debug('Resizing instance %s.', instance.name,
instance=instance)
entry = vm.get_instance_wrapper(self.adapter, instance,
self.host_uuid)

View File

@ -16,7 +16,7 @@
import abc
from nova.api.metadata import base as instance_metadata
from nova.i18n import _LE, _LI, _LW
from nova.i18n import _, _LI, _LW
from nova.virt import configdrive
import os
@ -48,9 +48,9 @@ class AbstractMediaException(Exception):
class NoMediaRepoVolumeGroupFound(AbstractMediaException):
msg_fmt = _LE('Unable to locate the volume group %(vol_grp)s to store the '
'virtual optical media within. Unable to create the '
'media repository.')
msg_fmt = _('Unable to locate the volume group %(vol_grp)s to store the '
'virtual optical media within. Unable to create the '
'media repository.')
class ConfigDrivePowerVM(object):
@ -89,7 +89,7 @@ class ConfigDrivePowerVM(object):
:return iso_path: The path to the ISO
:return file_name: The file name for the ISO
"""
LOG.info(_LI("Creating config drive for instance: %s") % instance.name)
LOG.info(_LI("Creating config drive for instance: %s"), instance.name)
extra_md = {}
if admin_pass is not None:
extra_md['admin_pass'] = admin_pass
@ -108,7 +108,7 @@ class ConfigDrivePowerVM(object):
iso_path = os.path.join(CONF.image_meta_local_path, file_name)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
LOG.info(_LI("Config drive ISO being built for instance %(inst)s "
"building to path %(iso_path)s.") %
"building to path %(iso_path)s."),
{'inst': instance.name, 'iso_path': iso_path})
cdb.make_drive(iso_path)
return iso_path, file_name
@ -217,7 +217,7 @@ class ConfigDrivePowerVM(object):
break
except Exception:
LOG.warn(_LW('Unable to read volume groups for Virtual '
'I/O Server %s') % vio_wrap.name)
'I/O Server %s'), vio_wrap.name)
pass
# If we didn't find a volume group...raise the exception. It should

View File

@ -54,7 +54,7 @@ class ConnectVolume(task.Task):
requires=['lpar_wrap'])
def execute(self, lpar_wrap):
LOG.info(_LI('Connecting volume %(vol)s to instance %(inst)s') %
LOG.info(_LI('Connecting volume %(vol)s to instance %(inst)s'),
{'vol': self.vol_id, 'inst': self.instance.name})
return self.vol_drv.connect_volume(self.adapter, self.host_uuid,
lpar_wrap.uuid, self.instance,
@ -68,7 +68,7 @@ class ConnectVolume(task.Task):
return
LOG.warn(_LW('Volume %(vol)s for instance %(inst)s to be '
'disconnected') %
'disconnected'),
{'vol': self.vol_id, 'inst': self.instance.name})
return self.vol_drv.disconnect_volume(self.adapter, self.host_uuid,
@ -106,7 +106,7 @@ class DisconnectVolume(task.Task):
self.vol_id)
def execute(self):
LOG.info(_LI('Disconnecting volume %(vol)s from instance %(inst)s') %
LOG.info(_LI('Disconnecting volume %(vol)s from instance %(inst)s'),
{'vol': self.vol_id, 'inst': self.instance.name})
return self.vol_drv.disconnect_volume(self.adapter, self.host_uuid,
self.vm_uuid, self.instance,
@ -120,7 +120,7 @@ class DisconnectVolume(task.Task):
return
LOG.warn(_LW('Volume %(vol)s for instance %(inst)s to be '
're-connected') %
're-connected'),
{'vol': self.vol_id, 'inst': self.instance.name})
return self.vol_drv.connect_volume(self.adapter, self.host_uuid,
self.vm_uuid, self.instance,
@ -155,7 +155,7 @@ class CreateDiskForImg(task.Task):
self.image_type = image_type
def execute(self):
LOG.info(_LI('Creating disk for instance: %s') % self.instance.name)
LOG.info(_LI('Creating disk for instance: %s'), self.instance.name)
return self.disk_dvr.create_disk_from_image(
self.context, self.instance, self.image_meta, self.disk_size,
image_type=self.image_type)
@ -163,7 +163,7 @@ class CreateDiskForImg(task.Task):
def revert(self, result, flow_failures):
# The parameters have to match the execute method, plus the response +
# failures even if only a subset are used.
LOG.warn(_LW('Image for instance %s to be deleted') %
LOG.warn(_LW('Image for instance %s to be deleted'),
self.instance.name)
if result is None or isinstance(result, task_fail.Failure):
# No result means no disk to clean up.
@ -197,12 +197,12 @@ class ConnectDisk(task.Task):
self.instance = instance
def execute(self, lpar_wrap, disk_dev_info):
LOG.info(_LI('Connecting disk to instance: %s') % self.instance.name)
LOG.info(_LI('Connecting disk to instance: %s'), self.instance.name)
self.disk_dvr.connect_disk(self.context, self.instance, disk_dev_info,
lpar_wrap.uuid)
def revert(self, lpar_wrap, disk_dev_info, result, flow_failures):
LOG.warn(_LW('Disk image being disconnected from instance %s') %
LOG.warn(_LW('Disk image being disconnected from instance %s'),
self.instance.name)
self.disk_dvr.disconnect_image_disk(self.context, self.instance,
lpar_wrap.uuid)
@ -238,7 +238,7 @@ class CreateAndConnectCfgDrive(task.Task):
self.mb = None
def execute(self, lpar_wrap):
LOG.info(_LI('Creating Config Drive for instance: %s') %
LOG.info(_LI('Creating Config Drive for instance: %s'),
self.instance.name)
self.mb = media.ConfigDrivePowerVM(self.adapter, self.host_uuid)
self.mb.create_cfg_drv_vopt(self.instance, self.injected_files,
@ -275,8 +275,8 @@ class DeleteVOpt(task.Task):
self.lpar_uuid = lpar_uuid
def execute(self):
LOG.info(_LI('Deleting Virtual Optical Media for instance %s')
% self.instance.name)
LOG.info(_LI('Deleting Virtual Optical Media for instance %s'),
self.instance.name)
media_builder = media.ConfigDrivePowerVM(self.adapter, self.host_uuid)
media_builder.dlt_vopt(self.lpar_uuid)
@ -306,8 +306,8 @@ class DetachDisk(task.Task):
self.disk_type = disk_type
def execute(self):
LOG.info(_LI('Detaching disk storage adapters for instance %s')
% self.instance.name)
LOG.info(_LI('Detaching disk storage adapters for instance %s'),
self.instance.name)
return self.disk_dvr.disconnect_image_disk(self.context, self.instance,
self.lpar_uuid,
disk_type=self.disk_type)
@ -332,7 +332,7 @@ class DeleteDisk(task.Task):
self.instance = instance
def execute(self, stor_adpt_mappings):
LOG.info(_LI('Deleting storage disk for instance %s.') %
LOG.info(_LI('Deleting storage disk for instance %s.'),
self.instance.name)
self.disk_dvr.delete_disks(self.context, self.instance,
stor_adpt_mappings)
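
The practical effect of deferring interpolation can be seen with the standard library alone: when a level is disabled, the arguments are never formatted, whereas the old % form always runs. A self-contained illustration; Exploding is purely a demonstration class, not project code:

    import logging

    class Exploding(object):
        """Object whose __str__ raises, to reveal when formatting happens."""
        def __str__(self):
            raise RuntimeError('formatted!')

    logging.basicConfig(level=logging.WARNING)
    LOG = logging.getLogger(__name__)

    # Deferred form: INFO is below the configured level, so the argument is
    # never formatted and nothing is raised.
    LOG.info('Deleting storage disk for instance %s.', Exploding())

    # Eager form: the % operator formats immediately, before the logger can
    # decide to drop the record.
    try:
        LOG.info('Deleting storage disk for instance %s.' % Exploding())
    except RuntimeError:
        print('eager interpolation ran even though INFO is disabled')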

View File

@ -70,7 +70,7 @@ class Create(task.Task):
self.flavor = flavor
def execute(self):
LOG.info(_LI('Creating instance: %s') % self.instance.name)
LOG.info(_LI('Creating instance: %s'), self.instance.name)
wrap = vm.crt_lpar(self.adapter, self.host_wrapper, self.instance,
self.flavor)
return wrap
@ -78,25 +78,25 @@ class Create(task.Task):
def revert(self, result, flow_failures):
# The parameters have to match the execute method, plus the response +
# failures even if only a subset are used.
LOG.warn(_LW('Instance %s to be undefined off host') %
LOG.warn(_LW('Instance %s to be undefined off host'),
self.instance.name)
if isinstance(result, task_fail.Failure):
# No response, nothing to do
LOG.info(_LI('Create failed. No delete of LPAR needed for '
'instance %s') % self.instance.name)
'instance %s'), self.instance.name)
return
if result is None:
# No response, nothing to do
LOG.info(_LI('Instance %s not found on host. No update needed.') %
LOG.info(_LI('Instance %s not found on host. No update needed.'),
self.instance.name)
return
# The result is a lpar wrapper.
lpar = result
vm.dlt_lpar(self.adapter, lpar.uuid)
LOG.info(_LI('Instance %s removed from system') % self.instance.name)
LOG.info(_LI('Instance %s removed from system'), self.instance.name)
class PowerOn(task.Task):
@ -120,11 +120,11 @@ class PowerOn(task.Task):
self.pwr_opts = pwr_opts
def execute(self, lpar_wrap):
LOG.info(_LI('Powering on instance: %s') % self.instance.name)
LOG.info(_LI('Powering on instance: %s'), self.instance.name)
power.power_on(lpar_wrap, self.host_uuid, add_parms=self.pwr_opts)
def revert(self, lpar_wrap, result, flow_failures):
LOG.info(_LI('Powering off instance: %s') % self.instance.name)
LOG.info(_LI('Powering off instance: %s'), self.instance.name)
if isinstance(result, task_fail.Failure):
# The power on itself failed...can't power off.
@ -152,8 +152,7 @@ class PowerOff(task.Task):
self.instance = instance
def execute(self):
LOG.info(_LI('Powering off instance %s.')
% self.instance.name)
LOG.info(_LI('Powering off instance %s.'), self.instance.name)
vm.power_off(self.adapter, self.instance, self.host_uuid,
add_parms=dict(immediate='true'))
@ -174,7 +173,7 @@ class Delete(task.Task):
self.instance = instance
def execute(self):
LOG.info(_LI('Deleting instance %s from system.') % self.instance.name)
LOG.info(_LI('Deleting instance %s from system.'), self.instance.name)
vm.dlt_lpar(self.adapter, self.lpar_uuid)
# Delete the lpar from the cache so if it gets rebuilt it won't
# have the old lpar uuid.

View File

@ -85,7 +85,7 @@ class NPIVVolumeAdapter(v_driver.PowerVMVolumeAdapter):
# This method should no-op if the mappings are already attached to
# the instance...so it won't duplicate the settings every time an
# attach volume is called.
LOG.info(_LI("Adding NPIV mapping for instance %s") % instance.name)
LOG.info(_LI("Adding NPIV mapping for instance %s"), instance.name)
pvm_wwpn.add_npiv_port_mappings(adapter, host_uuid, vm_uuid,
npiv_port_mappings)
@ -134,7 +134,7 @@ class NPIVVolumeAdapter(v_driver.PowerVMVolumeAdapter):
# Now that we've collapsed all of the varying fabrics' port mappings
# into one list, we can call down into pypowervm to remove them in one
# action.
LOG.info(_LI("Removing NPIV mapping for instance %s") % instance.name)
LOG.info(_LI("Removing NPIV mapping for instance %s"), instance.name)
pvm_wwpn.remove_npiv_port_mappings(adapter, host_uuid,
npiv_port_mappings)

View File

@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from nova.i18n import _LI, _LW, _LE
from nova.i18n import _, _LI, _LW, _LE
from oslo_config import cfg
from oslo_log import log as logging
@ -82,6 +82,7 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
volume_id = connection_info['data']['volume_id']
lun = connection_info['data']['target_lun']
hdisk_found = False
device_name = None
i_wwpns = it_map.keys()
t_wwpns = []
@ -105,7 +106,7 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
hdisk.LUAStatus.DEVICE_AVAILABLE,
hdisk.LUAStatus.FOUND_ITL_ERR]:
LOG.info(_LI('Discovered %(hdisk)s on vios %(vios)s for '
'volume %(volume_id)s. Status code: %(status)s.') %
'volume %(volume_id)s. Status code: %(status)s.'),
{'hdisk': device_name, 'vios': vio_wrap.name,
'volume_id': volume_id, 'status': str(status)})
self._add_mapping(adapter, host_uuid, vm_uuid, vio_wrap.uuid,
@ -121,15 +122,15 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
'vios': vio_wrap.name, 'status': str(status)})
# A valid hdisk was not found so log and exit
if not hdisk_found:
msg = (_LE('Failed to discover valid hdisk on any Virtual I/O '
'Server for volume %(volume_id)s.') %
msg = (_('Failed to discover valid hdisk on any Virtual I/O '
'Server for volume %(volume_id)s.') %
{'volume_id': volume_id})
LOG.error(msg)
if device_name is None:
device_name = 'None'
ex_args = {'backing_dev': device_name,
'instance_name': instance.name,
'reason': six.text_type(msg)}
'reason': msg}
raise pexc.VolumeAttachFailed(**ex_args)
def disconnect_volume(self, adapter, host_uuid, vm_uuid, instance,
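
Two smaller adjustments ride along in this hunk: device_name is initialized to None up front so the failure path can always build its arguments, and the message is created with the plain _() marker and handed to the exception directly, which makes the six.text_type() round-trip unnecessary. A sketch of that failure path under those assumptions; fail_hdisk_discovery is an illustrative wrapper and RuntimeError stands in for pexc.VolumeAttachFailed:

    from nova.i18n import _
    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    def fail_hdisk_discovery(volume_id, instance_name, device_name=None):
        # Built once, reused for both the error log and the exception's
        # 'reason' field; it is already a text message, so no conversion
        # through six.text_type() is needed.
        msg = (_('Failed to discover valid hdisk on any Virtual I/O '
                 'Server for volume %(volume_id)s.') %
               {'volume_id': volume_id})
        LOG.error(msg)
        if device_name is None:
            device_name = 'None'
        ex_args = {'backing_dev': device_name,
                   'instance_name': instance_name,
                   'reason': msg}
        raise RuntimeError(ex_args)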
@ -170,7 +171,7 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
# Iterate through host vios list to find hdisks to disconnect.
for vio_wrap in vios_feed:
LOG.debug("vios uuid %s" % vio_wrap.uuid)
LOG.debug("vios uuid %s", vio_wrap.uuid)
try:
volume_udid = self._get_udid(instance, vio_wrap.uuid,
volume_id)
@ -180,28 +181,28 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
LOG.info(_LI(u"Disconnect Volume: No mapped device "
"found on vios %(vios)s for volume "
"%(volume_id)s. volume_uid: "
"%(volume_uid)s ")
% {'volume_uid': volume_udid,
'volume_id': volume_id,
'vios': vio_wrap.name})
"%(volume_uid)s "),
{'volume_uid': volume_udid,
'volume_id': volume_id,
'vios': vio_wrap.name})
continue
except Exception as e:
LOG.error(_LE(u"Disconnect Volume: Failed to find disk "
"on vios %(vios_name)s for volume "
"%(volume_id)s. volume_uid: %(volume_uid)s."
"Error: %(error)s")
% {'error': e, 'volume_uid': volume_udid,
'volume_id': volume_id,
'vios_name': vio_wrap.name})
"Error: %(error)s"),
{'error': e, 'volume_uid': volume_udid,
'volume_id': volume_id,
'vios_name': vio_wrap.name})
continue
# We have found the device name
LOG.info(_LI(u"Disconnect Volume: Discovered the device "
"%(hdisk)s on vios %(vios_name)s for volume "
"%(volume_id)s. volume_uid: %(volume_uid)s.")
% {'volume_uid': volume_udid, 'volume_id': volume_id,
'vios_name': vio_wrap.name, 'hdisk': device_name})
"%(volume_id)s. volume_uid: %(volume_uid)s."),
{'volume_uid': volume_udid, 'volume_id': volume_id,
'vios_name': vio_wrap.name, 'hdisk': device_name})
partition_id = vm.get_vm_id(adapter, vm_uuid)
tsk_map.remove_pv_mapping(adapter, vio_wrap.uuid,
partition_id, device_name)
@ -212,19 +213,18 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
vio_wrap.uuid)
except Exception as e:
# If there is a failure, log it, but don't stop the process
msg = (_LW("There was an error removing the hdisk "
"%(disk)s from the Virtual I/O Server.") %
{'disk': device_name})
LOG.warn(msg)
LOG.warn(_LW("There was an error removing the hdisk "
"%(disk)s from the Virtual I/O Server."),
{'disk': device_name})
LOG.warn(e)
# Disconnect volume complete, now remove key
self._delete_udid_key(instance, vio_wrap.uuid, volume_id)
except Exception as e:
LOG.error(_LE('Cannot detach volumes from virtual machine: %s') %
LOG.error(_LE('Cannot detach volumes from virtual machine: %s'),
vm_uuid)
LOG.exception(_LE(u'Error: %s') % e)
LOG.exception(_LE(u'Error: %s'), e)
ex_args = {'backing_dev': device_name,
'instance_name': instance.name,
'reason': six.text_type(e)}
@ -280,7 +280,7 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
udid_key = self._build_udid_key(vios_uuid, volume_id)
return instance.system_metadata[udid_key]
except (KeyError, ValueError) as e:
LOG.exception(_LE(u'Failed to retrieve deviceid key: %s') % e)
LOG.exception(_LE(u'Failed to retrieve deviceid key: %s'), e)
return None
def _set_udid(self, instance, vios_uuid, volume_id, udid):
@ -308,7 +308,7 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
udid_key = self._build_udid_key(vios_uuid, volume_id)
instance.system_metadata.pop(udid_key)
except Exception as e:
LOG.exception(_LE(u'Failed to delete deviceid key: %s') % e)
LOG.exception(_LE(u'Failed to delete deviceid key: %s'), e)
def _build_udid_key(self, vios_uuid, volume_id):
"""This method will build the udid dictionary key.