Remove log translations

Log messages are no longer translated. This removes all use of the
_LE, _LI, and _LW translation markers to simplify logging and to
avoid confusion in new contributions.

See:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html

Change-Id: I6af522f01a2e8c2d071bc67d7e88b771a45e2ff6
Authored by libing on 2017-06-22 16:35:18 +08:00; committed by Eric Fried
parent a8c4202fce
commit b8d58e598d
22 changed files with 240 additions and 304 deletions
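Every hunk below applies the same mechanical pattern: drop the _LE/_LI/_LW wrapper and pass the plain string straight to the logger, keeping the deferred %-interpolation arguments exactly as they were; the primary _() translator used for user-facing exception messages is left in place. A minimal sketch of the pattern (the function name and message text here are illustrative, not taken from any one file in this change):

from oslo_log import log as logging

LOG = logging.getLogger(__name__)

def map_disk(disk_name, vios_name):
    # Before this change, the message was wrapped in a lazy-translation
    # marker imported from nova_powervm.virt.powervm.i18n:
    #     LOG.warning(_LW("Failed to map disk %(disk)s on VIOS %(vios)s"),
    #                 {'disk': disk_name, 'vios': vios_name})
    # After this change, the plain string is passed directly; the logger
    # still performs the %-interpolation lazily, so the argument dict is
    # unchanged.
    LOG.warning("Failed to map disk %(disk)s on VIOS %(vios)s",
                {'disk': disk_name, 'vios': vios_name})

Where LOG.exception was passed the caught exception only to embed it in the message (for example "Could not update NVRAM: %s"), the argument is dropped as well, since LOG.exception already records the traceback.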


@ -31,7 +31,6 @@ import pypowervm.wrappers.virtual_io_server as pvm_vios
from nova_powervm.virt.powervm import exception as npvmex
from nova_powervm.virt.powervm.i18n import _
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm import mgmt
from nova_powervm.virt.powervm import vm
@ -229,10 +228,10 @@ class DiskAdapter(object):
return stg_elem, vios
except Exception as e:
msg_args['exc'] = e
LOG.warning(_LW("Failed to map boot disk %(disk_name)s of "
"instance %(instance_name)s to the management "
"partition from Virtual I/O Server "
"%(vios_name)s: %(exc)s"), msg_args)
LOG.warning("Failed to map boot disk %(disk_name)s of "
"instance %(instance_name)s to the management "
"partition from Virtual I/O Server "
"%(vios_name)s: %(exc)s", msg_args)
# Try the next hit, if available.
# We either didn't find the boot dev, or failed all attempts to map it.
raise npvmex.InstanceDiskMappingFailed(**msg_args)


@ -17,7 +17,6 @@
from nova.virt import imagecache
from nova_powervm.virt.powervm.disk import driver
from nova_powervm.virt.powervm.i18n import _LI
from oslo_log import log as logging
from pypowervm.tasks import storage as tsk_stg
@ -80,7 +79,7 @@ class ImageManager(imagecache.ImageCacheManager):
# Remove unused
if unused:
for image in unused:
LOG.info(_LI("Removing unused cache image: '%s'"), image.name)
LOG.info("Removing unused cache image: '%s'", image.name)
tsk_stg.rm_vg_storage(base_dir, vdisks=unused)
def update(self, context, all_instances):


@ -35,8 +35,6 @@ from nova_powervm.virt.powervm.disk import driver as disk_dvr
from nova_powervm.virt.powervm.disk import imagecache
from nova_powervm.virt.powervm import exception as npvmex
from nova_powervm.virt.powervm.i18n import _
from nova_powervm.virt.powervm.i18n import _LE
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm import vm
@ -67,7 +65,7 @@ class LocalStorage(disk_dvr.DiskAdapter):
self.image_cache_mgr = imagecache.ImageManager(self._vios_uuid,
self.vg_uuid, adapter)
self.cache_lock = lockutils.ReaderWriterLock()
LOG.info(_LI("Local Storage driver initialized: volume group: '%s'"),
LOG.info("Local Storage driver initialized: volume group: '%s'",
self.vg_name)
@property
@ -157,8 +155,8 @@ class LocalStorage(disk_dvr.DiskAdapter):
# Make sure the remove function will run within the transaction manager
def rm_func(vios_w):
LOG.info(_LI("Disconnecting instance %(inst)s from storage "
"disks."), {'inst': instance.name})
LOG.info("Disconnecting instance %(inst)s from storage "
"disks.", {'inst': instance.name})
return tsk_map.remove_maps(vios_w, lpar_uuid,
match_func=match_func)
@ -186,11 +184,10 @@ class LocalStorage(disk_dvr.DiskAdapter):
"""
tsk_map.remove_vdisk_mapping(self.adapter, vios_uuid, self.mp_uuid,
disk_names=[disk_name])
LOG.info(_LI(
"Unmapped boot disk %(disk_name)s from the management partition "
"from Virtual I/O Server %(vios_name)s."), {
'disk_name': disk_name, 'mp_uuid': self.mp_uuid,
'vios_name': vios_uuid})
LOG.info("Unmapped boot disk %(disk_name)s from the management "
"partition from Virtual I/O Server %(vios_name)s.",
{'disk_name': disk_name, 'mp_uuid': self.mp_uuid,
'vios_name': vios_uuid})
def _create_disk_from_image(self, context, instance, image_meta,
image_type=disk_dvr.DiskType.BOOT):
@ -205,7 +202,7 @@ class LocalStorage(disk_dvr.DiskAdapter):
:param image_type: the image type. See disk constants above.
:return: The backing pypowervm storage object that was created.
"""
LOG.info(_LI('Create disk.'), instance=instance)
LOG.info('Create disk.', instance=instance)
# Disk size to API is in bytes. Input from flavor is in Gb
disk_bytes = self._disk_gb_to_bytes(instance.flavor.root_gb,
@ -272,8 +269,8 @@ class LocalStorage(disk_dvr.DiskAdapter):
self.adapter, name='localdisk', xag=[pvm_const.XAG.VIO_SMAP])
def add_func(vios_w):
LOG.info(_LI("Adding logical volume disk connection between VM "
"%(vm)s and VIOS %(vios)s."),
LOG.info("Adding logical volume disk connection between VM "
"%(vm)s and VIOS %(vios)s.",
{'vm': instance.name, 'vios': vios_w.name})
mapping = tsk_map.build_vscsi_mapping(
self.host_uuid, vios_w, lpar_uuid, disk_info)
@ -316,7 +313,7 @@ class LocalStorage(disk_dvr.DiskAdapter):
break
if not disk_found:
LOG.error(_LE('Disk %s not found during resize.'), vol_name,
LOG.error('Disk %s not found during resize.', vol_name,
instance=instance)
raise nova_exc.DiskNotFound(
location=self.vg_name + '/' + vol_name)
@ -330,7 +327,7 @@ class LocalStorage(disk_dvr.DiskAdapter):
# Get the disk name based on the instance and type
vol_name = self._get_disk_name(disk_info['type'], instance, short=True)
LOG.info(_LI('Extending disk: %s'), vol_name)
LOG.info('Extending disk: %s', vol_name)
try:
_extend()
except pvm_exc.Error:


@ -21,8 +21,6 @@ from nova_powervm import conf as cfg
from nova_powervm.virt.powervm.disk import driver as disk_drv
from nova_powervm.virt.powervm import exception as npvmex
from nova_powervm.virt.powervm.i18n import _
from nova_powervm.virt.powervm.i18n import _LE
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm import vm
from nova import image
@ -71,9 +69,9 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
self.ssp_name = self._ssp.name
self.tier_name = self._tier.name
LOG.info(_LI("SSP Storage driver initialized. "
"Cluster '%(clust_name)s'; SSP '%(ssp_name)s'; "
"Tier '%(tier_name)s"),
LOG.info("SSP Storage driver initialized. "
"Cluster '%(clust_name)s'; SSP '%(ssp_name)s'; "
"Tier '%(tier_name)s",
{'clust_name': self.clust_name, 'ssp_name': self.ssp_name,
'tier_name': self.tier_name})
@ -142,8 +140,8 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
# Delay run function to remove the mapping between the VM and the LU
def rm_func(vios_w):
LOG.info(_LI("Removing SSP disk connection between VM %(vm)s and "
"VIOS %(vios)s."),
LOG.info("Removing SSP disk connection between VM %(vm)s and "
"VIOS %(vios)s.",
{'vm': instance.name, 'vios': vios_w.name})
return tsk_map.remove_maps(vios_w, lpar_uuid,
match_func=match_func)
@ -186,11 +184,10 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
"""
tsk_map.remove_lu_mapping(self.adapter, vios_uuid, self.mp_uuid,
disk_names=[disk_name])
LOG.info(_LI(
"Unmapped boot disk %(disk_name)s from the management partition "
"from Virtual I/O Server %(vios_uuid)s."), {
'disk_name': disk_name, 'mp_uuid': self.mp_uuid,
'vios_uuid': vios_uuid})
LOG.info("Unmapped boot disk %(disk_name)s from the management "
"partition from Virtual I/O Server %(vios_uuid)s.",
{'disk_name': disk_name, 'mp_uuid': self.mp_uuid,
'vios_uuid': vios_uuid})
def delete_disks(self, storage_elems):
"""Removes the disks specified by the mappings.
@ -216,8 +213,8 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
:param image_type: The image type. See disk_drv.DiskType.
:return: The backing pypowervm LU storage object that was created.
"""
LOG.info(_LI('SSP: Create %(image_type)s disk from image %(image_id)s '
'for instance %(instance_uuid)s.'),
LOG.info('SSP: Create %(image_type)s disk from image %(image_id)s '
'for instance %(instance_uuid)s.',
dict(image_type=image_type, image_id=image_meta.id,
instance_uuid=instance.uuid))
@ -228,7 +225,7 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
image_meta.size, upload_type=tsk_stg.UploadType.IO_STREAM)
boot_lu_name = self._get_disk_name(image_type, instance)
LOG.info(_LI('SSP: Disk name is %s'), boot_lu_name)
LOG.info('SSP: Disk name is %s', boot_lu_name)
return tsk_stg.crt_lu(
self._tier, boot_lu_name, instance.flavor.root_gb,
@ -265,8 +262,8 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
# This is the delay apply mapping
def add_func(vios_w):
LOG.info(_LI("Adding SSP disk connection between VM %(vm)s and "
"VIOS %(vios)s."),
LOG.info("Adding SSP disk connection between VM %(vm)s and "
"VIOS %(vios)s.",
{'vm': instance.name, 'vios': vios_w.name})
mapping = tsk_map.build_vscsi_mapping(
self.host_uuid, vios_w, lpar_uuid, lu)
@ -311,9 +308,8 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
ssp_uuid = data.get('ssp_uuid')
if ssp_uuid is not None:
return ssp_uuid == self._cluster.ssp_uuid
except Exception as e:
LOG.exception(_LE(u'Error checking for shared storage. '
'exception=%s'), e)
except Exception:
LOG.exception('Error checking for shared storage.')
return False
def check_instance_shared_storage_cleanup(self, context, data):


@ -52,9 +52,6 @@ from nova_powervm.virt.powervm.disk import driver as disk_dvr
from nova_powervm.virt.powervm import event
from nova_powervm.virt.powervm import host as pvm_host
from nova_powervm.virt.powervm.i18n import _
from nova_powervm.virt.powervm.i18n import _LE
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm import image as img
from nova_powervm.virt.powervm import live_migration as lpm
from nova_powervm.virt.powervm import media
@ -123,7 +120,7 @@ class PowerVMDriver(driver.ComputeDriver):
pvm_par.validate_vios_ready(self.adapter)
# Do a scrub of the I/O plane to make sure the system is in good shape
LOG.info(_LI("Clearing stale I/O connections on driver init."))
LOG.info("Clearing stale I/O connections on driver init.")
pvm_stor.ComprehensiveScrub(self.adapter).execute()
# Initialize the disk adapter. Sets self.disk_dvr
@ -144,7 +141,7 @@ class PowerVMDriver(driver.ComputeDriver):
# Clean-up any orphan adapters
self._cleanup_orphan_adapters(CONF.powervm.pvm_vswitch_for_novalink_io)
LOG.info(_LI("The compute driver has been initialized."))
LOG.info("The compute driver has been initialized.")
def cleanup_host(self, host):
"""Clean up anything that is necessary for the driver gracefully stop.
@ -157,7 +154,7 @@ class PowerVMDriver(driver.ComputeDriver):
except Exception:
pass
LOG.info(_LI("The compute driver has been shutdown."))
LOG.info("The compute driver has been shutdown.")
def _get_adapter(self):
# Build the adapter. May need to attempt the connection multiple times
@ -214,13 +211,13 @@ class PowerVMDriver(driver.ComputeDriver):
_("Expected exactly one host; found %d"), len(syswraps))
self.host_wrapper = syswraps[0]
self.host_uuid = self.host_wrapper.uuid
LOG.info(_LI("Host UUID is:%s"), self.host_uuid)
LOG.info("Host UUID is:%s", self.host_uuid)
@staticmethod
def _log_operation(op, instance):
"""Log entry point of driver operations."""
LOG.info(_LI('Operation: %(op)s. Virtual machine display name: '
'%(display_name)s, name: %(name)s'),
LOG.info('Operation: %(op)s. Virtual machine display name: '
'%(display_name)s, name: %(name)s',
{'op': op, 'display_name': instance.display_name,
'name': instance.name},
instance=instance)
@ -625,9 +622,9 @@ class PowerVMDriver(driver.ComputeDriver):
def _rm_vscsi_maps(vwrap):
removals = pvm_smap.remove_maps(vwrap, pvm_inst_uuid)
if removals:
LOG.warning(_LW("Removing %(num_maps)d storage-less VSCSI "
"mappings associated with LPAR ID "
"%(lpar_uuid)s from VIOS %(vios_name)s."),
LOG.warning("Removing %(num_maps)d storage-less VSCSI "
"mappings associated with LPAR ID "
"%(lpar_uuid)s from VIOS %(vios_name)s.",
{'num_maps': len(removals),
'lpar_uuid': pvm_inst_uuid,
'vios_name': vwrap.name})
@ -665,7 +662,7 @@ class PowerVMDriver(driver.ComputeDriver):
pvm_inst_uuid = vm.get_pvm_uuid(instance)
_setup_flow_and_run()
except exception.InstanceNotFound:
LOG.warning(_LW('VM was not found during destroy operation.'),
LOG.warning('VM was not found during destroy operation.',
instance=instance)
return
except Exception as e:
@ -693,7 +690,7 @@ class PowerVMDriver(driver.ComputeDriver):
:param migrate_data: a LiveMigrateData object
"""
if instance.task_state == task_states.RESIZE_REVERTING:
LOG.info(_LI('Destroy called for migrated/resized instance.'),
LOG.info('Destroy called for migrated/resized instance.',
instance=instance)
# This destroy is part of resize or migrate. It's called to
# revert the resize/migration on the destination host.
@ -706,7 +703,7 @@ class PowerVMDriver(driver.ComputeDriver):
qprop='PartitionName', log_errors=False)
if vm_name == self._gen_resize_name(instance, same_host=True):
# Since it matches it must have been a resize, don't delete it!
LOG.info(_LI('Ignoring destroy call during resize revert.'),
LOG.info('Ignoring destroy call during resize revert.',
instance=instance)
return
@ -763,8 +760,8 @@ class PowerVMDriver(driver.ComputeDriver):
# host. If the migration failed, then the VM is probably not on
# the destination host.
if not vm.instance_exists(self.adapter, instance):
LOG.info(_LI('During volume detach, the instance was not found'
' on this host.'), instance=instance)
LOG.info('During volume detach, the instance was not found on '
'this host.', instance=instance)
# Check if there is live migration cleanup to do on this volume.
mig = self.live_migrations.get(instance.uuid, None)
@ -1060,8 +1057,8 @@ class PowerVMDriver(driver.ComputeDriver):
# This code was pulled from the libvirt driver.
ips = compute_utils.get_machine_ips()
if CONF.my_ip not in ips:
LOG.warning(_LW('my_ip address (%(my_ip)s) was not found on '
'any of the interfaces: %(ifaces)s'),
LOG.warning('my_ip address (%(my_ip)s) was not found on '
'any of the interfaces: %(ifaces)s',
{'my_ip': CONF.my_ip, 'ifaces': ", ".join(ips)})
return CONF.my_ip
@ -1392,8 +1389,8 @@ class PowerVMDriver(driver.ComputeDriver):
:returns: a dict containing migration info (hypervisor-dependent)
"""
LOG.info(_LI("Checking live migration capability on destination "
"host."), instance=instance)
LOG.info("Checking live migration capability on destination host.",
instance=instance)
mig = lpm.LiveMigrationDest(self, instance)
self.live_migrations[instance.uuid] = mig
@ -1407,8 +1404,8 @@ class PowerVMDriver(driver.ComputeDriver):
:param context: security context
:param dest_check_data: result of check_can_live_migrate_destination
"""
LOG.info(_LI("Cleaning up from checking live migration capability "
"on destination."))
LOG.info("Cleaning up from checking live migration capability "
"on destination.")
def check_can_live_migrate_source(self, context, instance,
dest_check_data, block_device_info=None):
@ -1423,7 +1420,7 @@ class PowerVMDriver(driver.ComputeDriver):
:param block_device_info: result of _get_instance_block_device_info
:returns: a dict containing migration info (hypervisor-dependent)
"""
LOG.info(_LI("Checking live migration capability on source host."),
LOG.info("Checking live migration capability on source host.",
instance=instance)
mig = lpm.LiveMigrationSrc(self, instance, dest_check_data)
self.live_migrations[instance.uuid] = mig
@ -1445,8 +1442,7 @@ class PowerVMDriver(driver.ComputeDriver):
:param disk_info: instance disk information
:param migrate_data: a LiveMigrateData object
"""
LOG.info(_LI("Pre live migration processing."),
instance=instance)
LOG.info("Pre live migration processing.", instance=instance)
mig = self.live_migrations[instance.uuid]
# Get a volume driver for each volume
@ -1484,7 +1480,7 @@ class PowerVMDriver(driver.ComputeDriver):
mig.live_migration(context, migrate_data)
except pvm_exc.JobRequestTimedOut as timeout_ex:
# If the migration operation exceeds configured timeout
LOG.error(_LE("Live migration timed out. Aborting migration"),
LOG.error("Live migration timed out. Aborting migration",
instance=instance)
mig.migration_abort()
self._migration_exception_util(context, instance, dest,
@ -1523,7 +1519,7 @@ class PowerVMDriver(driver.ComputeDriver):
:param ex: exception reason
"""
LOG.warning(_LW("Rolling back live migration."), instance=instance)
LOG.warning("Rolling back live migration.", instance=instance)
try:
mig.rollback_live_migration(context)
recover_method(context, instance, dest, migrate_data=migrate_data)
@ -1615,7 +1611,7 @@ class PowerVMDriver(driver.ComputeDriver):
:param instance: instance object reference
:param network_info: instance network information
"""
LOG.info(_LI("Post live migration processing on source host."),
LOG.info("Post live migration processing on source host.",
instance=instance)
mig = self.live_migrations[instance.uuid]
mig.post_live_migration_at_source(network_info)
@ -1631,7 +1627,7 @@ class PowerVMDriver(driver.ComputeDriver):
:param network_info: instance network information
:param block_migration: if true, post operation of block_migration.
"""
LOG.info(_LI("Post live migration processing on destination host."),
LOG.info("Post live migration processing on destination host.",
instance=instance)
mig = self.live_migrations[instance.uuid]
mig.instance = instance


@ -23,7 +23,6 @@ from pypowervm.tasks.monitor import util as pcm_util
import subprocess
from nova import conf as cfg
from nova_powervm.virt.powervm.i18n import _LW
LOG = logging.getLogger(__name__)
@ -204,13 +203,13 @@ class HostCPUStats(pcm_util.MetricCache):
# Should not happen, but just in case there is any precision loss from
# CPU data back to system.
if user_cycles_delta + fw_cycles_delta > tot_cycles_delta:
LOG.warning(_LW(
LOG.warning(
"Host CPU Metrics determined that the total cycles reported "
"was less than the used cycles. This indicates an issue with "
"the PCM data. Please investigate the results.\n"
"Total Delta Cycles: %(tot_cycles)d\n"
"User Delta Cycles: %(user_cycles)d\n"
"Firmware Delta Cycles: %(fw_cycles)d"),
"Firmware Delta Cycles: %(fw_cycles)d",
{'tot_cycles': tot_cycles_delta, 'fw_cycles': fw_cycles_delta,
'user_cycles': user_cycles_delta})
tot_cycles_delta = user_cycles_delta + fw_cycles_delta


@ -19,7 +19,3 @@ import oslo_i18n
# Initialize message translators and short cut methods
_translators = oslo_i18n.TranslatorFactory(domain='nova-powervm')
_ = _translators.primary
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
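Per this hunk, the i18n helper module keeps only the primary translator, so the _ imports retained throughout the tree are unaffected. Roughly what the module body reduces to after the change (license header omitted):

import oslo_i18n

# Initialize message translators and short cut methods
_translators = oslo_i18n.TranslatorFactory(domain='nova-powervm')
_ = _translators.primary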


@ -31,8 +31,6 @@ from pypowervm import util
from nova_powervm import conf as cfg
from nova_powervm.virt.powervm.i18n import _
from nova_powervm.virt.powervm.i18n import _LE
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm import media
from nova_powervm.virt.powervm import vif
from nova_powervm.virt.powervm import vm
@ -163,7 +161,7 @@ class LiveMigrationDest(LiveMigration):
# For each volume, make sure it's ready to migrate
for vol_drv in vol_drvs:
LOG.info(_LI('Performing pre migration for volume %(volume)s'),
LOG.info('Performing pre migration for volume %(volume)s',
dict(volume=vol_drv.volume_id), instance=self.instance)
try:
vol_drv.pre_live_migration_on_destination(
@ -201,7 +199,7 @@ class LiveMigrationDest(LiveMigration):
# For each volume, make sure it completes the migration
for vol_drv in vol_drvs:
LOG.info(_LI('Performing post migration for volume %(volume)s'),
LOG.info('Performing post migration for volume %(volume)s',
dict(volume=vol_drv.volume_id), instance=self.instance)
try:
vol_drv.post_live_migration_at_destination(mig_vol_stor)
@ -238,7 +236,7 @@ class LiveMigrationDest(LiveMigration):
:param vol_drv: volume driver for the attached volume
"""
LOG.info(_LI('Performing detach for volume %(volume)s'),
LOG.info('Performing detach for volume %(volume)s',
dict(volume=vol_drv.volume_id), instance=self.instance)
# Ensure the volume data is present before trying cleanup
if hasattr(self, 'pre_live_vol_data'):
@ -402,7 +400,7 @@ class LiveMigrationSrc(LiveMigration):
"""
# For each volume, make sure the source is cleaned
for vol_drv in vol_drvs:
LOG.info(_LI('Performing post migration for volume %(volume)s'),
LOG.info('Performing post migration for volume %(volume)s',
dict(volume=vol_drv.volume_id), instance=self.instance)
try:
vol_drv.post_live_migration_at_source(migrate_data.vol_data)
@ -442,7 +440,7 @@ class LiveMigrationSrc(LiveMigration):
self.migration_recover()
except Exception as ex:
LOG.error(_LE("Migration recover failed with error: %s"), ex,
LOG.error("Migration recover failed with error: %s", ex,
instance=self.instance)
finally:
LOG.debug("Finished migration rollback.", instance=self.instance)


@ -35,7 +35,6 @@ from pypowervm.wrappers import storage as pvm_stg
from pypowervm.wrappers import virtual_io_server as pvm_vios
from nova_powervm import conf as cfg
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm import vm
LOG = logging.getLogger(__name__)
@ -98,7 +97,7 @@ class ConfigDrivePowerVM(object):
:return iso_path: The path to the ISO
:return file_name: The file name for the ISO
"""
LOG.info(_LI("Creating config drive for instance: %s"), instance.name,
LOG.info("Creating config drive for instance: %s", instance.name,
instance=instance)
extra_md = {}
if admin_pass is not None:
@ -122,8 +121,8 @@ class ConfigDrivePowerVM(object):
max_len=pvm_const.MaxLen.VOPT_NAME)
iso_path = os.path.join(im_path, file_name)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
LOG.info(_LI("Config drive ISO being built for instance %(inst)s "
"building to path %(iso_path)s."),
LOG.info("Config drive ISO being built for instance %(inst)s "
"building to path %(iso_path)s.",
{'inst': instance.name, 'iso_path': iso_path},
instance=instance)
# In case, if there's an OSError related failure while
@ -211,8 +210,8 @@ class ConfigDrivePowerVM(object):
# Define the function to build and add the mapping
def add_func(vios_w):
LOG.info(_LI("Adding cfg drive mapping for instance %(inst)s for "
"Virtual I/O Server %(vios)s"),
LOG.info("Adding cfg drive mapping for instance %(inst)s for "
"Virtual I/O Server %(vios)s",
{'inst': instance.name, 'vios': vios_w.name},
instance=instance)
mapping = tsk_map.build_vscsi_mapping(self.host_uuid, vios_w,
@ -341,7 +340,7 @@ class ConfigDrivePowerVM(object):
media_elems = [x.backing_storage for x in media_mappings]
def rm_vopt():
LOG.info(_LI("Removing virtual optical for VM with UUID %s."),
LOG.info("Removing virtual optical for VM with UUID %s.",
lpar_uuid)
vg_wrap = pvm_stg.VG.get(self.adapter, uuid=self.vg_uuid,
parent_type=pvm_vios.VIOS,


@ -23,8 +23,6 @@ from pypowervm import exceptions as pvm_exc
import six
import time
from nova_powervm.virt.powervm.i18n import _LE
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm.nvram import api
from nova_powervm.virt.powervm import vm
@ -106,8 +104,7 @@ class NvramManager(object):
try:
return self._api.fetch(instance)
except Exception as e:
LOG.exception(_LE('Could not update NVRAM: %s'), e,
instance=instance)
LOG.exception('Could not update NVRAM.', instance=instance)
raise api.NVRAMDownloadException(instance=instance.name,
reason=six.text_type(e))
@ -124,7 +121,7 @@ class NvramManager(object):
self._api.delete(instance)
except Exception as e:
# Delete exceptions should not end the operation
LOG.warning(_LW('Could not delete NVRAM: %s'), e,
LOG.warning('Could not delete NVRAM: %s', e,
instance=instance)
@lockutils.synchronized(LOCK_NVRAM_UPDT_LIST)


@ -25,8 +25,6 @@ import types
from nova_powervm import conf as cfg
from nova_powervm.conf import powervm
from nova_powervm.virt.powervm.i18n import _
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm.nvram import api
from oslo_concurrency import lockutils
@ -181,8 +179,8 @@ class SwiftNvramStore(api.NvramStore):
# If upload failed during nvram/slot_map update due to
# expired keystone token, retry swift-client operation
# to allow regeneration of token
LOG.warning(_LW('NVRAM upload failed due to invalid '
'token. Retrying upload.'))
LOG.warning('NVRAM upload failed due to invalid '
'token. Retrying upload.')
return True
# The upload failed.
raise api.NVRAMUploadException(instance=inst_name,
@ -218,7 +216,7 @@ class SwiftNvramStore(api.NvramStore):
data = data.encode('ascii')
md5 = hashlib.md5(data).hexdigest()
if existing_hash == md5:
LOG.info(_LI('NVRAM has not changed for instance: %s'),
LOG.info('NVRAM has not changed for instance: %s',
instance.name, instance=instance)
return
@ -287,7 +285,7 @@ class SwiftNvramStore(api.NvramStore):
try:
os.remove(f.name)
except Exception:
LOG.warning(_LW('Could not remove temporary file: %s'), f.name)
LOG.warning('Could not remove temporary file: %s', f.name)
def delete_slot_map(self, inst_key):
"""Delete the Slot Map from Swift.


@ -23,7 +23,6 @@ from pypowervm.tasks import slot_map
from pypowervm.tasks import storage as pvm_tstor
from nova_powervm.virt.powervm import exception as p_exc
from nova_powervm.virt.powervm.i18n import _LW
LOG = logging.getLogger(__name__)
@ -177,9 +176,9 @@ class SwiftSlotManager(NovaSlotManager):
try:
self.store_api.delete_slot_map(key)
except Exception:
LOG.warning(_LW("Unable to delete the slot map from Swift backing "
"store with ID %(key)s. Will require "
"manual cleanup."), {'key': key},
LOG.warning("Unable to delete the slot map from Swift backing "
"store with ID %(key)s. Will require "
"manual cleanup.", {'key': key},
instance=self.instance)


@ -17,7 +17,6 @@
from oslo_log import log as logging
from taskflow import task
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm import image
LOG = logging.getLogger(__name__)
@ -74,9 +73,9 @@ class StreamToGlance(task.Task):
def execute(self, disk_path):
metadata = image.snapshot_metadata(self.context, self.image_api,
self.image_id, self.instance)
LOG.info(_LI("Starting stream of boot device for instance %(inst)s "
"(local blockdev %(devpath)s) to glance image "
"%(img_id)s."),
LOG.info("Starting stream of boot device for instance %(inst)s "
"(local blockdev %(devpath)s) to glance image "
"%(img_id)s.",
{'inst': self.instance.name, 'devpath': disk_path,
'img_id': self.image_id}, instance=self.instance)
image.stream_blockdev_to_glance(self.context, self.image_api,


@ -24,9 +24,6 @@ from pypowervm.wrappers import network as pvm_net
from taskflow import task
from nova_powervm import conf as cfg
from nova_powervm.virt.powervm.i18n import _LE
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm import vif
from nova_powervm.virt.powervm import vm
@ -63,9 +60,9 @@ class UnplugVifs(task.Task):
# error up front.
modifiable, reason = lpar_wrap.can_modify_io()
if not modifiable:
LOG.error(_LE('Unable to remove VIFs from instance %(inst)s '
'because the system is not in a correct state. '
'The reason reported by the system is: %(reason)s'),
LOG.error('Unable to remove VIFs from instance %(inst)s '
'because the system is not in a correct state. '
'The reason reported by the system is: %(reason)s',
{'inst': self.instance.name, 'reason': reason},
instance=self.instance)
raise exception.VirtualInterfaceUnplugException(reason=reason)
@ -152,10 +149,10 @@ class PlugVifs(task.Task):
# Check to see if the LPAR is OK to add VIFs to.
modifiable, reason = lpar_wrap.can_modify_io()
if not modifiable and self.crt_network_infos:
LOG.error(_LE('Unable to create VIF(s) for instance %(sys)s. The '
'VM was in a state where VIF plugging is not '
'acceptable. The reason from the system is: '
'%(reason)s'),
LOG.error('Unable to create VIF(s) for instance %(sys)s. The '
'VM was in a state where VIF plugging is not '
'acceptable. The reason from the system is: '
'%(reason)s',
{'sys': self.instance.name, 'reason': reason},
instance=self.instance)
raise exception.VirtualInterfaceCreateException()
@ -167,8 +164,8 @@ class PlugVifs(task.Task):
# See: https://bugs.launchpad.net/nova/+bug/1535918
undo_host_change = False
if self.instance.host != CONF.host:
LOG.warning(_LW('Instance was not assigned to this host. '
'It was assigned to: %s'), self.instance.host,
LOG.warning('Instance was not assigned to this host. '
'It was assigned to: %s', self.instance.host,
instance=self.instance)
# Update the instance...
old_host = self.instance.host
@ -180,7 +177,7 @@ class PlugVifs(task.Task):
# not wait for the neutron event as that likely won't be sent (it was
# already done).
for network_info in self.update_network_infos:
LOG.info(_LI("Updating VIF with mac %(mac)s for instance %(sys)s"),
LOG.info("Updating VIF with mac %(mac)s for instance %(sys)s",
{'mac': network_info['address'],
'sys': self.instance.name}, instance=self.instance)
vif.plug(self.adapter, self.host_uuid, self.instance,
@ -193,8 +190,8 @@ class PlugVifs(task.Task):
deadline=CONF.vif_plugging_timeout,
error_callback=self._vif_callback_failed):
for network_info in self.crt_network_infos:
LOG.info(_LI('Creating VIF with mac %(mac)s for instance '
'%(sys)s'),
LOG.info('Creating VIF with mac %(mac)s for instance '
'%(sys)s',
{'mac': network_info['address'],
'sys': self.instance.name},
instance=self.instance)
@ -205,13 +202,13 @@ class PlugVifs(task.Task):
pvm_net.CNA):
self.cnas.append(new_vif)
except eventlet.timeout.Timeout:
LOG.error(_LE('Error waiting for VIF to be created for instance '
'%(sys)s'), {'sys': self.instance.name},
LOG.error('Error waiting for VIF to be created for instance '
'%(sys)s', {'sys': self.instance.name},
instance=self.instance)
raise exception.VirtualInterfaceCreateException()
finally:
if undo_host_change:
LOG.info(_LI('Undoing temporary host assignment to instance.'),
LOG.info('Undoing temporary host assignment to instance.',
instance=self.instance)
self.instance.host = old_host
self.instance.save()
@ -219,8 +216,8 @@ class PlugVifs(task.Task):
return self.cnas
def _vif_callback_failed(self, event_name, instance):
LOG.error(_LE('VIF Plug failure for callback on event '
'%(event)s for instance %(uuid)s'),
LOG.error('VIF Plug failure for callback on event '
'%(event)s for instance %(uuid)s',
{'event': event_name, 'uuid': instance.uuid})
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
@ -250,9 +247,8 @@ class PlugVifs(task.Task):
# The parameters have to match the execute method, plus the response +
# failures even if only a subset are used.
LOG.warning(_LW('VIF creation being rolled back for instance '
'%(inst)s'), {'inst': self.instance.name},
instance=self.instance)
LOG.warning('VIF creation being rolled back for instance %(inst)s',
{'inst': self.instance.name}, instance=self.instance)
# Get the current adapters on the system
cna_w_list = vm.get_cnas(self.adapter, self.instance)
@ -302,14 +298,14 @@ class PlugMgmtVif(task.Task):
self.instance.name)
return None
LOG.info(_LI('Plugging the Management Network Interface to instance '
'%s'), self.instance.name, instance=self.instance)
LOG.info('Plugging the Management Network Interface to instance %s',
self.instance.name, instance=self.instance)
# Determine if we need to create the secure RMC VIF. This should only
# be needed if there is not a VIF on the secure RMC vSwitch
vswitch = vif.get_secure_rmc_vswitch(self.adapter, self.host_uuid)
if vswitch is None:
LOG.warning(_LW('No management VIF created for instance %s due to '
'lack of Management Virtual Switch'),
LOG.warning('No management VIF created for instance %s due to '
'lack of Management Virtual Switch',
self.instance.name)
return None


@ -24,8 +24,6 @@ from taskflow.types import failure as task_fail
from nova_powervm.virt.powervm.disk import driver as disk_driver
from nova_powervm.virt.powervm import exception as npvmex
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm import media
from nova_powervm.virt.powervm import mgmt
@ -51,15 +49,14 @@ class ConnectVolume(task.Task):
super(ConnectVolume, self).__init__('connect_vol_%s' % self.vol_id)
def execute(self):
LOG.info(_LI('Connecting volume %(vol)s to instance %(inst)s'),
LOG.info('Connecting volume %(vol)s to instance %(inst)s',
{'vol': self.vol_id, 'inst': self.vol_drv.instance.name})
self.vol_drv.connect_volume(self.slot_mgr)
def revert(self, result, flow_failures):
# The parameters have to match the execute method, plus the response +
# failures even if only a subset are used.
LOG.warning(_LW('Volume %(vol)s for instance %(inst)s to be '
'disconnected'),
LOG.warning('Volume %(vol)s for instance %(inst)s to be disconnected',
{'vol': self.vol_id, 'inst': self.vol_drv.instance.name})
# Note that the rollback is *instant*. Resetting the FeedTask ensures
@ -74,8 +71,8 @@ class ConnectVolume(task.Task):
except npvmex.VolumeDetachFailed as e:
# Only log that the volume detach failed. Should not be blocking
# due to being in the revert flow.
LOG.warning(_LW("Unable to disconnect volume for %(inst)s during "
"rollback. Error was: %(error)s"),
LOG.warning("Unable to disconnect volume for %(inst)s during "
"rollback. Error was: %(error)s",
{'inst': self.vol_drv.instance.name,
'error': e.message})
@ -100,15 +97,14 @@ class DisconnectVolume(task.Task):
'disconnect_vol_%s' % self.vol_id)
def execute(self):
LOG.info(_LI('Disconnecting volume %(vol)s from instance %(inst)s'),
LOG.info('Disconnecting volume %(vol)s from instance %(inst)s',
{'vol': self.vol_id, 'inst': self.vol_drv.instance.name})
self.vol_drv.disconnect_volume(self.slot_mgr)
def revert(self, result, flow_failures):
# The parameters have to match the execute method, plus the response +
# failures even if only a subset are used.
LOG.warning(_LW('Volume %(vol)s for instance %(inst)s to be '
're-connected'),
LOG.warning('Volume %(vol)s for instance %(inst)s to be re-connected',
{'vol': self.vol_id, 'inst': self.vol_drv.instance.name})
# Note that the rollback is *instant*. Resetting the FeedTask ensures
@ -124,8 +120,8 @@ class DisconnectVolume(task.Task):
except npvmex.VolumeAttachFailed as e:
# Only log that the volume attach failed. Should not be blocking
# due to being in the revert flow. See comment above.
LOG.warning(_LW("Unable to re-connect volume for %(inst)s during "
"rollback. Error was: %(error)s"),
LOG.warning("Unable to re-connect volume for %(inst)s during "
"rollback. Error was: %(error)s",
{'inst': self.vol_drv.instance.name,
'error': e.message})
@ -262,8 +258,8 @@ class InstanceDiskToMgmt(task.Task):
# partition from the same VIOS - it is safe to use the first one.
the_map = new_maps[0]
# Scan the SCSI bus, discover the disk, find its canonical path.
LOG.info(_LI("Discovering device and path for mapping of %(dev_name)s "
"on the management partition."),
LOG.info("Discovering device and path for mapping of %(dev_name)s "
"on the management partition.",
{'dev_name': self.stg_elem.name})
self.disk_path = mgmt.discover_vscsi_disk(the_map)
return self.stg_elem, self.vios_wrap, self.disk_path
@ -277,9 +273,9 @@ class InstanceDiskToMgmt(task.Task):
if self.vios_wrap is None or self.stg_elem is None:
# We never even got connected - nothing to do
return
LOG.warning(_LW("Unmapping boot disk %(disk_name)s of instance "
"%(instance_name)s from management partition via "
"Virtual I/O Server %(vios_name)s."),
LOG.warning("Unmapping boot disk %(disk_name)s of instance "
"%(instance_name)s from management partition via "
"Virtual I/O Server %(vios_name)s.",
{'disk_name': self.stg_elem.name,
'instance_name': self.instance.name,
'vios_name': self.vios_wrap.name})
@ -289,8 +285,8 @@ class InstanceDiskToMgmt(task.Task):
if self.disk_path is None:
# We did not discover the disk - nothing else to do.
return
LOG.warning(_LW("Removing disk %(disk_path)s from the management "
"partition."), {'disk_path': self.disk_path})
LOG.warning("Removing disk %(disk_path)s from the management "
"partition.", {'disk_path': self.disk_path})
mgmt.remove_block_dev(self.disk_path)
@ -334,15 +330,15 @@ class RemoveInstanceDiskFromMgmt(task.Task):
# stg_elem is None if boot disk was not mapped to management partition
if stg_elem is None:
return
LOG.info(_LI("Unmapping boot disk %(disk_name)s of instance "
"%(instance_name)s from management partition via Virtual "
"I/O Server %(vios_name)s."),
LOG.info("Unmapping boot disk %(disk_name)s of instance "
"%(instance_name)s from management partition via Virtual "
"I/O Server %(vios_name)s.",
{'disk_name': stg_elem.name,
'instance_name': self.instance.name,
'vios_name': vios_wrap.name})
self.disk_dvr.disconnect_disk_from_mgmt(vios_wrap.uuid, stg_elem.name)
LOG.info(_LI("Removing disk %(disk_path)s from the management "
"partition."), {'disk_path': disk_path})
LOG.info("Removing disk %(disk_path)s from the management "
"partition.", {'disk_path': disk_path})
mgmt.remove_block_dev(disk_path)
@ -402,8 +398,8 @@ class CreateAndConnectCfgDrive(task.Task):
try:
self.mb.dlt_vopt(lpar_wrap.uuid)
except Exception as e:
LOG.warning(_LW('Vopt removal as part of spawn reversion failed '
'with: %(exc)s'), {'exc': six.text_type(e)},
LOG.warning('Vopt removal as part of spawn reversion failed '
'with: %(exc)s', {'exc': six.text_type(e)},
instance=self.instance)
@ -505,8 +501,8 @@ class SaveBDM(task.Task):
super(SaveBDM, self).__init__('save_bdm_%s' % self.bdm.volume_id)
def execute(self):
LOG.info(_LI('Saving block device mapping for volume id %(vol_id)s '
'on instance %(inst)s.'),
LOG.info('Saving block device mapping for volume id %(vol_id)s '
'on instance %(inst)s.',
{'vol_id': self.bdm.volume_id, 'inst': self.instance.name})
self.bdm.save()
@ -535,7 +531,7 @@ class FindDisk(task.Task):
def execute(self):
disk = self.disk_dvr.get_disk_ref(self.instance, self.disk_type)
if not disk:
LOG.warning(_LW('Disk not found: %(disk_name)s'),
LOG.warning('Disk not found: %(disk_name)s',
{'disk_name':
self.disk_dvr._get_disk_name(self.disk_type,
self.instance),
@ -562,6 +558,6 @@ class ExtendDisk(task.Task):
super(ExtendDisk, self).__init__('extend_disk_%s' % disk_info['type'])
def execute(self):
LOG.info(_LI('Extending disk size of disk: %(disk)s size: %(size)s.'),
LOG.info('Extending disk size of disk: %(disk)s size: %(size)s.',
{'disk': self.disk_info['type'], 'size': self.size})
self.disk_dvr.extend_disk(self.instance, self.disk_info, self.size)


@ -18,13 +18,9 @@ from oslo_log import log as logging
from pypowervm import const as pvm_const
from pypowervm.tasks import partition as pvm_tpar
from pypowervm.tasks import storage as pvm_stg
import six
from taskflow import task
from taskflow.types import failure as task_fail
from nova_powervm.virt.powervm.i18n import _LE
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm import vm
from nova.compute import task_states
@ -102,7 +98,7 @@ class Create(task.Task):
def execute(self):
data = None
if self.nvram_mgr is not None:
LOG.info(_LI('Fetching NVRAM for instance %s.'),
LOG.info('Fetching NVRAM for instance %s.',
self.instance.name, instance=self.instance)
data = self.nvram_mgr.fetch(self.instance)
LOG.debug('NVRAM data is: %s', data, instance=self.instance)
@ -118,8 +114,8 @@ class Create(task.Task):
# build map earlier in the spawn, just before the LPAR is created.
# Only rebuilds should be passing in None for stg_ftsk.
if self.stg_ftsk.name == 'create_scrubber':
LOG.info(_LI('Scrubbing storage for instance %s as part of '
'rebuild.'), self.instance.name,
LOG.info('Scrubbing storage for instance %s as part of '
'rebuild.', self.instance.name,
instance=self.instance)
self.stg_ftsk.execute()
@ -181,7 +177,7 @@ class Rename(task.Task):
self.vm_name = name
def execute(self):
LOG.info(_LI('Renaming instance to name: %s'), self.name,
LOG.info('Renaming instance to name: %s', self.name,
instance=self.instance)
return vm.rename(self.adapter, self.instance, self.vm_name)
@ -206,7 +202,7 @@ class PowerOn(task.Task):
vm.power_on(self.adapter, self.instance, opts=self.pwr_opts)
def revert(self, result, flow_failures):
LOG.warning(_LW('Powering off instance: %s'), self.instance.name)
LOG.warning('Powering off instance: %s', self.instance.name)
if isinstance(result, task_fail.Failure):
# The power on itself failed...can't power off.
@ -260,12 +256,9 @@ class StoreNvram(task.Task):
try:
self.nvram_mgr.store(self.instance, immediate=self.immediate)
except Exception as e:
LOG.exception(_LE('Unable to store NVRAM for instance '
'%(name)s. Exception: %(reason)s'),
{'name': self.instance.name,
'reason': six.text_type(e)},
instance=self.instance)
except Exception:
LOG.exception('Unable to store NVRAM for instance %(name)s.',
{'name': self.instance.name}, instance=self.instance)
class DeleteNvram(task.Task):
@ -284,19 +277,16 @@ class DeleteNvram(task.Task):
def execute(self):
if self.nvram_mgr is None:
LOG.info(_LI("No op for NVRAM delete."), instance=self.instance)
LOG.info("No op for NVRAM delete.", instance=self.instance)
return
LOG.info(_LI('Deleting NVRAM for instance: %s'),
LOG.info('Deleting NVRAM for instance: %s',
self.instance.name, instance=self.instance)
try:
self.nvram_mgr.remove(self.instance)
except Exception as e:
LOG.exception(_LE('Unable to delete NVRAM for instance '
'%(name)s. Exception: %(reason)s'),
{'name': self.instance.name,
'reason': six.text_type(e)},
instance=self.instance)
except Exception:
LOG.exception('Unable to delete NVRAM for instance %(name)s.',
{'name': self.instance.name}, instance=self.instance)
class Delete(task.Task):


@ -40,9 +40,6 @@ from pypowervm.wrappers import managed_system as pvm_ms
from pypowervm.wrappers import network as pvm_net
from nova_powervm.virt.powervm.i18n import _
from nova_powervm.virt.powervm.i18n import _LE
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm import vm
LOG = log.getLogger(__name__)
@ -114,8 +111,8 @@ def _push_vif_event(adapter, action, vif_w, instance, vif_type):
event = pvm_evt.Event.bld(adapter, data, detail)
try:
event = event.create()
LOG.debug(_LI('Pushed custom event for consumption by neutron agent: '
'%s'), str(event), instance=instance)
LOG.debug('Pushed custom event for consumption by neutron agent: %s',
str(event), instance=instance)
except Exception:
with excutils.save_and_reraise_exception(logger=LOG):
LOG.exception('Custom VIF event push failed. %s', str(event),
@ -371,21 +368,21 @@ class PvmVifDriver(object):
cna_w = self._find_cna_for_vif(cna_w_list, vif)
if not cna_w:
LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
'instance %(inst)s. The VIF was not found on '
'the instance.'),
LOG.warning('Unable to unplug VIF with mac %(mac)s for '
'instance %(inst)s. The VIF was not found on '
'the instance.',
{'mac': vif['address'], 'inst': self.instance.name},
instance=self.instance)
return None
LOG.info(_LI('Deleting VIF with mac %(mac)s for instance %(inst)s.'),
LOG.info('Deleting VIF with mac %(mac)s for instance %(inst)s.',
{'mac': vif['address'], 'inst': self.instance.name},
instance=self.instance)
try:
cna_w.delete()
except Exception as e:
LOG.error(_LE('Unable to unplug VIF with mac %(mac)s for instance '
'%(inst)s.'),
LOG.error('Unable to unplug VIF with mac %(mac)s for instance '
'%(inst)s.',
{'mac': vif['address'], 'inst': self.instance.name},
instance=self.instance)
LOG.exception("PowerVM error during vif unplug.",
@ -603,9 +600,9 @@ class PvmLBVifDriver(PvmLioVifDriver):
# Find the CNA for this vif.
cna_w = self._find_cna_for_vif(cna_w_list, vif)
if not cna_w:
LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
'instance %(inst)s. The VIF was not found on '
'the instance.'),
LOG.warning('Unable to unplug VIF with mac %(mac)s for '
'instance %(inst)s. The VIF was not found on '
'the instance.',
{'mac': vif['address'], 'inst': self.instance.name},
instance=self.instance)
return None
@ -619,8 +616,8 @@ class PvmLBVifDriver(PvmLioVifDriver):
utils.execute('brctl', 'delif', vif['network']['bridge'],
dev_name, run_as_root=True)
except Exception as e:
LOG.warning(_LW('Unable to delete device %(dev_name)s from bridge '
'%(bridge)s. Error: %(error)s'),
LOG.warning('Unable to delete device %(dev_name)s from bridge '
'%(bridge)s. Error: %(error)s',
{'dev_name': dev_name,
'bridge': vif['network']['bridge'],
'error': e.message}, instance=self.instance)
@ -700,9 +697,9 @@ class PvmVnicSriovVifDriver(PvmVifDriver):
vnic = vm.get_vnics(
self.adapter, self.instance, mac=mac, one_result=True)
if not vnic:
LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
'instance %(inst)s. No matching vNIC was found '
'on the instance. VIF: %(vif)s'),
LOG.warning('Unable to unplug VIF with mac %(mac)s for '
'instance %(inst)s. No matching vNIC was found '
'on the instance. VIF: %(vif)s',
{'mac': mac, 'inst': self.instance.name, 'vif': vif},
instance=self.instance)
return None
@ -773,9 +770,9 @@ class PvmOvsVifDriver(PvmLioVifDriver):
# Find the CNA for this vif.
cna_w = self._find_cna_for_vif(cna_w_list, vif)
if not cna_w:
LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
'instance %(inst)s. The VIF was not found on '
'the instance.'),
LOG.warning('Unable to unplug VIF with mac %(mac)s for '
'instance %(inst)s. The VIF was not found on '
'the instance.',
{'mac': vif['address'], 'inst': self.instance.name},
instance=self.instance)
return None
@ -825,8 +822,8 @@ class PvmOvsVifDriver(PvmLioVifDriver):
# Save this data for the migration command.
vea_vlan_mappings[vif['address']] = cna_w.pvid
LOG.info(_LI("VIF with mac %(mac)s is going on trunk %(dev)s with "
"PVID %(pvid)s"),
LOG.info("VIF with mac %(mac)s is going on trunk %(dev)s with "
"PVID %(pvid)s",
{'mac': vif['address'], 'dev': dev, 'pvid': cna_w.pvid},
instance=self.instance)
@ -844,9 +841,8 @@ class PvmOvsVifDriver(PvmLioVifDriver):
mac address, value is the destination's
target hypervisor VLAN.
"""
LOG.warning(_LW("Rolling back the live migrate of VIF with mac "
"%(mac)s."), {'mac': vif['address']},
instance=self.instance)
LOG.warning("Rolling back the live migrate of VIF with mac %(mac)s.",
{'mac': vif['address']}, instance=self.instance)
# We know that we just attached the VIF to the NovaLink VM. Search
# for a trunk adapter with the PVID and vSwitch that we specified
@ -874,8 +870,8 @@ class PvmOvsVifDriver(PvmLioVifDriver):
if trunk:
# Delete the peer'd trunk adapter.
LOG.warning(_LW("Deleting target side trunk adapter %(dev)s for "
"rollback operation"), {'dev': trunk.dev_name},
LOG.warning("Deleting target side trunk adapter %(dev)s for "
"rollback operation", {'dev': trunk.dev_name},
instance=self.instance)
trunk.delete()


@ -21,8 +21,6 @@ from taskflow import task
from nova_powervm import conf as cfg
from nova_powervm.virt.powervm import exception as p_exc
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm import vm
from nova_powervm.virt.powervm.volume import driver as v_driver
from oslo_log import log as logging
@ -87,7 +85,7 @@ class FileIOVolumeAdapter(v_driver.PowerVMVolumeAdapter):
# Check if volume is available in destination.
vol_path = self._get_path()
if not os.path.exists(vol_path):
LOG.warning(_LW("File not found at path %s"), vol_path,
LOG.warning("File not found at path %s", vol_path,
instance=self.instance)
raise p_exc.VolumePreMigrationFailed(
volume_id=self.volume_id, instance_name=self.instance.name)
@ -104,8 +102,8 @@ class FileIOVolumeAdapter(v_driver.PowerVMVolumeAdapter):
if vios_w.uuid not in self.vios_uuids:
return None
LOG.info(_LI("Adding logical volume disk connection between VM "
"%(vm)s and VIOS %(vios)s."),
LOG.info("Adding logical volume disk connection between VM "
"%(vm)s and VIOS %(vios)s.",
{'vm': self.instance.name, 'vios': vios_w.name},
instance=self.instance)
slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, path)
@ -148,9 +146,8 @@ class FileIOVolumeAdapter(v_driver.PowerVMVolumeAdapter):
if vios_w.uuid not in self.vios_uuids:
return None
LOG.info(_LI("Disconnecting instance %(inst)s from storage "
"disks."), {'inst': self.instance.name},
instance=self.instance)
LOG.info("Disconnecting instance %(inst)s from storage disks.",
{'inst': self.instance.name}, instance=self.instance)
removed_maps = tsk_map.remove_maps(vios_w, self.vm_uuid,
match_func=match_func)
for rm_map in removed_maps:


@ -18,8 +18,6 @@ from oslo_log import log as logging
from nova_powervm import conf as cfg
from nova_powervm.virt.powervm import exception as p_exc
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm import vm
from nova_powervm.virt.powervm.volume import driver as v_driver
from nova_powervm.virt.powervm.volume import volume as volume
@ -151,28 +149,28 @@ class IscsiVolumeAdapter(volume.VscsiVolumeAdapter,
# If we have no device name, at this point
# we should not continue. Subsequent scrub code on future
# deploys will clean this up.
LOG.warning(_LW(
LOG.warning(
"Disconnect Volume: The backing hdisk for volume "
"%(volume_id)s on Virtual I/O Server %(vios)s is "
"not in a valid state. No disconnect "
"actions to be taken as volume is not healthy."),
"actions to be taken as volume is not healthy.",
{'volume_id': self.volume_id, 'vios': vios_w.name},
instance=self.instance)
return False
except Exception as e:
LOG.warning(_LW(
LOG.warning(
"Disconnect Volume: Failed to find disk on Virtual I/O "
"Server %(vios_name)s for volume %(volume_id)s."
" Error: %(error)s"),
" Error: %(error)s",
{'error': e, 'vios_name': vios_w.name,
'volume_id': self.volume_id}, instance=self.instance)
return False
# We have found the device name
LOG.info(_LI("Disconnect Volume: Discovered the device %(hdisk)s "
"on Virtual I/O Server %(vios_name)s for volume "
"%(volume_id)s."),
LOG.info("Disconnect Volume: Discovered the device %(hdisk)s "
"on Virtual I/O Server %(vios_name)s for volume "
"%(volume_id)s.",
{'volume_id': self.volume_id,
'vios_name': vios_w.name, 'hdisk': device_name},
instance=self.instance)
@ -204,10 +202,10 @@ class IscsiVolumeAdapter(volume.VscsiVolumeAdapter,
# Warn if no hdisks disconnected.
if not any([result['vio_modified']
for result in ret['wrapper_task_rets'].values()]):
LOG.warning(_LW(
LOG.warning(
"Disconnect Volume: Failed to disconnect the volume "
"%(volume_id)s on ANY of the Virtual I/O Servers for "
"instance %(inst)s."),
"instance %(inst)s.",
{'inst': self.instance.name, 'volume_id': self.volume_id},
instance=self.instance)


@ -29,9 +29,6 @@ from nova_powervm import conf as cfg
from nova_powervm.conf import powervm as pvm_cfg
from nova_powervm.virt.powervm import exception as exc
from nova_powervm.virt.powervm.i18n import _
from nova_powervm.virt.powervm.i18n import _LE
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm.volume import driver as v_driver
LOG = logging.getLogger(__name__)
@ -302,9 +299,9 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
return False
# At this point, it should be correct.
LOG.info(_LI("Instance %(inst)s has not yet defined a WWPN on "
"fabric %(fabric)s. Appropriate WWPNs will be "
"generated."),
LOG.info("Instance %(inst)s has not yet defined a WWPN on "
"fabric %(fabric)s. Appropriate WWPNs will be "
"generated.",
{'inst': self.instance.name, 'fabric': fabric},
instance=self.instance)
return True
@ -402,8 +399,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
# were already configured.
for fabric in self._fabric_names():
fc_state = self._get_fabric_state(fabric)
LOG.info(_LI(
"NPIV wwpns fabric state=%(st)s for instance %(inst)s"),
LOG.info(
"NPIV wwpns fabric state=%(st)s for instance %(inst)s",
{'st': fc_state, 'inst': self.instance.name},
instance=self.instance)
@ -487,9 +484,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
vios_wraps, self._fabric_ports(fabric), v_wwpns, preserve=False)
LOG.debug("Rebuilt port maps: %s", npiv_port_maps)
self._set_fabric_meta(fabric, npiv_port_maps)
LOG.warning(_LW("Had to update the system metadata for the WWPNs "
"due to incorrect physical WWPNs on fabric "
"%(fabric)s"),
LOG.warning("Had to update the system metadata for the WWPNs due to "
"incorrect physical WWPNs on fabric %(fabric)s",
{'fabric': fabric}, instance=self.instance)
return npiv_port_maps
@ -514,15 +510,15 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
for npiv_port_map in npiv_port_maps:
vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)
if vios_w is None:
LOG.error(_LE("Mappings were not able to find a proper VIOS. "
"The port mappings were %s."), npiv_port_maps,
LOG.error("Mappings were not able to find a proper VIOS. "
"The port mappings were %s.", npiv_port_maps,
instance=self.instance)
raise exc.VolumeAttachFailed(
volume_id=volume_id, instance_name=self.instance.name,
reason=_("Unable to find a Virtual I/O Server that "
"hosts the NPIV port map for the server."))
ls = [LOG.info, _LI("Adding NPIV mapping for instance %(inst)s "
"for Virtual I/O Server %(vios)s."),
ls = [LOG.info, "Adding NPIV mapping for instance %(inst)s "
"for Virtual I/O Server %(vios)s.",
{'inst': self.instance.name, 'vios': vios_w.name}]
# Add the subtask to add the specific map.
@ -575,8 +571,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
vios_wraps = self.stg_ftsk.feed
for npiv_port_map in npiv_port_maps:
ls = [LOG.info, _LI("Removing a NPIV mapping for instance "
"%(inst)s for fabric %(fabric)s."),
ls = [LOG.info, "Removing a NPIV mapping for instance "
"%(inst)s for fabric %(fabric)s.",
{'inst': self.instance.name, 'fabric': fabric}]
vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)
@ -587,9 +583,9 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
pvm_vfcm.remove_maps, self.vm_uuid,
port_map=npiv_port_map, logspec=ls)
else:
LOG.warning(_LW("No storage connections found between the "
"Virtual I/O Servers and FC Fabric "
"%(fabric)s."), {'fabric': fabric},
LOG.warning("No storage connections found between the "
"Virtual I/O Servers and FC Fabric "
"%(fabric)s.", {'fabric': fabric},
instance=self.instance)
def _set_fabric_state(self, fabric, state):
@ -603,7 +599,7 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
FS_INST_MAPPED: Fabric is mapped with the nova instance.
"""
meta_key = self._sys_fabric_state_key(fabric)
LOG.info(_LI("Setting Fabric state=%(st)s for instance=%(inst)s"),
LOG.info("Setting Fabric state=%(st)s for instance=%(inst)s",
{'st': state, 'inst': self.instance.name},
instance=self.instance)
self.instance.system_metadata[meta_key] = state
@ -655,8 +651,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
meta_elems.append(p_wwpn)
meta_elems.extend(v_wwpn.split())
LOG.info(_LI("Fabric %(fabric)s wwpn metadata will be set to "
"%(meta)s for instance %(inst)s"),
LOG.info("Fabric %(fabric)s wwpn metadata will be set to "
"%(meta)s for instance %(inst)s",
{'fabric': fabric, 'meta': ",".join(meta_elems),
'inst': self.instance.name},
instance=self.instance)
@ -700,10 +696,10 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
if self.instance.system_metadata.get(meta_key) is None:
# If no mappings exist, log a warning.
LOG.warning(_LW("No NPIV mappings exist for instance %(inst)s on "
"fabric %(fabric)s. May not have connected to "
"the fabric yet or fabric configuration was "
"recently modified."),
LOG.warning("No NPIV mappings exist for instance %(inst)s on "
"fabric %(fabric)s. May not have connected to "
"the fabric yet or fabric configuration was "
"recently modified.",
{'inst': self.instance.name, 'fabric': fabric},
instance=self.instance)
return []


@ -19,8 +19,6 @@ from taskflow import task
from nova_powervm import conf as cfg
from nova_powervm.virt.powervm import exception as p_exc
from nova_powervm.virt.powervm.i18n import _
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm import vm
from pypowervm import const as pvm_const
@ -124,7 +122,6 @@ class VscsiVolumeAdapter(object):
'found on %(vios_act)d Virtual I/O Servers.') %
{'volume_id': self.volume_id, 'vios_act': num_vioses_found,
'vios_req': CONF.powervm.vscsi_vios_connections_required})
LOG.error(msg)
ex_args = {'volume_id': self.volume_id, 'reason': msg,
'instance_name': self.instance.name}
raise p_exc.VolumeAttachFailed(**ex_args)
@ -145,9 +142,9 @@ class VscsiVolumeAdapter(object):
for a particular bus, or none of them.
"""
def add_func(vios_w):
LOG.info(_LI("Adding vSCSI mapping to Physical Volume %(dev)s "
"to VM %(vm)s"), {'dev': device_name,
'vm': self.vm_uuid})
LOG.info("Adding vSCSI mapping to Physical Volume %(dev)s "
"to VM %(vm)s", {'dev': device_name,
'vm': self.vm_uuid})
pv = pvm_stor.PV.bld(self.adapter, device_name, udid)
v_map = tsk_map.build_vscsi_mapping(
self.host_uuid, vios_w, self.vm_uuid, pv,
@ -165,8 +162,8 @@ class VscsiVolumeAdapter(object):
except (KeyError, ValueError):
# It's common to lose our specific data in the BDM. The connection
# information can be 'refreshed' by operations like LPM and resize
LOG.info(_LI(u'Failed to retrieve device_id key from BDM for '
'volume id %s'), self.volume_id)
LOG.info('Failed to retrieve device_id key from BDM for volume id '
'%s', self.volume_id)
return None
def _set_udid(self, udid):
@ -186,8 +183,8 @@ class VscsiVolumeAdapter(object):
except (KeyError, ValueError):
# It's common to lose our specific data in the BDM. The connection
# information can be 'refreshed' by operations like LPM and resize
LOG.info(_LI(u'Failed to retrieve device_id key from BDM for '
'volume id %s'), self.volume_id)
LOG.info('Failed to retrieve device_id key from BDM for volume id '
'%s', self.volume_id)
return None
def _set_devname(self, devname):
@ -208,8 +205,8 @@ class VscsiVolumeAdapter(object):
used when a volume is detached from the VM.
"""
def rm_func(vios_w):
LOG.info(_LI("Removing vSCSI mapping from Physical Volume %(dev)s "
"to VM %(vm)s"), {'dev': device_name, 'vm': vm_uuid})
LOG.info("Removing vSCSI mapping from Physical Volume %(dev)s "
"to VM %(vm)s", {'dev': device_name, 'vm': vm_uuid})
removed_maps = tsk_map.remove_maps(
vios_w, vm_uuid,
tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
@ -232,15 +229,15 @@ class VscsiVolumeAdapter(object):
:param stg_ftsk: The feed task to add to. If None, then self.stg_ftsk
"""
def rm_hdisk():
LOG.info(_LI("Running remove for hdisk: '%s'"), device_name)
LOG.info("Running remove for hdisk: '%s'", device_name)
try:
# Attempt to remove the hDisk
hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
vio_wrap.uuid)
except Exception as e:
# If there is a failure, log it, but don't stop the process
LOG.warning(_LW("There was an error removing the hdisk "
"%(disk)s from the Virtual I/O Server."),
LOG.warning("There was an error removing the hdisk "
"%(disk)s from the Virtual I/O Server.",
{'disk': device_name})
LOG.warning(e)
@ -250,8 +247,8 @@ class VscsiVolumeAdapter(object):
stg_ftsk = stg_ftsk or self.stg_ftsk
stg_ftsk.add_post_execute(task.FunctorTask(rm_hdisk, name=name))
else:
LOG.info(_LI("hdisk %(disk)s is not removed because it has "
"existing storage mappings"), {'disk': device_name})
LOG.info("hdisk %(disk)s is not removed because it has "
"existing storage mappings", {'disk': device_name})
def _check_host_mappings(self, vios_wrap, device_name):
"""Checks if the given hdisk has multiple mappings
@ -268,7 +265,7 @@ class VscsiVolumeAdapter(object):
vios_scsi_mappings, None,
tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
LOG.info(_LI("%(num)d Storage Mappings found for %(dev)s"),
LOG.info("%(num)d Storage Mappings found for %(dev)s",
{'num': len(mappings), 'dev': device_name})
# the mapping is still present as the task feed removes
# the mapping later
@ -279,10 +276,10 @@ class VscsiVolumeAdapter(object):
if not udid and not devname:
LOG.warning(
_LW('Could not remove hdisk for volume: %s'), self.volume_id)
'Could not remove hdisk for volume: %s', self.volume_id)
return
LOG.info(_LI('Removing hdisk for udid: %s'), udid)
LOG.info('Removing hdisk for udid: %s', udid)
def find_hdisk_to_remove(vios_w):
if devname is None:
@ -291,7 +288,7 @@ class VscsiVolumeAdapter(object):
device_name = devname
if device_name is None:
return
LOG.info(_LI('Removing %(hdisk)s from VIOS %(vios)s'),
LOG.info('Removing %(hdisk)s from VIOS %(vios)s',
{'hdisk': device_name, 'vios': vios_w.name})
self._add_remove_hdisk(vios_w, device_name,
stg_ftsk=rmv_hdisk_ftsk)


@ -19,8 +19,6 @@ from oslo_log import log as logging
from nova_powervm import conf as cfg
from nova_powervm.virt.powervm import exception as p_exc
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm import vm
from nova_powervm.virt.powervm.volume import driver as v_driver
from nova_powervm.virt.powervm.volume import volume as volume
@ -187,14 +185,14 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
vios_w.uuid, itls)
if hdisk.good_discovery(status, device_name):
LOG.info(_LI('Discovered %(hdisk)s on vios %(vios)s for '
'volume %(volume_id)s. Status code: %(status)s.'),
LOG.info('Discovered %(hdisk)s on vios %(vios)s for '
'volume %(volume_id)s. Status code: %(status)s.',
{'hdisk': device_name, 'vios': vios_w.name,
'volume_id': volume_id, 'status': str(status)},
instance=self.instance)
elif status == hdisk.LUAStatus.DEVICE_IN_USE:
LOG.warning(_LW('Discovered device %(dev)s for volume %(volume)s '
'on %(vios)s is in use. Error code: %(status)s.'),
LOG.warning('Discovered device %(dev)s for volume %(volume)s '
'on %(vios)s is in use. Error code: %(status)s.',
{'dev': device_name, 'volume': volume_id,
'vios': vios_w.name, 'status': str(status)},
instance=self.instance)
@ -274,28 +272,28 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
# in the I/O Server. Subsequent scrub code on future
# deploys will clean this up.
if not hdisk.good_discovery(status, device_name):
LOG.warning(_LW(
LOG.warning(
"Disconnect Volume: The backing hdisk for volume "
"%(volume_id)s on Virtual I/O Server %(vios)s is "
"not in a valid state. This may be the result of "
"an evacuate."),
"an evacuate.",
{'volume_id': self.volume_id, 'vios': vios_w.name},
instance=self.instance)
return False
except Exception as e:
LOG.warning(_LW(
LOG.warning(
"Disconnect Volume: Failed to find disk on Virtual I/O "
"Server %(vios_name)s for volume %(volume_id)s. Volume "
"UDID: %(volume_uid)s. Error: %(error)s"),
"UDID: %(volume_uid)s. Error: %(error)s",
{'error': e, 'volume_uid': udid, 'vios_name': vios_w.name,
'volume_id': self.volume_id}, instance=self.instance)
return False
# We have found the device name
LOG.info(_LI("Disconnect Volume: Discovered the device %(hdisk)s "
"on Virtual I/O Server %(vios_name)s for volume "
"%(volume_id)s. Volume UDID: %(volume_uid)s."),
LOG.info("Disconnect Volume: Discovered the device %(hdisk)s "
"on Virtual I/O Server %(vios_name)s for volume "
"%(volume_id)s. Volume UDID: %(volume_uid)s.",
{'volume_uid': udid, 'volume_id': self.volume_id,
'vios_name': vios_w.name, 'hdisk': device_name},
instance=self.instance)
@ -326,9 +324,9 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
# Warn if no hdisks disconnected.
if not any([result['vio_modified']
for result in ret['wrapper_task_rets'].values()]):
LOG.warning(_LW("Disconnect Volume: Failed to disconnect the "
"volume %(volume_id)s on ANY of the Virtual "
"I/O Servers for instance %(inst)s."),
LOG.warning("Disconnect Volume: Failed to disconnect the "
"volume %(volume_id)s on ANY of the Virtual "
"I/O Servers for instance %(inst)s.",
{'inst': self.instance.name,
'volume_id': self.volume_id},
instance=self.instance)