Fix code for new pep8 rules

This change updates the code to comply with a new pep8 rule that
defers string interpolation to the logger instead of performing it
inline with the % operator.
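As an illustration, a minimal stand-alone sketch of the pattern (not
code from this change; the instance name is a made-up value).  Passing
the arguments to the logger defers interpolation until the record is
actually emitted, so disabled debug messages cost almost nothing:

    import logging

    LOG = logging.getLogger(__name__)
    inst_name = 'instance-00000001'  # hypothetical example value

    # Old style: the message string is built eagerly, even when the
    # DEBUG level is disabled.
    LOG.debug('NVRAM updated for instance: %s' % inst_name)

    # New style: interpolation is deferred to the logger, which skips
    # it entirely when the DEBUG level is not enabled.
    LOG.debug('NVRAM updated for instance: %s', inst_name)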

It also adds an ignore rule (N342) for conf options living outside of
nova/conf.  Given that we are an out-of-tree driver, we cannot put our
conf options in tree.  This is a temporary workaround until the
powervm driver is in tree.
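For context, the kind of out-of-tree option registration that trips
the N342 check looks roughly like this (a sketch only; the option and
its default are illustrative, not the driver's real option set):

    from oslo_config import cfg

    # Out-of-tree drivers have to register options in their own
    # modules; in-tree code is expected to declare them under
    # nova/conf/ instead, which is what N342 enforces.
    powervm_opts = [
        cfg.StrOpt('volume_adapter', default='fibre_channel',
                   help='Volume adapter for attaching storage.'),
    ]

    cfg.CONF.register_opts(powervm_opts, group='powervm')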

Change-Id: I8fe77e8250a2b59ae842cdacfa48baf66356ee50
Closes-Bug: #1632441
Author: Drew Thorstensen
Date:   2016-10-11 15:36:47 -04:00
Parent: 772ccf9d61
Commit: 6c6c355705

10 changed files with 41 additions and 41 deletions

@@ -131,8 +131,8 @@ class LocalStorage(disk_dvr.DiskAdapter):
         # Make sure the remove function will run within the transaction manager
         def rm_func(vios_w):
-            LOG.info(_LI("Disconnecting instance %(inst)s from storage disks.")
-                     % {'inst': instance.name})
+            LOG.info(_LI("Disconnecting instance %(inst)s from storage "
+                         "disks."), {'inst': instance.name})
             return tsk_map.remove_maps(vios_w, lpar_uuid,
                                        match_func=match_func)

@@ -1865,9 +1865,9 @@ class PowerVMDriver(driver.ComputeDriver):
         adp_type = VOLUME_DRIVER_MAPPINGS[CONF.powervm.volume_adapter]
         vol_cls = importutils.import_class(adp_type)
         if conn_info:
-            LOG.debug('Volume Adapter returned for connection_info=%s' %
-                      conn_info)
-        LOG.debug('Volume Adapter class %(cls)s for instance %(inst)s' %
-                  {'cls': vol_cls.__name__, 'inst': instance.name})
+            LOG.debug('Volume Adapter returned for connection_info=%s',
+                      conn_info)
+        LOG.debug('Volume Adapter class %(cls)s for instance %(inst)s',
+                  {'cls': vol_cls.__name__, 'inst': instance.name})
         return vol_cls(self.adapter, self.host_uuid,
                        instance, conn_info, stg_ftsk=stg_ftsk)
@@ -1936,13 +1936,12 @@ class NovaEventHandler(pvm_apt.RawEventHandler):
                 (self.inst_actions_handled & set(details))):
             if not inst:
                 LOG.debug('PowerVM Nova Event Handler: Getting inst '
-                          'for id %s' % pvm_uuid)
+                          'for id %s', pvm_uuid)
                 inst = vm.get_instance(ctx.get_admin_context(),
                                        pvm_uuid)
             if inst:
                 LOG.debug('Handle action "%(action)s" event for instance: '
-                          '%(inst)s' %
-                          dict(action=details, inst=inst.name))
+                          '%(inst)s', dict(action=details, inst=inst.name))
                 self._handle_inst_event(
                     inst, pvm_uuid, uri, etype, details, eid)
         return inst
@@ -2032,8 +2031,8 @@ class NovaEventHandler(pvm_apt.RawEventHandler):
             eid = pvm_event['EventID']
             if etype not in ['NEW_CLIENT']:
-                LOG.debug('PowerVM Event-Action: %s URI: %s Details %s' %
-                          (etype, uri, details))
+                LOG.debug('PowerVM Event-Action: %s URI: %s Details %s',
+                          etype, uri, details)
                 inst_cache[uri] = self._handle_event(uri, etype, details, eid,
                                                      inst=inst_cache.get(uri,
                                                                          None))

@@ -123,9 +123,9 @@ class LiveMigrationDest(LiveMigration):
         self.mig_data.dest_proc_compat = (
             ','.join(self.drvr.host_wrapper.proc_compat_modes))

-        LOG.debug('src_compute_info: %s' % src_compute_info)
-        LOG.debug('dst_compute_info: %s' % dst_compute_info)
-        LOG.debug('Migration data: %s' % self.mig_data)
+        LOG.debug('src_compute_info: %s', src_compute_info)
+        LOG.debug('dst_compute_info: %s', dst_compute_info)
+        LOG.debug('Migration data: %s', self.mig_data)

         return self.mig_data
@@ -143,7 +143,7 @@ class LiveMigrationDest(LiveMigration):
         """
         LOG.debug('Running pre live migration on destination.',
                   instance=self.instance)
-        LOG.debug('Migration data: %s' % migrate_data)
+        LOG.debug('Migration data: %s', migrate_data)

         # Set the ssh auth key.
         mgmt_task.add_authorized_key(self.drvr.adapter,
@@ -241,7 +241,7 @@ class LiveMigrationSrc(LiveMigration):
         lpar_w = vm.get_instance_wrapper(self.drvr.adapter, self.instance)
         self.lpar_w = lpar_w

-        LOG.debug('Dest Migration data: %s' % self.mig_data)
+        LOG.debug('Dest Migration data: %s', self.mig_data)

         # Check proc compatibility modes
         if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode not in
@@ -278,7 +278,7 @@ class LiveMigrationSrc(LiveMigration):
             vol_drv.pre_live_migration_on_source(vol_data)

         self.mig_data.vol_data = vol_data
-        LOG.debug('Src Migration data: %s' % self.mig_data)
+        LOG.debug('Src Migration data: %s', self.mig_data)

         # Create a FeedTask to scrub any orphaned mappings/storage associated
         # with this LPAR. (Don't run it yet - we want to do the VOpt removal
@@ -304,7 +304,7 @@ class LiveMigrationSrc(LiveMigration):
         :param migrate_data: a PowerVMLiveMigrateData object
         """
         LOG.debug("Starting migration.", instance=self.instance)
-        LOG.debug("Migrate data: %s" % migrate_data)
+        LOG.debug("Migrate data: %s", migrate_data)

         # The passed in mig data has more info (dest data added), so replace
         self.mig_data = migrate_data

@@ -86,7 +86,7 @@ class SwiftNvramStore(api.NvramStore):
                     LOG.debug(str(r))
                 result = results
             else:
-                LOG.debug('SwiftOperation result: %s' % str(result))
+                LOG.debug('SwiftOperation result: %s', str(result))
             return result
         except swft_srv.SwiftError as e:
             LOG.exception(e)
@@ -218,7 +218,7 @@ class SwiftNvramStore(api.NvramStore):
             return

         self._store(instance.uuid, instance.name, data, exists=exists)
-        LOG.debug('NVRAM updated for instance: %s' % instance.name)
+        LOG.debug('NVRAM updated for instance: %s', instance.name)

     def store_slot_map(self, inst_key, data):
         """Store the Slot Map to Swift.
@@ -292,7 +292,7 @@ class SwiftNvramStore(api.NvramStore):
         for result in self._run_operation('delete', container=self.container,
                                           objects=[inst_key]):
-            LOG.debug('Delete slot map result: %s' % str(result))
+            LOG.debug('Delete slot map result: %s', str(result))
             if not result['success']:
                 raise api.NVRAMDeleteException(reason=result,
                                                instance=inst_key)
@@ -305,7 +305,7 @@ class SwiftNvramStore(api.NvramStore):
         for result in self._run_operation('delete', container=self.container,
                                           objects=[instance.uuid]):
-            LOG.debug('Delete result: %s' % str(result), instance=instance)
+            LOG.debug('Delete result: %s', str(result), instance=instance)
             if not result['success']:
                 raise api.NVRAMDeleteException(instance=instance.name,
                                                reason=result)

@@ -314,8 +314,8 @@ class PvmVifDriver(object):
             LOG.exception(e)
             raise exception.VirtualInterfaceUnplugException(
                 _LE('Unable to unplug VIF with mac %(mac)s for instance '
-                    '%(inst)s.') % {'mac': vif['address'],
-                                    'inst': self.instance.name})
+                    '%(inst)s.'), {'mac': vif['address'],
+                                   'inst': self.instance.name})
         return cna_w

     def _find_cna_for_vif(self, cna_w_list, vif):
@@ -382,7 +382,7 @@ class PvmSeaVifDriver(PvmVifDriver):
         if not vlan:
             vlan = int(vif['details']['vlan'])

-        LOG.debug("Creating SEA based VIF with VLAN %s" % str(vlan))
+        LOG.debug("Creating SEA based VIF with VLAN %s", str(vlan))
         cna_w = pvm_cna.crt_cna(self.adapter, self.host_uuid, lpar_uuid, vlan,
                                 mac_addr=vif['address'], slot_num=slot_num)

@@ -125,7 +125,7 @@ def translate_event(pvm_state, pwr_state):
     elif pvm_state in RESUMING_EVENTS and pwr_state != power_state.RUNNING:
         trans = event.EVENT_LIFECYCLE_RESUMED

-    LOG.debug('Transistion to %s' % trans)
+    LOG.debug('Translated Event to %s', trans)
     return trans

@@ -238,7 +238,7 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         # manager.  Given that, we need to update the order of the WWPNs.
         # The first WWPN is the one that is logged into the fabric and this
         # will now indicate that our WWPN is logged in.
-        LOG.debug('Post live migrate volume store: %s' % mig_vol_stor,
+        LOG.debug('Post live migrate volume store: %s', mig_vol_stor,
                   instance=self.instance)

         for fabric in self._fabric_names():
             # We check the mig_vol_stor to see if this fabric has already been
@@ -254,8 +254,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
                 # Flip the WPWNs
                 c_wwpns = port_map[1].split()
                 c_wwpns.reverse()
-                LOG.debug('Flipping WWPNs, ports: %s wwpns: %s' %
-                          (port_map, c_wwpns), instance=self.instance)
+                LOG.debug('Flipping WWPNs, ports: %s wwpns: %s',
+                          port_map, c_wwpns, instance=self.instance)

                 # Get the new physical WWPN.
                 vfc_map = pvm_vfcm.find_vios_for_vfc_wwpns(vios_wraps,
                                                            c_wwpns)[1]
@@ -402,7 +402,7 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         for fabric in self._fabric_names():
             fc_state = self._get_fabric_state(fabric)
             LOG.info(_LI("NPIV wwpns fabric state=%(st)s for "
-                         "instance %(inst)s") %
+                         "instance %(inst)s"),
                      {'st': fc_state, 'inst': self.instance.name})

             if self._is_initial_wwpn(fc_state, fabric):
@@ -473,7 +473,7 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
             return npiv_port_maps

         # If ANY of the VIOS ports were not there, rebuild the port maps
-        LOG.debug("Rebuild existing_npiv_port_maps=%s. Reset fabric state." %
+        LOG.debug("Rebuild existing_npiv_port_maps=%s. Reset fabric state.",
                   npiv_port_maps)
         v_wwpns = []
         for port_map in npiv_port_maps:
@@ -483,7 +483,7 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         # Derive new maps and don't preserve existing maps
         npiv_port_maps = pvm_vfcm.derive_npiv_map(
             vios_wraps, self._fabric_ports(fabric), v_wwpns, preserve=False)
-        LOG.debug("Rebuilt port maps: %s" % npiv_port_maps)
+        LOG.debug("Rebuilt port maps: %s", npiv_port_maps)
         self._set_fabric_meta(fabric, npiv_port_maps)
         LOG.warning(_LW("Had to update the system metadata for the WWPNs "
                         "due to incorrect physical WWPNs on fabric "
@@ -607,7 +607,7 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
                          FS_INST_MAPPED: Fabric is mapped with the nova instance.
         """
         meta_key = self._sys_fabric_state_key(fabric)
-        LOG.info(_LI("Setting Fabric state=%(st)s for instance=%(inst)s") %
+        LOG.info(_LI("Setting Fabric state=%(st)s for instance=%(inst)s"),
                  {'st': state, 'inst': self.instance.name})
         self.instance.system_metadata[meta_key] = state

@@ -146,8 +146,8 @@ class VscsiVolumeAdapter(object):
         """
         def add_func(vios_w):
             LOG.info(_LI("Adding vSCSI mapping to Physical Volume %(dev)s "
-                         "to VM %(vm)s") % {'dev': device_name,
-                                            'vm': self.vm_uuid})
+                         "to VM %(vm)s"), {'dev': device_name,
+                                           'vm': self.vm_uuid})
             pv = pvm_stor.PV.bld(self.adapter, device_name, target_name)
             v_map = tsk_map.build_vscsi_mapping(
                 self.host_uuid, vios_w, self.vm_uuid, pv,
@@ -209,7 +209,7 @@ class VscsiVolumeAdapter(object):
         """
         def rm_func(vios_w):
             LOG.info(_LI("Removing vSCSI mapping from Physical Volume %(dev)s "
-                         "to VM %(vm)s") % {'dev': device_name, 'vm': vm_uuid})
+                         "to VM %(vm)s"), {'dev': device_name, 'vm': vm_uuid})
             removed_maps = tsk_map.remove_maps(
                 vios_w, vm_uuid,
                 tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
@@ -232,7 +232,7 @@ class VscsiVolumeAdapter(object):
         :param stg_ftsk: The feed task to add to.  If None, then self.stg_ftsk
         """
         def rm_hdisk():
-            LOG.info(_LI("Running remove for hdisk: '%s'") % device_name)
+            LOG.info(_LI("Running remove for hdisk: '%s'"), device_name)
             try:
                 # Attempt to remove the hDisk
                 hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
@@ -278,11 +278,11 @@ class VscsiVolumeAdapter(object):
         """Cleanup the hdisk associated with this udid."""
         if not udid and not devname:
-            LOG.warning(_LW('Could not remove hdisk for volume: %s')
-                        % self.volume_id)
+            LOG.warning(
+                _LW('Could not remove hdisk for volume: %s'), self.volume_id)
             return

-        LOG.info(_LI('Removing hdisk for udid: %s') % udid)
+        LOG.info(_LI('Removing hdisk for udid: %s'), udid)

         def find_hdisk_to_remove(vios_w):
             if devname is None:

@@ -181,8 +181,8 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
         # Server (if any).
         itls = hdisk.build_itls(vio_wwpns, t_wwpns, lun)
         if len(itls) == 0:
-            LOG.debug('No ITLs for VIOS %(vios)s for volume %(volume_id)s.'
-                      % {'vios': vios_w.name, 'volume_id': volume_id})
+            LOG.debug('No ITLs for VIOS %(vios)s for volume %(volume_id)s.',
+                      {'vios': vios_w.name, 'volume_id': volume_id})
             return None, None, None

         status, device_name, udid = hdisk.discover_hdisk(self.adapter,

@@ -33,7 +33,8 @@ commands =
 whitelist_externals = bash

 [flake8]
-ignore = E125
+# N342 - Config Opts need to be outside nova/conf until powervm is part of nova proper
+ignore = E125,N342
 exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools

 [hacking]