Fix code for new pep8 rules

This change updates the code to comply with a new pep8 rule that defers
string interpolation to the logger instead of doing it inline.
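
As a rough illustration of the pattern (using only the standard logging
module; the variable is a stand-in, not code from this driver):

    import logging

    logging.basicConfig(level=logging.DEBUG)
    LOG = logging.getLogger(__name__)

    migrate_data = {'dest_proc_compat': 'POWER8'}  # stand-in value

    # Old style flagged by the rule -- the message is formatted eagerly,
    # even when DEBUG logging is disabled:
    #   LOG.debug('Migration data: %s' % migrate_data)

    # Preferred style -- arguments are passed separately and interpolated
    # by the logger only if the record is actually emitted:
    LOG.debug('Migration data: %s', migrate_data)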

It also adds an ignore rule (N342) for conf options living outside of
nova/conf. Given that we are an out-of-tree driver, we cannot put our
conf options in tree. This is a temporary workaround until the powervm
driver is in tree.
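
For context, a hypothetical sketch of the kind of out-of-tree option
registration that trips N342 (the option name and default shown here are
illustrative, not an exact copy of the driver's conf module):

    from oslo_config import cfg

    # Illustrative only: an out-of-tree driver registers its options in its
    # own module, under its own group, instead of under nova/conf/.
    powervm_opts = [
        cfg.StrOpt('volume_adapter', default='vscsi',
                   help='Volume adapter used to attach volumes to the VM.'),
    ]

    CONF = cfg.CONF
    CONF.register_opts(powervm_opts, group='powervm')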

Change-Id: I8fe77e8250a2b59ae842cdacfa48baf66356ee50
Closes-Bug: #1632441
Drew Thorstensen 2016-10-11 15:36:47 -04:00
parent 772ccf9d61
commit 6c6c355705
10 changed files with 41 additions and 41 deletions

View File

@@ -131,8 +131,8 @@ class LocalStorage(disk_dvr.DiskAdapter):
         # Make sure the remove function will run within the transaction manager
         def rm_func(vios_w):
-            LOG.info(_LI("Disconnecting instance %(inst)s from storage disks.")
-                     % {'inst': instance.name})
+            LOG.info(_LI("Disconnecting instance %(inst)s from storage "
+                         "disks."), {'inst': instance.name})
             return tsk_map.remove_maps(vios_w, lpar_uuid,
                                        match_func=match_func)

View File

@@ -1865,9 +1865,9 @@ class PowerVMDriver(driver.ComputeDriver):
         adp_type = VOLUME_DRIVER_MAPPINGS[CONF.powervm.volume_adapter]
         vol_cls = importutils.import_class(adp_type)
         if conn_info:
-            LOG.debug('Volume Adapter returned for connection_info=%s' %
+            LOG.debug('Volume Adapter returned for connection_info=%s',
                       conn_info)
-        LOG.debug('Volume Adapter class %(cls)s for instance %(inst)s' %
+        LOG.debug('Volume Adapter class %(cls)s for instance %(inst)s',
                   {'cls': vol_cls.__name__, 'inst': instance.name})
         return vol_cls(self.adapter, self.host_uuid,
                        instance, conn_info, stg_ftsk=stg_ftsk)
@@ -1936,13 +1936,12 @@ class NovaEventHandler(pvm_apt.RawEventHandler):
                 (self.inst_actions_handled & set(details))):
             if not inst:
                 LOG.debug('PowerVM Nova Event Handler: Getting inst '
-                          'for id %s' % pvm_uuid)
+                          'for id %s', pvm_uuid)
                 inst = vm.get_instance(ctx.get_admin_context(),
                                        pvm_uuid)
             if inst:
                 LOG.debug('Handle action "%(action)s" event for instance: '
-                          '%(inst)s' %
-                          dict(action=details, inst=inst.name))
+                          '%(inst)s', dict(action=details, inst=inst.name))
                 self._handle_inst_event(
                     inst, pvm_uuid, uri, etype, details, eid)
         return inst
@@ -2032,8 +2031,8 @@ class NovaEventHandler(pvm_apt.RawEventHandler):
                 eid = pvm_event['EventID']
                 if etype not in ['NEW_CLIENT']:
-                    LOG.debug('PowerVM Event-Action: %s URI: %s Details %s' %
-                              (etype, uri, details))
+                    LOG.debug('PowerVM Event-Action: %s URI: %s Details %s',
+                              etype, uri, details)
                 inst_cache[uri] = self._handle_event(uri, etype, details, eid,
                                                      inst=inst_cache.get(uri,
                                                                          None))

View File

@@ -123,9 +123,9 @@ class LiveMigrationDest(LiveMigration):
         self.mig_data.dest_proc_compat = (
             ','.join(self.drvr.host_wrapper.proc_compat_modes))

-        LOG.debug('src_compute_info: %s' % src_compute_info)
-        LOG.debug('dst_compute_info: %s' % dst_compute_info)
-        LOG.debug('Migration data: %s' % self.mig_data)
+        LOG.debug('src_compute_info: %s', src_compute_info)
+        LOG.debug('dst_compute_info: %s', dst_compute_info)
+        LOG.debug('Migration data: %s', self.mig_data)

         return self.mig_data
@@ -143,7 +143,7 @@ class LiveMigrationDest(LiveMigration):
         """
         LOG.debug('Running pre live migration on destination.',
                   instance=self.instance)
-        LOG.debug('Migration data: %s' % migrate_data)
+        LOG.debug('Migration data: %s', migrate_data)

         # Set the ssh auth key.
         mgmt_task.add_authorized_key(self.drvr.adapter,
@@ -241,7 +241,7 @@ class LiveMigrationSrc(LiveMigration):
         lpar_w = vm.get_instance_wrapper(self.drvr.adapter, self.instance)
         self.lpar_w = lpar_w

-        LOG.debug('Dest Migration data: %s' % self.mig_data)
+        LOG.debug('Dest Migration data: %s', self.mig_data)

         # Check proc compatibility modes
         if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode not in
@@ -278,7 +278,7 @@ class LiveMigrationSrc(LiveMigration):
             vol_drv.pre_live_migration_on_source(vol_data)
         self.mig_data.vol_data = vol_data

-        LOG.debug('Src Migration data: %s' % self.mig_data)
+        LOG.debug('Src Migration data: %s', self.mig_data)

         # Create a FeedTask to scrub any orphaned mappings/storage associated
         # with this LPAR. (Don't run it yet - we want to do the VOpt removal
@@ -304,7 +304,7 @@ class LiveMigrationSrc(LiveMigration):
         :param migrate_data: a PowerVMLiveMigrateData object
         """
         LOG.debug("Starting migration.", instance=self.instance)
-        LOG.debug("Migrate data: %s" % migrate_data)
+        LOG.debug("Migrate data: %s", migrate_data)

         # The passed in mig data has more info (dest data added), so replace
         self.mig_data = migrate_data

View File

@@ -86,7 +86,7 @@ class SwiftNvramStore(api.NvramStore):
                         LOG.debug(str(r))
                     result = results
                 else:
-                    LOG.debug('SwiftOperation result: %s' % str(result))
+                    LOG.debug('SwiftOperation result: %s', str(result))
                 return result
             except swft_srv.SwiftError as e:
                 LOG.exception(e)
@@ -218,7 +218,7 @@ class SwiftNvramStore(api.NvramStore):
             return

         self._store(instance.uuid, instance.name, data, exists=exists)
-        LOG.debug('NVRAM updated for instance: %s' % instance.name)
+        LOG.debug('NVRAM updated for instance: %s', instance.name)

     def store_slot_map(self, inst_key, data):
         """Store the Slot Map to Swift.
@@ -292,7 +292,7 @@ class SwiftNvramStore(api.NvramStore):
         for result in self._run_operation('delete', container=self.container,
                                           objects=[inst_key]):
-            LOG.debug('Delete slot map result: %s' % str(result))
+            LOG.debug('Delete slot map result: %s', str(result))
             if not result['success']:
                 raise api.NVRAMDeleteException(reason=result,
                                                instance=inst_key)
@@ -305,7 +305,7 @@ class SwiftNvramStore(api.NvramStore):
         for result in self._run_operation('delete', container=self.container,
                                           objects=[instance.uuid]):
-            LOG.debug('Delete result: %s' % str(result), instance=instance)
+            LOG.debug('Delete result: %s', str(result), instance=instance)
             if not result['success']:
                 raise api.NVRAMDeleteException(instance=instance.name,
                                                reason=result)

View File

@@ -314,8 +314,8 @@ class PvmVifDriver(object):
             LOG.exception(e)
             raise exception.VirtualInterfaceUnplugException(
                 _LE('Unable to unplug VIF with mac %(mac)s for instance '
-                    '%(inst)s.') % {'mac': vif['address'],
-                                    'inst': self.instance.name})
+                    '%(inst)s.'), {'mac': vif['address'],
+                                   'inst': self.instance.name})
         return cna_w

     def _find_cna_for_vif(self, cna_w_list, vif):
@@ -382,7 +382,7 @@ class PvmSeaVifDriver(PvmVifDriver):
         if not vlan:
             vlan = int(vif['details']['vlan'])

-        LOG.debug("Creating SEA based VIF with VLAN %s" % str(vlan))
+        LOG.debug("Creating SEA based VIF with VLAN %s", str(vlan))
         cna_w = pvm_cna.crt_cna(self.adapter, self.host_uuid, lpar_uuid, vlan,
                                 mac_addr=vif['address'], slot_num=slot_num)

View File

@@ -125,7 +125,7 @@ def translate_event(pvm_state, pwr_state):
     elif pvm_state in RESUMING_EVENTS and pwr_state != power_state.RUNNING:
         trans = event.EVENT_LIFECYCLE_RESUMED

-    LOG.debug('Transistion to %s' % trans)
+    LOG.debug('Translated Event to %s', trans)
     return trans

View File

@@ -238,7 +238,7 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         # manager. Given that, we need to update the order of the WWPNs.
         # The first WWPN is the one that is logged into the fabric and this
         # will now indicate that our WWPN is logged in.
-        LOG.debug('Post live migrate volume store: %s' % mig_vol_stor,
+        LOG.debug('Post live migrate volume store: %s', mig_vol_stor,
                   instance=self.instance)
         for fabric in self._fabric_names():
             # We check the mig_vol_stor to see if this fabric has already been
@@ -254,8 +254,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
             # Flip the WPWNs
             c_wwpns = port_map[1].split()
             c_wwpns.reverse()
-            LOG.debug('Flipping WWPNs, ports: %s wwpns: %s' %
-                      (port_map, c_wwpns), instance=self.instance)
+            LOG.debug('Flipping WWPNs, ports: %s wwpns: %s',
+                      port_map, c_wwpns, instance=self.instance)

             # Get the new physical WWPN.
             vfc_map = pvm_vfcm.find_vios_for_vfc_wwpns(vios_wraps,
                                                        c_wwpns)[1]
@@ -402,7 +402,7 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         for fabric in self._fabric_names():
             fc_state = self._get_fabric_state(fabric)
             LOG.info(_LI("NPIV wwpns fabric state=%(st)s for "
-                         "instance %(inst)s") %
+                         "instance %(inst)s"),
                      {'st': fc_state, 'inst': self.instance.name})

             if self._is_initial_wwpn(fc_state, fabric):
@@ -473,7 +473,7 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
             return npiv_port_maps

         # If ANY of the VIOS ports were not there, rebuild the port maps
-        LOG.debug("Rebuild existing_npiv_port_maps=%s. Reset fabric state." %
+        LOG.debug("Rebuild existing_npiv_port_maps=%s. Reset fabric state.",
                   npiv_port_maps)
         v_wwpns = []
         for port_map in npiv_port_maps:
@@ -483,7 +483,7 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         # Derive new maps and don't preserve existing maps
         npiv_port_maps = pvm_vfcm.derive_npiv_map(
             vios_wraps, self._fabric_ports(fabric), v_wwpns, preserve=False)
-        LOG.debug("Rebuilt port maps: %s" % npiv_port_maps)
+        LOG.debug("Rebuilt port maps: %s", npiv_port_maps)
         self._set_fabric_meta(fabric, npiv_port_maps)
         LOG.warning(_LW("Had to update the system metadata for the WWPNs "
                         "due to incorrect physical WWPNs on fabric "
@@ -607,7 +607,7 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         FS_INST_MAPPED: Fabric is mapped with the nova instance.
         """
         meta_key = self._sys_fabric_state_key(fabric)
-        LOG.info(_LI("Setting Fabric state=%(st)s for instance=%(inst)s") %
+        LOG.info(_LI("Setting Fabric state=%(st)s for instance=%(inst)s"),
                  {'st': state, 'inst': self.instance.name})
         self.instance.system_metadata[meta_key] = state

View File

@@ -146,8 +146,8 @@ class VscsiVolumeAdapter(object):
         """
         def add_func(vios_w):
             LOG.info(_LI("Adding vSCSI mapping to Physical Volume %(dev)s "
-                         "to VM %(vm)s") % {'dev': device_name,
-                                            'vm': self.vm_uuid})
+                         "to VM %(vm)s"), {'dev': device_name,
+                                           'vm': self.vm_uuid})
             pv = pvm_stor.PV.bld(self.adapter, device_name, target_name)
             v_map = tsk_map.build_vscsi_mapping(
                 self.host_uuid, vios_w, self.vm_uuid, pv,
@@ -209,7 +209,7 @@ class VscsiVolumeAdapter(object):
         """
         def rm_func(vios_w):
             LOG.info(_LI("Removing vSCSI mapping from Physical Volume %(dev)s "
-                         "to VM %(vm)s") % {'dev': device_name, 'vm': vm_uuid})
+                         "to VM %(vm)s"), {'dev': device_name, 'vm': vm_uuid})
             removed_maps = tsk_map.remove_maps(
                 vios_w, vm_uuid,
                 tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
@@ -232,7 +232,7 @@ class VscsiVolumeAdapter(object):
         :param stg_ftsk: The feed task to add to. If None, then self.stg_ftsk
         """
         def rm_hdisk():
-            LOG.info(_LI("Running remove for hdisk: '%s'") % device_name)
+            LOG.info(_LI("Running remove for hdisk: '%s'"), device_name)
             try:
                 # Attempt to remove the hDisk
                 hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
@@ -278,11 +278,11 @@ class VscsiVolumeAdapter(object):
         """Cleanup the hdisk associated with this udid."""
         if not udid and not devname:
-            LOG.warning(_LW('Could not remove hdisk for volume: %s')
-                        % self.volume_id)
+            LOG.warning(
+                _LW('Could not remove hdisk for volume: %s'), self.volume_id)
             return

-        LOG.info(_LI('Removing hdisk for udid: %s') % udid)
+        LOG.info(_LI('Removing hdisk for udid: %s'), udid)

         def find_hdisk_to_remove(vios_w):
             if devname is None:

View File

@@ -181,8 +181,8 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
         # Server (if any).
         itls = hdisk.build_itls(vio_wwpns, t_wwpns, lun)
         if len(itls) == 0:
-            LOG.debug('No ITLs for VIOS %(vios)s for volume %(volume_id)s.'
-                      % {'vios': vios_w.name, 'volume_id': volume_id})
+            LOG.debug('No ITLs for VIOS %(vios)s for volume %(volume_id)s.',
+                      {'vios': vios_w.name, 'volume_id': volume_id})
             return None, None, None

         status, device_name, udid = hdisk.discover_hdisk(self.adapter,

View File

@@ -33,7 +33,8 @@ commands =
 whitelist_externals = bash

 [flake8]
-ignore = E125
+# N342 - Config Opts need to be outside nova/conf until powervm is part of nova proper
+ignore = E125,N342
 exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools

 [hacking]