Update to hacking 0.12.0 and use new checks

Use hacking 0.12.0

Use the new checks that are available:
    [H106] Don't put vim configuration in source files.
    [H203] Use assertIs(Not)None to check for None.
    [H904] Delay string interpolations at logging calls.

Fix code so tests pass.

Change-Id: I902e999687b066800e18fafd091571bf718b15f4
Depends-On: I2aa44b62f900d4dfd67701b01eadd0523fbfaf07
This commit is contained in:
John L. Villalovos
2016-11-08 10:07:56 -08:00
parent e15fe4ac6b
commit 8b6894483b
16 changed files with 42 additions and 40 deletions

View File

@@ -178,8 +178,8 @@ class BaseImageService(object):
:raises: ImageNotFound :raises: ImageNotFound
""" """
LOG.debug("Getting image metadata from glance. Image: %s" LOG.debug("Getting image metadata from glance. Image: %s",
% image_href) image_href)
(image_id, self.glance_host, (image_id, self.glance_host,
self.glance_port, use_ssl) = service_utils.parse_image_ref(image_href) self.glance_port, use_ssl) = service_utils.parse_image_ref(image_href)

View File

@@ -299,7 +299,7 @@ def fetch(context, image_href, path, force_raw=False):
# checked before we got here. # checked before we got here.
image_service = service.get_image_service(image_href, image_service = service.get_image_service(image_href,
context=context) context=context)
LOG.debug("Using %(image_service)s to download image %(image_href)s." % LOG.debug("Using %(image_service)s to download image %(image_href)s.",
{'image_service': image_service.__class__, {'image_service': image_service.__class__,
'image_href': image_href}) 'image_href': image_href})
@@ -330,7 +330,7 @@ def image_to_raw(image_href, path, path_tmp):
if fmt != "raw": if fmt != "raw":
staged = "%s.converted" % path staged = "%s.converted" % path
LOG.debug("%(image)s was %(format)s, converting to raw" % LOG.debug("%(image)s was %(format)s, converting to raw",
{'image': image_href, 'format': fmt}) {'image': image_href, 'format': fmt})
with fileutils.remove_path_on_error(staged): with fileutils.remove_path_on_error(staged):
disk_utils.convert_image(path_tmp, staged, 'raw') disk_utils.convert_image(path_tmp, staged, 'raw')

View File

@@ -70,8 +70,8 @@ def execute(*cmd, **kwargs):
result = processutils.execute(*cmd, **kwargs) result = processutils.execute(*cmd, **kwargs)
LOG.debug('Execution completed, command line is "%s"', LOG.debug('Execution completed, command line is "%s"',
' '.join(map(str, cmd))) ' '.join(map(str, cmd)))
LOG.debug('Command stdout is: "%s"' % result[0]) LOG.debug('Command stdout is: "%s"', result[0])
LOG.debug('Command stderr is: "%s"' % result[1]) LOG.debug('Command stderr is: "%s"', result[1])
return result return result
@@ -109,7 +109,7 @@ def ssh_connect(connection):
# send TCP keepalive packets every 20 seconds # send TCP keepalive packets every 20 seconds
ssh.get_transport().set_keepalive(20) ssh.get_transport().set_keepalive(20)
except Exception as e: except Exception as e:
LOG.debug("SSH connect failed: %s" % e) LOG.debug("SSH connect failed: %s", e)
raise exception.SSHConnectFailed(host=connection.get('host')) raise exception.SSHConnectFailed(host=connection.get('host'))
return ssh return ssh

View File

@@ -106,7 +106,7 @@ class ConductorManager(base_manager.BaseConductorManager):
""" """
node_id = node_obj.uuid node_id = node_obj.uuid
LOG.debug("RPC update_node called for node %s." % node_id) LOG.debug("RPC update_node called for node %s.", node_id)
# NOTE(jroll) clear maintenance_reason if node.update sets # NOTE(jroll) clear maintenance_reason if node.update sets
# maintenance to False for backwards compatibility, for tools # maintenance to False for backwards compatibility, for tools
@@ -222,7 +222,7 @@ class ConductorManager(base_manager.BaseConductorManager):
or return it in the response body (False). or return it in the response body (False).
""" """
LOG.debug("RPC vendor_passthru called for node %s." % node_id) LOG.debug("RPC vendor_passthru called for node %s.", node_id)
# NOTE(mariojv): Not all vendor passthru methods require an exclusive # NOTE(mariojv): Not all vendor passthru methods require an exclusive
# lock on a node, so we acquire a shared lock initially. If a method # lock on a node, so we acquire a shared lock initially. If a method
# requires an exclusive lock, we'll acquire one after checking # requires an exclusive lock, we'll acquire one after checking
@@ -314,7 +314,7 @@ class ConductorManager(base_manager.BaseConductorManager):
""" """
# Any locking in a top-level vendor action will need to be done by the # Any locking in a top-level vendor action will need to be done by the
# implementation, as there is little we could reasonably lock on here. # implementation, as there is little we could reasonably lock on here.
LOG.debug("RPC driver_vendor_passthru for driver %s." % driver_name) LOG.debug("RPC driver_vendor_passthru for driver %s.", driver_name)
driver = driver_factory.get_driver(driver_name) driver = driver_factory.get_driver(driver_name)
if not getattr(driver, 'vendor', None): if not getattr(driver, 'vendor', None):
raise exception.UnsupportedDriverExtension( raise exception.UnsupportedDriverExtension(
@@ -361,8 +361,8 @@ class ConductorManager(base_manager.BaseConductorManager):
:returns: dictionary of <method name>:<method metadata> entries. :returns: dictionary of <method name>:<method metadata> entries.
""" """
LOG.debug("RPC get_node_vendor_passthru_methods called for node %s" LOG.debug("RPC get_node_vendor_passthru_methods called for node %s",
% node_id) node_id)
lock_purpose = 'listing vendor passthru methods' lock_purpose = 'listing vendor passthru methods'
with task_manager.acquire(context, node_id, shared=True, with task_manager.acquire(context, node_id, shared=True,
purpose=lock_purpose) as task: purpose=lock_purpose) as task:
@@ -387,8 +387,8 @@ class ConductorManager(base_manager.BaseConductorManager):
""" """
# Any locking in a top-level vendor action will need to be done by the # Any locking in a top-level vendor action will need to be done by the
# implementation, as there is little we could reasonably lock on here. # implementation, as there is little we could reasonably lock on here.
LOG.debug("RPC get_driver_vendor_passthru_methods for driver %s" LOG.debug("RPC get_driver_vendor_passthru_methods for driver %s",
% driver_name) driver_name)
driver = driver_factory.get_driver(driver_name) driver = driver_factory.get_driver(driver_name)
if not getattr(driver, 'vendor', None): if not getattr(driver, 'vendor', None):
raise exception.UnsupportedDriverExtension( raise exception.UnsupportedDriverExtension(
@@ -426,7 +426,7 @@ class ConductorManager(base_manager.BaseConductorManager):
target from the current state. target from the current state.
""" """
LOG.debug("RPC do_node_deploy called for node %s." % node_id) LOG.debug("RPC do_node_deploy called for node %s.", node_id)
# NOTE(comstud): If the _sync_power_states() periodic task happens # NOTE(comstud): If the _sync_power_states() periodic task happens
# to have locked this node, we'll fail to acquire the lock. The # to have locked this node, we'll fail to acquire the lock. The
@@ -509,7 +509,7 @@ class ConductorManager(base_manager.BaseConductorManager):
target from the current state. target from the current state.
""" """
LOG.debug("RPC do_node_tear_down called for node %s." % node_id) LOG.debug("RPC do_node_tear_down called for node %s.", node_id)
with task_manager.acquire(context, node_id, shared=False, with task_manager.acquire(context, node_id, shared=False,
purpose='node tear down') as task: purpose='node tear down') as task:
@@ -896,7 +896,7 @@ class ConductorManager(base_manager.BaseConductorManager):
# Kill this worker, the async step will make an RPC call to # Kill this worker, the async step will make an RPC call to
# continue_node_clean to continue cleaning # continue_node_clean to continue cleaning
LOG.info(_LI('Clean step %(step)s on node %(node)s being ' LOG.info(_LI('Clean step %(step)s on node %(node)s being '
'executed asynchronously, waiting for driver.') % 'executed asynchronously, waiting for driver.'),
{'node': node.uuid, 'step': step}) {'node': node.uuid, 'step': step})
target_state = states.MANAGEABLE if manual_clean else None target_state = states.MANAGEABLE if manual_clean else None
task.process_event('wait', target_state=target_state) task.process_event('wait', target_state=target_state)
@@ -1553,7 +1553,7 @@ class ConductorManager(base_manager.BaseConductorManager):
:raises: InvalidParameterValue when the wrong driver info is specified. :raises: InvalidParameterValue when the wrong driver info is specified.
:raises: MissingParameterValue if missing supplied info. :raises: MissingParameterValue if missing supplied info.
""" """
LOG.debug('RPC get_console_information called for node %s' % node_id) LOG.debug('RPC get_console_information called for node %s', node_id)
lock_purpose = 'getting console information' lock_purpose = 'getting console information'
with task_manager.acquire(context, node_id, shared=True, with task_manager.acquire(context, node_id, shared=True,
@@ -2160,7 +2160,7 @@ class ConductorManager(base_manager.BaseConductorManager):
description for them. description for them.
""" """
LOG.debug("RPC get_raid_logical_disk_properties " LOG.debug("RPC get_raid_logical_disk_properties "
"called for driver %s" % driver_name) "called for driver %s", driver_name)
driver = driver_factory.get_driver(driver_name) driver = driver_factory.get_driver(driver_name)
if not getattr(driver, 'raid', None): if not getattr(driver, 'raid', None):

View File

@@ -405,7 +405,7 @@ class AgentDeployMixin(object):
clean_step_hook = _get_post_clean_step_hook(node) clean_step_hook = _get_post_clean_step_hook(node)
if clean_step_hook is not None: if clean_step_hook is not None:
LOG.debug('For node %(node)s, executing post clean step ' LOG.debug('For node %(node)s, executing post clean step '
'hook %(method)s for clean step %(step)s' % 'hook %(method)s for clean step %(step)s',
{'method': clean_step_hook.__name__, {'method': clean_step_hook.__name__,
'node': node.uuid, 'node': node.uuid,
'step': node.clean_step}) 'step': node.clean_step})

View File

@@ -693,7 +693,7 @@ def try_set_boot_device(task, device, persistent=True):
if get_boot_mode_for_deploy(task.node) == 'uefi': if get_boot_mode_for_deploy(task.node) == 'uefi':
LOG.warning(_LW("ipmitool is unable to set boot device while " LOG.warning(_LW("ipmitool is unable to set boot device while "
"the node %s is in UEFI boot mode. Please set " "the node %s is in UEFI boot mode. Please set "
"the boot device manually.") % task.node.uuid) "the boot device manually."), task.node.uuid)
else: else:
raise raise

View File

@@ -628,7 +628,7 @@ def _commit_to_controllers(node, controllers):
"""Commit changes to RAID controllers on the node.""" """Commit changes to RAID controllers on the node."""
if not controllers: if not controllers:
LOG.debug('No changes on any of the controllers on node %s' % LOG.debug('No changes on any of the controllers on node %s',
node.uuid) node.uuid)
return return

View File

@@ -180,7 +180,7 @@ class ImageCache(object):
if self.master_dir is None: if self.master_dir is None:
return return
LOG.debug("Starting clean up for master image cache %(dir)s" % LOG.debug("Starting clean up for master image cache %(dir)s",
{'dir': self.master_dir}) {'dir': self.master_dir})
amount_copy = amount amount_copy = amount

View File

@@ -277,10 +277,10 @@ def _parse_driver_info(node):
if not username: if not username:
LOG.warning(_LW('ipmi_username is not defined or empty for node %s: ' LOG.warning(_LW('ipmi_username is not defined or empty for node %s: '
'NULL user will be utilized.') % node.uuid) 'NULL user will be utilized.'), node.uuid)
if not password: if not password:
LOG.warning(_LW('ipmi_password is not defined or empty for node %s: ' LOG.warning(_LW('ipmi_password is not defined or empty for node %s: '
'NULL password will be utilized.') % node.uuid) 'NULL password will be utilized.'), node.uuid)
if protocol_version not in VALID_PROTO_VERSIONS: if protocol_version not in VALID_PROTO_VERSIONS:
valid_versions = ', '.join(VALID_PROTO_VERSIONS) valid_versions = ', '.join(VALID_PROTO_VERSIONS)

View File

@@ -491,12 +491,12 @@ def _get_hosts_name_for_node(ssh_obj, driver_info):
cmd_to_exec = "%s %s" % (driver_info['cmd_set']['base_cmd'], cmd_to_exec = "%s %s" % (driver_info['cmd_set']['base_cmd'],
driver_info['cmd_set']['list_all']) driver_info['cmd_set']['list_all'])
full_node_list = _ssh_execute(ssh_obj, cmd_to_exec) full_node_list = _ssh_execute(ssh_obj, cmd_to_exec)
LOG.debug("Retrieved Node List: %s" % repr(full_node_list)) LOG.debug("Retrieved Node List: %s", repr(full_node_list))
# for each node check Mac Addresses # for each node check Mac Addresses
for node in full_node_list: for node in full_node_list:
if not node: if not node:
continue continue
LOG.debug("Checking Node: %s's Mac address." % node) LOG.debug("Checking Node: %s's Mac address.", node)
cmd_to_exec = "%s %s" % (driver_info['cmd_set']['base_cmd'], cmd_to_exec = "%s %s" % (driver_info['cmd_set']['base_cmd'],
driver_info['cmd_set']['get_node_macs']) driver_info['cmd_set']['get_node_macs'])
cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', node) cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', node)
@@ -508,7 +508,7 @@ def _get_hosts_name_for_node(ssh_obj, driver_info):
for node_mac in driver_info['macs']: for node_mac in driver_info['macs']:
if (driver_utils.normalize_mac(host_mac) if (driver_utils.normalize_mac(host_mac)
in driver_utils.normalize_mac(node_mac)): in driver_utils.normalize_mac(node_mac)):
LOG.debug("Found Mac address: %s" % node_mac) LOG.debug("Found Mac address: %s", node_mac)
matched_name = node matched_name = node
break break

View File

@@ -1692,10 +1692,10 @@ class TestPost(test_api_base.BaseApiTest):
def test_create_node_chassis_uuid_always_in_response(self): def test_create_node_chassis_uuid_always_in_response(self):
result = self._test_create_node(chassis_uuid=None) result = self._test_create_node(chassis_uuid=None)
self.assertEqual(None, result['chassis_uuid']) self.assertIsNone(result['chassis_uuid'])
result = self._test_create_node(uuid=uuidutils.generate_uuid(), result = self._test_create_node(uuid=uuidutils.generate_uuid(),
remove_chassis_uuid=True) remove_chassis_uuid=True)
self.assertEqual(None, result['chassis_uuid']) self.assertIsNone(result['chassis_uuid'])
def test_create_node_invalid_chassis(self): def test_create_node_invalid_chassis(self):
ndict = test_api_utils.post_get_test_node(chassis_uuid=0) ndict = test_api_utils.post_get_test_node(chassis_uuid=0)

View File

@@ -1792,7 +1792,7 @@ class TrySetBootDeviceTestCase(db_base.DbTestCase):
persistent=True) persistent=True)
node_set_boot_device_mock.assert_called_once_with( node_set_boot_device_mock.assert_called_once_with(
task, boot_devices.DISK, persistent=True) task, boot_devices.DISK, persistent=True)
log_mock.warning.assert_called_once_with(mock.ANY) log_mock.warning.assert_called_once_with(mock.ANY, self.node.uuid)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True) @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
def test_try_set_boot_device_ipmifailure_bios( def test_try_set_boot_device_ipmifailure_bios(

View File

@@ -652,11 +652,9 @@ class IPMIToolPrivateMethodTestCase(db_base.DbTestCase):
ipmi._parse_driver_info(node) ipmi._parse_driver_info(node)
calls = [ calls = [
mock.call(u'ipmi_username is not defined or empty for node ' mock.call(u'ipmi_username is not defined or empty for node '
u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123: NULL user will ' u'%s: NULL user will be utilized.', self.node.uuid),
u'be utilized.'),
mock.call(u'ipmi_password is not defined or empty for node ' mock.call(u'ipmi_password is not defined or empty for node '
u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123: NULL password ' u'%s: NULL password will be utilized.', self.node.uuid),
u'will be utilized.'),
] ]
mock_log.assert_has_calls(calls) mock_log.assert_has_calls(calls)

View File

@@ -62,18 +62,18 @@ class BaremetalBasicOps(baremetal_manager.BaremetalScenarioTest):
def verify_partition(self, client, label, mount, gib_size): def verify_partition(self, client, label, mount, gib_size):
"""Verify a labeled partition's mount point and size.""" """Verify a labeled partition's mount point and size."""
LOG.info("Looking for partition %s mounted on %s" % (label, mount)) LOG.info("Looking for partition %s mounted on %s", label, mount)
# Validate we have a device with the given partition label # Validate we have a device with the given partition label
cmd = "/sbin/blkid | grep '%s' | cut -d':' -f1" % label cmd = "/sbin/blkid | grep '%s' | cut -d':' -f1" % label
device = client.exec_command(cmd).rstrip('\n') device = client.exec_command(cmd).rstrip('\n')
LOG.debug("Partition device is %s" % device) LOG.debug("Partition device is %s", device)
self.assertNotEqual('', device) self.assertNotEqual('', device)
# Validate the mount point for the device # Validate the mount point for the device
cmd = "mount | grep '%s' | cut -d' ' -f3" % device cmd = "mount | grep '%s' | cut -d' ' -f3" % device
actual_mount = client.exec_command(cmd).rstrip('\n') actual_mount = client.exec_command(cmd).rstrip('\n')
LOG.debug("Partition mount point is %s" % actual_mount) LOG.debug("Partition mount point is %s", actual_mount)
self.assertEqual(actual_mount, mount) self.assertEqual(actual_mount, mount)
# Validate the partition size matches what we expect # Validate the partition size matches what we expect
@@ -83,7 +83,7 @@ class BaremetalBasicOps(baremetal_manager.BaremetalScenarioTest):
num_bytes = client.exec_command(cmd).rstrip('\n') num_bytes = client.exec_command(cmd).rstrip('\n')
num_bytes = int(num_bytes) * 512 num_bytes = int(num_bytes) * 512
actual_gib_size = num_bytes / (1024 * 1024 * 1024) actual_gib_size = num_bytes / (1024 * 1024 * 1024)
LOG.debug("Partition size is %d GiB" % actual_gib_size) LOG.debug("Partition size is %d GiB", actual_gib_size)
self.assertEqual(actual_gib_size, gib_size) self.assertEqual(actual_gib_size, gib_size)
def get_flavor_ephemeral_size(self): def get_flavor_ephemeral_size(self):

View File

@@ -1,7 +1,7 @@
# The order of packages is significant, because pip processes them in the order # The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration # of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later. # process, which may cause wedges in the gate later.
hacking<0.12,>=0.11.0 # Apache-2.0 hacking<0.13,>=0.12.0 # Apache-2.0
coverage>=4.0 # Apache-2.0 coverage>=4.0 # Apache-2.0
doc8 # Apache-2.0 doc8 # Apache-2.0
fixtures>=3.0.0 # Apache-2.0/BSD fixtures>=3.0.0 # Apache-2.0/BSD

View File

@@ -91,6 +91,10 @@ commands = {posargs}
ignore = E129 ignore = E129
exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build
max-complexity=17 max-complexity=17
# [H106] Don't put vim configuration in source files.
# [H203] Use assertIs(Not)None to check for None.
# [H904] Delay string interpolations at logging calls.
enable-extensions=H106,H203,H904
[hacking] [hacking]
import_exceptions = testtools.matchers, ironic.common.i18n import_exceptions = testtools.matchers, ironic.common.i18n