Add a hacking rule for string interpolation at logging

String interpolation should be delayed to be handled
by the logging code, rather than being done
at the point of the logging call.
Add the following hacking rule to enforce this:

- [N354] String interpolation should be delayed at logging calls.

See the oslo.i18n guidelines:

* http://docs.openstack.org/developer/oslo.i18n/guidelines.html
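
For illustration, the difference the rule enforces looks like this
(a minimal sketch using the stdlib logging module; the logger and
'value' names are placeholders, not part of this change):

    import logging

    LOG = logging.getLogger(__name__)
    value = 'example'

    # Eager interpolation: the message is built with '%' before
    # LOG.debug() is even called, whether or not DEBUG is enabled.
    LOG.debug('Processing %s' % value)

    # Delayed interpolation: the logging code substitutes 'value'
    # only if the record is actually emitted.
    LOG.debug('Processing %s', value)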

Change-Id: Ief6d3ee3539c0857098fffdb7acfeec3e0fed6eb
Closes-Bug: #1596829
Author: Takashi NATSUME
Date:   2016-06-28 16:42:31 +09:00
Parent: 3d7ff766a7
Commit: 4eb89c206e
24 changed files with 134 additions and 56 deletions


@@ -64,6 +64,7 @@ Nova Specific Commandments
 - [N351] Do not use the oslo_policy.policy.Enforcer.enforce() method.
 - [N352] LOG.warn is deprecated. Enforce use of LOG.warning.
 - [N353] Validate that context objects is not passed in logging calls.
+- [N354] String interpolation should be delayed at logging calls.
 
 Creating Unit Tests
 -------------------


@@ -406,7 +406,7 @@ class InstanceMetadata(object):
             else:
                 LOG.debug('Metadata for device with unknown bus %s '
                           'has not been included in the '
-                          'output' % device.bus.__class__.__name__)
+                          'output', device.bus.__class__.__name__)
                 continue
             if 'address' in device.bus:
                 address = device.bus.address
@@ -424,7 +424,7 @@ class InstanceMetadata(object):
             else:
                 LOG.debug('Metadata for device of unknown type %s has not '
                           'been included in the '
-                          'output' % device.__class__.__name__)
+                          'output', device.__class__.__name__)
                 continue
         device_metadata['bus'] = bus


@@ -1940,7 +1940,7 @@ class API(base.Base):
                     self.volume_api.delete(context, bdm.volume_id)
                 except Exception as exc:
                     err_str = _LW("Ignoring volume cleanup failure due to %s")
-                    LOG.warning(err_str % exc, instance=instance)
+                    LOG.warning(err_str, exc, instance=instance)
             bdm.destroy()
 
     def _local_delete(self, context, instance, bdms, delete_type, cb):


@@ -5177,7 +5177,7 @@ class ComputeManager(manager.Manager):
                                                        network_info,
                                                        disk,
                                                        migrate_data)
-        LOG.debug('driver pre_live_migration data is %s' % migrate_data)
+        LOG.debug('driver pre_live_migration data is %s', migrate_data)
 
         # NOTE(tr3buchet): setup networks on destination host
         self.network_api.setup_networks_on_host(context, instance,
@@ -6171,9 +6171,9 @@ class ComputeManager(manager.Manager):
                 # block entire periodic task thread
                 uuid = db_instance.uuid
                 if uuid in self._syncs_in_progress:
-                    LOG.debug('Sync already in progress for %s' % uuid)
+                    LOG.debug('Sync already in progress for %s', uuid)
                 else:
-                    LOG.debug('Triggering sync for uuid %s' % uuid)
+                    LOG.debug('Triggering sync for uuid %s', uuid)
                     self._syncs_in_progress[uuid] = True
                     self._sync_power_pool.spawn_n(_sync, db_instance)


@@ -105,6 +105,9 @@ doubled_words_re = re.compile(
     r"\b(then?|[iao]n|i[fst]|but|f?or|at|and|[dt]o)\s+\1\b")
 log_remove_context = re.compile(
     r"(.)*LOG\.(.*)\(.*(context=[_a-zA-Z0-9].*)+.*\)")
+log_string_interpolation = re.compile(r".*LOG\.(error|warning|info"
+                                      r"|critical|exception|debug)"
+                                      r"\([^,]*%[^,]*[,)]")
 
 
 class BaseASTChecker(ast.NodeVisitor):
@@ -794,6 +797,28 @@ def check_context_log(logical_line, physical_line, filename):
               "kwarg.")
 
 
+def check_delayed_string_interpolation(logical_line, physical_line, filename):
+    """Check whether string interpolation is delayed at logging calls
+
+    Not correct: LOG.debug('Example: %s' % 'bad')
+    Correct:     LOG.debug('Example: %s', 'good')
+
+    N354
+    """
+    if "nova/tests" in filename:
+        return
+
+    if pep8.noqa(physical_line):
+        return
+
+    if log_string_interpolation.match(logical_line):
+        yield(logical_line.index('%'),
+              "N354: String interpolation should be delayed to be "
+              "handled by the logging code, rather than being done "
+              "at the point of the logging call. "
+              "Use ',' instead of '%'.")
+
+
 def factory(register):
     register(import_no_db_in_virt)
     register(no_db_session_in_public_api)
@@ -834,3 +859,4 @@ def factory(register):
     register(no_log_warn)
     register(CheckForUncalledTestClosure)
     register(check_context_log)
+    register(check_delayed_string_interpolation)
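
Note that the '%s' placeholders inside a format string do not trip the
regex above: pep8 hands each check a logical line in which string
literal contents have been muted (replaced with 'x' characters), so the
only '%' that can survive is the interpolation operator. A standalone
sketch of that behaviour (illustrative only, not part of the change):

    import re

    # The same pattern as log_string_interpolation above.
    pattern = re.compile(r".*LOG\.(error|warning|info"
                         r"|critical|exception|debug)"
                         r"\([^,]*%[^,]*[,)]")

    # Logical lines as pep8 presents them, string contents muted:
    assert pattern.match("LOG.debug('xxx' % value)")       # flagged (N354)
    assert not pattern.match("LOG.debug('xxx', value)")    # delayed: clean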


@@ -833,7 +833,7 @@ class NetworkManager(manager.Manager):
             if address:
                 LOG.debug('Associating instance with specified fixed IP '
                           '%(address)s in network %(network)s on subnet '
-                          '%(cidr)s.' %
+                          '%(cidr)s.',
                           {'address': address, 'network': network['id'],
                            'cidr': network['cidr']},
                           instance=instance)
@@ -842,7 +842,7 @@
                                                vif_id=vif.id)
             else:
                 LOG.debug('Associating instance with fixed IP from pool '
-                          'in network %(network)s on subnet %(cidr)s.' %
+                          'in network %(network)s on subnet %(cidr)s.',
                           {'network': network['id'],
                            'cidr': network['cidr']},
                           instance=instance)
@@ -879,7 +879,7 @@
                     self.instance_dns_manager.delete_entry,
                     instance_id, self.instance_dns_domain))
-        LOG.debug('Setting up network %(network)s on host %(host)s.' %
+        LOG.debug('Setting up network %(network)s on host %(host)s.',
                   {'network': network['id'], 'host': self.host},
                   instance=instance)
         self._setup_network_on_host(context, network)


@@ -2352,7 +2352,7 @@ class API(base_api.NetworkAPI):
         if old_pci_devices and new_pci_devices:
             LOG.debug("Determining PCI devices mapping using migration"
                       "context: old_pci_devices: %(old)s, "
-                      "new_pci_devices: %(new)s" %
+                      "new_pci_devices: %(new)s",
                       {'old': [dev for dev in old_pci_devices],
                        'new': [dev for dev in new_pci_devices]})
             return {old.address: new


@@ -65,7 +65,7 @@ class BuildRequest(base.NovaObject):
                 jsonutils.loads(db_instance))
         except TypeError:
             LOG.debug('Failed to load instance from BuildRequest with uuid '
-                      '%s because it is None' % (self.instance_uuid))
+                      '%s because it is None', self.instance_uuid)
             raise exception.BuildRequestNotFound(uuid=self.instance_uuid)
         except ovoo_exc.IncompatibleObjectVersion as exc:
             # This should only happen if proper service upgrade strategies are


@@ -170,7 +170,7 @@ class LibvirtLiveMigrateData(LiveMigrateData):
             self.bdms.append(bdmi)
 
     def to_legacy_dict(self, pre_migration_result=False):
-        LOG.debug('Converting to legacy: %s' % self)
+        LOG.debug('Converting to legacy: %s', self)
         legacy = super(LibvirtLiveMigrateData, self).to_legacy_dict()
         keys = (set(self.fields.keys()) -
                 set(LiveMigrateData.fields.keys()) - {'bdms'})
@@ -193,11 +193,11 @@ class LibvirtLiveMigrateData(LiveMigrateData):
             legacy['pre_live_migration_result'] = live_result
             self._bdms_to_legacy(live_result)
 
-        LOG.debug('Legacy result: %s' % legacy)
+        LOG.debug('Legacy result: %s', legacy)
         return legacy
 
     def from_legacy_dict(self, legacy):
-        LOG.debug('Converting legacy dict to obj: %s' % legacy)
+        LOG.debug('Converting legacy dict to obj: %s', legacy)
         super(LibvirtLiveMigrateData, self).from_legacy_dict(legacy)
         keys = set(self.fields.keys()) - set(LiveMigrateData.fields.keys())
         for k in keys - {'bdms'}:
@@ -213,7 +213,7 @@ class LibvirtLiveMigrateData(LiveMigrateData):
             if 'serial_listen_addr' in pre_result:
                 self.serial_listen_addr = pre_result['serial_listen_addr']
             self._bdms_from_legacy(pre_result)
-        LOG.debug('Converted object: %s' % self)
+        LOG.debug('Converted object: %s', self)
 
     def is_on_shared_storage(self):
         return self.is_shared_block_storage or self.is_shared_instance_path


@@ -337,8 +337,8 @@ class SchedulerReportClient(object):
         new_gen = updated_inventories_result['resource_provider_generation']
         self._resource_providers[compute_node.uuid].generation = new_gen
-        LOG.debug('Updated inventory for %s at generation %i' % (
-            compute_node.uuid, new_gen))
+        LOG.debug('Updated inventory for %s at generation %i',
+                  compute_node.uuid, new_gen)
         return True
 
     @safe_connect


@@ -51,7 +51,7 @@ class AggregateImagePropertiesIsolation(filters.BaseHostFilter):
                 prop = image_props.get(key)
             except AttributeError:
                 LOG.warning(_LW("Host '%(host)s' has a metadata key '%(key)s' "
-                                "that is not present in the image metadata.") %
+                                "that is not present in the image metadata."),
                             {"host": host_state.host, "key": key})
                 continue


@@ -494,7 +494,7 @@ class HostManager(object):
             forced_hosts_str = ', '.join(hosts_to_force)
             msg = _LI("No hosts matched due to not matching "
                       "'force_hosts' value of '%s'")
-            LOG.info(msg % forced_hosts_str)
+            LOG.info(msg, forced_hosts_str)
 
     def _match_forced_nodes(host_map, nodes_to_force):
         forced_nodes = []
@@ -510,7 +510,7 @@
             forced_nodes_str = ', '.join(nodes_to_force)
             msg = _LI("No nodes matched due to not matching "
                       "'force_nodes' value of '%s'")
-            LOG.info(msg % forced_nodes_str)
+            LOG.info(msg, forced_nodes_str)
 
     def _get_hosts_matching_request(hosts, requested_destination):
         (host, node) = (requested_destination.host,
@@ -519,14 +519,14 @@
                            if x.host == host and x.nodename == node]
         if requested_nodes:
             LOG.info(_LI('Host filter only checking host %(host)s and '
-                         'node %(node)s') % {'host': host, 'node': node})
+                         'node %(node)s'), {'host': host, 'node': node})
         else:
             # NOTE(sbauza): The API level should prevent the user from
             # providing a wrong destination but let's make sure a wrong
             # destination doesn't trample the scheduler still.
             LOG.info(_LI('No hosts matched due to not matching requested '
-                         'destination (%(host)s, %(node)s)'
-                         ) % {'host': host, 'node': node})
+                         'destination (%(host)s, %(node)s)'),
+                     {'host': host, 'node': node})
         return iter(requested_nodes)
 
     ignore_hosts = spec_obj.ignore_hosts or []


@@ -63,7 +63,7 @@ class MemcachedDriver(base.Driver):
         key = "%(topic)s:%(host)s" % service_ref
         is_up = self.mc.get(str(key)) is not None
         if not is_up:
-            LOG.debug('Seems service %s is down' % key)
+            LOG.debug('Seems service %s is down', key)
 
         return is_up


@@ -788,3 +788,54 @@ class HackingTestCase(test.NoDBTestCase):
                        instance=instance)
                """
         self._assert_has_no_errors(code, checks.check_context_log)
+
+    def test_check_delayed_string_interpolation(self):
+        checker = checks.check_delayed_string_interpolation
+        code = """
+               msg_w = _LW('Test string (%s)')
+               msg_i = _LI('Test string (%s)')
+               value = 'test'
+
+               LOG.error(_LE("Test string (%s)") % value)
+               LOG.warning(msg_w % 'test%string')
+               LOG.info(msg_i %
+                        "test%string%info")
+               LOG.critical(
+                   _LC('Test string (%s)') % value,
+                   instance=instance)
+               LOG.exception(_LE(" 'Test quotation %s' \"Test\"") % 'test')
+               LOG.debug(' "Test quotation %s" \'Test\'' % "test")
+               LOG.debug('Tesing %(test)s' %
+                         {'test': ','.join(
+                             ['%s=%s' % (name, value)
+                              for name, value in test.items()])})
+               """
+        expected_errors = [(5, 34, 'N354'), (6, 18, 'N354'), (7, 15, 'N354'),
+                           (10, 28, 'N354'), (12, 49, 'N354'),
+                           (13, 40, 'N354'), (14, 28, 'N354')]
+        self._assert_has_errors(code, checker, expected_errors=expected_errors)
+        self._assert_has_no_errors(code, checker,
+                                   filename='nova/tests/unit/test_hacking.py')
+
+        code = """
+               msg_w = _LW('Test string (%s)')
+               msg_i = _LI('Test string (%s)')
+               value = 'test'
+
+               LOG.error(_LE("Test string (%s)"), value)
+               LOG.error(_LE("Test string (%s)") % value)  # noqa
+               LOG.warning(msg_w, 'test%string')
+               LOG.info(msg_i,
+                        "test%string%info")
+               LOG.critical(
+                   _LC('Test string (%s)'), value,
+                   instance=instance)
+               LOG.exception(_LE(" 'Test quotation %s' \"Test\""), 'test')
+               LOG.debug(' "Test quotation %s" \'Test\'', "test")
+               LOG.debug('Tesing %(test)s',
+                         {'test': ','.join(
+                             ['%s=%s' % (name, value)
+                              for name, value in test.items()])})
+               """
+        self._assert_has_no_errors(code, checker)


@@ -462,7 +462,7 @@ def teardown_container(container_dir, container_root_device=None):
                 utils.execute('qemu-nbd', '-d', container_root_device,
                               run_as_root=True)
             else:
-                LOG.debug('No release necessary for block device %s' %
+                LOG.debug('No release necessary for block device %s',
                           container_root_device)
     except Exception:
         LOG.exception(_LE('Failed to teardown container filesystem'))


@@ -762,7 +762,7 @@ class Host(object):
         xml = secret_conf.to_xml()
 
         try:
-            LOG.debug('Secret XML: %s' % xml)
+            LOG.debug('Secret XML: %s', xml)
             conn = self.get_connection()
             secret = conn.secretDefineXML(xml)
             if password is not None:


@@ -267,7 +267,7 @@ class Image(object):
             if size < base_size:
                 msg = _LE('%(base)s virtual size %(base_size)s '
                           'larger than flavor root disk size %(size)s')
-                LOG.error(msg % {'base': base,
+                LOG.error(msg, {'base': base,
                                  'base_size': base_size,
                                  'size': size})
                 raise exception.FlavorDiskSmallerThanImage(
@@ -871,7 +871,7 @@ class Rbd(Image):
                                                 include_locations=True)
         locations = image_meta['locations']
-        LOG.debug('Image locations are: %(locs)s' % {'locs': locations})
+        LOG.debug('Image locations are: %(locs)s', {'locs': locations})
 
         if image_meta.get('disk_format') not in ['raw', 'iso']:
             reason = _('Image is not raw format')


@@ -220,7 +220,7 @@ class RBDDriver(object):
         try:
             return self.exists(image, pool=pool, snapshot=snapshot)
         except rbd.Error as e:
-            LOG.debug('Unable to open image %(loc)s: %(err)s' %
+            LOG.debug('Unable to open image %(loc)s: %(err)s',
                       dict(loc=url, err=e))
             return False


@@ -343,18 +343,18 @@ class XenAPISession(object):
             task_ref = self.call_xenapi("task.create", name,
                                         desc)
             try:
-                LOG.debug('Created task %s with ref %s' % (name, task_ref))
+                LOG.debug('Created task %s with ref %s', name, task_ref)
                 yield task_ref
             finally:
                 self.call_xenapi("task.destroy", task_ref)
-                LOG.debug('Destroyed task ref %s' % (task_ref))
+                LOG.debug('Destroyed task ref %s', task_ref)
 
 
 @contextlib.contextmanager
 def http_connection(session):
     conn = None
 
     xs_url = urllib.parse.urlparse(session.url)
-    LOG.debug("Creating http(s) connection to %s" % session.url)
+    LOG.debug("Creating http(s) connection to %s", session.url)
 
     if xs_url.scheme == 'http':
         conn = http_client.HTTPConnection(xs_url.netloc)
     elif xs_url.scheme == 'https':


@@ -56,8 +56,8 @@ def invalid_option(option_name, recommended_value):
     LOG.exception(_LE('Current value of '
                       'CONF.xenserver.%(option)s option incompatible with '
                       'CONF.xenserver.independent_compute=True. '
-                      'Consider using "%(recommended)s"') % {
-                          'option': option_name,
+                      'Consider using "%(recommended)s"'),
+                  {'option': option_name,
                    'recommended': recommended_value})
     raise exception.NotSupportedWithOption(
         operation=option_name,


@@ -932,7 +932,7 @@ def try_auto_configure_disk(session, vdi_ref, new_gb):
         _auto_configure_disk(session, vdi_ref, new_gb)
     except exception.CannotResizeDisk as e:
         msg = _LW('Attempted auto_configure_disk failed because: %s')
-        LOG.warning(msg % e)
+        LOG.warning(msg, e)
 
 
 def _make_partition(session, dev, partition_start, partition_end):


@@ -363,8 +363,8 @@ def _stream_to_vdi(conn, vdi_import_path, file_size, file_obj):
                'Content-Length': '%s' % file_size}
 
     CHUNK_SIZE = 16 * 1024
 
-    LOG.debug('Initialising PUT request to %s (Headers: %s)' % (
-        vdi_import_path, headers))
+    LOG.debug('Initialising PUT request to %s (Headers: %s)',
+              vdi_import_path, headers)
     conn.request('PUT', vdi_import_path, headers=headers)
 
     remain_size = file_size
     while remain_size >= CHUNK_SIZE:


@@ -224,5 +224,5 @@ class VolumeOps(object):
             # Forget (i.e. disconnect) SR only if not in use
             volume_utils.purge_sr(self._session, sr_ref)
         except Exception:
-            LOG.debug('Ignoring error while purging sr: %s' % sr_ref,
+            LOG.debug('Ignoring error while purging sr: %s', sr_ref,
                       exc_info=True)


@@ -45,24 +45,24 @@ def delete_if_exists(path):
         os.unlink(path)
     except OSError, e:  # noqa
         if e.errno == errno.ENOENT:
-            LOG.warning("'%s' was already deleted, skipping delete" % path)
+            LOG.warning("'%s' was already deleted, skipping delete", path)
         else:
             raise
 
 
 def _link(src, dst):
-    LOG.info("Hard-linking file '%s' -> '%s'" % (src, dst))
+    LOG.info("Hard-linking file '%s' -> '%s'", src, dst)
     os.link(src, dst)
 
 
 def _rename(src, dst):
-    LOG.info("Renaming file '%s' -> '%s'" % (src, dst))
+    LOG.info("Renaming file '%s' -> '%s'", src, dst)
     try:
         os.rename(src, dst)
     except OSError, e:  # noqa
         if e.errno == errno.EXDEV:
             LOG.error("Invalid cross-device link. Perhaps %s and %s should "
-                      "be symlinked on the same filesystem?" % (src, dst))
+                      "be symlinked on the same filesystem?", src, dst)
             raise
@@ -70,7 +70,7 @@ def make_subprocess(cmdline, stdout=False, stderr=False, stdin=False,
                     universal_newlines=False, close_fds=True, env=None):
     """Make a subprocess according to the given command-line string
     """
-    LOG.info("Running cmd '%s'" % " ".join(cmdline))
+    LOG.info("Running cmd '%s'", " ".join(cmdline))
     kwargs = {}
     kwargs['stdout'] = stdout and subprocess.PIPE or None
     kwargs['stderr'] = stderr and subprocess.PIPE or None
@@ -109,7 +109,7 @@ def finish_subprocess(proc, cmdline, cmd_input=None, ok_exit_codes=None):
     ret = proc.returncode
     if ret not in ok_exit_codes:
         LOG.error("Command '%(cmdline)s' with process id '%(pid)s' expected "
-                  "return code in '%(ok)s' but got '%(rc)s': %(err)s" %
+                  "return code in '%(ok)s' but got '%(rc)s': %(err)s",
                   {'cmdline': cmdline, 'pid': proc.pid, 'ok': ok_exit_codes,
                    'rc': ret, 'err': err})
         raise SubprocessException(' '.join(cmdline), ret, out, err)
@@ -132,11 +132,11 @@ def run_command(cmd, cmd_input=None, ok_exit_codes=None):
 def try_kill_process(proc):
     """Sends the given process the SIGKILL signal."""
     pid = proc.pid
-    LOG.info("Killing process %s" % pid)
+    LOG.info("Killing process %s", pid)
     try:
         os.kill(pid, signal.SIGKILL)
     except Exception:
-        LOG.exception("Failed to kill %s" % pid)
+        LOG.exception("Failed to kill %s", pid)
 
 
 def make_staging_area(sr_path):
@@ -279,14 +279,14 @@ def _validate_vhd(vdi_path):
             extra = (" ensure source and destination host machines have "
                      "time set correctly")
 
-        LOG.info("VDI Error details: %s" % out)
+        LOG.info("VDI Error details: %s", out)
 
         raise Exception(
             "VDI '%(vdi_path)s' has an invalid %(part)s: '%(details)s'"
             "%(extra)s" % {'vdi_path': vdi_path, 'part': part,
                            'details': details, 'extra': extra})
 
-    LOG.info("VDI is valid: %s" % vdi_path)
+    LOG.info("VDI is valid: %s", vdi_path)
 
 
 def _validate_vdi_chain(vdi_path):
@@ -470,7 +470,7 @@ def extract_tarball(fileobj, path, callback=None):
         tar_pid = tar_proc.pid
         if returncode is not None:
             LOG.error("tar extract with process id '%(pid)s' "
-                      "exited early with '%(rc)s'" %
+                      "exited early with '%(rc)s'",
                       {'pid': tar_pid, 'rc': returncode})
             raise SubprocessException(
                 ' '.join(tar_cmd), returncode, "", "")
@@ -479,7 +479,7 @@ def extract_tarball(fileobj, path, callback=None):
             # no need to kill already dead process
             raise
     except Exception:
-        LOG.exception("Failed while sending data to tar pid: %s" % tar_pid)
+        LOG.exception("Failed while sending data to tar pid: %s", tar_pid)
         try_kill_process(tar_proc)
         raise