Browse Source

Add a hacking rule for string interpolation at logging

String interpolation should be delayed so that it is
handled by the logging code, rather than being done
at the point of the logging call.
So add the following hacking rule to enforce this.

- [N354] String interpolation should be delayed at logging calls.

See the oslo.i18n guidelines.

* http://docs.openstack.org/developer/oslo.i18n/guidelines.html

Change-Id: Ief6d3ee3539c0857098fffdb7acfeec3e0fed6eb
Closes-Bug: #1596829
tags/15.0.0.0b1
Takashi NATSUME 3 years ago
parent
commit
4eb89c206e

+ 1
- 0
HACKING.rst View File

@@ -64,6 +64,7 @@ Nova Specific Commandments
64 64
 - [N351] Do not use the oslo_policy.policy.Enforcer.enforce() method.
65 65
 - [N352] LOG.warn is deprecated. Enforce use of LOG.warning.
66 66
 - [N353] Validate that context objects is not passed in logging calls.
67
+- [N354] String interpolation should be delayed at logging calls.
67 68
 
68 69
 Creating Unit Tests
69 70
 -------------------

+ 2
- 2
nova/api/metadata/base.py View File

@@ -406,7 +406,7 @@ class InstanceMetadata(object):
406 406
                     else:
407 407
                         LOG.debug('Metadata for device with unknown bus %s '
408 408
                                   'has not been included in the '
409
-                                  'output' % device.bus.__class__.__name__)
409
+                                  'output', device.bus.__class__.__name__)
410 410
                         continue
411 411
                     if 'address' in device.bus:
412 412
                         address = device.bus.address
@@ -424,7 +424,7 @@ class InstanceMetadata(object):
424 424
                 else:
425 425
                     LOG.debug('Metadata for device of unknown type %s has not '
426 426
                               'been included in the '
427
-                              'output' % device.__class__.__name__)
427
+                              'output', device.__class__.__name__)
428 428
                     continue
429 429
 
430 430
                 device_metadata['bus'] = bus

+ 1
- 1
nova/compute/api.py View File

@@ -1940,7 +1940,7 @@ class API(base.Base):
1940 1940
                         self.volume_api.delete(context, bdm.volume_id)
1941 1941
                 except Exception as exc:
1942 1942
                     err_str = _LW("Ignoring volume cleanup failure due to %s")
1943
-                    LOG.warning(err_str % exc, instance=instance)
1943
+                    LOG.warning(err_str, exc, instance=instance)
1944 1944
             bdm.destroy()
1945 1945
 
1946 1946
     def _local_delete(self, context, instance, bdms, delete_type, cb):

+ 3
- 3
nova/compute/manager.py View File

@@ -5177,7 +5177,7 @@ class ComputeManager(manager.Manager):
5177 5177
                                        network_info,
5178 5178
                                        disk,
5179 5179
                                        migrate_data)
5180
-        LOG.debug('driver pre_live_migration data is %s' % migrate_data)
5180
+        LOG.debug('driver pre_live_migration data is %s', migrate_data)
5181 5181
 
5182 5182
         # NOTE(tr3buchet): setup networks on destination host
5183 5183
         self.network_api.setup_networks_on_host(context, instance,
@@ -6171,9 +6171,9 @@ class ComputeManager(manager.Manager):
6171 6171
             # block entire periodic task thread
6172 6172
             uuid = db_instance.uuid
6173 6173
             if uuid in self._syncs_in_progress:
6174
-                LOG.debug('Sync already in progress for %s' % uuid)
6174
+                LOG.debug('Sync already in progress for %s', uuid)
6175 6175
             else:
6176
-                LOG.debug('Triggering sync for uuid %s' % uuid)
6176
+                LOG.debug('Triggering sync for uuid %s', uuid)
6177 6177
                 self._syncs_in_progress[uuid] = True
6178 6178
                 self._sync_power_pool.spawn_n(_sync, db_instance)
6179 6179
 

+ 26
- 0
nova/hacking/checks.py View File

@@ -105,6 +105,9 @@ doubled_words_re = re.compile(
105 105
     r"\b(then?|[iao]n|i[fst]|but|f?or|at|and|[dt]o)\s+\1\b")
106 106
 log_remove_context = re.compile(
107 107
     r"(.)*LOG\.(.*)\(.*(context=[_a-zA-Z0-9].*)+.*\)")
108
+log_string_interpolation = re.compile(r".*LOG\.(error|warning|info"
109
+                                      r"|critical|exception|debug)"
110
+                                      r"\([^,]*%[^,]*[,)]")
108 111
 
109 112
 
110 113
 class BaseASTChecker(ast.NodeVisitor):
@@ -794,6 +797,28 @@ def check_context_log(logical_line, physical_line, filename):
794 797
               "kwarg.")
795 798
 
796 799
 
800
+def check_delayed_string_interpolation(logical_line, physical_line, filename):
801
+    """Check whether string interpolation is delayed at logging calls
802
+
803
+    Not correct: LOG.debug('Example: %s' % 'bad')
804
+    Correct:     LOG.debug('Example: %s', 'good')
805
+
806
+    N354
807
+    """
808
+    if "nova/tests" in filename:
809
+        return
810
+
811
+    if pep8.noqa(physical_line):
812
+        return
813
+
814
+    if log_string_interpolation.match(logical_line):
815
+        yield(logical_line.index('%'),
816
+              "N354: String interpolation should be delayed to be "
817
+              "handled by the logging code, rather than being done "
818
+              "at the point of the logging call. "
819
+              "Use ',' instead of '%'.")
820
+
821
+
797 822
 def factory(register):
798 823
     register(import_no_db_in_virt)
799 824
     register(no_db_session_in_public_api)
@@ -834,3 +859,4 @@ def factory(register):
834 859
     register(no_log_warn)
835 860
     register(CheckForUncalledTestClosure)
836 861
     register(check_context_log)
862
+    register(check_delayed_string_interpolation)

+ 3
- 3
nova/network/manager.py View File

@@ -833,7 +833,7 @@ class NetworkManager(manager.Manager):
833 833
                 if address:
834 834
                     LOG.debug('Associating instance with specified fixed IP '
835 835
                               '%(address)s in network %(network)s on subnet '
836
-                              '%(cidr)s.' %
836
+                              '%(cidr)s.',
837 837
                               {'address': address, 'network': network['id'],
838 838
                                'cidr': network['cidr']},
839 839
                               instance=instance)
@@ -842,7 +842,7 @@ class NetworkManager(manager.Manager):
842 842
                             vif_id=vif.id)
843 843
                 else:
844 844
                     LOG.debug('Associating instance with fixed IP from pool '
845
-                              'in network %(network)s on subnet %(cidr)s.' %
845
+                              'in network %(network)s on subnet %(cidr)s.',
846 846
                               {'network': network['id'],
847 847
                                'cidr': network['cidr']},
848 848
                               instance=instance)
@@ -879,7 +879,7 @@ class NetworkManager(manager.Manager):
879 879
                         self.instance_dns_manager.delete_entry,
880 880
                         instance_id, self.instance_dns_domain))
881 881
 
882
-            LOG.debug('Setting up network %(network)s on host %(host)s.' %
882
+            LOG.debug('Setting up network %(network)s on host %(host)s.',
883 883
                       {'network': network['id'], 'host': self.host},
884 884
                       instance=instance)
885 885
             self._setup_network_on_host(context, network)

+ 2
- 2
nova/network/neutronv2/api.py View File

@@ -2352,9 +2352,9 @@ class API(base_api.NetworkAPI):
2352 2352
         if old_pci_devices and new_pci_devices:
2353 2353
             LOG.debug("Determining PCI devices mapping using migration"
2354 2354
                       "context: old_pci_devices: %(old)s, "
2355
-                      "new_pci_devices: %(new)s" %
2355
+                      "new_pci_devices: %(new)s",
2356 2356
                       {'old': [dev for dev in old_pci_devices],
2357
-                      'new': [dev for dev in new_pci_devices]})
2357
+                       'new': [dev for dev in new_pci_devices]})
2358 2358
             return {old.address: new
2359 2359
                     for old in old_pci_devices
2360 2360
                         for new in new_pci_devices

+ 1
- 1
nova/objects/build_request.py View File

@@ -65,7 +65,7 @@ class BuildRequest(base.NovaObject):
65 65
                     jsonutils.loads(db_instance))
66 66
         except TypeError:
67 67
             LOG.debug('Failed to load instance from BuildRequest with uuid '
68
-                      '%s because it is None' % (self.instance_uuid))
68
+                      '%s because it is None', self.instance_uuid)
69 69
             raise exception.BuildRequestNotFound(uuid=self.instance_uuid)
70 70
         except ovoo_exc.IncompatibleObjectVersion as exc:
71 71
             # This should only happen if proper service upgrade strategies are

+ 4
- 4
nova/objects/migrate_data.py View File

@@ -170,7 +170,7 @@ class LibvirtLiveMigrateData(LiveMigrateData):
170 170
             self.bdms.append(bdmi)
171 171
 
172 172
     def to_legacy_dict(self, pre_migration_result=False):
173
-        LOG.debug('Converting to legacy: %s' % self)
173
+        LOG.debug('Converting to legacy: %s', self)
174 174
         legacy = super(LibvirtLiveMigrateData, self).to_legacy_dict()
175 175
         keys = (set(self.fields.keys()) -
176 176
                 set(LiveMigrateData.fields.keys()) - {'bdms'})
@@ -193,11 +193,11 @@ class LibvirtLiveMigrateData(LiveMigrateData):
193 193
             legacy['pre_live_migration_result'] = live_result
194 194
             self._bdms_to_legacy(live_result)
195 195
 
196
-        LOG.debug('Legacy result: %s' % legacy)
196
+        LOG.debug('Legacy result: %s', legacy)
197 197
         return legacy
198 198
 
199 199
     def from_legacy_dict(self, legacy):
200
-        LOG.debug('Converting legacy dict to obj: %s' % legacy)
200
+        LOG.debug('Converting legacy dict to obj: %s', legacy)
201 201
         super(LibvirtLiveMigrateData, self).from_legacy_dict(legacy)
202 202
         keys = set(self.fields.keys()) - set(LiveMigrateData.fields.keys())
203 203
         for k in keys - {'bdms'}:
@@ -213,7 +213,7 @@ class LibvirtLiveMigrateData(LiveMigrateData):
213 213
             if 'serial_listen_addr' in pre_result:
214 214
                 self.serial_listen_addr = pre_result['serial_listen_addr']
215 215
             self._bdms_from_legacy(pre_result)
216
-        LOG.debug('Converted object: %s' % self)
216
+        LOG.debug('Converted object: %s', self)
217 217
 
218 218
     def is_on_shared_storage(self):
219 219
         return self.is_shared_block_storage or self.is_shared_instance_path

+ 2
- 2
nova/scheduler/client/report.py View File

@@ -337,8 +337,8 @@ class SchedulerReportClient(object):
337 337
         new_gen = updated_inventories_result['resource_provider_generation']
338 338
 
339 339
         self._resource_providers[compute_node.uuid].generation = new_gen
340
-        LOG.debug('Updated inventory for %s at generation %i' % (
341
-            compute_node.uuid, new_gen))
340
+        LOG.debug('Updated inventory for %s at generation %i',
341
+                  compute_node.uuid, new_gen)
342 342
         return True
343 343
 
344 344
     @safe_connect

+ 2
- 2
nova/scheduler/filters/aggregate_image_properties_isolation.py View File

@@ -51,8 +51,8 @@ class AggregateImagePropertiesIsolation(filters.BaseHostFilter):
51 51
                 prop = image_props.get(key)
52 52
             except AttributeError:
53 53
                 LOG.warning(_LW("Host '%(host)s' has a metadata key '%(key)s' "
54
-                                "that is not present in the image metadata.") %
55
-                                {"host": host_state.host, "key": key})
54
+                                "that is not present in the image metadata."),
55
+                            {"host": host_state.host, "key": key})
56 56
                 continue
57 57
 
58 58
             # NOTE(sbauza): Aggregate metadata is only strings, we need to

+ 5
- 5
nova/scheduler/host_manager.py View File

@@ -494,7 +494,7 @@ class HostManager(object):
494 494
                 forced_hosts_str = ', '.join(hosts_to_force)
495 495
                 msg = _LI("No hosts matched due to not matching "
496 496
                           "'force_hosts' value of '%s'")
497
-            LOG.info(msg % forced_hosts_str)
497
+            LOG.info(msg, forced_hosts_str)
498 498
 
499 499
         def _match_forced_nodes(host_map, nodes_to_force):
500 500
             forced_nodes = []
@@ -510,7 +510,7 @@ class HostManager(object):
510 510
                 forced_nodes_str = ', '.join(nodes_to_force)
511 511
                 msg = _LI("No nodes matched due to not matching "
512 512
                           "'force_nodes' value of '%s'")
513
-            LOG.info(msg % forced_nodes_str)
513
+            LOG.info(msg, forced_nodes_str)
514 514
 
515 515
         def _get_hosts_matching_request(hosts, requested_destination):
516 516
             (host, node) = (requested_destination.host,
@@ -519,14 +519,14 @@ class HostManager(object):
519 519
                                if x.host == host and x.nodename == node]
520 520
             if requested_nodes:
521 521
                 LOG.info(_LI('Host filter only checking host %(host)s and '
522
-                             'node %(node)s') % {'host': host, 'node': node})
522
+                             'node %(node)s'), {'host': host, 'node': node})
523 523
             else:
524 524
                 # NOTE(sbauza): The API level should prevent the user from
525 525
                 # providing a wrong destination but let's make sure a wrong
526 526
                 # destination doesn't trample the scheduler still.
527 527
                 LOG.info(_LI('No hosts matched due to not matching requested '
528
-                             'destination (%(host)s, %(node)s)'
529
-                             ) % {'host': host, 'node': node})
528
+                             'destination (%(host)s, %(node)s)'),
529
+                         {'host': host, 'node': node})
530 530
             return iter(requested_nodes)
531 531
 
532 532
         ignore_hosts = spec_obj.ignore_hosts or []

+ 1
- 1
nova/servicegroup/drivers/mc.py View File

@@ -63,7 +63,7 @@ class MemcachedDriver(base.Driver):
63 63
         key = "%(topic)s:%(host)s" % service_ref
64 64
         is_up = self.mc.get(str(key)) is not None
65 65
         if not is_up:
66
-            LOG.debug('Seems service %s is down' % key)
66
+            LOG.debug('Seems service %s is down', key)
67 67
 
68 68
         return is_up
69 69
 

+ 51
- 0
nova/tests/unit/test_hacking.py View File

@@ -788,3 +788,54 @@ class HackingTestCase(test.NoDBTestCase):
788 788
                             instance=instance)
789 789
                """
790 790
         self._assert_has_no_errors(code, checks.check_context_log)
791
+
792
+    def test_check_delayed_string_interpolation(self):
793
+        checker = checks.check_delayed_string_interpolation
794
+        code = """
795
+               msg_w = _LW('Test string (%s)')
796
+               msg_i = _LI('Test string (%s)')
797
+               value = 'test'
798
+
799
+               LOG.error(_LE("Test string (%s)") % value)
800
+               LOG.warning(msg_w % 'test%string')
801
+               LOG.info(msg_i %
802
+                        "test%string%info")
803
+               LOG.critical(
804
+                   _LC('Test string (%s)') % value,
805
+                   instance=instance)
806
+               LOG.exception(_LE(" 'Test quotation %s' \"Test\"") % 'test')
807
+               LOG.debug(' "Test quotation %s" \'Test\'' % "test")
808
+               LOG.debug('Tesing %(test)s' %
809
+                         {'test': ','.join(
810
+                             ['%s=%s' % (name, value)
811
+                              for name, value in test.items()])})
812
+               """
813
+
814
+        expected_errors = [(5, 34, 'N354'), (6, 18, 'N354'), (7, 15, 'N354'),
815
+                           (10, 28, 'N354'), (12, 49, 'N354'),
816
+                           (13, 40, 'N354'), (14, 28, 'N354')]
817
+        self._assert_has_errors(code, checker, expected_errors=expected_errors)
818
+        self._assert_has_no_errors(code, checker,
819
+                                   filename='nova/tests/unit/test_hacking.py')
820
+
821
+        code = """
822
+               msg_w = _LW('Test string (%s)')
823
+               msg_i = _LI('Test string (%s)')
824
+               value = 'test'
825
+
826
+               LOG.error(_LE("Test string (%s)"), value)
827
+               LOG.error(_LE("Test string (%s)") % value) # noqa
828
+               LOG.warning(msg_w, 'test%string')
829
+               LOG.info(msg_i,
830
+                        "test%string%info")
831
+               LOG.critical(
832
+                   _LC('Test string (%s)'), value,
833
+                   instance=instance)
834
+               LOG.exception(_LE(" 'Test quotation %s' \"Test\""), 'test')
835
+               LOG.debug(' "Test quotation %s" \'Test\'', "test")
836
+               LOG.debug('Tesing %(test)s',
837
+                         {'test': ','.join(
838
+                             ['%s=%s' % (name, value)
839
+                              for name, value in test.items()])})
840
+               """
841
+        self._assert_has_no_errors(code, checker)

+ 1
- 1
nova/virt/disk/api.py View File

@@ -462,7 +462,7 @@ def teardown_container(container_dir, container_root_device=None):
462 462
                 utils.execute('qemu-nbd', '-d', container_root_device,
463 463
                               run_as_root=True)
464 464
             else:
465
-                LOG.debug('No release necessary for block device %s' %
465
+                LOG.debug('No release necessary for block device %s',
466 466
                           container_root_device)
467 467
     except Exception:
468 468
         LOG.exception(_LE('Failed to teardown container filesystem'))

+ 1
- 1
nova/virt/libvirt/host.py View File

@@ -762,7 +762,7 @@ class Host(object):
762 762
 
763 763
         xml = secret_conf.to_xml()
764 764
         try:
765
-            LOG.debug('Secret XML: %s' % xml)
765
+            LOG.debug('Secret XML: %s', xml)
766 766
             conn = self.get_connection()
767 767
             secret = conn.secretDefineXML(xml)
768 768
             if password is not None:

+ 4
- 4
nova/virt/libvirt/imagebackend.py View File

@@ -267,9 +267,9 @@ class Image(object):
267 267
         if size < base_size:
268 268
             msg = _LE('%(base)s virtual size %(base_size)s '
269 269
                       'larger than flavor root disk size %(size)s')
270
-            LOG.error(msg % {'base': base,
271
-                              'base_size': base_size,
272
-                              'size': size})
270
+            LOG.error(msg, {'base': base,
271
+                            'base_size': base_size,
272
+                            'size': size})
273 273
             raise exception.FlavorDiskSmallerThanImage(
274 274
                 flavor_size=size, image_size=base_size)
275 275
 
@@ -871,7 +871,7 @@ class Rbd(Image):
871 871
                                    include_locations=True)
872 872
         locations = image_meta['locations']
873 873
 
874
-        LOG.debug('Image locations are: %(locs)s' % {'locs': locations})
874
+        LOG.debug('Image locations are: %(locs)s', {'locs': locations})
875 875
 
876 876
         if image_meta.get('disk_format') not in ['raw', 'iso']:
877 877
             reason = _('Image is not raw format')

+ 1
- 1
nova/virt/libvirt/storage/rbd_utils.py View File

@@ -220,7 +220,7 @@ class RBDDriver(object):
220 220
         try:
221 221
             return self.exists(image, pool=pool, snapshot=snapshot)
222 222
         except rbd.Error as e:
223
-            LOG.debug('Unable to open image %(loc)s: %(err)s' %
223
+            LOG.debug('Unable to open image %(loc)s: %(err)s',
224 224
                       dict(loc=url, err=e))
225 225
             return False
226 226
 

+ 3
- 3
nova/virt/xenapi/client/session.py View File

@@ -343,18 +343,18 @@ class XenAPISession(object):
343 343
         task_ref = self.call_xenapi("task.create", name,
344 344
                                        desc)
345 345
         try:
346
-            LOG.debug('Created task %s with ref %s' % (name, task_ref))
346
+            LOG.debug('Created task %s with ref %s', name, task_ref)
347 347
             yield task_ref
348 348
         finally:
349 349
             self.call_xenapi("task.destroy", task_ref)
350
-            LOG.debug('Destroyed task ref %s' % (task_ref))
350
+            LOG.debug('Destroyed task ref %s', task_ref)
351 351
 
352 352
     @contextlib.contextmanager
353 353
     def http_connection(session):
354 354
         conn = None
355 355
 
356 356
         xs_url = urllib.parse.urlparse(session.url)
357
-        LOG.debug("Creating http(s) connection to %s" % session.url)
357
+        LOG.debug("Creating http(s) connection to %s", session.url)
358 358
         if xs_url.scheme == 'http':
359 359
             conn = http_client.HTTPConnection(xs_url.netloc)
360 360
         elif xs_url.scheme == 'https':

+ 3
- 3
nova/virt/xenapi/driver.py View File

@@ -56,9 +56,9 @@ def invalid_option(option_name, recommended_value):
56 56
     LOG.exception(_LE('Current value of '
57 57
                       'CONF.xenserver.%(option)s option incompatible with '
58 58
                       'CONF.xenserver.independent_compute=True.  '
59
-                      'Consider using "%(recommended)s"') % {
60
-                          'option': option_name,
61
-                          'recommended': recommended_value})
59
+                      'Consider using "%(recommended)s"'),
60
+                  {'option': option_name,
61
+                   'recommended': recommended_value})
62 62
     raise exception.NotSupportedWithOption(
63 63
         operation=option_name,
64 64
         option='CONF.xenserver.independent_compute')

+ 1
- 1
nova/virt/xenapi/vm_utils.py View File

@@ -932,7 +932,7 @@ def try_auto_configure_disk(session, vdi_ref, new_gb):
932 932
         _auto_configure_disk(session, vdi_ref, new_gb)
933 933
     except exception.CannotResizeDisk as e:
934 934
         msg = _LW('Attempted auto_configure_disk failed because: %s')
935
-        LOG.warning(msg % e)
935
+        LOG.warning(msg, e)
936 936
 
937 937
 
938 938
 def _make_partition(session, dev, partition_start, partition_end):

+ 2
- 2
nova/virt/xenapi/volume_utils.py View File

@@ -363,8 +363,8 @@ def _stream_to_vdi(conn, vdi_import_path, file_size, file_obj):
363 363
                'Content-Length': '%s' % file_size}
364 364
 
365 365
     CHUNK_SIZE = 16 * 1024
366
-    LOG.debug('Initialising PUT request to %s (Headers: %s)' % (
367
-        vdi_import_path, headers))
366
+    LOG.debug('Initialising PUT request to %s (Headers: %s)',
367
+              vdi_import_path, headers)
368 368
     conn.request('PUT', vdi_import_path, headers=headers)
369 369
     remain_size = file_size
370 370
     while remain_size >= CHUNK_SIZE:

+ 2
- 2
nova/virt/xenapi/volumeops.py View File

@@ -224,5 +224,5 @@ class VolumeOps(object):
224 224
                 # Forget (i.e. disconnect) SR only if not in use
225 225
                 volume_utils.purge_sr(self._session, sr_ref)
226 226
             except Exception:
227
-                LOG.debug('Ignoring error while purging sr: %s' % sr_ref,
228
-                        exc_info=True)
227
+                LOG.debug('Ignoring error while purging sr: %s', sr_ref,
228
+                          exc_info=True)

+ 12
- 12
plugins/xenserver/xenapi/etc/xapi.d/plugins/utils.py View File

@@ -45,24 +45,24 @@ def delete_if_exists(path):
45 45
         os.unlink(path)
46 46
     except OSError, e:  # noqa
47 47
         if e.errno == errno.ENOENT:
48
-            LOG.warning("'%s' was already deleted, skipping delete" % path)
48
+            LOG.warning("'%s' was already deleted, skipping delete", path)
49 49
         else:
50 50
             raise
51 51
 
52 52
 
53 53
 def _link(src, dst):
54
-    LOG.info("Hard-linking file '%s' -> '%s'" % (src, dst))
54
+    LOG.info("Hard-linking file '%s' -> '%s'", src, dst)
55 55
     os.link(src, dst)
56 56
 
57 57
 
58 58
 def _rename(src, dst):
59
-    LOG.info("Renaming file '%s' -> '%s'" % (src, dst))
59
+    LOG.info("Renaming file '%s' -> '%s'", src, dst)
60 60
     try:
61 61
         os.rename(src, dst)
62 62
     except OSError, e:  # noqa
63 63
         if e.errno == errno.EXDEV:
64 64
             LOG.error("Invalid cross-device link.  Perhaps %s and %s should "
65
-                      "be symlinked on the same filesystem?" % (src, dst))
65
+                      "be symlinked on the same filesystem?", src, dst)
66 66
         raise
67 67
 
68 68
 
@@ -70,7 +70,7 @@ def make_subprocess(cmdline, stdout=False, stderr=False, stdin=False,
70 70
                     universal_newlines=False, close_fds=True, env=None):
71 71
     """Make a subprocess according to the given command-line string
72 72
     """
73
-    LOG.info("Running cmd '%s'" % " ".join(cmdline))
73
+    LOG.info("Running cmd '%s'", " ".join(cmdline))
74 74
     kwargs = {}
75 75
     kwargs['stdout'] = stdout and subprocess.PIPE or None
76 76
     kwargs['stderr'] = stderr and subprocess.PIPE or None
@@ -109,7 +109,7 @@ def finish_subprocess(proc, cmdline, cmd_input=None, ok_exit_codes=None):
109 109
     ret = proc.returncode
110 110
     if ret not in ok_exit_codes:
111 111
         LOG.error("Command '%(cmdline)s' with process id '%(pid)s' expected "
112
-                  "return code in '%(ok)s' but got '%(rc)s': %(err)s" %
112
+                  "return code in '%(ok)s' but got '%(rc)s': %(err)s",
113 113
                   {'cmdline': cmdline, 'pid': proc.pid, 'ok': ok_exit_codes,
114 114
                    'rc': ret, 'err': err})
115 115
         raise SubprocessException(' '.join(cmdline), ret, out, err)
@@ -132,11 +132,11 @@ def run_command(cmd, cmd_input=None, ok_exit_codes=None):
132 132
 def try_kill_process(proc):
133 133
     """Sends the given process the SIGKILL signal."""
134 134
     pid = proc.pid
135
-    LOG.info("Killing process %s" % pid)
135
+    LOG.info("Killing process %s", pid)
136 136
     try:
137 137
         os.kill(pid, signal.SIGKILL)
138 138
     except Exception:
139
-        LOG.exception("Failed to kill %s" % pid)
139
+        LOG.exception("Failed to kill %s", pid)
140 140
 
141 141
 
142 142
 def make_staging_area(sr_path):
@@ -279,14 +279,14 @@ def _validate_vhd(vdi_path):
279 279
             extra = (" ensure source and destination host machines have "
280 280
                      "time set correctly")
281 281
 
282
-        LOG.info("VDI Error details: %s" % out)
282
+        LOG.info("VDI Error details: %s", out)
283 283
 
284 284
         raise Exception(
285 285
             "VDI '%(vdi_path)s' has an invalid %(part)s: '%(details)s'"
286 286
             "%(extra)s" % {'vdi_path': vdi_path, 'part': part,
287 287
                            'details': details, 'extra': extra})
288 288
 
289
-    LOG.info("VDI is valid: %s" % vdi_path)
289
+    LOG.info("VDI is valid: %s", vdi_path)
290 290
 
291 291
 
292 292
 def _validate_vdi_chain(vdi_path):
@@ -470,7 +470,7 @@ def extract_tarball(fileobj, path, callback=None):
470 470
             tar_pid = tar_proc.pid
471 471
             if returncode is not None:
472 472
                 LOG.error("tar extract with process id '%(pid)s' "
473
-                          "exited early with '%(rc)s'" %
473
+                          "exited early with '%(rc)s'",
474 474
                           {'pid': tar_pid, 'rc': returncode})
475 475
                 raise SubprocessException(
476 476
                     ' '.join(tar_cmd), returncode, "", "")
@@ -479,7 +479,7 @@ def extract_tarball(fileobj, path, callback=None):
479 479
         # no need to kill already dead process
480 480
         raise
481 481
     except Exception:
482
-        LOG.exception("Failed while sending data to tar pid: %s" % tar_pid)
482
+        LOG.exception("Failed while sending data to tar pid: %s", tar_pid)
483 483
         try_kill_process(tar_proc)
484 484
         raise
485 485
 

Loading…
Cancel
Save