
Add a hacking rule for string interpolation in logging

String interpolation should be delayed so that it is handled by
the logging code, rather than being done at the point of the
logging call.
See the oslo.i18n guidelines:
* https://docs.openstack.org/oslo.i18n/latest/user/guidelines.html#adding-variables-to-log-messages
and
* https://github.com/openstack-dev/hacking/blob/master/hacking/checks/other.py#L39
Closes-Bug: #1596829

Change-Id: Ibba5791669c137be1483805db657beb907030227
tags/1.9.0
ForestLee, commit f607ae8ec0
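
For illustration only (this sketch is not part of the commit, and the
values below are hypothetical), the difference the H904 rule enforces
looks like this:

    import logging

    LOG = logging.getLogger(__name__)
    retry = 3
    volume_id, host_name = "vol-1", "node-1"

    # Flagged by H904: the message is formatted eagerly, even when
    # DEBUG is disabled and the record would be discarded.
    LOG.debug("retry count: %s" % retry)

    # Preferred: pass the arguments separately, so the logging code
    # interpolates only if the record is actually emitted.
    LOG.debug("retry count: %s", retry)

    # For several values, use separate arguments or a dict of named
    # placeholders. Note that a single tuple argument does not work:
    # "%s %s" % ((a, b),) raises a formatting error at emit time.
    LOG.debug("Volume %(volume)s found on host '%(host)s'.",
              {'volume': volume_id, 'host': host_name})

This is also why the converted call sites below pass multiple values
either as separate arguments or as a dict, never as a single tuple.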

--- a/tox.ini
+++ b/tox.ini
@@ -55,7 +55,7 @@ filename = *.py,app.wsgi
 show-source=True
 ignore= H105,E123,E226,N320,H202
 builtins= _
-enable-extensions = H106,H203
+enable-extensions = H106,H203,H904
 exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/,releasenotes
 
 [testenv:wheel]

--- a/watcher/applier/actions/migration.py
+++ b/watcher/applier/actions/migration.py
@@ -113,8 +113,10 @@ class Migrate(base.BaseAction):
                                                 dest_hostname=destination)
         except nova_helper.nvexceptions.ClientException as e:
             LOG.debug("Nova client exception occurred while live "
-                      "migrating instance %s.Exception: %s" %
-                      (self.instance_uuid, e))
+                      "migrating instance "
+                      "%(instance)s. Exception: %(exception)s",
+                      {'instance': self.instance_uuid, 'exception': e})
+
         except Exception as e:
             LOG.exception(e)
             LOG.critical("Unexpected error occurred. Migration failed for "

--- a/watcher/cmd/api.py
+++ b/watcher/cmd/api.py
@@ -40,10 +40,10 @@ def main():
 
     if host == '127.0.0.1':
         LOG.info('serving on 127.0.0.1:%(port)s, '
-                 'view at %(protocol)s://127.0.0.1:%(port)s' %
+                 'view at %(protocol)s://127.0.0.1:%(port)s',
                  dict(protocol=protocol, port=port))
     else:
-        LOG.info('serving on %(protocol)s://%(host)s:%(port)s' %
+        LOG.info('serving on %(protocol)s://%(host)s:%(port)s',
                  dict(protocol=protocol, host=host, port=port))
 
     api_schedule = scheduling.APISchedulingService()

--- a/watcher/common/cinder_helper.py
+++ b/watcher/common/cinder_helper.py
@@ -139,13 +139,13 @@ class CinderHelper(object):
             volume = self.get_volume(volume.id)
             time.sleep(retry_interval)
             retry -= 1
-            LOG.debug("retry count: %s" % retry)
-            LOG.debug("Waiting to complete deletion of volume %s" % volume.id)
+            LOG.debug("retry count: %s", retry)
+            LOG.debug("Waiting to complete deletion of volume %s", volume.id)
         if self._can_get_volume(volume.id):
-            LOG.error("Volume deletion error: %s" % volume.id)
+            LOG.error("Volume deletion error: %s", volume.id)
             return False
 
-        LOG.debug("Volume %s was deleted successfully." % volume.id)
+        LOG.debug("Volume %s was deleted successfully.", volume.id)
         return True
 
     def check_migrated(self, volume, retry_interval=10):
@@ -179,8 +179,7 @@ class CinderHelper(object):
             LOG.error(error_msg)
             return False
         LOG.debug(
-            "Volume migration succeeded : "
-            "volume %s is now on host '%s'." % (
-                volume.id, host_name))
+            "Volume migration succeeded : volume %s is now on host '%s'.",
+            volume.id, host_name)
         return True
 
@@ -194,8 +193,8 @@ class CinderHelper(object):
                 message=(_("Volume type must be same for migrating")))
 
         source_node = getattr(volume, 'os-vol-host-attr:host')
-        LOG.debug("Volume %s found on host '%s'."
-                  % (volume.id, source_node))
+        LOG.debug("Volume %s found on host '%s'.",
+                  volume.id, source_node)
 
         self.cinder.volumes.migrate_volume(
             volume, dest_node, False, True)
@@ -211,8 +210,8 @@ class CinderHelper(object):
 
         source_node = getattr(volume, 'os-vol-host-attr:host')
         LOG.debug(
-            "Volume %s found on host '%s'." % (
-                volume.id, source_node))
+            "Volume %s found on host '%s'.",
+            volume.id, source_node)
 
         self.cinder.volumes.retype(
             volume, dest_type, "on-demand")
@@ -234,14 +233,14 @@ class CinderHelper(object):
             LOG.debug('Waiting volume creation of {0}'.format(new_volume))
             time.sleep(retry_interval)
             retry -= 1
-            LOG.debug("retry count: %s" % retry)
+            LOG.debug("retry count: %s", retry)
 
         if getattr(new_volume, 'status') != 'available':
             error_msg = (_("Failed to create volume '%(volume)s. ") %
                          {'volume': new_volume.id})
             raise Exception(error_msg)
 
-        LOG.debug("Volume %s was created successfully." % new_volume)
+        LOG.debug("Volume %s was created successfully.", new_volume)
         return new_volume
 
     def delete_volume(self, volume):

--- a/watcher/common/nova_helper.py
+++ b/watcher/common/nova_helper.py
@@ -106,7 +106,7 @@ class NovaHelper(object):
             return True
         else:
             LOG.debug("confirm resize failed for the "
-                      "instance %s" % instance.id)
+                      "instance %s", instance.id)
             return False
 
     def wait_for_volume_status(self, volume, status, timeout=60,
@@ -154,19 +154,20 @@ class NovaHelper(object):
         """
         new_image_name = ""
         LOG.debug(
-            "Trying a non-live migrate of instance '%s' " % instance_id)
+            "Trying a non-live migrate of instance '%s' ", instance_id)
 
         # Looking for the instance to migrate
         instance = self.find_instance(instance_id)
         if not instance:
-            LOG.debug("Instance %s not found !" % instance_id)
+            LOG.debug("Instance %s not found !", instance_id)
             return False
         else:
             # NOTE: If destination node is None call Nova API to migrate
             # instance
             host_name = getattr(instance, "OS-EXT-SRV-ATTR:host")
             LOG.debug(
-                "Instance %s found on host '%s'." % (instance_id, host_name))
+                "Instance %(instance)s found on host '%(host)s'.",
+                {'instance': instance_id, 'host': host_name})
 
             if dest_hostname is None:
                 previous_status = getattr(instance, 'status')
@@ -186,12 +187,12 @@ class NovaHelper(object):
                         return False
                     LOG.debug(
                         "cold migration succeeded : "
-                        "instance %s is now on host '%s'." % (
-                            instance_id, new_hostname))
+                        "instance %s is now on host '%s'.",
+                        instance_id, new_hostname)
                     return True
                 else:
                     LOG.debug(
-                        "cold migration for instance %s failed" % instance_id)
+                        "cold migration for instance %s failed", instance_id)
                     return False
 
             if not keep_original_image_name:
@@ -220,7 +221,7 @@ class NovaHelper(object):
 
             for network_name, network_conf_obj in addresses.items():
                 LOG.debug(
-                    "Extracting network configuration for network '%s'" %
+                    "Extracting network configuration for network '%s'",
                     network_name)
 
                 network_names_list.append(network_name)
@@ -241,7 +242,7 @@ class NovaHelper(object):
             stopped_ok = self.stop_instance(instance_id)
 
             if not stopped_ok:
-                LOG.debug("Could not stop instance: %s" % instance_id)
+                LOG.debug("Could not stop instance: %s", instance_id)
                 return False
 
             # Building the temporary image which will be used
@@ -251,7 +252,7 @@ class NovaHelper(object):
 
             if not image_uuid:
                 LOG.debug(
-                    "Could not build temporary image of instance: %s" %
+                    "Could not build temporary image of instance: %s",
                     instance_id)
                 return False
 
@@ -299,8 +300,10 @@ class NovaHelper(object):
                     blocks.append(
                         block_device_mapping_v2_item)
 
-                    LOG.debug("Detaching volume %s from instance: %s" % (
-                        volume_id, instance_id))
+                    LOG.debug(
+                        "Detaching volume %(volume)s from "
+                        "instance: %(instance)s",
+                        {'volume': volume_id, 'instance': instance_id})
                     # volume.detach()
                     self.nova.volumes.delete_server_volume(instance_id,
                                                            volume_id)
@@ -308,11 +311,12 @@ class NovaHelper(object):
                     if not self.wait_for_volume_status(volume, "available", 5,
                                                        10):
                         LOG.debug(
-                            "Could not detach volume %s from instance: %s" % (
-                                volume_id, instance_id))
+                            "Could not detach volume %(volume)s "
+                            "from instance: %(instance)s",
+                            {'volume': volume_id, 'instance': instance_id})
                         return False
                 except ciexceptions.NotFound:
-                    LOG.debug("Volume '%s' not found " % image_id)
+                    LOG.debug("Volume '%s' not found ", image_id)
                     return False
 
             # We create the new instance from
@@ -331,18 +335,21 @@ class NovaHelper(object):
             if not new_instance:
                 LOG.debug(
                     "Could not create new instance "
-                    "for non-live migration of instance %s" % instance_id)
+                    "for non-live migration of instance %s", instance_id)
                 return False
 
             try:
-                LOG.debug("Detaching floating ip '%s' from instance %s" % (
-                    floating_ip, instance_id))
+                LOG.debug(
+                    "Detaching floating ip '%(floating_ip)s' "
+                    "from instance %(instance)s",
+                    {'floating_ip': floating_ip, 'instance': instance_id})
                 # We detach the floating ip from the current instance
                 instance.remove_floating_ip(floating_ip)
 
                 LOG.debug(
-                    "Attaching floating ip '%s' to the new instance %s" % (
-                        floating_ip, new_instance.id))
+                    "Attaching floating ip '%(ip)s' to the new "
+                    "instance %(id)s",
+                    {'ip': floating_ip, 'id': new_instance.id})
 
                 # We attach the same floating ip to the new instance
                 new_instance.add_floating_ip(floating_ip)
@@ -354,12 +361,12 @@ class NovaHelper(object):
             # Deleting the old instance (because no more useful)
             delete_ok = self.delete_instance(instance_id)
             if not delete_ok:
-                LOG.debug("Could not delete instance: %s" % instance_id)
+                LOG.debug("Could not delete instance: %s", instance_id)
                 return False
 
             LOG.debug(
                 "Instance %s has been successfully migrated "
-                "to new host '%s' and its new id is %s." % (
-                    instance_id, new_host_name, new_instance.id))
+                "to new host '%s' and its new id is %s.",
+                instance_id, new_host_name, new_instance.id)
 
             return True
@@ -376,8 +383,10 @@ class NovaHelper(object):
         :param instance_id: the unique id of the instance to resize.
         :param flavor: the name or ID of the flavor to resize to.
         """
-        LOG.debug("Trying a resize of instance %s to flavor '%s'" % (
-            instance_id, flavor))
+        LOG.debug(
+            "Trying a resize of instance %(instance)s to "
+            "flavor '%(flavor)s'",
+            {'instance': instance_id, 'flavor': flavor})
 
         # Looking for the instance to resize
         instance = self.find_instance(instance_id)
@@ -394,17 +403,17 @@ class NovaHelper(object):
                       "instance %s. Exception: %s", instance_id, e)
 
         if not flavor_id:
-            LOG.debug("Flavor not found: %s" % flavor)
+            LOG.debug("Flavor not found: %s", flavor)
             return False
 
         if not instance:
-            LOG.debug("Instance not found: %s" % instance_id)
+            LOG.debug("Instance not found: %s", instance_id)
             return False
 
         instance_status = getattr(instance, 'OS-EXT-STS:vm_state')
         LOG.debug(
-            "Instance %s is in '%s' status." % (instance_id,
-                                                instance_status))
+            "Instance %(id)s is in '%(status)s' status.",
+            {'id': instance_id, 'status': instance_status})
 
         instance.resize(flavor=flavor_id)
         while getattr(instance,
442 451
                               destination_node is None, nova scheduler choose
443 452
                               the destination host
444 453
         """
445
-        LOG.debug("Trying to live migrate instance %s " % (instance_id))
454
+        LOG.debug(
455
+            "Trying a live migrate instance %(instance)s ",
456
+            {'instance': instance_id})
446 457
 
447 458
         # Looking for the instance to migrate
448 459
         instance = self.find_instance(instance_id)
449 460
         if not instance:
450
-            LOG.debug("Instance not found: %s" % instance_id)
461
+            LOG.debug("Instance not found: %s", instance_id)
451 462
             return False
452 463
         else:
453 464
             host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
454 465
             LOG.debug(
455
-                "Instance %s found on host '%s'." % (instance_id, host_name))
466
+                "Instance %(instance)s found on host '%(host)s'.",
467
+                {'instance': instance_id, 'host': host_name})
456 468
 
457 469
             # From nova api version 2.25(Mitaka release), the default value of
458 470
             # block_migration is None which is mapped to 'auto'.
@@ -474,7 +486,7 @@ class NovaHelper(object):
474 486
                 if host_name != new_hostname and instance.status == 'ACTIVE':
475 487
                     LOG.debug(
476 488
                         "Live migration succeeded : "
477
-                        "instance %s is now on host '%s'." % (
489
+                        "instance %s is now on host '%s'.", (
478 490
                             instance_id, new_hostname))
479 491
                     return True
480 492
                 else:
@@ -485,7 +497,7 @@ class NovaHelper(object):
485 497
                     and retry:
486 498
                 instance = self.nova.servers.get(instance.id)
487 499
                 if not getattr(instance, 'OS-EXT-STS:task_state'):
488
-                    LOG.debug("Instance task state: %s is null" % instance_id)
500
+                    LOG.debug("Instance task state: %s is null", instance_id)
489 501
                     break
490 502
                 LOG.debug(
491 503
                     'Waiting the migration of {0}  to {1}'.format(
@@ -501,13 +513,13 @@ class NovaHelper(object):
501 513
 
502 514
             LOG.debug(
503 515
                 "Live migration succeeded : "
504
-                "instance %s is now on host '%s'." % (
505
-                    instance_id, host_name))
516
+                "instance %(instance)s is now on host '%(host)s'.",
517
+                {'instance': instance_id, 'host': host_name})
506 518
 
507 519
             return True
508 520
 
509 521
     def abort_live_migrate(self, instance_id, source, destination, retry=240):
510
-        LOG.debug("Aborting live migration of instance %s" % instance_id)
522
+        LOG.debug("Aborting live migration of instance %s", instance_id)
511 523
         migration = self.get_running_migration(instance_id)
512 524
         if migration:
513 525
             migration_id = getattr(migration[0], "id")
@@ -520,7 +532,7 @@ class NovaHelper(object):
520 532
                 LOG.exception(e)
521 533
         else:
522 534
             LOG.debug(
523
-                "No running migrations found for instance %s" % instance_id)
535
+                "No running migrations found for instance %s", instance_id)
524 536
 
525 537
         while retry:
526 538
             instance = self.nova.servers.get(instance_id)
@@ -585,7 +597,7 @@ class NovaHelper(object):
585 597
         host = self.nova.hosts.get(hostname)
586 598
 
587 599
         if not host:
588
-            LOG.debug("host not found: %s" % hostname)
600
+            LOG.debug("host not found: %s", hostname)
589 601
             return False
590 602
         else:
591 603
             host[0].update(
@@ -607,18 +619,19 @@ class NovaHelper(object):
607 619
             key-value pairs to associate to the image as metadata.
608 620
         """
609 621
         LOG.debug(
610
-            "Trying to create an image from instance %s ..." % instance_id)
622
+            "Trying to create an image from instance %s ...", instance_id)
611 623
 
612 624
         # Looking for the instance
613 625
         instance = self.find_instance(instance_id)
614 626
 
615 627
         if not instance:
616
-            LOG.debug("Instance not found: %s" % instance_id)
628
+            LOG.debug("Instance not found: %s", instance_id)
617 629
             return None
618 630
         else:
619 631
             host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
620 632
             LOG.debug(
621
-                "Instance %s found on host '%s'." % (instance_id, host_name))
633
+                "Instance %(instance)s found on host '%(host)s'.",
634
+                {'instance': instance_id, 'host': host_name})
622 635
 
623 636
             # We need to wait for an appropriate status
624 637
             # of the instance before we can build an image from it
@@ -645,14 +658,15 @@ class NovaHelper(object):
645 658
                     if not image:
646 659
                         break
647 660
                     status = image.status
648
-                    LOG.debug("Current image status: %s" % status)
661
+                    LOG.debug("Current image status: %s", status)
649 662
 
650 663
                 if not image:
651
-                    LOG.debug("Image not found: %s" % image_uuid)
664
+                    LOG.debug("Image not found: %s", image_uuid)
652 665
                 else:
653 666
                     LOG.debug(
654
-                        "Image %s successfully created for instance %s" % (
655
-                            image_uuid, instance_id))
667
+                        "Image %(image)s successfully created for "
668
+                        "instance %(instance)s",
669
+                        {'image': image_uuid, 'instance': instance_id})
656 670
                     return image_uuid
657 671
         return None
658 672
 
@@ -661,16 +675,16 @@ class NovaHelper(object):
661 675
 
662 676
         :param instance_id: the unique id of the instance to delete.
663 677
         """
664
-        LOG.debug("Trying to remove instance %s ..." % instance_id)
678
+        LOG.debug("Trying to remove instance %s ...", instance_id)
665 679
 
666 680
         instance = self.find_instance(instance_id)
667 681
 
668 682
         if not instance:
669
-            LOG.debug("Instance not found: %s" % instance_id)
683
+            LOG.debug("Instance not found: %s", instance_id)
670 684
             return False
671 685
         else:
672 686
             self.nova.servers.delete(instance_id)
673
-            LOG.debug("Instance %s removed." % instance_id)
687
+            LOG.debug("Instance %s removed.", instance_id)
674 688
             return True
675 689
 
676 690
     def stop_instance(self, instance_id):
@@ -678,21 +692,21 @@ class NovaHelper(object):
678 692
 
679 693
         :param instance_id: the unique id of the instance to stop.
680 694
         """
681
-        LOG.debug("Trying to stop instance %s ..." % instance_id)
695
+        LOG.debug("Trying to stop instance %s ...", instance_id)
682 696
 
683 697
         instance = self.find_instance(instance_id)
684 698
 
685 699
         if not instance:
686
-            LOG.debug("Instance not found: %s" % instance_id)
700
+            LOG.debug("Instance not found: %s", instance_id)
687 701
             return False
688 702
         elif getattr(instance, 'OS-EXT-STS:vm_state') == "stopped":
689
-            LOG.debug("Instance has been stopped: %s" % instance_id)
703
+            LOG.debug("Instance has been stopped: %s", instance_id)
690 704
             return True
691 705
         else:
692 706
             self.nova.servers.stop(instance_id)
693 707
 
694 708
             if self.wait_for_instance_state(instance, "stopped", 8, 10):
695
-                LOG.debug("Instance %s stopped." % instance_id)
709
+                LOG.debug("Instance %s stopped.", instance_id)
696 710
                 return True
697 711
             else:
698 712
                 return False
@@ -733,11 +747,11 @@ class NovaHelper(object):
733 747
             return False
734 748
 
735 749
         while instance.status not in status_list and retry:
736
-            LOG.debug("Current instance status: %s" % instance.status)
750
+            LOG.debug("Current instance status: %s", instance.status)
737 751
             time.sleep(sleep)
738 752
             instance = self.nova.servers.get(instance.id)
739 753
             retry -= 1
740
-        LOG.debug("Current instance status: %s" % instance.status)
754
+        LOG.debug("Current instance status: %s", instance.status)
741 755
         return instance.status in status_list
742 756
 
743 757
     def create_instance(self, node_id, inst_name="test", image_id=None,
@@ -753,26 +767,26 @@ class NovaHelper(object):
753 767
         It returns the unique id of the created instance.
754 768
         """
755 769
         LOG.debug(
756
-            "Trying to create new instance '%s' "
757
-            "from image '%s' with flavor '%s' ..." % (
758
-                inst_name, image_id, flavor_name))
770
+            "Trying to create new instance '%(inst)s' "
771
+            "from image '%(image)s' with flavor '%(flavor)s' ...",
772
+            {'inst': inst_name, 'image': image_id, 'flavor': flavor_name})
759 773
 
760 774
         try:
761 775
             self.nova.keypairs.findall(name=keypair_name)
762 776
         except nvexceptions.NotFound:
763
-            LOG.debug("Key pair '%s' not found " % keypair_name)
777
+            LOG.debug("Key pair '%s' not found ", keypair_name)
764 778
             return
765 779
 
766 780
         try:
767 781
             image = self.glance.images.get(image_id)
768 782
         except glexceptions.NotFound:
769
-            LOG.debug("Image '%s' not found " % image_id)
783
+            LOG.debug("Image '%s' not found ", image_id)
770 784
             return
771 785
 
772 786
         try:
773 787
             flavor = self.nova.flavors.find(name=flavor_name)
774 788
         except nvexceptions.NotFound:
775
-            LOG.debug("Flavor '%s' not found " % flavor_name)
789
+            LOG.debug("Flavor '%s' not found ", flavor_name)
776 790
             return
777 791
 
778 792
         # Make sure all security groups exist
@@ -780,7 +794,7 @@ class NovaHelper(object):
780 794
             group_id = self.get_security_group_id_from_name(sec_group_name)
781 795
 
782 796
             if not group_id:
783
-                LOG.debug("Security group '%s' not found " % sec_group_name)
797
+                LOG.debug("Security group '%s' not found ", sec_group_name)
784 798
                 return
785 799
 
786 800
         net_list = list()
@@ -789,7 +803,7 @@ class NovaHelper(object):
789 803
             nic_id = self.get_network_id_from_name(network_name)
790 804
 
791 805
             if not nic_id:
792
-                LOG.debug("Network '%s' not found " % network_name)
806
+                LOG.debug("Network '%s' not found ", network_name)
793 807
                 return
794 808
             net_obj = {"net-id": nic_id}
795 809
             net_list.append(net_obj)
@@ -815,14 +829,16 @@ class NovaHelper(object):
815 829
                 if create_new_floating_ip and instance.status == 'ACTIVE':
816 830
                     LOG.debug(
817 831
                         "Creating a new floating IP"
818
-                        " for instance '%s'" % instance.id)
832
+                        " for instance '%s'", instance.id)
819 833
                     # Creating floating IP for the new instance
820 834
                     floating_ip = self.nova.floating_ips.create()
821 835
 
822 836
                     instance.add_floating_ip(floating_ip)
823 837
 
824
-                    LOG.debug("Instance %s associated to Floating IP '%s'" % (
825
-                        instance.id, floating_ip.ip))
838
+                    LOG.debug(
839
+                        "Instance %(instance)s associated to "
840
+                        "Floating IP '%(ip)s'",
841
+                        {'instance': instance.id, 'ip': floating_ip.ip})
826 842
 
827 843
         return instance
828 844
 
@@ -896,7 +912,7 @@ class NovaHelper(object):
896 912
             LOG.debug('Waiting volume update to {0}'.format(new_volume))
897 913
             time.sleep(retry_interval)
898 914
             retry -= 1
899
-            LOG.debug("retry count: %s" % retry)
915
+            LOG.debug("retry count: %s", retry)
900 916
         if getattr(new_volume, 'status') != "in-use":
901 917
             LOG.error("Volume update retry timeout or error")
902 918
             return False
@@ -904,5 +920,6 @@ class NovaHelper(object):
         host_name = getattr(new_volume, "os-vol-host-attr:host")
         LOG.debug(
             "Volume update succeeded : "
-            "Volume %s is now on host '%s'." % (new_volume.id, host_name))
+            "Volume %s is now on host '%s'.",
+            new_volume.id, host_name)
         return True

--- a/watcher/decision_engine/messaging/audit_endpoint.py
+++ b/watcher/decision_engine/messaging/audit_endpoint.py
@@ -48,7 +48,7 @@ class AuditEndpoint(object):
         self._oneshot_handler.execute(audit, context)
 
     def trigger_audit(self, context, audit_uuid):
-        LOG.debug("Trigger audit %s" % audit_uuid)
+        LOG.debug("Trigger audit %s", audit_uuid)
         self.executor.submit(self.do_trigger_audit,
                              context,
                              audit_uuid)

--- a/watcher/decision_engine/model/notification/cinder.py
+++ b/watcher/decision_engine/model/notification/cinder.py
@@ -255,7 +255,7 @@ class CapacityNotificationEndpoint(CinderNotification):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -286,7 +286,7 @@ class VolumeCreateEnd(VolumeNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -311,7 +311,7 @@ class VolumeUpdateEnd(VolumeNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -369,7 +369,7 @@ class VolumeDeleteEnd(VolumeNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))

--- a/watcher/decision_engine/model/notification/nova.py
+++ b/watcher/decision_engine/model/notification/nova.py
@@ -229,7 +229,7 @@ class ServiceUpdated(VersionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -275,7 +275,7 @@ class InstanceCreated(VersionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -310,7 +310,7 @@ class InstanceUpdated(VersionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -337,7 +337,7 @@ class InstanceDeletedEnd(VersionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -372,7 +372,7 @@ class LegacyInstanceUpdated(UnversionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -399,7 +399,7 @@ class LegacyInstanceCreatedEnd(UnversionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -426,7 +426,7 @@ class LegacyInstanceDeletedEnd(UnversionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -459,7 +459,7 @@ class LegacyLiveMigratedEnd(UnversionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -486,7 +486,7 @@ class LegacyInstanceResizeConfirmEnd(UnversionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -513,7 +513,7 @@ class LegacyInstanceRebuildEnd(UnversionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))

--- a/watcher/decision_engine/scoring/scoring_factory.py
+++ b/watcher/decision_engine/scoring/scoring_factory.py
@@ -91,16 +91,16 @@ def _reload_scoring_engines(refresh=False):
 
         for name in engines.keys():
             se_impl = default.DefaultScoringLoader().load(name)
-            LOG.debug("Found Scoring Engine plugin: %s" % se_impl.get_name())
+            LOG.debug("Found Scoring Engine plugin: %s", se_impl.get_name())
             _scoring_engine_map[se_impl.get_name()] = se_impl
 
         engine_containers = \
             default.DefaultScoringContainerLoader().list_available()
 
         for container_id, container_cls in engine_containers.items():
-            LOG.debug("Found Scoring Engine container plugin: %s" %
+            LOG.debug("Found Scoring Engine container plugin: %s",
                       container_id)
             for se in container_cls.get_scoring_engine_list():
-                LOG.debug("Found Scoring Engine plugin: %s" %
+                LOG.debug("Found Scoring Engine plugin: %s",
                           se.get_name())
                 _scoring_engine_map[se.get_name()] = se

--- a/watcher/decision_engine/strategy/strategies/basic_consolidation.py
+++ b/watcher/decision_engine/strategy/strategies/basic_consolidation.py
@@ -277,7 +277,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
             resource_id = "%s_%s" % (node.uuid, node.hostname)
             LOG.error(
                 "No values returned by %(resource_id)s "
-                "for %(metric_name)s" % dict(
+                "for %(metric_name)s", dict(
                     resource_id=resource_id,
                     metric_name=self.METRIC_NAMES[
                         self.config.datasource]['host_cpu_usage']))
@@ -297,7 +297,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
         if instance_cpu_utilization is None:
             LOG.error(
                 "No values returned by %(resource_id)s "
-                "for %(metric_name)s" % dict(
+                "for %(metric_name)s", dict(
                     resource_id=instance.uuid,
                     metric_name=self.METRIC_NAMES[
                         self.config.datasource]['instance_cpu_usage']))

--- a/watcher/decision_engine/strategy/strategies/noisy_neighbor.py
+++ b/watcher/decision_engine/strategy/strategies/noisy_neighbor.py
@@ -199,10 +199,10 @@ class NoisyNeighbor(base.NoisyNeighborBaseStrategy):
                                 hosts_need_release[node.uuid] = {
                                     'priority_vm': potential_priority_instance,
                                     'noisy_vm': potential_noisy_instance}
-                                LOG.debug("Priority VM found: %s" % (
-                                    potential_priority_instance.uuid))
-                                LOG.debug("Noisy VM found: %s" % (
-                                    potential_noisy_instance.uuid))
+                                LOG.debug("Priority VM found: %s",
+                                          potential_priority_instance.uuid)
+                                LOG.debug("Noisy VM found: %s",
+                                          potential_noisy_instance.uuid)
                                 loop_break_flag = True
                                 break
 

--- a/watcher/decision_engine/strategy/strategies/outlet_temp_control.py
+++ b/watcher/decision_engine/strategy/strategies/outlet_temp_control.py
@@ -232,7 +232,8 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
                 LOG.warning("%s: no outlet temp data", resource_id)
                 continue
 
-            LOG.debug("%s: outlet temperature %f" % (resource_id, outlet_temp))
+            LOG.debug("%(resource)s: outlet temperature %(temp)f",
+                      {'resource': resource_id, 'temp': outlet_temp})
             instance_data = {'node': node, 'outlet_temp': outlet_temp}
             if outlet_temp >= self.threshold:
                 # mark the node to release resources

--- a/watcher/decision_engine/strategy/strategies/uniform_airflow.py
+++ b/watcher/decision_engine/strategy/strategies/uniform_airflow.py
@@ -375,7 +375,8 @@ class UniformAirflow(base.BaseStrategy):
                 LOG.warning("%s: no airflow data", resource_id)
                 continue
 
-            LOG.debug("%s: airflow %f" % (resource_id, airflow))
+            LOG.debug("%(resource)s: airflow %(airflow)f",
+                      {'resource': resource_id, 'airflow': airflow})
             nodemap = {'node': node, 'airflow': airflow}
             if airflow >= self.threshold_airflow:
                 # mark the node to release resources

--- a/watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py
+++ b/watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py
@@ -191,7 +191,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
             return instance.state.value
         else:
             LOG.error('Unexpected instance state type, '
-                      'state=%(state)s, state_type=%(st)s.' %
+                      'state=%(state)s, state_type=%(st)s.',
                       dict(state=instance.state,
                            st=type(instance.state)))
             raise exception.WatcherException
@@ -207,7 +207,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
             return node.status.value
         else:
             LOG.error('Unexpected node status type, '
-                      'status=%(status)s, status_type=%(st)s.' %
+                      'status=%(status)s, status_type=%(st)s.',
                       dict(status=node.status,
                            st=type(node.status)))
             raise exception.WatcherException
@@ -256,7 +256,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
             # migration mechanism to move non active VMs.
             LOG.error(
                 'Cannot live migrate: instance_uuid=%(instance_uuid)s, '
-                'state=%(instance_state)s.' % dict(
+                'state=%(instance_state)s.', dict(
                     instance_uuid=instance.uuid,
                     instance_state=instance_state_str))
             return

--- a/watcher/decision_engine/strategy/strategies/workload_stabilization.py
+++ b/watcher/decision_engine/strategy/strategies/workload_stabilization.py
@@ -203,7 +203,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
             if avg_meter is None:
                 LOG.warning(
                     "No values returned by %(resource_id)s "
-                    "for %(metric_name)s" % dict(
+                    "for %(metric_name)s", dict(
                         resource_id=instance.uuid, metric_name=meter))
                 return
             if meter == 'cpu_util':
@@ -376,12 +376,12 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
         normalized_load = self.normalize_hosts_load(hosts_load)
         for metric in self.metrics:
             metric_sd = self.get_sd(normalized_load, metric)
-            LOG.info("Standard deviation for %s is %s."
-                     % (metric, metric_sd))
+            LOG.info("Standard deviation for %s is %s.",
+                     metric, metric_sd)
             if metric_sd > float(self.thresholds[metric]):
                 LOG.info("Standard deviation of %s exceeds"
-                         " appropriate threshold %s."
-                         % (metric, metric_sd))
+                         " appropriate threshold %s.",
+                         metric, metric_sd)
                 return self.simulate_migrations(hosts_load)
 
     def add_migration(self,

--- a/watcher/decision_engine/strategy/strategies/zone_migration.py
+++ b/watcher/decision_engine/strategy/strategies/zone_migration.py
@@ -312,7 +312,7 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy):
                     else:
                         self.instances_migration(targets, action_counter)
 
-        LOG.debug("action total: %s, pools: %s, nodes %s " % (
-                  action_counter.total_count,
-                  action_counter.per_pool_count,
-                  action_counter.per_node_count))
+        LOG.debug("action total: %s, pools: %s, nodes %s ",
+                  action_counter.total_count,
+                  action_counter.per_pool_count,
+                  action_counter.per_node_count)
@@ -413,13 +413,13 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy):
             pool = getattr(volume, 'os-vol-host-attr:host')
             if action_counter.is_pool_max(pool):
                 LOG.debug("%s has objects to be migrated, but it has"
-                          " reached the limit of parallelization." % pool)
+                          " reached the limit of parallelization.", pool)
                 continue
 
             src_type = volume.volume_type
             dst_pool, dst_type = self.get_dst_pool_and_type(pool, src_type)
             LOG.debug(src_type)
-            LOG.debug("%s %s" % (dst_pool, dst_type))
+            LOG.debug("%s %s", dst_pool, dst_type)
 
             if self.is_available(volume):
                 if src_type == dst_type:
@@ -448,7 +448,7 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy):
 
             if action_counter.is_node_max(src_node):
                 LOG.debug("%s has objects to be migrated, but it has"
-                          " reached the limit of parallelization." % src_node)
+                          " reached the limit of parallelization.", src_node)
                 continue
 
             dst_node = self.get_dst_node(src_node)
@@ -643,7 +643,7 @@ class ActionCounter(object):
         if not self.is_total_max() and not self.is_pool_max(pool):
             self.per_pool_count[pool] += 1
             self.total_count += 1
-            LOG.debug("total: %s, per_pool: %s" % (
-                      self.total_count, self.per_pool_count))
+            LOG.debug("total: %s, per_pool: %s",
+                      self.total_count, self.per_pool_count)
             return True
         return False
@@ -660,7 +660,7 @@ class ActionCounter(object):
         if not self.is_total_max() and not self.is_node_max(node):
             self.per_node_count[node] += 1
             self.total_count += 1
-            LOG.debug("total: %s, per_node: %s" % (
-                      self.total_count, self.per_node_count))
+            LOG.debug("total: %s, per_node: %s",
+                      self.total_count, self.per_node_count)
             return True
         return False
@@ -679,9 +679,9 @@ class ActionCounter(object):
         """
         if pool not in self.per_pool_count:
             self.per_pool_count[pool] = 0
-        LOG.debug("the number of parallel per pool %s is %s " %
-                  (pool, self.per_pool_count[pool]))
-        LOG.debug("per pool limit is %s" % self.per_pool_limit)
+        LOG.debug("the number of parallel per pool %s is %s ",
+                  pool, self.per_pool_count[pool])
+        LOG.debug("per pool limit is %s", self.per_pool_limit)
         return self.per_pool_count[pool] >= self.per_pool_limit
 
     def is_node_max(self, node):
@@ -724,7 +724,7 @@ class BaseFilter(object):
             for k, v in six.iteritems(targets):
                 if not self.is_allowed(k):
                     continue
-                LOG.debug("filter:%s with the key: %s" % (cond, k))
+                LOG.debug("filter:%s with the key: %s", cond, k)
                 targets[k] = self.exec_filter(v, cond)
 
         LOG.debug(targets)
@@ -778,7 +778,7 @@ class ProjectSortFilter(SortMovingToFrontFilter):
         """
 
         project_id = self.get_project_id(item)
-        LOG.debug("project_id: %s, sort_key: %s" % (project_id, sort_key))
+        LOG.debug("project_id: %s, sort_key: %s", project_id, sort_key)
         return project_id == sort_key
 
     def get_project_id(self, item):
@@ -812,7 +812,7 @@ class ComputeHostSortFilter(SortMovingToFrontFilter):
         """
 
         host = self.get_host(item)
-        LOG.debug("host: %s, sort_key: %s" % (host, sort_key))
+        LOG.debug("host: %s, sort_key: %s", host, sort_key)
         return host == sort_key
 
     def get_host(self, item):
@@ -840,7 +840,7 @@ class StorageHostSortFilter(SortMovingToFrontFilter):
         """
 
         host = self.get_host(item)
-        LOG.debug("host: %s, sort_key: %s" % (host, sort_key))
+        LOG.debug("host: %s, sort_key: %s", host, sort_key)
         return host == sort_key
 
     def get_host(self, item):
@@ -867,7 +867,7 @@ class ComputeSpecSortFilter(BaseFilter):
         result = items
 
         if sort_key not in self.accept_keys:
-            LOG.warning("Invalid key is specified: %s" % sort_key)
+            LOG.warning("Invalid key is specified: %s", sort_key)
         else:
             result = self.get_sorted_items(items, sort_key)
 
@@ -912,11 +912,11 @@ class ComputeSpecSortFilter(BaseFilter):
         :returns: memory size of item
         """
 
-        LOG.debug("item: %s, flavors: %s" % (item, flavors))
+        LOG.debug("item: %s, flavors: %s", item, flavors)
         for flavor in flavors:
-            LOG.debug("item.flavor: %s, flavor: %s" % (item.flavor, flavor))
+            LOG.debug("item.flavor: %s, flavor: %s", item.flavor, flavor)
             if item.flavor.get('id') == flavor.id:
-                LOG.debug("flavor.ram: %s" % flavor.ram)
+                LOG.debug("flavor.ram: %s", flavor.ram)
                 return flavor.ram
 
     def get_vcpu_num(self, item, flavors):
@@ -927,11 +927,11 @@ class ComputeSpecSortFilter(BaseFilter):
         :returns: vcpu number of item
         """
 
-        LOG.debug("item: %s, flavors: %s" % (item, flavors))
+        LOG.debug("item: %s, flavors: %s", item, flavors)
        for flavor in flavors:
-            LOG.debug("item.flavor: %s, flavor: %s" % (item.flavor, flavor))
+            LOG.debug("item.flavor: %s, flavor: %s", item.flavor, flavor)
             if item.flavor.get('id') == flavor.id:
-                LOG.debug("flavor.vcpus: %s" % flavor.vcpus)
+                LOG.debug("flavor.vcpus: %s", flavor.vcpus)
                 return flavor.vcpus
 
     def get_disk_size(self, item, flavors):
@@ -942,11 +942,11 @@ class ComputeSpecSortFilter(BaseFilter):
         :returns: disk size of item
         """
 
-        LOG.debug("item: %s, flavors: %s" % (item, flavors))
+        LOG.debug("item: %s, flavors: %s", item, flavors)
         for flavor in flavors:
-            LOG.debug("item.flavor: %s, flavor: %s" % (item.flavor, flavor))
+            LOG.debug("item.flavor: %s, flavor: %s", item.flavor, flavor)
             if item.flavor.get('id') == flavor.id:
-                LOG.debug("flavor.disk: %s" % flavor.disk)
+                LOG.debug("flavor.disk: %s", flavor.disk)
                 return flavor.disk
 
 
@@ -960,7 +960,7 @@ class StorageSpecSortFilter(BaseFilter):
         result = items
 
         if sort_key not in self.accept_keys:
-            LOG.warning("Invalid key is specified: %s" % sort_key)
+            LOG.warning("Invalid key is specified: %s", sort_key)
             return result
 
         if sort_key == 'created_at':
