Add a hacking rule for string interpolation at logging

String interpolation should be delayed to be handled by
the logging code, rather than being done at the point
of the logging call.
See the oslo.i18n guidelines
* https://docs.openstack.org/oslo.i18n/latest/user/guidelines.html#adding-variables-to-log-messages
and
* https://github.com/openstack-dev/hacking/blob/master/hacking/checks/other.py#L39
Closes-Bug: #1596829

Change-Id: Ibba5791669c137be1483805db657beb907030227
Authored by ForestLee on 2017-07-17 19:03:02 +08:00; committed by Alexander Chadin.
parent 3431b77388
commit 403ec94bc1
16 changed files with 160 additions and 140 deletions

View File

@ -55,7 +55,7 @@ filename = *.py,app.wsgi
show-source=True show-source=True
ignore= H105,E123,E226,N320,H202 ignore= H105,E123,E226,N320,H202
builtins= _ builtins= _
enable-extensions = H106,H203 enable-extensions = H106,H203,H904
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/,releasenotes exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/,releasenotes
[testenv:wheel] [testenv:wheel]

View File

@ -113,8 +113,10 @@ class Migrate(base.BaseAction):
dest_hostname=destination) dest_hostname=destination)
except nova_helper.nvexceptions.ClientException as e: except nova_helper.nvexceptions.ClientException as e:
LOG.debug("Nova client exception occurred while live " LOG.debug("Nova client exception occurred while live "
"migrating instance %s.Exception: %s" % "migrating instance "
(self.instance_uuid, e)) "%(instance)s.Exception: %(exception)s",
{'instance': self.instance_uuid, 'exception': e})
except Exception as e: except Exception as e:
LOG.exception(e) LOG.exception(e)
LOG.critical("Unexpected error occurred. Migration failed for " LOG.critical("Unexpected error occurred. Migration failed for "

View File

@ -40,10 +40,10 @@ def main():
if host == '127.0.0.1': if host == '127.0.0.1':
LOG.info('serving on 127.0.0.1:%(port)s, ' LOG.info('serving on 127.0.0.1:%(port)s, '
'view at %(protocol)s://127.0.0.1:%(port)s' % 'view at %(protocol)s://127.0.0.1:%(port)s',
dict(protocol=protocol, port=port)) dict(protocol=protocol, port=port))
else: else:
LOG.info('serving on %(protocol)s://%(host)s:%(port)s' % LOG.info('serving on %(protocol)s://%(host)s:%(port)s',
dict(protocol=protocol, host=host, port=port)) dict(protocol=protocol, host=host, port=port))
api_schedule = scheduling.APISchedulingService() api_schedule = scheduling.APISchedulingService()

View File

@ -139,13 +139,13 @@ class CinderHelper(object):
volume = self.get_volume(volume.id) volume = self.get_volume(volume.id)
time.sleep(retry_interval) time.sleep(retry_interval)
retry -= 1 retry -= 1
LOG.debug("retry count: %s" % retry) LOG.debug("retry count: %s", retry)
LOG.debug("Waiting to complete deletion of volume %s" % volume.id) LOG.debug("Waiting to complete deletion of volume %s", volume.id)
if self._can_get_volume(volume.id): if self._can_get_volume(volume.id):
LOG.error("Volume deletion error: %s" % volume.id) LOG.error("Volume deletion error: %s", volume.id)
return False return False
LOG.debug("Volume %s was deleted successfully." % volume.id) LOG.debug("Volume %s was deleted successfully.", volume.id)
return True return True
def check_migrated(self, volume, retry_interval=10): def check_migrated(self, volume, retry_interval=10):
@ -179,8 +179,7 @@ class CinderHelper(object):
LOG.error(error_msg) LOG.error(error_msg)
return False return False
LOG.debug( LOG.debug(
"Volume migration succeeded : " "Volume migration succeeded : volume %s is now on host '%s'.", (
"volume %s is now on host '%s'." % (
volume.id, host_name)) volume.id, host_name))
return True return True
@ -194,8 +193,8 @@ class CinderHelper(object):
message=(_("Volume type must be same for migrating"))) message=(_("Volume type must be same for migrating")))
source_node = getattr(volume, 'os-vol-host-attr:host') source_node = getattr(volume, 'os-vol-host-attr:host')
LOG.debug("Volume %s found on host '%s'." LOG.debug("Volume %s found on host '%s'.",
% (volume.id, source_node)) (volume.id, source_node))
self.cinder.volumes.migrate_volume( self.cinder.volumes.migrate_volume(
volume, dest_node, False, True) volume, dest_node, False, True)
@ -211,8 +210,8 @@ class CinderHelper(object):
source_node = getattr(volume, 'os-vol-host-attr:host') source_node = getattr(volume, 'os-vol-host-attr:host')
LOG.debug( LOG.debug(
"Volume %s found on host '%s'." % ( "Volume %s found on host '%s'.",
volume.id, source_node)) (volume.id, source_node))
self.cinder.volumes.retype( self.cinder.volumes.retype(
volume, dest_type, "on-demand") volume, dest_type, "on-demand")
@ -234,14 +233,14 @@ class CinderHelper(object):
LOG.debug('Waiting volume creation of {0}'.format(new_volume)) LOG.debug('Waiting volume creation of {0}'.format(new_volume))
time.sleep(retry_interval) time.sleep(retry_interval)
retry -= 1 retry -= 1
LOG.debug("retry count: %s" % retry) LOG.debug("retry count: %s", retry)
if getattr(new_volume, 'status') != 'available': if getattr(new_volume, 'status') != 'available':
error_msg = (_("Failed to create volume '%(volume)s. ") % error_msg = (_("Failed to create volume '%(volume)s. ") %
{'volume': new_volume.id}) {'volume': new_volume.id})
raise Exception(error_msg) raise Exception(error_msg)
LOG.debug("Volume %s was created successfully." % new_volume) LOG.debug("Volume %s was created successfully.", new_volume)
return new_volume return new_volume
def delete_volume(self, volume): def delete_volume(self, volume):

View File

@ -106,7 +106,7 @@ class NovaHelper(object):
return True return True
else: else:
LOG.debug("confirm resize failed for the " LOG.debug("confirm resize failed for the "
"instance %s" % instance.id) "instance %s", instance.id)
return False return False
def wait_for_volume_status(self, volume, status, timeout=60, def wait_for_volume_status(self, volume, status, timeout=60,
@ -154,19 +154,20 @@ class NovaHelper(object):
""" """
new_image_name = "" new_image_name = ""
LOG.debug( LOG.debug(
"Trying a non-live migrate of instance '%s' " % instance_id) "Trying a non-live migrate of instance '%s' ", instance_id)
# Looking for the instance to migrate # Looking for the instance to migrate
instance = self.find_instance(instance_id) instance = self.find_instance(instance_id)
if not instance: if not instance:
LOG.debug("Instance %s not found !" % instance_id) LOG.debug("Instance %s not found !", instance_id)
return False return False
else: else:
# NOTE: If destination node is None call Nova API to migrate # NOTE: If destination node is None call Nova API to migrate
# instance # instance
host_name = getattr(instance, "OS-EXT-SRV-ATTR:host") host_name = getattr(instance, "OS-EXT-SRV-ATTR:host")
LOG.debug( LOG.debug(
"Instance %s found on host '%s'." % (instance_id, host_name)) "Instance %(instance)s found on host '%(host)s'.",
{'instance': instance_id, 'host': host_name})
if dest_hostname is None: if dest_hostname is None:
previous_status = getattr(instance, 'status') previous_status = getattr(instance, 'status')
@ -186,12 +187,12 @@ class NovaHelper(object):
return False return False
LOG.debug( LOG.debug(
"cold migration succeeded : " "cold migration succeeded : "
"instance %s is now on host '%s'." % ( "instance %s is now on host '%s'.", (
instance_id, new_hostname)) instance_id, new_hostname))
return True return True
else: else:
LOG.debug( LOG.debug(
"cold migration for instance %s failed" % instance_id) "cold migration for instance %s failed", instance_id)
return False return False
if not keep_original_image_name: if not keep_original_image_name:
@ -220,7 +221,7 @@ class NovaHelper(object):
for network_name, network_conf_obj in addresses.items(): for network_name, network_conf_obj in addresses.items():
LOG.debug( LOG.debug(
"Extracting network configuration for network '%s'" % "Extracting network configuration for network '%s'",
network_name) network_name)
network_names_list.append(network_name) network_names_list.append(network_name)
@ -241,7 +242,7 @@ class NovaHelper(object):
stopped_ok = self.stop_instance(instance_id) stopped_ok = self.stop_instance(instance_id)
if not stopped_ok: if not stopped_ok:
LOG.debug("Could not stop instance: %s" % instance_id) LOG.debug("Could not stop instance: %s", instance_id)
return False return False
# Building the temporary image which will be used # Building the temporary image which will be used
@ -251,7 +252,7 @@ class NovaHelper(object):
if not image_uuid: if not image_uuid:
LOG.debug( LOG.debug(
"Could not build temporary image of instance: %s" % "Could not build temporary image of instance: %s",
instance_id) instance_id)
return False return False
@ -299,8 +300,10 @@ class NovaHelper(object):
blocks.append( blocks.append(
block_device_mapping_v2_item) block_device_mapping_v2_item)
LOG.debug("Detaching volume %s from instance: %s" % ( LOG.debug(
volume_id, instance_id)) "Detaching volume %(volume)s from "
"instance: %(instance)s",
{'volume': volume_id, 'instance': instance_id})
# volume.detach() # volume.detach()
self.nova.volumes.delete_server_volume(instance_id, self.nova.volumes.delete_server_volume(instance_id,
volume_id) volume_id)
@ -308,11 +311,12 @@ class NovaHelper(object):
if not self.wait_for_volume_status(volume, "available", 5, if not self.wait_for_volume_status(volume, "available", 5,
10): 10):
LOG.debug( LOG.debug(
"Could not detach volume %s from instance: %s" % ( "Could not detach volume %(volume)s "
volume_id, instance_id)) "from instance: %(instance)s",
{'volume': volume_id, 'instance': instance_id})
return False return False
except ciexceptions.NotFound: except ciexceptions.NotFound:
LOG.debug("Volume '%s' not found " % image_id) LOG.debug("Volume '%s' not found ", image_id)
return False return False
# We create the new instance from # We create the new instance from
@ -331,18 +335,21 @@ class NovaHelper(object):
if not new_instance: if not new_instance:
LOG.debug( LOG.debug(
"Could not create new instance " "Could not create new instance "
"for non-live migration of instance %s" % instance_id) "for non-live migration of instance %s", instance_id)
return False return False
try: try:
LOG.debug("Detaching floating ip '%s' from instance %s" % ( LOG.debug(
floating_ip, instance_id)) "Detaching floating ip '%(floating_ip)s' "
"from instance %(instance)s",
{'floating_ip': floating_ip, 'instance': instance_id})
# We detach the floating ip from the current instance # We detach the floating ip from the current instance
instance.remove_floating_ip(floating_ip) instance.remove_floating_ip(floating_ip)
LOG.debug( LOG.debug(
"Attaching floating ip '%s' to the new instance %s" % ( "Attaching floating ip '%(ip)s' to the new "
floating_ip, new_instance.id)) "instance %(id)s",
{'ip': floating_ip, 'id': new_instance.id})
# We attach the same floating ip to the new instance # We attach the same floating ip to the new instance
new_instance.add_floating_ip(floating_ip) new_instance.add_floating_ip(floating_ip)
@ -354,12 +361,12 @@ class NovaHelper(object):
# Deleting the old instance (because no more useful) # Deleting the old instance (because no more useful)
delete_ok = self.delete_instance(instance_id) delete_ok = self.delete_instance(instance_id)
if not delete_ok: if not delete_ok:
LOG.debug("Could not delete instance: %s" % instance_id) LOG.debug("Could not delete instance: %s", instance_id)
return False return False
LOG.debug( LOG.debug(
"Instance %s has been successfully migrated " "Instance %s has been successfully migrated "
"to new host '%s' and its new id is %s." % ( "to new host '%s' and its new id is %s.", (
instance_id, new_host_name, new_instance.id)) instance_id, new_host_name, new_instance.id))
return True return True
@ -376,8 +383,10 @@ class NovaHelper(object):
:param instance_id: the unique id of the instance to resize. :param instance_id: the unique id of the instance to resize.
:param flavor: the name or ID of the flavor to resize to. :param flavor: the name or ID of the flavor to resize to.
""" """
LOG.debug("Trying a resize of instance %s to flavor '%s'" % ( LOG.debug(
instance_id, flavor)) "Trying a resize of instance %(instance)s to "
"flavor '%(flavor)s'",
{'instance': instance_id, 'flavor': flavor})
# Looking for the instance to resize # Looking for the instance to resize
instance = self.find_instance(instance_id) instance = self.find_instance(instance_id)
@ -394,17 +403,17 @@ class NovaHelper(object):
"instance %s. Exception: %s", instance_id, e) "instance %s. Exception: %s", instance_id, e)
if not flavor_id: if not flavor_id:
LOG.debug("Flavor not found: %s" % flavor) LOG.debug("Flavor not found: %s", flavor)
return False return False
if not instance: if not instance:
LOG.debug("Instance not found: %s" % instance_id) LOG.debug("Instance not found: %s", instance_id)
return False return False
instance_status = getattr(instance, 'OS-EXT-STS:vm_state') instance_status = getattr(instance, 'OS-EXT-STS:vm_state')
LOG.debug( LOG.debug(
"Instance %s is in '%s' status." % (instance_id, "Instance %(id)s is in '%(status)s' status.",
instance_status)) {'id': instance_id, 'status': instance_status})
instance.resize(flavor=flavor_id) instance.resize(flavor=flavor_id)
while getattr(instance, while getattr(instance,
@ -442,17 +451,20 @@ class NovaHelper(object):
destination_node is None, nova scheduler choose destination_node is None, nova scheduler choose
the destination host the destination host
""" """
LOG.debug("Trying to live migrate instance %s " % (instance_id)) LOG.debug(
"Trying a live migrate instance %(instance)s ",
{'instance': instance_id})
# Looking for the instance to migrate # Looking for the instance to migrate
instance = self.find_instance(instance_id) instance = self.find_instance(instance_id)
if not instance: if not instance:
LOG.debug("Instance not found: %s" % instance_id) LOG.debug("Instance not found: %s", instance_id)
return False return False
else: else:
host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host') host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
LOG.debug( LOG.debug(
"Instance %s found on host '%s'." % (instance_id, host_name)) "Instance %(instance)s found on host '%(host)s'.",
{'instance': instance_id, 'host': host_name})
# From nova api version 2.25(Mitaka release), the default value of # From nova api version 2.25(Mitaka release), the default value of
# block_migration is None which is mapped to 'auto'. # block_migration is None which is mapped to 'auto'.
@ -474,7 +486,7 @@ class NovaHelper(object):
if host_name != new_hostname and instance.status == 'ACTIVE': if host_name != new_hostname and instance.status == 'ACTIVE':
LOG.debug( LOG.debug(
"Live migration succeeded : " "Live migration succeeded : "
"instance %s is now on host '%s'." % ( "instance %s is now on host '%s'.", (
instance_id, new_hostname)) instance_id, new_hostname))
return True return True
else: else:
@ -485,7 +497,7 @@ class NovaHelper(object):
and retry: and retry:
instance = self.nova.servers.get(instance.id) instance = self.nova.servers.get(instance.id)
if not getattr(instance, 'OS-EXT-STS:task_state'): if not getattr(instance, 'OS-EXT-STS:task_state'):
LOG.debug("Instance task state: %s is null" % instance_id) LOG.debug("Instance task state: %s is null", instance_id)
break break
LOG.debug( LOG.debug(
'Waiting the migration of {0} to {1}'.format( 'Waiting the migration of {0} to {1}'.format(
@ -501,13 +513,13 @@ class NovaHelper(object):
LOG.debug( LOG.debug(
"Live migration succeeded : " "Live migration succeeded : "
"instance %s is now on host '%s'." % ( "instance %(instance)s is now on host '%(host)s'.",
instance_id, host_name)) {'instance': instance_id, 'host': host_name})
return True return True
def abort_live_migrate(self, instance_id, source, destination, retry=240): def abort_live_migrate(self, instance_id, source, destination, retry=240):
LOG.debug("Aborting live migration of instance %s" % instance_id) LOG.debug("Aborting live migration of instance %s", instance_id)
migration = self.get_running_migration(instance_id) migration = self.get_running_migration(instance_id)
if migration: if migration:
migration_id = getattr(migration[0], "id") migration_id = getattr(migration[0], "id")
@ -520,7 +532,7 @@ class NovaHelper(object):
LOG.exception(e) LOG.exception(e)
else: else:
LOG.debug( LOG.debug(
"No running migrations found for instance %s" % instance_id) "No running migrations found for instance %s", instance_id)
while retry: while retry:
instance = self.nova.servers.get(instance_id) instance = self.nova.servers.get(instance_id)
@ -585,7 +597,7 @@ class NovaHelper(object):
host = self.nova.hosts.get(hostname) host = self.nova.hosts.get(hostname)
if not host: if not host:
LOG.debug("host not found: %s" % hostname) LOG.debug("host not found: %s", hostname)
return False return False
else: else:
host[0].update( host[0].update(
@ -607,18 +619,19 @@ class NovaHelper(object):
key-value pairs to associate to the image as metadata. key-value pairs to associate to the image as metadata.
""" """
LOG.debug( LOG.debug(
"Trying to create an image from instance %s ..." % instance_id) "Trying to create an image from instance %s ...", instance_id)
# Looking for the instance # Looking for the instance
instance = self.find_instance(instance_id) instance = self.find_instance(instance_id)
if not instance: if not instance:
LOG.debug("Instance not found: %s" % instance_id) LOG.debug("Instance not found: %s", instance_id)
return None return None
else: else:
host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host') host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
LOG.debug( LOG.debug(
"Instance %s found on host '%s'." % (instance_id, host_name)) "Instance %(instance)s found on host '%(host)s'.",
{'instance': instance_id, 'host': host_name})
# We need to wait for an appropriate status # We need to wait for an appropriate status
# of the instance before we can build an image from it # of the instance before we can build an image from it
@ -645,14 +658,15 @@ class NovaHelper(object):
if not image: if not image:
break break
status = image.status status = image.status
LOG.debug("Current image status: %s" % status) LOG.debug("Current image status: %s", status)
if not image: if not image:
LOG.debug("Image not found: %s" % image_uuid) LOG.debug("Image not found: %s", image_uuid)
else: else:
LOG.debug( LOG.debug(
"Image %s successfully created for instance %s" % ( "Image %(image)s successfully created for "
image_uuid, instance_id)) "instance %(instance)s",
{'image': image_uuid, 'instance': instance_id})
return image_uuid return image_uuid
return None return None
@ -661,16 +675,16 @@ class NovaHelper(object):
:param instance_id: the unique id of the instance to delete. :param instance_id: the unique id of the instance to delete.
""" """
LOG.debug("Trying to remove instance %s ..." % instance_id) LOG.debug("Trying to remove instance %s ...", instance_id)
instance = self.find_instance(instance_id) instance = self.find_instance(instance_id)
if not instance: if not instance:
LOG.debug("Instance not found: %s" % instance_id) LOG.debug("Instance not found: %s", instance_id)
return False return False
else: else:
self.nova.servers.delete(instance_id) self.nova.servers.delete(instance_id)
LOG.debug("Instance %s removed." % instance_id) LOG.debug("Instance %s removed.", instance_id)
return True return True
def stop_instance(self, instance_id): def stop_instance(self, instance_id):
@ -678,21 +692,21 @@ class NovaHelper(object):
:param instance_id: the unique id of the instance to stop. :param instance_id: the unique id of the instance to stop.
""" """
LOG.debug("Trying to stop instance %s ..." % instance_id) LOG.debug("Trying to stop instance %s ...", instance_id)
instance = self.find_instance(instance_id) instance = self.find_instance(instance_id)
if not instance: if not instance:
LOG.debug("Instance not found: %s" % instance_id) LOG.debug("Instance not found: %s", instance_id)
return False return False
elif getattr(instance, 'OS-EXT-STS:vm_state') == "stopped": elif getattr(instance, 'OS-EXT-STS:vm_state') == "stopped":
LOG.debug("Instance has been stopped: %s" % instance_id) LOG.debug("Instance has been stopped: %s", instance_id)
return True return True
else: else:
self.nova.servers.stop(instance_id) self.nova.servers.stop(instance_id)
if self.wait_for_instance_state(instance, "stopped", 8, 10): if self.wait_for_instance_state(instance, "stopped", 8, 10):
LOG.debug("Instance %s stopped." % instance_id) LOG.debug("Instance %s stopped.", instance_id)
return True return True
else: else:
return False return False
@ -733,11 +747,11 @@ class NovaHelper(object):
return False return False
while instance.status not in status_list and retry: while instance.status not in status_list and retry:
LOG.debug("Current instance status: %s" % instance.status) LOG.debug("Current instance status: %s", instance.status)
time.sleep(sleep) time.sleep(sleep)
instance = self.nova.servers.get(instance.id) instance = self.nova.servers.get(instance.id)
retry -= 1 retry -= 1
LOG.debug("Current instance status: %s" % instance.status) LOG.debug("Current instance status: %s", instance.status)
return instance.status in status_list return instance.status in status_list
def create_instance(self, node_id, inst_name="test", image_id=None, def create_instance(self, node_id, inst_name="test", image_id=None,
@ -753,26 +767,26 @@ class NovaHelper(object):
It returns the unique id of the created instance. It returns the unique id of the created instance.
""" """
LOG.debug( LOG.debug(
"Trying to create new instance '%s' " "Trying to create new instance '%(inst)s' "
"from image '%s' with flavor '%s' ..." % ( "from image '%(image)s' with flavor '%(flavor)s' ...",
inst_name, image_id, flavor_name)) {'inst': inst_name, 'image': image_id, 'flavor': flavor_name})
try: try:
self.nova.keypairs.findall(name=keypair_name) self.nova.keypairs.findall(name=keypair_name)
except nvexceptions.NotFound: except nvexceptions.NotFound:
LOG.debug("Key pair '%s' not found " % keypair_name) LOG.debug("Key pair '%s' not found ", keypair_name)
return return
try: try:
image = self.glance.images.get(image_id) image = self.glance.images.get(image_id)
except glexceptions.NotFound: except glexceptions.NotFound:
LOG.debug("Image '%s' not found " % image_id) LOG.debug("Image '%s' not found ", image_id)
return return
try: try:
flavor = self.nova.flavors.find(name=flavor_name) flavor = self.nova.flavors.find(name=flavor_name)
except nvexceptions.NotFound: except nvexceptions.NotFound:
LOG.debug("Flavor '%s' not found " % flavor_name) LOG.debug("Flavor '%s' not found ", flavor_name)
return return
# Make sure all security groups exist # Make sure all security groups exist
@ -780,7 +794,7 @@ class NovaHelper(object):
group_id = self.get_security_group_id_from_name(sec_group_name) group_id = self.get_security_group_id_from_name(sec_group_name)
if not group_id: if not group_id:
LOG.debug("Security group '%s' not found " % sec_group_name) LOG.debug("Security group '%s' not found ", sec_group_name)
return return
net_list = list() net_list = list()
@ -789,7 +803,7 @@ class NovaHelper(object):
nic_id = self.get_network_id_from_name(network_name) nic_id = self.get_network_id_from_name(network_name)
if not nic_id: if not nic_id:
LOG.debug("Network '%s' not found " % network_name) LOG.debug("Network '%s' not found ", network_name)
return return
net_obj = {"net-id": nic_id} net_obj = {"net-id": nic_id}
net_list.append(net_obj) net_list.append(net_obj)
@ -815,14 +829,16 @@ class NovaHelper(object):
if create_new_floating_ip and instance.status == 'ACTIVE': if create_new_floating_ip and instance.status == 'ACTIVE':
LOG.debug( LOG.debug(
"Creating a new floating IP" "Creating a new floating IP"
" for instance '%s'" % instance.id) " for instance '%s'", instance.id)
# Creating floating IP for the new instance # Creating floating IP for the new instance
floating_ip = self.nova.floating_ips.create() floating_ip = self.nova.floating_ips.create()
instance.add_floating_ip(floating_ip) instance.add_floating_ip(floating_ip)
LOG.debug("Instance %s associated to Floating IP '%s'" % ( LOG.debug(
instance.id, floating_ip.ip)) "Instance %(instance)s associated to "
"Floating IP '%(ip)s'",
{'instance': instance.id, 'ip': floating_ip.ip})
return instance return instance
@ -896,7 +912,7 @@ class NovaHelper(object):
LOG.debug('Waiting volume update to {0}'.format(new_volume)) LOG.debug('Waiting volume update to {0}'.format(new_volume))
time.sleep(retry_interval) time.sleep(retry_interval)
retry -= 1 retry -= 1
LOG.debug("retry count: %s" % retry) LOG.debug("retry count: %s", retry)
if getattr(new_volume, 'status') != "in-use": if getattr(new_volume, 'status') != "in-use":
LOG.error("Volume update retry timeout or error") LOG.error("Volume update retry timeout or error")
return False return False
@ -904,5 +920,6 @@ class NovaHelper(object):
host_name = getattr(new_volume, "os-vol-host-attr:host") host_name = getattr(new_volume, "os-vol-host-attr:host")
LOG.debug( LOG.debug(
"Volume update succeeded : " "Volume update succeeded : "
"Volume %s is now on host '%s'." % (new_volume.id, host_name)) "Volume %s is now on host '%s'.",
(new_volume.id, host_name))
return True return True

View File

@ -48,7 +48,7 @@ class AuditEndpoint(object):
self._oneshot_handler.execute(audit, context) self._oneshot_handler.execute(audit, context)
def trigger_audit(self, context, audit_uuid): def trigger_audit(self, context, audit_uuid):
LOG.debug("Trigger audit %s" % audit_uuid) LOG.debug("Trigger audit %s", audit_uuid)
self.executor.submit(self.do_trigger_audit, self.executor.submit(self.do_trigger_audit,
context, context,
audit_uuid) audit_uuid)

View File

@ -255,7 +255,7 @@ class CapacityNotificationEndpoint(CinderNotification):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" % "with metadata %(metadata)s",
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))
@ -286,7 +286,7 @@ class VolumeCreateEnd(VolumeNotificationEndpoint):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" % "with metadata %(metadata)s",
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))
@ -311,7 +311,7 @@ class VolumeUpdateEnd(VolumeNotificationEndpoint):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" % "with metadata %(metadata)s",
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))
@ -369,7 +369,7 @@ class VolumeDeleteEnd(VolumeNotificationEndpoint):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" % "with metadata %(metadata)s",
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))

View File

@ -229,7 +229,7 @@ class ServiceUpdated(VersionedNotificationEndpoint):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" % "with metadata %(metadata)s",
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))
@ -275,7 +275,7 @@ class InstanceCreated(VersionedNotificationEndpoint):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" % "with metadata %(metadata)s",
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))
@ -310,7 +310,7 @@ class InstanceUpdated(VersionedNotificationEndpoint):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" % "with metadata %(metadata)s",
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))
@ -337,7 +337,7 @@ class InstanceDeletedEnd(VersionedNotificationEndpoint):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" % "with metadata %(metadata)s",
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))
@ -372,7 +372,7 @@ class LegacyInstanceUpdated(UnversionedNotificationEndpoint):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" % "with metadata %(metadata)s",
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))
@ -399,7 +399,7 @@ class LegacyInstanceCreatedEnd(UnversionedNotificationEndpoint):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" % "with metadata %(metadata)s",
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))
@ -426,7 +426,7 @@ class LegacyInstanceDeletedEnd(UnversionedNotificationEndpoint):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" % "with metadata %(metadata)s",
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))
@ -459,7 +459,7 @@ class LegacyLiveMigratedEnd(UnversionedNotificationEndpoint):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" % "with metadata %(metadata)s",
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))
@ -486,7 +486,7 @@ class LegacyInstanceResizeConfirmEnd(UnversionedNotificationEndpoint):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" % "with metadata %(metadata)s",
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))
@ -513,7 +513,7 @@ class LegacyInstanceRebuildEnd(UnversionedNotificationEndpoint):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" % "with metadata %(metadata)s",
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))

View File

@ -91,16 +91,16 @@ def _reload_scoring_engines(refresh=False):
for name in engines.keys(): for name in engines.keys():
se_impl = default.DefaultScoringLoader().load(name) se_impl = default.DefaultScoringLoader().load(name)
LOG.debug("Found Scoring Engine plugin: %s" % se_impl.get_name()) LOG.debug("Found Scoring Engine plugin: %s", se_impl.get_name())
_scoring_engine_map[se_impl.get_name()] = se_impl _scoring_engine_map[se_impl.get_name()] = se_impl
engine_containers = \ engine_containers = \
default.DefaultScoringContainerLoader().list_available() default.DefaultScoringContainerLoader().list_available()
for container_id, container_cls in engine_containers.items(): for container_id, container_cls in engine_containers.items():
LOG.debug("Found Scoring Engine container plugin: %s" % LOG.debug("Found Scoring Engine container plugin: %s",
container_id) container_id)
for se in container_cls.get_scoring_engine_list(): for se in container_cls.get_scoring_engine_list():
LOG.debug("Found Scoring Engine plugin: %s" % LOG.debug("Found Scoring Engine plugin: %s",
se.get_name()) se.get_name())
_scoring_engine_map[se.get_name()] = se _scoring_engine_map[se.get_name()] = se

View File

@ -277,7 +277,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
resource_id = "%s_%s" % (node.uuid, node.hostname) resource_id = "%s_%s" % (node.uuid, node.hostname)
LOG.error( LOG.error(
"No values returned by %(resource_id)s " "No values returned by %(resource_id)s "
"for %(metric_name)s" % dict( "for %(metric_name)s", dict(
resource_id=resource_id, resource_id=resource_id,
metric_name=self.METRIC_NAMES[ metric_name=self.METRIC_NAMES[
self.config.datasource]['host_cpu_usage'])) self.config.datasource]['host_cpu_usage']))
@ -297,7 +297,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
if instance_cpu_utilization is None: if instance_cpu_utilization is None:
LOG.error( LOG.error(
"No values returned by %(resource_id)s " "No values returned by %(resource_id)s "
"for %(metric_name)s" % dict( "for %(metric_name)s", dict(
resource_id=instance.uuid, resource_id=instance.uuid,
metric_name=self.METRIC_NAMES[ metric_name=self.METRIC_NAMES[
self.config.datasource]['instance_cpu_usage'])) self.config.datasource]['instance_cpu_usage']))

View File

@ -199,10 +199,10 @@ class NoisyNeighbor(base.NoisyNeighborBaseStrategy):
hosts_need_release[node.uuid] = { hosts_need_release[node.uuid] = {
'priority_vm': potential_priority_instance, 'priority_vm': potential_priority_instance,
'noisy_vm': potential_noisy_instance} 'noisy_vm': potential_noisy_instance}
LOG.debug("Priority VM found: %s" % ( LOG.debug("Priority VM found: %s",
potential_priority_instance.uuid)) potential_priority_instance.uuid)
LOG.debug("Noisy VM found: %s" % ( LOG.debug("Noisy VM found: %s",
potential_noisy_instance.uuid)) potential_noisy_instance.uuid)
loop_break_flag = True loop_break_flag = True
break break

View File

@ -194,7 +194,8 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
LOG.warning("%s: no outlet temp data", resource_id) LOG.warning("%s: no outlet temp data", resource_id)
continue continue
LOG.debug("%s: outlet temperature %f" % (resource_id, outlet_temp)) LOG.debug("%(resource)s: outlet temperature %(temp)f",
{'resource': resource_id, 'temp': outlet_temp})
instance_data = {'node': node, 'outlet_temp': outlet_temp} instance_data = {'node': node, 'outlet_temp': outlet_temp}
if outlet_temp >= self.threshold: if outlet_temp >= self.threshold:
# mark the node to release resources # mark the node to release resources

View File

@ -318,7 +318,8 @@ class UniformAirflow(base.BaseStrategy):
LOG.warning("%s: no airflow data", resource_id) LOG.warning("%s: no airflow data", resource_id)
continue continue
LOG.debug("%s: airflow %f" % (resource_id, airflow)) LOG.debug("%(resource)s: airflow %(airflow)f",
{'resource': resource_id, 'airflow': airflow})
nodemap = {'node': node, 'airflow': airflow} nodemap = {'node': node, 'airflow': airflow}
if airflow >= self.threshold_airflow: if airflow >= self.threshold_airflow:
# mark the node to release resources # mark the node to release resources

View File

@ -168,7 +168,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
return instance.state.value return instance.state.value
else: else:
LOG.error('Unexpected instance state type, ' LOG.error('Unexpected instance state type, '
'state=%(state)s, state_type=%(st)s.' % 'state=%(state)s, state_type=%(st)s.',
dict(state=instance.state, dict(state=instance.state,
st=type(instance.state))) st=type(instance.state)))
raise exception.WatcherException raise exception.WatcherException
@ -184,7 +184,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
return node.status.value return node.status.value
else: else:
LOG.error('Unexpected node status type, ' LOG.error('Unexpected node status type, '
'status=%(status)s, status_type=%(st)s.' % 'status=%(status)s, status_type=%(st)s.',
dict(status=node.status, dict(status=node.status,
st=type(node.status))) st=type(node.status)))
raise exception.WatcherException raise exception.WatcherException
@ -233,7 +233,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
# migration mechanism to move non active VMs. # migration mechanism to move non active VMs.
LOG.error( LOG.error(
'Cannot live migrate: instance_uuid=%(instance_uuid)s, ' 'Cannot live migrate: instance_uuid=%(instance_uuid)s, '
'state=%(instance_state)s.' % dict( 'state=%(instance_state)s.', dict(
instance_uuid=instance.uuid, instance_uuid=instance.uuid,
instance_state=instance_state_str)) instance_state=instance_state_str))
return return

View File

@ -203,7 +203,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
if avg_meter is None: if avg_meter is None:
LOG.warning( LOG.warning(
"No values returned by %(resource_id)s " "No values returned by %(resource_id)s "
"for %(metric_name)s" % dict( "for %(metric_name)s", dict(
resource_id=instance.uuid, metric_name=meter)) resource_id=instance.uuid, metric_name=meter))
return return
if meter == 'cpu_util': if meter == 'cpu_util':
@ -375,12 +375,12 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
normalized_load = self.normalize_hosts_load(hosts_load) normalized_load = self.normalize_hosts_load(hosts_load)
for metric in self.metrics: for metric in self.metrics:
metric_sd = self.get_sd(normalized_load, metric) metric_sd = self.get_sd(normalized_load, metric)
LOG.info("Standard deviation for %s is %s." LOG.info("Standard deviation for %s is %s.",
% (metric, metric_sd)) (metric, metric_sd))
if metric_sd > float(self.thresholds[metric]): if metric_sd > float(self.thresholds[metric]):
LOG.info("Standard deviation of %s exceeds" LOG.info("Standard deviation of %s exceeds"
" appropriate threshold %s." " appropriate threshold %s.",
% (metric, metric_sd)) (metric, metric_sd))
return self.simulate_migrations(hosts_load) return self.simulate_migrations(hosts_load)
def add_migration(self, def add_migration(self,

View File

@ -312,7 +312,7 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy):
else: else:
self.instances_migration(targets, action_counter) self.instances_migration(targets, action_counter)
LOG.debug("action total: %s, pools: %s, nodes %s " % ( LOG.debug("action total: %s, pools: %s, nodes %s ", (
action_counter.total_count, action_counter.total_count,
action_counter.per_pool_count, action_counter.per_pool_count,
action_counter.per_node_count)) action_counter.per_node_count))
@ -413,13 +413,13 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy):
pool = getattr(volume, 'os-vol-host-attr:host') pool = getattr(volume, 'os-vol-host-attr:host')
if action_counter.is_pool_max(pool): if action_counter.is_pool_max(pool):
LOG.debug("%s has objects to be migrated, but it has" LOG.debug("%s has objects to be migrated, but it has"
" reached the limit of parallelization." % pool) " reached the limit of parallelization.", pool)
continue continue
src_type = volume.volume_type src_type = volume.volume_type
dst_pool, dst_type = self.get_dst_pool_and_type(pool, src_type) dst_pool, dst_type = self.get_dst_pool_and_type(pool, src_type)
LOG.debug(src_type) LOG.debug(src_type)
LOG.debug("%s %s" % (dst_pool, dst_type)) LOG.debug("%s %s", (dst_pool, dst_type))
if self.is_available(volume): if self.is_available(volume):
if src_type == dst_type: if src_type == dst_type:
@ -448,7 +448,7 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy):
if action_counter.is_node_max(src_node): if action_counter.is_node_max(src_node):
LOG.debug("%s has objects to be migrated, but it has" LOG.debug("%s has objects to be migrated, but it has"
" reached the limit of parallelization." % src_node) " reached the limit of parallelization.", src_node)
continue continue
dst_node = self.get_dst_node(src_node) dst_node = self.get_dst_node(src_node)
@ -643,7 +643,7 @@ class ActionCounter(object):
if not self.is_total_max() and not self.is_pool_max(pool): if not self.is_total_max() and not self.is_pool_max(pool):
self.per_pool_count[pool] += 1 self.per_pool_count[pool] += 1
self.total_count += 1 self.total_count += 1
LOG.debug("total: %s, per_pool: %s" % ( LOG.debug("total: %s, per_pool: %s", (
self.total_count, self.per_pool_count)) self.total_count, self.per_pool_count))
return True return True
return False return False
@ -660,7 +660,7 @@ class ActionCounter(object):
if not self.is_total_max() and not self.is_node_max(node): if not self.is_total_max() and not self.is_node_max(node):
self.per_node_count[node] += 1 self.per_node_count[node] += 1
self.total_count += 1 self.total_count += 1
LOG.debug("total: %s, per_node: %s" % ( LOG.debug("total: %s, per_node: %s", (
self.total_count, self.per_node_count)) self.total_count, self.per_node_count))
return True return True
return False return False
@ -679,9 +679,9 @@ class ActionCounter(object):
""" """
if pool not in self.per_pool_count: if pool not in self.per_pool_count:
self.per_pool_count[pool] = 0 self.per_pool_count[pool] = 0
LOG.debug("the number of parallel per pool %s is %s " % LOG.debug("the number of parallel per pool %s is %s ",
(pool, self.per_pool_count[pool])) (pool, self.per_pool_count[pool]))
LOG.debug("per pool limit is %s" % self.per_pool_limit) LOG.debug("per pool limit is %s", self.per_pool_limit)
return self.per_pool_count[pool] >= self.per_pool_limit return self.per_pool_count[pool] >= self.per_pool_limit
def is_node_max(self, node): def is_node_max(self, node):
@ -724,7 +724,7 @@ class BaseFilter(object):
for k, v in six.iteritems(targets): for k, v in six.iteritems(targets):
if not self.is_allowed(k): if not self.is_allowed(k):
continue continue
LOG.debug("filter:%s with the key: %s" % (cond, k)) LOG.debug("filter:%s with the key: %s", (cond, k))
targets[k] = self.exec_filter(v, cond) targets[k] = self.exec_filter(v, cond)
LOG.debug(targets) LOG.debug(targets)
@ -778,7 +778,7 @@ class ProjectSortFilter(SortMovingToFrontFilter):
""" """
project_id = self.get_project_id(item) project_id = self.get_project_id(item)
LOG.debug("project_id: %s, sort_key: %s" % (project_id, sort_key)) LOG.debug("project_id: %s, sort_key: %s", (project_id, sort_key))
return project_id == sort_key return project_id == sort_key
def get_project_id(self, item): def get_project_id(self, item):
@ -812,7 +812,7 @@ class ComputeHostSortFilter(SortMovingToFrontFilter):
""" """
host = self.get_host(item) host = self.get_host(item)
LOG.debug("host: %s, sort_key: %s" % (host, sort_key)) LOG.debug("host: %s, sort_key: %s", (host, sort_key))
return host == sort_key return host == sort_key
def get_host(self, item): def get_host(self, item):
@ -840,7 +840,7 @@ class StorageHostSortFilter(SortMovingToFrontFilter):
""" """
host = self.get_host(item) host = self.get_host(item)
LOG.debug("host: %s, sort_key: %s" % (host, sort_key)) LOG.debug("host: %s, sort_key: %s", (host, sort_key))
return host == sort_key return host == sort_key
def get_host(self, item): def get_host(self, item):
@ -867,7 +867,7 @@ class ComputeSpecSortFilter(BaseFilter):
result = items result = items
if sort_key not in self.accept_keys: if sort_key not in self.accept_keys:
LOG.warning("Invalid key is specified: %s" % sort_key) LOG.warning("Invalid key is specified: %s", sort_key)
else: else:
result = self.get_sorted_items(items, sort_key) result = self.get_sorted_items(items, sort_key)
@ -912,11 +912,11 @@ class ComputeSpecSortFilter(BaseFilter):
:returns: memory size of item :returns: memory size of item
""" """
LOG.debug("item: %s, flavors: %s" % (item, flavors)) LOG.debug("item: %s, flavors: %s", (item, flavors))
for flavor in flavors: for flavor in flavors:
LOG.debug("item.flavor: %s, flavor: %s" % (item.flavor, flavor)) LOG.debug("item.flavor: %s, flavor: %s", (item.flavor, flavor))
if item.flavor.get('id') == flavor.id: if item.flavor.get('id') == flavor.id:
LOG.debug("flavor.ram: %s" % flavor.ram) LOG.debug("flavor.ram: %s", flavor.ram)
return flavor.ram return flavor.ram
def get_vcpu_num(self, item, flavors): def get_vcpu_num(self, item, flavors):
@ -927,11 +927,11 @@ class ComputeSpecSortFilter(BaseFilter):
:returns: vcpu number of item :returns: vcpu number of item
""" """
LOG.debug("item: %s, flavors: %s" % (item, flavors)) LOG.debug("item: %s, flavors: %s", (item, flavors))
for flavor in flavors: for flavor in flavors:
LOG.debug("item.flavor: %s, flavor: %s" % (item.flavor, flavor)) LOG.debug("item.flavor: %s, flavor: %s", (item.flavor, flavor))
if item.flavor.get('id') == flavor.id: if item.flavor.get('id') == flavor.id:
LOG.debug("flavor.vcpus: %s" % flavor.vcpus) LOG.debug("flavor.vcpus: %s", flavor.vcpus)
return flavor.vcpus return flavor.vcpus
def get_disk_size(self, item, flavors): def get_disk_size(self, item, flavors):
@ -942,11 +942,11 @@ class ComputeSpecSortFilter(BaseFilter):
:returns: disk size of item :returns: disk size of item
""" """
LOG.debug("item: %s, flavors: %s" % (item, flavors)) LOG.debug("item: %s, flavors: %s", (item, flavors))
for flavor in flavors: for flavor in flavors:
LOG.debug("item.flavor: %s, flavor: %s" % (item.flavor, flavor)) LOG.debug("item.flavor: %s, flavor: %s", (item.flavor, flavor))
if item.flavor.get('id') == flavor.id: if item.flavor.get('id') == flavor.id:
LOG.debug("flavor.disk: %s" % flavor.disk) LOG.debug("flavor.disk: %s", flavor.disk)
return flavor.disk return flavor.disk
@ -960,7 +960,7 @@ class StorageSpecSortFilter(BaseFilter):
result = items result = items
if sort_key not in self.accept_keys: if sort_key not in self.accept_keys:
LOG.warning("Invalid key is specified: %s" % sort_key) LOG.warning("Invalid key is specified: %s", sort_key)
return result return result
if sort_key == 'created_at': if sort_key == 'created_at':