Merge "Add a hacking rule for string interpolation at logging"

Zuul 2018-02-28 12:24:37 +00:00, committed by Gerrit Code Review
commit b33b7a0474
16 changed files with 160 additions and 140 deletions
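
The rule at issue is hacking's H904 check, which asks that string interpolation be delegated to the logging call instead of being performed with `%` at the call site. A minimal sketch of the two forms, with hypothetical `host`/`port` values:

    import logging

    logging.basicConfig(level=logging.INFO)
    LOG = logging.getLogger(__name__)
    host, port = '127.0.0.1', 8080

    LOG.info('serving on %s:%s' % (host, port))  # flagged: the string is built before the call
    LOG.info('serving on %s:%s', host, port)     # preferred: logging interpolates on demand

Both lines produce the same output here; the difference is when (and whether) the formatting happens, as the diffs below illustrate.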


@@ -55,7 +55,7 @@ filename = *.py,app.wsgi
 show-source=True
 ignore= H105,E123,E226,N320,H202
 builtins= _
-enable-extensions = H106,H203
+enable-extensions = H106,H203,H904
 exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/,releasenotes

 [testenv:wheel]
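
With H904 added to `enable-extensions`, flake8 reports eager interpolation at logging calls across the tree; the remaining hunks in this commit are that cleanup. The payoff is that arguments are only formatted when the record is actually emitted. A small self-contained demonstration (the `Expensive` class is invented for illustration):

    import logging

    logging.basicConfig(level=logging.INFO)   # DEBUG is disabled
    LOG = logging.getLogger(__name__)

    class Expensive(object):
        def __str__(self):
            print('__str__ ran')              # visible side effect
            return 'value'

    LOG.debug('eager: %s' % Expensive())      # __str__ runs even though the record is dropped
    LOG.debug('lazy: %s', Expensive())        # dropped before formatting; __str__ never runs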


@@ -113,8 +113,10 @@ class Migrate(base.BaseAction):
                 dest_hostname=destination)
         except nova_helper.nvexceptions.ClientException as e:
-            LOG.debug("Nova client exception occurred while live "
-                      "migrating instance %s.Exception: %s" %
-                      (self.instance_uuid, e))
+            LOG.debug("Nova client exception occurred while live "
+                      "migrating instance "
+                      "%(instance)s. Exception: %(exception)s",
+                      {'instance': self.instance_uuid, 'exception': e})
         except Exception as e:
             LOG.exception(e)
             LOG.critical("Unexpected error occurred. Migration failed for "

@@ -40,10 +40,10 @@ def main():
     if host == '127.0.0.1':
         LOG.info('serving on 127.0.0.1:%(port)s, '
-                 'view at %(protocol)s://127.0.0.1:%(port)s' %
+                 'view at %(protocol)s://127.0.0.1:%(port)s',
                  dict(protocol=protocol, port=port))
     else:
-        LOG.info('serving on %(protocol)s://%(host)s:%(port)s' %
+        LOG.info('serving on %(protocol)s://%(host)s:%(port)s',
                  dict(protocol=protocol, host=host, port=port))

     api_schedule = scheduling.APISchedulingService()

@@ -139,13 +139,13 @@ class CinderHelper(object):
             volume = self.get_volume(volume.id)
             time.sleep(retry_interval)
             retry -= 1
-            LOG.debug("retry count: %s" % retry)
-        LOG.debug("Waiting to complete deletion of volume %s" % volume.id)
+            LOG.debug("retry count: %s", retry)
+        LOG.debug("Waiting to complete deletion of volume %s", volume.id)
         if self._can_get_volume(volume.id):
-            LOG.error("Volume deletion error: %s" % volume.id)
+            LOG.error("Volume deletion error: %s", volume.id)
             return False
-        LOG.debug("Volume %s was deleted successfully." % volume.id)
+        LOG.debug("Volume %s was deleted successfully.", volume.id)
         return True

     def check_migrated(self, volume, retry_interval=10):
@@ -179,8 +179,7 @@ class CinderHelper(object):
             LOG.error(error_msg)
             return False
         LOG.debug(
-            "Volume migration succeeded : "
-            "volume %s is now on host '%s'." % (
-                volume.id, host_name))
+            "Volume migration succeeded : volume %s is now on host '%s'.",
+            volume.id, host_name)
         return True
@@ -194,8 +193,8 @@ class CinderHelper(object):
                 message=(_("Volume type must be same for migrating")))

         source_node = getattr(volume, 'os-vol-host-attr:host')
-        LOG.debug("Volume %s found on host '%s'."
-                  % (volume.id, source_node))
+        LOG.debug("Volume %s found on host '%s'.",
+                  volume.id, source_node)

         self.cinder.volumes.migrate_volume(
             volume, dest_node, False, True)
@@ -211,8 +210,8 @@ class CinderHelper(object):
         source_node = getattr(volume, 'os-vol-host-attr:host')
         LOG.debug(
-            "Volume %s found on host '%s'." % (
-                volume.id, source_node))
+            "Volume %s found on host '%s'.",
+            volume.id, source_node)

         self.cinder.volumes.retype(
             volume, dest_type, "on-demand")
@@ -234,14 +233,14 @@ class CinderHelper(object):
             LOG.debug('Waiting volume creation of {0}'.format(new_volume))
             time.sleep(retry_interval)
             retry -= 1
-            LOG.debug("retry count: %s" % retry)
+            LOG.debug("retry count: %s", retry)

         if getattr(new_volume, 'status') != 'available':
             error_msg = (_("Failed to create volume '%(volume)s. ") %
                          {'volume': new_volume.id})
             raise Exception(error_msg)

-        LOG.debug("Volume %s was created successfully." % new_volume)
+        LOG.debug("Volume %s was created successfully.", new_volume)
         return new_volume

     def delete_volume(self, volume):

@@ -106,7 +106,7 @@ class NovaHelper(object):
             return True
         else:
             LOG.debug("confirm resize failed for the "
-                      "instance %s" % instance.id)
+                      "instance %s", instance.id)
             return False

     def wait_for_volume_status(self, volume, status, timeout=60,
@@ -154,19 +154,20 @@ class NovaHelper(object):
         """
         new_image_name = ""
         LOG.debug(
-            "Trying a non-live migrate of instance '%s' " % instance_id)
+            "Trying a non-live migrate of instance '%s' ", instance_id)

         # Looking for the instance to migrate
         instance = self.find_instance(instance_id)
         if not instance:
-            LOG.debug("Instance %s not found !" % instance_id)
+            LOG.debug("Instance %s not found !", instance_id)
             return False
         else:
             # NOTE: If destination node is None call Nova API to migrate
             # instance
             host_name = getattr(instance, "OS-EXT-SRV-ATTR:host")
             LOG.debug(
-                "Instance %s found on host '%s'." % (instance_id, host_name))
+                "Instance %(instance)s found on host '%(host)s'.",
+                {'instance': instance_id, 'host': host_name})

             if dest_hostname is None:
                 previous_status = getattr(instance, 'status')
@@ -186,12 +187,12 @@ class NovaHelper(object):
                     return False
                 LOG.debug(
                     "cold migration succeeded : "
-                    "instance %s is now on host '%s'." % (
-                        instance_id, new_hostname))
+                    "instance %s is now on host '%s'.",
+                    instance_id, new_hostname)
                 return True
             else:
                 LOG.debug(
-                    "cold migration for instance %s failed" % instance_id)
+                    "cold migration for instance %s failed", instance_id)
                 return False

         if not keep_original_image_name:
@@ -220,7 +221,7 @@ class NovaHelper(object):
             for network_name, network_conf_obj in addresses.items():
                 LOG.debug(
-                    "Extracting network configuration for network '%s'" %
+                    "Extracting network configuration for network '%s'",
                     network_name)

                 network_names_list.append(network_name)
@@ -241,7 +242,7 @@ class NovaHelper(object):
             stopped_ok = self.stop_instance(instance_id)

             if not stopped_ok:
-                LOG.debug("Could not stop instance: %s" % instance_id)
+                LOG.debug("Could not stop instance: %s", instance_id)
                 return False

             # Building the temporary image which will be used
@@ -251,7 +252,7 @@ class NovaHelper(object):
             if not image_uuid:
                 LOG.debug(
-                    "Could not build temporary image of instance: %s" %
+                    "Could not build temporary image of instance: %s",
                     instance_id)
                 return False
@@ -299,8 +300,10 @@ class NovaHelper(object):
                     blocks.append(
                         block_device_mapping_v2_item)

-                    LOG.debug("Detaching volume %s from instance: %s" % (
-                        volume_id, instance_id))
+                    LOG.debug(
+                        "Detaching volume %(volume)s from "
+                        "instance: %(instance)s",
+                        {'volume': volume_id, 'instance': instance_id})
                     # volume.detach()
                     self.nova.volumes.delete_server_volume(instance_id,
                                                            volume_id)
@@ -308,11 +311,12 @@ class NovaHelper(object):
                     if not self.wait_for_volume_status(volume, "available", 5,
                                                        10):
                         LOG.debug(
-                            "Could not detach volume %s from instance: %s" % (
-                                volume_id, instance_id))
+                            "Could not detach volume %(volume)s "
+                            "from instance: %(instance)s",
+                            {'volume': volume_id, 'instance': instance_id})
                         return False
             except ciexceptions.NotFound:
-                LOG.debug("Volume '%s' not found " % image_id)
+                LOG.debug("Volume '%s' not found ", image_id)
                 return False

             # We create the new instance from
@@ -331,18 +335,21 @@ class NovaHelper(object):
             if not new_instance:
                 LOG.debug(
                     "Could not create new instance "
-                    "for non-live migration of instance %s" % instance_id)
+                    "for non-live migration of instance %s", instance_id)
                 return False

             try:
-                LOG.debug("Detaching floating ip '%s' from instance %s" % (
-                    floating_ip, instance_id))
+                LOG.debug(
+                    "Detaching floating ip '%(floating_ip)s' "
+                    "from instance %(instance)s",
+                    {'floating_ip': floating_ip, 'instance': instance_id})
                 # We detach the floating ip from the current instance
                 instance.remove_floating_ip(floating_ip)

                 LOG.debug(
-                    "Attaching floating ip '%s' to the new instance %s" % (
-                        floating_ip, new_instance.id))
+                    "Attaching floating ip '%(ip)s' to the new "
+                    "instance %(id)s",
+                    {'ip': floating_ip, 'id': new_instance.id})

                 # We attach the same floating ip to the new instance
                 new_instance.add_floating_ip(floating_ip)
@@ -354,12 +361,12 @@ class NovaHelper(object):
             # Deleting the old instance (because no more useful)
             delete_ok = self.delete_instance(instance_id)
             if not delete_ok:
-                LOG.debug("Could not delete instance: %s" % instance_id)
+                LOG.debug("Could not delete instance: %s", instance_id)
                 return False

             LOG.debug(
                 "Instance %s has been successfully migrated "
-                "to new host '%s' and its new id is %s." % (
-                    instance_id, new_host_name, new_instance.id))
+                "to new host '%s' and its new id is %s.",
+                instance_id, new_host_name, new_instance.id)

             return True
@@ -376,8 +383,10 @@ class NovaHelper(object):
         :param instance_id: the unique id of the instance to resize.
         :param flavor: the name or ID of the flavor to resize to.
         """
-        LOG.debug("Trying a resize of instance %s to flavor '%s'" % (
-            instance_id, flavor))
+        LOG.debug(
+            "Trying a resize of instance %(instance)s to "
+            "flavor '%(flavor)s'",
+            {'instance': instance_id, 'flavor': flavor})

         # Looking for the instance to resize
         instance = self.find_instance(instance_id)
@@ -394,17 +403,17 @@ class NovaHelper(object):
                 "instance %s. Exception: %s", instance_id, e)

         if not flavor_id:
-            LOG.debug("Flavor not found: %s" % flavor)
+            LOG.debug("Flavor not found: %s", flavor)
             return False

         if not instance:
-            LOG.debug("Instance not found: %s" % instance_id)
+            LOG.debug("Instance not found: %s", instance_id)
             return False

         instance_status = getattr(instance, 'OS-EXT-STS:vm_state')
         LOG.debug(
-            "Instance %s is in '%s' status." % (instance_id,
-                                                instance_status))
+            "Instance %(id)s is in '%(status)s' status.",
+            {'id': instance_id, 'status': instance_status})

         instance.resize(flavor=flavor_id)
         while getattr(instance,
@@ -442,17 +451,20 @@ class NovaHelper(object):
             destination_node is None, nova scheduler choose
             the destination host
         """
-        LOG.debug("Trying to live migrate instance %s " % (instance_id))
+        LOG.debug(
+            "Trying to live migrate instance %(instance)s ",
+            {'instance': instance_id})

         # Looking for the instance to migrate
         instance = self.find_instance(instance_id)
         if not instance:
-            LOG.debug("Instance not found: %s" % instance_id)
+            LOG.debug("Instance not found: %s", instance_id)
             return False
         else:
             host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
             LOG.debug(
-                "Instance %s found on host '%s'." % (instance_id, host_name))
+                "Instance %(instance)s found on host '%(host)s'.",
+                {'instance': instance_id, 'host': host_name})

         # From nova api version 2.25(Mitaka release), the default value of
         # block_migration is None which is mapped to 'auto'.
@@ -474,7 +486,7 @@ class NovaHelper(object):
             if host_name != new_hostname and instance.status == 'ACTIVE':
                 LOG.debug(
                     "Live migration succeeded : "
-                    "instance %s is now on host '%s'." % (
-                        instance_id, new_hostname))
+                    "instance %s is now on host '%s'.",
+                    instance_id, new_hostname)
                 return True
             else:
@@ -485,7 +497,7 @@ class NovaHelper(object):
                     and retry:
                 instance = self.nova.servers.get(instance.id)
                 if not getattr(instance, 'OS-EXT-STS:task_state'):
-                    LOG.debug("Instance task state: %s is null" % instance_id)
+                    LOG.debug("Instance task state: %s is null", instance_id)
                     break
                 LOG.debug(
                     'Waiting the migration of {0} to {1}'.format(
@@ -501,13 +513,13 @@ class NovaHelper(object):
             LOG.debug(
                 "Live migration succeeded : "
-                "instance %s is now on host '%s'." % (
-                    instance_id, host_name))
+                "instance %(instance)s is now on host '%(host)s'.",
+                {'instance': instance_id, 'host': host_name})

             return True

     def abort_live_migrate(self, instance_id, source, destination, retry=240):
-        LOG.debug("Aborting live migration of instance %s" % instance_id)
+        LOG.debug("Aborting live migration of instance %s", instance_id)
         migration = self.get_running_migration(instance_id)
         if migration:
             migration_id = getattr(migration[0], "id")
@@ -520,7 +532,7 @@ class NovaHelper(object):
                 LOG.exception(e)
         else:
             LOG.debug(
-                "No running migrations found for instance %s" % instance_id)
+                "No running migrations found for instance %s", instance_id)

         while retry:
             instance = self.nova.servers.get(instance_id)
@@ -585,7 +597,7 @@ class NovaHelper(object):
         host = self.nova.hosts.get(hostname)

         if not host:
-            LOG.debug("host not found: %s" % hostname)
+            LOG.debug("host not found: %s", hostname)
             return False
         else:
             host[0].update(
@@ -607,18 +619,19 @@ class NovaHelper(object):
             key-value pairs to associate to the image as metadata.
         """
         LOG.debug(
-            "Trying to create an image from instance %s ..." % instance_id)
+            "Trying to create an image from instance %s ...", instance_id)

         # Looking for the instance
         instance = self.find_instance(instance_id)

         if not instance:
-            LOG.debug("Instance not found: %s" % instance_id)
+            LOG.debug("Instance not found: %s", instance_id)
             return None
         else:
             host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
             LOG.debug(
-                "Instance %s found on host '%s'." % (instance_id, host_name))
+                "Instance %(instance)s found on host '%(host)s'.",
+                {'instance': instance_id, 'host': host_name})

             # We need to wait for an appropriate status
             # of the instance before we can build an image from it
@@ -645,14 +658,15 @@ class NovaHelper(object):
                 if not image:
                     break
                 status = image.status
-                LOG.debug("Current image status: %s" % status)
+                LOG.debug("Current image status: %s", status)

             if not image:
-                LOG.debug("Image not found: %s" % image_uuid)
+                LOG.debug("Image not found: %s", image_uuid)
             else:
                 LOG.debug(
-                    "Image %s successfully created for instance %s" % (
-                        image_uuid, instance_id))
+                    "Image %(image)s successfully created for "
+                    "instance %(instance)s",
+                    {'image': image_uuid, 'instance': instance_id})
                 return image_uuid
         return None
@@ -661,16 +675,16 @@ class NovaHelper(object):
         :param instance_id: the unique id of the instance to delete.
         """
-        LOG.debug("Trying to remove instance %s ..." % instance_id)
+        LOG.debug("Trying to remove instance %s ...", instance_id)
         instance = self.find_instance(instance_id)

         if not instance:
-            LOG.debug("Instance not found: %s" % instance_id)
+            LOG.debug("Instance not found: %s", instance_id)
             return False
         else:
             self.nova.servers.delete(instance_id)
-            LOG.debug("Instance %s removed." % instance_id)
+            LOG.debug("Instance %s removed.", instance_id)
             return True
def stop_instance(self, instance_id):
@ -678,21 +692,21 @@ class NovaHelper(object):
:param instance_id: the unique id of the instance to stop.
"""
LOG.debug("Trying to stop instance %s ..." % instance_id)
LOG.debug("Trying to stop instance %s ...", instance_id)
instance = self.find_instance(instance_id)
if not instance:
LOG.debug("Instance not found: %s" % instance_id)
LOG.debug("Instance not found: %s", instance_id)
return False
elif getattr(instance, 'OS-EXT-STS:vm_state') == "stopped":
LOG.debug("Instance has been stopped: %s" % instance_id)
LOG.debug("Instance has been stopped: %s", instance_id)
return True
else:
self.nova.servers.stop(instance_id)
if self.wait_for_instance_state(instance, "stopped", 8, 10):
LOG.debug("Instance %s stopped." % instance_id)
LOG.debug("Instance %s stopped.", instance_id)
return True
else:
return False
@@ -733,11 +747,11 @@ class NovaHelper(object):
             return False

         while instance.status not in status_list and retry:
-            LOG.debug("Current instance status: %s" % instance.status)
+            LOG.debug("Current instance status: %s", instance.status)
             time.sleep(sleep)
             instance = self.nova.servers.get(instance.id)
             retry -= 1
-        LOG.debug("Current instance status: %s" % instance.status)
+        LOG.debug("Current instance status: %s", instance.status)
         return instance.status in status_list

     def create_instance(self, node_id, inst_name="test", image_id=None,
@@ -753,26 +767,26 @@ class NovaHelper(object):
         It returns the unique id of the created instance.
         """
         LOG.debug(
-            "Trying to create new instance '%s' "
-            "from image '%s' with flavor '%s' ..." % (
-                inst_name, image_id, flavor_name))
+            "Trying to create new instance '%(inst)s' "
+            "from image '%(image)s' with flavor '%(flavor)s' ...",
+            {'inst': inst_name, 'image': image_id, 'flavor': flavor_name})

         try:
             self.nova.keypairs.findall(name=keypair_name)
         except nvexceptions.NotFound:
-            LOG.debug("Key pair '%s' not found " % keypair_name)
+            LOG.debug("Key pair '%s' not found ", keypair_name)
             return

         try:
             image = self.glance.images.get(image_id)
         except glexceptions.NotFound:
-            LOG.debug("Image '%s' not found " % image_id)
+            LOG.debug("Image '%s' not found ", image_id)
             return

         try:
             flavor = self.nova.flavors.find(name=flavor_name)
         except nvexceptions.NotFound:
-            LOG.debug("Flavor '%s' not found " % flavor_name)
+            LOG.debug("Flavor '%s' not found ", flavor_name)
             return

         # Make sure all security groups exist
@@ -780,7 +794,7 @@ class NovaHelper(object):
             group_id = self.get_security_group_id_from_name(sec_group_name)

             if not group_id:
-                LOG.debug("Security group '%s' not found " % sec_group_name)
+                LOG.debug("Security group '%s' not found ", sec_group_name)
                 return

         net_list = list()
@@ -789,7 +803,7 @@ class NovaHelper(object):
             nic_id = self.get_network_id_from_name(network_name)

             if not nic_id:
-                LOG.debug("Network '%s' not found " % network_name)
+                LOG.debug("Network '%s' not found ", network_name)
                 return
             net_obj = {"net-id": nic_id}
             net_list.append(net_obj)
@@ -815,14 +829,16 @@ class NovaHelper(object):
         if create_new_floating_ip and instance.status == 'ACTIVE':
             LOG.debug(
                 "Creating a new floating IP"
-                " for instance '%s'" % instance.id)
+                " for instance '%s'", instance.id)
             # Creating floating IP for the new instance
             floating_ip = self.nova.floating_ips.create()

             instance.add_floating_ip(floating_ip)

-            LOG.debug("Instance %s associated to Floating IP '%s'" % (
-                instance.id, floating_ip.ip))
+            LOG.debug(
+                "Instance %(instance)s associated to "
+                "Floating IP '%(ip)s'",
+                {'instance': instance.id, 'ip': floating_ip.ip})

         return instance
@@ -896,7 +912,7 @@ class NovaHelper(object):
             LOG.debug('Waiting volume update to {0}'.format(new_volume))
             time.sleep(retry_interval)
             retry -= 1
-            LOG.debug("retry count: %s" % retry)
+            LOG.debug("retry count: %s", retry)

         if getattr(new_volume, 'status') != "in-use":
             LOG.error("Volume update retry timeout or error")
             return False
@@ -904,5 +920,6 @@ class NovaHelper(object):
         host_name = getattr(new_volume, "os-vol-host-attr:host")
         LOG.debug(
             "Volume update succeeded : "
-            "Volume %s is now on host '%s'." % (new_volume.id, host_name))
+            "Volume %s is now on host '%s'.",
+            new_volume.id, host_name)
         return True
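
The check appears to target `%` interpolation only, so `str.format()` call sites (for example the unchanged 'Waiting volume update to {0}'.format(new_volume) above) slip through, yet they interpolate just as eagerly. A lazy equivalent, using a hypothetical stand-in value:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    LOG = logging.getLogger(__name__)
    new_volume = 'vol-42'  # stand-in for the volume object

    LOG.debug('Waiting volume update to {0}'.format(new_volume))  # still formats eagerly
    LOG.debug('Waiting volume update to %s', new_volume)          # deferred until emission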


@@ -48,7 +48,7 @@ class AuditEndpoint(object):
             self._oneshot_handler.execute(audit, context)

     def trigger_audit(self, context, audit_uuid):
-        LOG.debug("Trigger audit %s" % audit_uuid)
+        LOG.debug("Trigger audit %s", audit_uuid)
         self.executor.submit(self.do_trigger_audit,
                              context,
                              audit_uuid)


@@ -255,7 +255,7 @@ class CapacityNotificationEndpoint(CinderNotification):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -286,7 +286,7 @@ class VolumeCreateEnd(VolumeNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -311,7 +311,7 @@ class VolumeUpdateEnd(VolumeNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -369,7 +369,7 @@ class VolumeDeleteEnd(VolumeNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))


@@ -229,7 +229,7 @@ class ServiceUpdated(VersionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -275,7 +275,7 @@ class InstanceCreated(VersionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -310,7 +310,7 @@ class InstanceUpdated(VersionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -337,7 +337,7 @@ class InstanceDeletedEnd(VersionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -372,7 +372,7 @@ class LegacyInstanceUpdated(UnversionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -399,7 +399,7 @@ class LegacyInstanceCreatedEnd(UnversionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -426,7 +426,7 @@ class LegacyInstanceDeletedEnd(UnversionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -459,7 +459,7 @@ class LegacyLiveMigratedEnd(UnversionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -486,7 +486,7 @@ class LegacyInstanceResizeConfirmEnd(UnversionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))
@@ -513,7 +513,7 @@ class LegacyInstanceRebuildEnd(UnversionedNotificationEndpoint):
         ctxt.request_id = metadata['message_id']
         ctxt.project_domain = event_type
         LOG.info("Event '%(event)s' received from %(publisher)s "
-                 "with metadata %(metadata)s" %
+                 "with metadata %(metadata)s",
                  dict(event=event_type,
                       publisher=publisher_id,
                       metadata=metadata))


@@ -91,16 +91,16 @@ def _reload_scoring_engines(refresh=False):
     for name in engines.keys():
         se_impl = default.DefaultScoringLoader().load(name)
-        LOG.debug("Found Scoring Engine plugin: %s" % se_impl.get_name())
+        LOG.debug("Found Scoring Engine plugin: %s", se_impl.get_name())
         _scoring_engine_map[se_impl.get_name()] = se_impl

     engine_containers = \
         default.DefaultScoringContainerLoader().list_available()

     for container_id, container_cls in engine_containers.items():
-        LOG.debug("Found Scoring Engine container plugin: %s" %
+        LOG.debug("Found Scoring Engine container plugin: %s",
                   container_id)

         for se in container_cls.get_scoring_engine_list():
-            LOG.debug("Found Scoring Engine plugin: %s" %
+            LOG.debug("Found Scoring Engine plugin: %s",
                       se.get_name())
             _scoring_engine_map[se.get_name()] = se


@@ -277,7 +277,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
             resource_id = "%s_%s" % (node.uuid, node.hostname)
             LOG.error(
                 "No values returned by %(resource_id)s "
-                "for %(metric_name)s" % dict(
+                "for %(metric_name)s", dict(
                     resource_id=resource_id,
                     metric_name=self.METRIC_NAMES[
                         self.config.datasource]['host_cpu_usage']))
@@ -297,7 +297,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
         if instance_cpu_utilization is None:
             LOG.error(
                 "No values returned by %(resource_id)s "
-                "for %(metric_name)s" % dict(
+                "for %(metric_name)s", dict(
                     resource_id=instance.uuid,
                     metric_name=self.METRIC_NAMES[
                         self.config.datasource]['instance_cpu_usage']))
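
Note that delayed interpolation defers only the final string formatting: the `dict(...)` argument, including the nested `METRIC_NAMES` lookup, is still evaluated before the call. When building the arguments is itself costly, a level guard skips it entirely. A self-contained sketch (the `expensive_lookup` function is invented for illustration):

    import logging

    logging.basicConfig(level=logging.INFO)   # DEBUG disabled
    LOG = logging.getLogger(__name__)

    def expensive_lookup():
        print('lookup ran')                   # visible side effect
        return 'host_cpu_usage'

    # The dict and the lookup run even though the record is discarded:
    LOG.debug('No values returned for %(metric_name)s',
              dict(metric_name=expensive_lookup()))

    # The guard skips argument construction when DEBUG is off:
    if LOG.isEnabledFor(logging.DEBUG):
        LOG.debug('No values returned for %(metric_name)s',
                  dict(metric_name=expensive_lookup()))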


@@ -199,10 +199,10 @@ class NoisyNeighbor(base.NoisyNeighborBaseStrategy):
                 hosts_need_release[node.uuid] = {
                     'priority_vm': potential_priority_instance,
                     'noisy_vm': potential_noisy_instance}
-                LOG.debug("Priority VM found: %s" % (
-                    potential_priority_instance.uuid))
-                LOG.debug("Noisy VM found: %s" % (
-                    potential_noisy_instance.uuid))
+                LOG.debug("Priority VM found: %s",
+                          potential_priority_instance.uuid)
+                LOG.debug("Noisy VM found: %s",
+                          potential_noisy_instance.uuid)
                 loop_break_flag = True
                 break


@@ -194,7 +194,8 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
                 LOG.warning("%s: no outlet temp data", resource_id)
                 continue

-            LOG.debug("%s: outlet temperature %f" % (resource_id, outlet_temp))
+            LOG.debug("%(resource)s: outlet temperature %(temp)f",
+                      {'resource': resource_id, 'temp': outlet_temp})
             instance_data = {'node': node, 'outlet_temp': outlet_temp}

             if outlet_temp >= self.threshold:
                 # mark the node to release resources


@@ -318,7 +318,8 @@ class UniformAirflow(base.BaseStrategy):
                 LOG.warning("%s: no airflow data", resource_id)
                 continue

-            LOG.debug("%s: airflow %f" % (resource_id, airflow))
+            LOG.debug("%(resource)s: airflow %(airflow)f",
+                      {'resource': resource_id, 'airflow': airflow})
             nodemap = {'node': node, 'airflow': airflow}

             if airflow >= self.threshold_airflow:
                 # mark the node to release resources


@@ -168,7 +168,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
             return instance.state.value
         else:
             LOG.error('Unexpected instance state type, '
-                      'state=%(state)s, state_type=%(st)s.' %
+                      'state=%(state)s, state_type=%(st)s.',
                       dict(state=instance.state,
                            st=type(instance.state)))
             raise exception.WatcherException
@@ -184,7 +184,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
             return node.status.value
         else:
             LOG.error('Unexpected node status type, '
-                      'status=%(status)s, status_type=%(st)s.' %
+                      'status=%(status)s, status_type=%(st)s.',
                       dict(status=node.status,
                            st=type(node.status)))
             raise exception.WatcherException
@@ -233,7 +233,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
             # migration mechanism to move non active VMs.
             LOG.error(
                 'Cannot live migrate: instance_uuid=%(instance_uuid)s, '
-                'state=%(instance_state)s.' % dict(
+                'state=%(instance_state)s.', dict(
                     instance_uuid=instance.uuid,
                     instance_state=instance_state_str))
             return


@@ -203,7 +203,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
             if avg_meter is None:
                 LOG.warning(
                     "No values returned by %(resource_id)s "
-                    "for %(metric_name)s" % dict(
+                    "for %(metric_name)s", dict(
                         resource_id=instance.uuid, metric_name=meter))
                 return
             if meter == 'cpu_util':
@@ -375,12 +375,12 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
         normalized_load = self.normalize_hosts_load(hosts_load)
         for metric in self.metrics:
             metric_sd = self.get_sd(normalized_load, metric)
-            LOG.info("Standard deviation for %s is %s."
-                     % (metric, metric_sd))
+            LOG.info("Standard deviation for %s is %s.",
+                     metric, metric_sd)
             if metric_sd > float(self.thresholds[metric]):
-                LOG.info("Standard deviation of %s exceeds"
-                         " appropriate threshold %s."
-                         % (metric, metric_sd))
+                LOG.info("Standard deviation of %s exceeds"
+                         " appropriate threshold %s.",
+                         metric, metric_sd)
                 return self.simulate_migrations(hosts_load)

     def add_migration(self,


@@ -312,7 +312,7 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy):
         else:
             self.instances_migration(targets, action_counter)

-        LOG.debug("action total: %s, pools: %s, nodes %s " % (
-            action_counter.total_count,
-            action_counter.per_pool_count,
-            action_counter.per_node_count))
+        LOG.debug("action total: %s, pools: %s, nodes %s ",
+                  action_counter.total_count,
+                  action_counter.per_pool_count,
+                  action_counter.per_node_count)
@@ -413,13 +413,13 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy):
             pool = getattr(volume, 'os-vol-host-attr:host')
             if action_counter.is_pool_max(pool):
                 LOG.debug("%s has objects to be migrated, but it has"
-                          " reached the limit of parallelization." % pool)
+                          " reached the limit of parallelization.", pool)
                 continue

             src_type = volume.volume_type
             dst_pool, dst_type = self.get_dst_pool_and_type(pool, src_type)
             LOG.debug(src_type)
-            LOG.debug("%s %s" % (dst_pool, dst_type))
+            LOG.debug("%s %s", dst_pool, dst_type)

             if self.is_available(volume):
                 if src_type == dst_type:
@@ -448,7 +448,7 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy):
             if action_counter.is_node_max(src_node):
                 LOG.debug("%s has objects to be migrated, but it has"
-                          " reached the limit of parallelization." % src_node)
+                          " reached the limit of parallelization.", src_node)
                 continue

             dst_node = self.get_dst_node(src_node)
@@ -643,7 +643,7 @@ class ActionCounter(object):
         if not self.is_total_max() and not self.is_pool_max(pool):
             self.per_pool_count[pool] += 1
             self.total_count += 1
-            LOG.debug("total: %s, per_pool: %s" % (
-                self.total_count, self.per_pool_count))
+            LOG.debug("total: %s, per_pool: %s",
+                      self.total_count, self.per_pool_count)
             return True
         return False
@@ -660,7 +660,7 @@ class ActionCounter(object):
         if not self.is_total_max() and not self.is_node_max(node):
             self.per_node_count[node] += 1
             self.total_count += 1
-            LOG.debug("total: %s, per_node: %s" % (
-                self.total_count, self.per_node_count))
+            LOG.debug("total: %s, per_node: %s",
+                      self.total_count, self.per_node_count)
             return True
         return False
@@ -679,9 +679,9 @@ class ActionCounter(object):
         """
         if pool not in self.per_pool_count:
             self.per_pool_count[pool] = 0
-        LOG.debug("the number of parallel per pool %s is %s " %
-                  (pool, self.per_pool_count[pool]))
-        LOG.debug("per pool limit is %s" % self.per_pool_limit)
+        LOG.debug("the number of parallel per pool %s is %s ",
+                  pool, self.per_pool_count[pool])
+        LOG.debug("per pool limit is %s", self.per_pool_limit)
         return self.per_pool_count[pool] >= self.per_pool_limit

     def is_node_max(self, node):
@@ -724,7 +724,7 @@ class BaseFilter(object):
         for k, v in six.iteritems(targets):
             if not self.is_allowed(k):
                 continue
-            LOG.debug("filter:%s with the key: %s" % (cond, k))
+            LOG.debug("filter:%s with the key: %s", cond, k)
             targets[k] = self.exec_filter(v, cond)

         LOG.debug(targets)
@@ -778,7 +778,7 @@ class ProjectSortFilter(SortMovingToFrontFilter):
         """
         project_id = self.get_project_id(item)
-        LOG.debug("project_id: %s, sort_key: %s" % (project_id, sort_key))
+        LOG.debug("project_id: %s, sort_key: %s", project_id, sort_key)
         return project_id == sort_key

     def get_project_id(self, item):
@@ -812,7 +812,7 @@ class ComputeHostSortFilter(SortMovingToFrontFilter):
         """
         host = self.get_host(item)
-        LOG.debug("host: %s, sort_key: %s" % (host, sort_key))
+        LOG.debug("host: %s, sort_key: %s", host, sort_key)
         return host == sort_key

     def get_host(self, item):
@@ -840,7 +840,7 @@ class StorageHostSortFilter(SortMovingToFrontFilter):
         """
         host = self.get_host(item)
-        LOG.debug("host: %s, sort_key: %s" % (host, sort_key))
+        LOG.debug("host: %s, sort_key: %s", host, sort_key)
         return host == sort_key

     def get_host(self, item):
@@ -867,7 +867,7 @@ class ComputeSpecSortFilter(BaseFilter):
         result = items

         if sort_key not in self.accept_keys:
-            LOG.warning("Invalid key is specified: %s" % sort_key)
+            LOG.warning("Invalid key is specified: %s", sort_key)
         else:
             result = self.get_sorted_items(items, sort_key)
@@ -912,11 +912,11 @@ class ComputeSpecSortFilter(BaseFilter):
         :returns: memory size of item
         """
-        LOG.debug("item: %s, flavors: %s" % (item, flavors))
+        LOG.debug("item: %s, flavors: %s", item, flavors)
         for flavor in flavors:
-            LOG.debug("item.flavor: %s, flavor: %s" % (item.flavor, flavor))
+            LOG.debug("item.flavor: %s, flavor: %s", item.flavor, flavor)
             if item.flavor.get('id') == flavor.id:
-                LOG.debug("flavor.ram: %s" % flavor.ram)
+                LOG.debug("flavor.ram: %s", flavor.ram)
                 return flavor.ram

     def get_vcpu_num(self, item, flavors):
@@ -927,11 +927,11 @@ class ComputeSpecSortFilter(BaseFilter):
         :returns: vcpu number of item
         """
-        LOG.debug("item: %s, flavors: %s" % (item, flavors))
+        LOG.debug("item: %s, flavors: %s", item, flavors)
         for flavor in flavors:
-            LOG.debug("item.flavor: %s, flavor: %s" % (item.flavor, flavor))
+            LOG.debug("item.flavor: %s, flavor: %s", item.flavor, flavor)
             if item.flavor.get('id') == flavor.id:
-                LOG.debug("flavor.vcpus: %s" % flavor.vcpus)
+                LOG.debug("flavor.vcpus: %s", flavor.vcpus)
                 return flavor.vcpus

     def get_disk_size(self, item, flavors):
@@ -942,11 +942,11 @@ class ComputeSpecSortFilter(BaseFilter):
         :returns: disk size of item
         """
-        LOG.debug("item: %s, flavors: %s" % (item, flavors))
+        LOG.debug("item: %s, flavors: %s", item, flavors)
         for flavor in flavors:
-            LOG.debug("item.flavor: %s, flavor: %s" % (item.flavor, flavor))
+            LOG.debug("item.flavor: %s, flavor: %s", item.flavor, flavor)
             if item.flavor.get('id') == flavor.id:
-                LOG.debug("flavor.disk: %s" % flavor.disk)
+                LOG.debug("flavor.disk: %s", flavor.disk)
                 return flavor.disk
@@ -960,7 +960,7 @@ class StorageSpecSortFilter(BaseFilter):
         result = items

         if sort_key not in self.accept_keys:
-            LOG.warning("Invalid key is specified: %s" % sort_key)
+            LOG.warning("Invalid key is specified: %s", sort_key)
             return result

         if sort_key == 'created_at':
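
For reference, checks of this kind are ordinary flake8 plugins operating on logical lines. The following is an illustrative sketch of the shape such a check takes, not the actual implementation in openstack/hacking:

    import re

    _EAGER_LOG_INTERPOLATION = re.compile(
        r".*LOG\.(?:error|warn|warning|info|critical|exception|debug)"
        r"\([^,]*%[^%]")

    def delayed_string_interpolation(logical_line, noqa):
        """H904: delay string interpolation at logging calls."""
        if noqa:
            return
        if _EAGER_LOG_INTERPOLATION.match(logical_line):
            yield (0, 'H904: delay string interpolation at logging calls')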