Merge "Fix string interpolations at logging calls"
This commit is contained in:
commit 6b2e8ed9bd
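
The change throughout this diff replaces eager "%" interpolation at logging
call sites with the logging module's deferred interpolation. With
LOG.debug('... %s' % arg) the message string is built even when DEBUG records
are filtered out; with LOG.debug('... %s', arg) formatting happens only if the
record is actually emitted, and translatable _LW/_LI/_LE messages stay intact
as format strings. A minimal standalone sketch of the difference (hypothetical
example, not Nova code):

    import logging

    logging.basicConfig(level=logging.INFO)  # DEBUG records are filtered out
    LOG = logging.getLogger(__name__)

    class Expensive(object):
        """Stands in for a costly-to-render object such as an info cache."""
        def __str__(self):
            print("__str__ called")
            return "expensive"

    obj = Expensive()

    # Eager: the string is built before logging sees it, so __str__ runs
    # even though the DEBUG record is then discarded.
    LOG.debug("value: %s" % obj)

    # Lazy: logging checks the level first and never formats the message,
    # so __str__ is not called here.
    LOG.debug("value: %s", obj)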
@@ -291,7 +291,7 @@ def remove_trailing_version_from_href(href):
     # NOTE: this should match vX.X or vX
     expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
     if not expression.match(url_parts.pop()):
-        LOG.debug('href %s does not contain version' % href)
+        LOG.debug('href %s does not contain version', href)
         raise ValueError(_('href %s does not contain version') % href)

     new_path = url_join(*url_parts)
@@ -184,7 +184,7 @@ class FloatingIPActionController(wsgi.Controller):
         cached_nwinfo = compute_utils.get_nw_info_for_instance(instance)
         if not cached_nwinfo:
             LOG.warning(
-                _LW('Info cache is %r during associate') % instance.info_cache,
+                _LW('Info cache is %r during associate'), instance.info_cache,
                 instance=instance)
             msg = _('No nw_info cache associated with instance')
             raise webob.exc.HTTPBadRequest(explanation=msg)
@@ -183,9 +183,9 @@ class HostController(object):
         on the host
         """
         if enabled:
-            LOG.info(_LI("Enabling host %s.") % host_name)
+            LOG.info(_LI("Enabling host %s."), host_name)
         else:
-            LOG.info(_LI("Disabling host %s.") % host_name)
+            LOG.info(_LI("Disabling host %s."), host_name)
         try:
             result = self.api.set_host_enabled(context, host_name=host_name,
                                                enabled=enabled)
@@ -953,7 +953,7 @@ class API(base.Base):
             context, instance_type, min_count, max_count)
         security_groups = self.security_group_api.populate_security_groups(
             security_groups)
-        LOG.debug("Going to run %s instances..." % num_instances)
+        LOG.debug("Going to run %s instances...", num_instances)
         instances = []
         try:
             for i in range(num_instances):
@@ -2052,7 +2052,7 @@ class API(base.Base):
         if search_opts is None:
             search_opts = {}

-        LOG.debug("Searching by: %s" % str(search_opts))
+        LOG.debug("Searching by: %s", str(search_opts))

         # Fixups for the DB call
         filters = {}
@@ -117,7 +117,7 @@ class Claim(NopClaim):
         """Compute operation requiring claimed resources has failed or
         been aborted.
         """
-        LOG.debug("Aborting claim: %s" % self, instance=self.instance)
+        LOG.debug("Aborting claim: %s", self, instance=self.instance)
         self.tracker.abort_instance_claim(self.context, self.instance)

     def _claim_test(self, resources, limits=None):
@@ -312,7 +312,7 @@ class MoveClaim(Claim):
         """Compute operation requiring claimed resources has failed or
         been aborted.
         """
-        LOG.debug("Aborting claim: %s" % self, instance=self.instance)
+        LOG.debug("Aborting claim: %s", self, instance=self.instance)
         self.tracker.drop_move_claim(
             self.context,
             self.instance, instance_type=self.instance_type,
@@ -6441,7 +6441,7 @@ class ComputeManager(manager.Manager):
         # Delete orphan compute node not reported by driver but still in db
         for cn in compute_nodes_in_db:
             if cn.hypervisor_hostname not in nodenames:
-                LOG.info(_LI("Deleting orphan compute node %s") % cn.id)
+                LOG.info(_LI("Deleting orphan compute node %s"), cn.id)
                 cn.destroy()

     def _get_compute_nodes_in_db(self, context, use_slave=False):
@@ -80,14 +80,13 @@ class MonitorHandler(object):
         namespace_parts = ept_parts[0].split('.')
         namespace = '.'.join(namespace_parts[0:-1])
         if self.type_monitor_loaded[namespace] is not False:
-            msg = _LW("Excluding %(namespace)s monitor %(monitor_name)s. "
-                      "Already loaded %(loaded_monitor)s.")
-            msg = msg % {
-                'namespace': namespace,
-                'monitor_name': ext.name,
-                'loaded_monitor': self.type_monitor_loaded[namespace]
-            }
-            LOG.warn(msg)
+            LOG.warning(_LW("Excluding %(namespace)s monitor "
+                            "%(monitor_name)s. Already loaded "
+                            "%(loaded_monitor)s."),
+                        {'namespace': namespace,
+                         'monitor_name': ext.name,
+                         'loaded_monitor': self.type_monitor_loaded[namespace]
+                         })
             return False

         # NOTE(jaypipes): We used to only have CPU monitors, so
@@ -107,12 +106,8 @@ class MonitorHandler(object):
         if namespace + '.' + ext.name in cfg_monitors:
             self.type_monitor_loaded[namespace] = ext.name
             return True
-        msg = _LW("Excluding %(namespace)s monitor %(monitor_name)s. "
-                  "Not in the list of enabled monitors "
-                  "(CONF.compute_monitors).")
-        msg = msg % {
-            'namespace': namespace,
-            'monitor_name': ext.name,
-        }
-        LOG.warn(msg)
+        LOG.warning(_LW("Excluding %(namespace)s monitor %(monitor_name)s. "
+                        "Not in the list of enabled monitors "
+                        "(CONF.compute_monitors)."),
+                    {'namespace': namespace, 'monitor_name': ext.name})
         return False
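
Where a message carries several named placeholders, the rewritten calls above
pass one dict as the single interpolation argument; stdlib logging treats a
lone mapping argument as the substitution map for %(name)s-style placeholders,
still deferring the formatting. A minimal sketch (hypothetical values):

    import logging

    LOG = logging.getLogger(__name__)

    # A single dict argument serves as the mapping for %(name)s placeholders;
    # interpolation is deferred until the record is emitted.
    LOG.warning("Excluding %(namespace)s monitor %(monitor_name)s.",
                {'namespace': 'os_monitors', 'monitor_name': 'cpu'})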
@@ -633,15 +633,15 @@ class ResourceTracker(object):
         vcpus = resources['vcpus']
         if vcpus:
             free_vcpus = vcpus - resources['vcpus_used']
-            LOG.debug("Hypervisor: free VCPUs: %s" % free_vcpus)
+            LOG.debug("Hypervisor: free VCPUs: %s", free_vcpus)
         else:
             free_vcpus = 'unknown'
             LOG.debug("Hypervisor: VCPU information unavailable")

         if ('pci_passthrough_devices' in resources and
                 resources['pci_passthrough_devices']):
-            LOG.debug("Hypervisor: assignable PCI devices: %s" %
-                      resources['pci_passthrough_devices'])
+            LOG.debug("Hypervisor: assignable PCI devices: %s",
+                      resources['pci_passthrough_devices'])

         pci_devices = resources.get('pci_passthrough_devices')

@@ -762,7 +762,7 @@ class ResourceTracker(object):
             return

         uuid = migration.instance_uuid
-        LOG.info(_LI("Updating from migration %s") % uuid)
+        LOG.info(_LI("Updating from migration %s"), uuid)

         incoming = (migration.dest_compute == self.host and
                     migration.dest_node == self.nodename)
@@ -108,7 +108,7 @@ class RequestContext(context.RequestContext):
         # safely ignore this as we don't use it.
         kwargs.pop('user_identity', None)
         if kwargs:
-            LOG.warning(_LW('Arguments dropped when creating context: %s') %
+            LOG.warning(_LW('Arguments dropped when creating context: %s'),
                         str(kwargs))

         # FIXME(dims): user_id and project_id duplicate information that is
@@ -2953,7 +2953,7 @@ def instance_extra_update_by_uuid(context, instance_uuid, values):
             filter_by(instance_uuid=instance_uuid).\
             update(values)
     if not rows_updated:
-        LOG.debug("Created instance_extra for %s" % instance_uuid)
+        LOG.debug("Created instance_extra for %s", instance_uuid)
         create_values = copy.copy(values)
         create_values["instance_uuid"] = instance_uuid
         _instance_extra_create(context, create_values)
@@ -1222,7 +1222,7 @@ def _host_dhcp(fixedip):
     # to truncate the hostname to only 63 characters.
     hostname = fixedip.instance.hostname
     if len(hostname) > 63:
-        LOG.warning(_LW('hostname %s too long, truncating.') % (hostname))
+        LOG.warning(_LW('hostname %s too long, truncating.'), hostname)
         hostname = fixedip.instance.hostname[:2] + '-' +\
                    fixedip.instance.hostname[-60:]
     if CONF.use_single_default_gateway:
@@ -286,7 +286,7 @@ class API(base_api.NetworkAPI):
                 raise exception.NoMoreFixedIps(net=network_id)
             except neutron_client_exc.MacAddressInUseClient:
                 LOG.warning(_LW('Neutron error: MAC address %(mac)s is already '
-                                'in use on network %(network)s.') %
+                                'in use on network %(network)s.'),
                             {'mac': mac_address, 'network': network_id},
                             instance=instance)
                 raise exception.PortInUse(port_id=mac_address)
@@ -145,15 +145,17 @@ def _update_service_ref(this_service, context):
                                            this_service.binary)
     if not service:
         LOG.error(_LE('Unable to find a service record to update for '
-                      '%(binary)s on %(host)s') % {
-                      'binary': this_service.binary,
-                      'host': this_service.host})
+                      '%(binary)s on %(host)s'),
+                  {'binary': this_service.binary,
+                   'host': this_service.host})
         return
     if service.version != service_obj.SERVICE_VERSION:
         LOG.info(_LI('Updating service version for %(binary)s on '
-                     '%(host)s from %(old)i to %(new)i') % dict(
-                     binary=this_service.binary, host=this_service.host,
-                     old=service.version, new=service_obj.SERVICE_VERSION))
+                     '%(host)s from %(old)i to %(new)i'),
+                 {'binary': this_service.binary,
+                  'host': this_service.host,
+                  'old': service.version,
+                  'new': service_obj.SERVICE_VERSION})
         service.version = service_obj.SERVICE_VERSION
         service.save()

@@ -82,7 +82,9 @@ class ContextTestCase(test.NoDBTestCase):
     def test_extra_args_to_context_get_logged(self):
         info = {}

-        def fake_warn(log_msg):
+        def fake_warn(log_msg, *args):
+            if args:
+                log_msg = log_msg % args
             info['log_msg'] = log_msg

         self.stub_out('nova.context.LOG.warning', fake_warn)
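
The test stub above changes shape to match the new call sites: LOG.warning now
receives the format string and its argument separately, so fake_warn grows a
*args parameter and applies the interpolation itself before recording the
message. Roughly how the updated stub behaves in isolation (a sketch outside
the test harness, with made-up input):

    info = {}

    def fake_warn(log_msg, *args):
        # Apply deferred interpolation the way logging itself would.
        if args:
            log_msg = log_msg % args
        info['log_msg'] = log_msg

    fake_warn('Arguments dropped when creating context: %s', {'color': 'blue'})
    assert "'color': 'blue'" in info['log_msg']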
@@ -392,7 +392,7 @@ class IptablesFirewallDriver(FirewallDriver):
         if not self.iptables.ipv4['filter'].has_chain(chain_name):
             LOG.info(
                 _LI('instance chain %s disappeared during refresh, '
-                    'skipping') % chain_name,
+                    'skipping'), chain_name,
                 instance=instance)
             return
         self.remove_filters_for_instance(instance)
@@ -157,7 +157,7 @@ class VMOps(object):
                 instance_uuids.append(str(notes[0]))
             else:
                 LOG.debug("Notes not found or not resembling a GUID for "
-                          "instance: %s" % instance_name)
+                          "instance: %s", instance_name)
         return instance_uuids

     def list_instances(self):
@@ -235,7 +235,7 @@ class VMOps(object):
                 flavor_size=new_size, image_size=old_size)
         elif new_size > old_size:
             LOG.debug("Resizing VHD %(vhd_path)s to new "
-                      "size %(new_size)s" %
+                      "size %(new_size)s",
                       {'new_size': new_size,
                        'vhd_path': vhd_path},
                       instance=instance)
@@ -149,7 +149,7 @@ def fetch_to_raw(context, image_href, path, user_id, project_id, max_size=0):

     if fmt != "raw" and CONF.force_raw_images:
         staged = "%s.converted" % path
-        LOG.debug("%s was %s, converting to raw" % (image_href, fmt))
+        LOG.debug("%s was %s, converting to raw", image_href, fmt)
         with fileutils.remove_path_on_error(staged):
             try:
                 convert_image(path_tmp, staged, fmt, 'raw')
@@ -2223,7 +2223,7 @@ class LibvirtDriver(driver.ComputeDriver):
             raise exception.NovaException(msg)

         # libgfapi delete
-        LOG.debug("XML: %s" % xml)
+        LOG.debug("XML: %s", xml)

         LOG.debug("active disk object: %s", active_disk_object)

@@ -255,7 +255,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
                       {'name': name, 'e': e})
             u = uuid.uuid4().hex

-        LOG.debug("UUID for filter '%s' is '%s'" % (name, u))
+        LOG.debug("UUID for filter '%s' is '%s'", name, u)
         return u

     def _define_filter(self, xml):
@@ -123,7 +123,7 @@ class Guest(object):
             domain = host.write_instance_config(xml)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Error defining a domain with XML: %s') %
+                LOG.error(_LE('Error defining a domain with XML: %s'),
                           encodeutils.safe_decode(xml))
         return cls(domain)

@@ -138,7 +138,7 @@ class Guest(object):
         except Exception:
             with excutils.save_and_reraise_exception():
                 LOG.error(_LE('Error launching a defined domain '
-                              'with XML: %s') %
+                              'with XML: %s'),
                           self._encoded_xml, errors='ignore')

     def poweroff(self):
@@ -166,7 +166,7 @@ class Guest(object):
                           check_exit_code=[0, 1])
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Error enabling hairpin mode with XML: %s') %
+                LOG.error(_LE('Error enabling hairpin mode with XML: %s'),
                           self._encoded_xml, errors='ignore')

     def get_interfaces(self):
@@ -891,7 +891,7 @@ class Host(object):
             return secret
         except libvirt.libvirtError:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Error defining a secret with XML: %s') % xml)
+                LOG.error(_LE('Error defining a secret with XML: %s'), xml)

     def delete_secret(self, usage_type, usage_id):
         """Delete a secret.
@@ -942,7 +942,7 @@ class Host(object):
                 dom_mem = int(guest._get_domain_info(self)[2])
             except libvirt.libvirtError as e:
                 LOG.warn(_LW("couldn't obtain the memory from domain:"
-                             " %(uuid)s, exception: %(ex)s") %
+                             " %(uuid)s, exception: %(ex)s"),
                          {"uuid": guest.uuid, "ex": e})
                 continue
             # skip dom0
@@ -165,11 +165,11 @@ def _wait_for_new_dom_id(session, vm_ref, old_dom_id, method):
         dom_id = session.VM.get_domid(vm_ref)

         if dom_id and dom_id != -1 and dom_id != old_dom_id:
-            LOG.debug("Found new dom_id %s" % dom_id)
+            LOG.debug("Found new dom_id %s", dom_id)
             return

         if time.time() > expiration:
-            LOG.debug("Timed out waiting for new dom_id %s" % dom_id)
+            LOG.debug("Timed out waiting for new dom_id %s", dom_id)
             raise exception.AgentTimeout(method=method)

         time.sleep(1)
@@ -259,7 +259,7 @@ class HostState(object):
         if (data['host_hostname'] !=
                 self._stats.get('host_hostname', data['host_hostname'])):
             LOG.error(_LE('Hostname has changed from %(old)s to %(new)s. '
-                          'A restart is required to take effect.') %
+                          'A restart is required to take effect.'),
                       {'old': self._stats['host_hostname'],
                        'new': data['host_hostname']})
             data['host_hostname'] = self._stats['host_hostname']
@@ -469,8 +469,7 @@ def destroy_vdi(session, vdi_ref):
     try:
         session.call_xenapi('VDI.destroy', vdi_ref)
     except session.XenAPI.Failure:
-        msg = "Unable to destroy VDI %s" % vdi_ref
-        LOG.debug(msg, exc_info=True)
+        LOG.debug("Unable to destroy VDI %s", vdi_ref, exc_info=True)
         msg = _("Unable to destroy VDI %s") % vdi_ref
         LOG.error(msg)
         raise exception.StorageError(reason=msg)
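
In destroy_vdi the pre-formatted msg is folded into a single lazy call while
exc_info=True is kept, so the active exception's traceback still lands on the
debug record. A minimal sketch (simulated failure, not the real XenAPI
exception type):

    import logging

    logging.basicConfig(level=logging.DEBUG)
    LOG = logging.getLogger(__name__)

    try:
        raise RuntimeError("simulated VDI.destroy failure")
    except RuntimeError:
        # Deferred interpolation plus the current traceback on one record.
        LOG.debug("Unable to destroy VDI %s", "OpaqueRef:hypothetical",
                  exc_info=True)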
@@ -482,8 +481,7 @@ def safe_destroy_vdis(session, vdi_refs):
         try:
             destroy_vdi(session, vdi_ref)
         except exception.StorageError:
-            msg = "Ignoring error while destroying VDI: %s" % vdi_ref
-            LOG.debug(msg)
+            LOG.debug("Ignoring error while destroying VDI: %s", vdi_ref)


 def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size,
@@ -683,7 +681,7 @@ def _delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref):
     # ensure garbage collector has been run
     _scan_sr(session, sr_ref)

-    LOG.info(_LI("Deleted %s snapshots.") % number_of_snapshots,
+    LOG.info(_LI("Deleted %s snapshots."), number_of_snapshots,
              instance=instance)


@@ -2108,7 +2106,7 @@ def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
                    'good_parent_uuids': good_parent_uuids},
                   instance=instance)
     else:
-        LOG.debug("Coalesce detected, because parent is: %s" % parent_uuid,
+        LOG.debug("Coalesce detected, because parent is: %s", parent_uuid,
                   instance=instance)
         return

@@ -1707,7 +1707,7 @@ class VMOps(object):

         if instances_info["instance_count"] > 0:
             LOG.info(_LI("Found %(instance_count)d hung reboots "
-                         "older than %(timeout)d seconds") % instances_info)
+                         "older than %(timeout)d seconds"), instances_info)

         for instance in instances:
             LOG.info(_LI("Automatically hard rebooting"), instance=instance)
@@ -226,7 +226,7 @@ def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):

 def _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun):
     if vdi_uuid:
-        LOG.debug("vdi_uuid: %s" % vdi_uuid)
+        LOG.debug("vdi_uuid: %s", vdi_uuid)
         return session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
     elif target_lun:
         vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)