Fix string interpolations at logging calls

Skip creating the formatted log message if the message is not going to be
emitted because of the log level.

TrivialFix

Change-Id: Iba9f47163a0ac3aca612818272db6d536b238975
commit 897cb7c2d3
parent 3781ef8e0c
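For context on the rationale: writing the '%' inside the call's parentheses
formats the message eagerly, before the logger has decided whether the record
will be emitted at all, while passing the arguments separately defers the
interpolation to the logging machinery. A minimal sketch of the difference,
using the standard-library logging module rather than Nova's oslo-wrapped LOG,
with a made-up href value for illustration:

    import logging

    logging.basicConfig(level=logging.INFO)  # DEBUG records are filtered out
    LOG = logging.getLogger(__name__)

    href = '/servers/detail'  # hypothetical value, for illustration only

    # Eager: '%' builds the full string before LOG.debug() even runs, paying
    # the formatting cost although the record is then discarded.
    LOG.debug('href %s does not contain version' % href)

    # Lazy: format string and arguments travel separately; logging only
    # interpolates them if a handler actually emits the record.
    LOG.debug('href %s does not contain version', href)

    # Named placeholders take a single mapping as the lone argument, which is
    # the shape the dict-style call sites below are converted to.
    LOG.warning('MAC %(mac)s in use on network %(network)s.',
                {'mac': 'fa:16:3e:00:00:01', 'network': 'net-1'})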
@@ -291,7 +291,7 @@ def remove_trailing_version_from_href(href):
     # NOTE: this should match vX.X or vX
     expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
     if not expression.match(url_parts.pop()):
-        LOG.debug('href %s does not contain version' % href)
+        LOG.debug('href %s does not contain version', href)
         raise ValueError(_('href %s does not contain version') % href)
 
     new_path = url_join(*url_parts)
@@ -184,7 +184,7 @@ class FloatingIPActionController(wsgi.Controller):
         cached_nwinfo = compute_utils.get_nw_info_for_instance(instance)
         if not cached_nwinfo:
             LOG.warning(
-                _LW('Info cache is %r during associate') % instance.info_cache,
+                _LW('Info cache is %r during associate'), instance.info_cache,
                 instance=instance)
             msg = _('No nw_info cache associated with instance')
             raise webob.exc.HTTPBadRequest(explanation=msg)
@@ -183,9 +183,9 @@ class HostController(object):
         on the host
         """
         if enabled:
-            LOG.info(_LI("Enabling host %s.") % host_name)
+            LOG.info(_LI("Enabling host %s."), host_name)
         else:
-            LOG.info(_LI("Disabling host %s.") % host_name)
+            LOG.info(_LI("Disabling host %s."), host_name)
         try:
             result = self.api.set_host_enabled(context, host_name=host_name,
                                                enabled=enabled)
@@ -953,7 +953,7 @@ class API(base.Base):
                 context, instance_type, min_count, max_count)
         security_groups = self.security_group_api.populate_security_groups(
                 security_groups)
-        LOG.debug("Going to run %s instances..." % num_instances)
+        LOG.debug("Going to run %s instances...", num_instances)
         instances = []
         try:
             for i in range(num_instances):
@@ -2052,7 +2052,7 @@ class API(base.Base):
         if search_opts is None:
             search_opts = {}
 
-        LOG.debug("Searching by: %s" % str(search_opts))
+        LOG.debug("Searching by: %s", str(search_opts))
 
         # Fixups for the DB call
         filters = {}
@@ -117,7 +117,7 @@ class Claim(NopClaim):
         """Compute operation requiring claimed resources has failed or
         been aborted.
         """
-        LOG.debug("Aborting claim: %s" % self, instance=self.instance)
+        LOG.debug("Aborting claim: %s", self, instance=self.instance)
         self.tracker.abort_instance_claim(self.context, self.instance)
 
     def _claim_test(self, resources, limits=None):
@@ -312,7 +312,7 @@ class MoveClaim(Claim):
         """Compute operation requiring claimed resources has failed or
         been aborted.
         """
-        LOG.debug("Aborting claim: %s" % self, instance=self.instance)
+        LOG.debug("Aborting claim: %s", self, instance=self.instance)
         self.tracker.drop_move_claim(
             self.context,
             self.instance, instance_type=self.instance_type,
@@ -6401,7 +6401,7 @@ class ComputeManager(manager.Manager):
         # Delete orphan compute node not reported by driver but still in db
         for cn in compute_nodes_in_db:
             if cn.hypervisor_hostname not in nodenames:
-                LOG.info(_LI("Deleting orphan compute node %s") % cn.id)
+                LOG.info(_LI("Deleting orphan compute node %s"), cn.id)
                 cn.destroy()
 
     def _get_compute_nodes_in_db(self, context, use_slave=False):
@@ -80,14 +80,13 @@ class MonitorHandler(object):
         namespace_parts = ept_parts[0].split('.')
         namespace = '.'.join(namespace_parts[0:-1])
         if self.type_monitor_loaded[namespace] is not False:
-            msg = _LW("Excluding %(namespace)s monitor %(monitor_name)s. "
-                      "Already loaded %(loaded_monitor)s.")
-            msg = msg % {
-                'namespace': namespace,
-                'monitor_name': ext.name,
-                'loaded_monitor': self.type_monitor_loaded[namespace]
-            }
-            LOG.warn(msg)
+            LOG.warning(_LW("Excluding %(namespace)s monitor "
+                            "%(monitor_name)s. Already loaded "
+                            "%(loaded_monitor)s."),
+                        {'namespace': namespace,
+                         'monitor_name': ext.name,
+                         'loaded_monitor': self.type_monitor_loaded[namespace]
+                         })
             return False
 
         # NOTE(jaypipes): We used to only have CPU monitors, so
@@ -107,12 +106,8 @@ class MonitorHandler(object):
             if namespace + '.' + ext.name in cfg_monitors:
                 self.type_monitor_loaded[namespace] = ext.name
                 return True
-            msg = _LW("Excluding %(namespace)s monitor %(monitor_name)s. "
-                      "Not in the list of enabled monitors "
-                      "(CONF.compute_monitors).")
-            msg = msg % {
-                'namespace': namespace,
-                'monitor_name': ext.name,
-            }
-            LOG.warn(msg)
+            LOG.warning(_LW("Excluding %(namespace)s monitor %(monitor_name)s. "
+                            "Not in the list of enabled monitors "
+                            "(CONF.compute_monitors)."),
+                        {'namespace': namespace, 'monitor_name': ext.name})
             return False
@@ -623,15 +623,15 @@ class ResourceTracker(object):
         vcpus = resources['vcpus']
         if vcpus:
             free_vcpus = vcpus - resources['vcpus_used']
-            LOG.debug("Hypervisor: free VCPUs: %s" % free_vcpus)
+            LOG.debug("Hypervisor: free VCPUs: %s", free_vcpus)
         else:
             free_vcpus = 'unknown'
             LOG.debug("Hypervisor: VCPU information unavailable")
 
         if ('pci_passthrough_devices' in resources and
                 resources['pci_passthrough_devices']):
-            LOG.debug("Hypervisor: assignable PCI devices: %s" %
+            LOG.debug("Hypervisor: assignable PCI devices: %s",
                       resources['pci_passthrough_devices'])
 
         pci_devices = resources.get('pci_passthrough_devices')
 
@@ -752,7 +752,7 @@ class ResourceTracker(object):
             return
 
         uuid = migration.instance_uuid
-        LOG.info(_LI("Updating from migration %s") % uuid)
+        LOG.info(_LI("Updating from migration %s"), uuid)
 
         incoming = (migration.dest_compute == self.host and
                     migration.dest_node == self.nodename)
@@ -108,7 +108,7 @@ class RequestContext(context.RequestContext):
         # safely ignore this as we don't use it.
         kwargs.pop('user_identity', None)
         if kwargs:
-            LOG.warning(_LW('Arguments dropped when creating context: %s') %
+            LOG.warning(_LW('Arguments dropped when creating context: %s'),
                         str(kwargs))
 
         # FIXME(dims): user_id and project_id duplicate information that is
@@ -2953,7 +2953,7 @@ def instance_extra_update_by_uuid(context, instance_uuid, values):
         filter_by(instance_uuid=instance_uuid).\
         update(values)
     if not rows_updated:
-        LOG.debug("Created instance_extra for %s" % instance_uuid)
+        LOG.debug("Created instance_extra for %s", instance_uuid)
         create_values = copy.copy(values)
         create_values["instance_uuid"] = instance_uuid
         _instance_extra_create(context, create_values)
@@ -1222,7 +1222,7 @@ def _host_dhcp(fixedip):
     # to truncate the hostname to only 63 characters.
     hostname = fixedip.instance.hostname
     if len(hostname) > 63:
-        LOG.warning(_LW('hostname %s too long, truncating.') % (hostname))
+        LOG.warning(_LW('hostname %s too long, truncating.'), hostname)
         hostname = fixedip.instance.hostname[:2] + '-' +\
                    fixedip.instance.hostname[-60:]
     if CONF.use_single_default_gateway:
@@ -286,7 +286,7 @@ class API(base_api.NetworkAPI):
             raise exception.NoMoreFixedIps(net=network_id)
         except neutron_client_exc.MacAddressInUseClient:
             LOG.warning(_LW('Neutron error: MAC address %(mac)s is already '
-                            'in use on network %(network)s.') %
+                            'in use on network %(network)s.'),
                         {'mac': mac_address, 'network': network_id},
                         instance=instance)
             raise exception.PortInUse(port_id=mac_address)
@@ -145,15 +145,17 @@ def _update_service_ref(this_service, context):
                                                      this_service.binary)
     if not service:
         LOG.error(_LE('Unable to find a service record to update for '
-                      '%(binary)s on %(host)s') % {
-                          'binary': this_service.binary,
-                          'host': this_service.host})
+                      '%(binary)s on %(host)s'),
+                  {'binary': this_service.binary,
+                   'host': this_service.host})
         return
     if service.version != service_obj.SERVICE_VERSION:
         LOG.info(_LI('Updating service version for %(binary)s on '
-                     '%(host)s from %(old)i to %(new)i') % dict(
-                         binary=this_service.binary, host=this_service.host,
-                         old=service.version, new=service_obj.SERVICE_VERSION))
+                     '%(host)s from %(old)i to %(new)i'),
+                 {'binary': this_service.binary,
+                  'host': this_service.host,
+                  'old': service.version,
+                  'new': service_obj.SERVICE_VERSION})
         service.version = service_obj.SERVICE_VERSION
         service.save()
 
@@ -82,7 +82,9 @@ class ContextTestCase(test.NoDBTestCase):
     def test_extra_args_to_context_get_logged(self):
         info = {}
 
-        def fake_warn(log_msg):
+        def fake_warn(log_msg, *args):
+            if args:
+                log_msg = log_msg % args
             info['log_msg'] = log_msg
 
         self.stub_out('nova.context.LOG.warning', fake_warn)
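The test hunk above adapts the stub to the new calling convention: since
nova.context now passes the format string and its arguments separately, the
fake has to perform the interpolation that the logging machinery would
otherwise do. A standalone sketch of that contract, with illustrative values
that are not taken from the patch:

    import logging

    # Deferred interpolation happens in LogRecord.getMessage():
    record = logging.LogRecord('nova.context', logging.WARNING, __file__, 0,
                               'Arguments dropped when creating context: %s',
                               ("{'read_deleted': 'maybe'}",), None)
    print(record.getMessage())
    # -> Arguments dropped when creating context: {'read_deleted': 'maybe'}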
@@ -392,7 +392,7 @@ class IptablesFirewallDriver(FirewallDriver):
         if not self.iptables.ipv4['filter'].has_chain(chain_name):
             LOG.info(
                 _LI('instance chain %s disappeared during refresh, '
-                    'skipping') % chain_name,
+                    'skipping'), chain_name,
                 instance=instance)
             return
         self.remove_filters_for_instance(instance)
@@ -157,7 +157,7 @@ class VMOps(object):
                 instance_uuids.append(str(notes[0]))
             else:
                 LOG.debug("Notes not found or not resembling a GUID for "
-                          "instance: %s" % instance_name)
+                          "instance: %s", instance_name)
         return instance_uuids
 
     def list_instances(self):
@@ -235,7 +235,7 @@ class VMOps(object):
                     flavor_size=new_size, image_size=old_size)
         elif new_size > old_size:
             LOG.debug("Resizing VHD %(vhd_path)s to new "
-                      "size %(new_size)s" %
+                      "size %(new_size)s",
                       {'new_size': new_size,
                        'vhd_path': vhd_path},
                       instance=instance)
@@ -149,7 +149,7 @@ def fetch_to_raw(context, image_href, path, user_id, project_id, max_size=0):
 
     if fmt != "raw" and CONF.force_raw_images:
         staged = "%s.converted" % path
-        LOG.debug("%s was %s, converting to raw" % (image_href, fmt))
+        LOG.debug("%s was %s, converting to raw", image_href, fmt)
         with fileutils.remove_path_on_error(staged):
             try:
                 convert_image(path_tmp, staged, fmt, 'raw')
@@ -2224,7 +2224,7 @@ class LibvirtDriver(driver.ComputeDriver):
             raise exception.NovaException(msg)
 
         # libgfapi delete
-        LOG.debug("XML: %s" % xml)
+        LOG.debug("XML: %s", xml)
 
         LOG.debug("active disk object: %s", active_disk_object)
 
@@ -255,7 +255,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
                       {'name': name, 'e': e})
             u = uuid.uuid4().hex
 
-        LOG.debug("UUID for filter '%s' is '%s'" % (name, u))
+        LOG.debug("UUID for filter '%s' is '%s'", name, u)
         return u
 
     def _define_filter(self, xml):
@@ -123,7 +123,7 @@ class Guest(object):
             domain = host.write_instance_config(xml)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Error defining a domain with XML: %s') %
+                LOG.error(_LE('Error defining a domain with XML: %s'),
                           encodeutils.safe_decode(xml))
         return cls(domain)
 
@@ -138,7 +138,7 @@ class Guest(object):
         except Exception:
             with excutils.save_and_reraise_exception():
                 LOG.error(_LE('Error launching a defined domain '
-                              'with XML: %s') %
+                              'with XML: %s'),
                           self._encoded_xml, errors='ignore')
 
     def poweroff(self):
@@ -166,7 +166,7 @@ class Guest(object):
                           check_exit_code=[0, 1])
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Error enabling hairpin mode with XML: %s') %
+                LOG.error(_LE('Error enabling hairpin mode with XML: %s'),
                           self._encoded_xml, errors='ignore')
 
     def get_interfaces(self):
@@ -891,7 +891,7 @@ class Host(object):
             return secret
         except libvirt.libvirtError:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Error defining a secret with XML: %s') % xml)
+                LOG.error(_LE('Error defining a secret with XML: %s'), xml)
 
     def delete_secret(self, usage_type, usage_id):
         """Delete a secret.
@@ -942,7 +942,7 @@ class Host(object):
                 dom_mem = int(guest._get_domain_info(self)[2])
             except libvirt.libvirtError as e:
                 LOG.warn(_LW("couldn't obtain the memory from domain:"
-                             " %(uuid)s, exception: %(ex)s") %
+                             " %(uuid)s, exception: %(ex)s"),
                          {"uuid": guest.uuid, "ex": e})
                 continue
             # skip dom0
@@ -165,11 +165,11 @@ def _wait_for_new_dom_id(session, vm_ref, old_dom_id, method):
         dom_id = session.VM.get_domid(vm_ref)
 
         if dom_id and dom_id != -1 and dom_id != old_dom_id:
-            LOG.debug("Found new dom_id %s" % dom_id)
+            LOG.debug("Found new dom_id %s", dom_id)
             return
 
         if time.time() > expiration:
-            LOG.debug("Timed out waiting for new dom_id %s" % dom_id)
+            LOG.debug("Timed out waiting for new dom_id %s", dom_id)
             raise exception.AgentTimeout(method=method)
 
         time.sleep(1)
@@ -259,7 +259,7 @@ class HostState(object):
             if (data['host_hostname'] !=
                     self._stats.get('host_hostname', data['host_hostname'])):
                 LOG.error(_LE('Hostname has changed from %(old)s to %(new)s. '
-                              'A restart is required to take effect.') %
+                              'A restart is required to take effect.'),
                           {'old': self._stats['host_hostname'],
                            'new': data['host_hostname']})
                 data['host_hostname'] = self._stats['host_hostname']
@@ -469,8 +469,7 @@ def destroy_vdi(session, vdi_ref):
     try:
         session.call_xenapi('VDI.destroy', vdi_ref)
     except session.XenAPI.Failure:
-        msg = "Unable to destroy VDI %s" % vdi_ref
-        LOG.debug(msg, exc_info=True)
+        LOG.debug("Unable to destroy VDI %s", vdi_ref, exc_info=True)
         msg = _("Unable to destroy VDI %s") % vdi_ref
         LOG.error(msg)
         raise exception.StorageError(reason=msg)
@@ -482,8 +481,7 @@ def safe_destroy_vdis(session, vdi_refs):
         try:
             destroy_vdi(session, vdi_ref)
         except exception.StorageError:
-            msg = "Ignoring error while destroying VDI: %s" % vdi_ref
-            LOG.debug(msg)
+            LOG.debug("Ignoring error while destroying VDI: %s", vdi_ref)
 
 
 def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size,
@@ -683,7 +681,7 @@ def _delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref):
     # ensure garbage collector has been run
     _scan_sr(session, sr_ref)
 
-    LOG.info(_LI("Deleted %s snapshots.") % number_of_snapshots,
+    LOG.info(_LI("Deleted %s snapshots."), number_of_snapshots,
              instance=instance)
 
 
@@ -2108,7 +2106,7 @@ def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
                    'good_parent_uuids': good_parent_uuids},
                   instance=instance)
     else:
-        LOG.debug("Coalesce detected, because parent is: %s" % parent_uuid,
+        LOG.debug("Coalesce detected, because parent is: %s", parent_uuid,
                   instance=instance)
         return
 
@@ -1707,7 +1707,7 @@ class VMOps(object):
 
         if instances_info["instance_count"] > 0:
             LOG.info(_LI("Found %(instance_count)d hung reboots "
-                         "older than %(timeout)d seconds") % instances_info)
+                         "older than %(timeout)d seconds"), instances_info)
 
         for instance in instances:
             LOG.info(_LI("Automatically hard rebooting"), instance=instance)
@@ -226,7 +226,7 @@ def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
 
 def _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun):
     if vdi_uuid:
-        LOG.debug("vdi_uuid: %s" % vdi_uuid)
+        LOG.debug("vdi_uuid: %s", vdi_uuid)
         return session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
     elif target_lun:
         vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)