Remove remaining log translation in scheduler

* Removed log translation markers (_LW, _LI, _LE) and their now-unused imports.
* Fixed minor misaligned log continuation lines.
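
The change is mechanical and repeats the same before/after shape in every file below. As a reference, here is a minimal self-contained sketch of the pattern; the warn_missing_key helper and its arguments are invented for illustration and do not come from any of the changed modules:

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)


    def warn_missing_key(host, key):
        # Old style (what this change removes): the message was wrapped in a
        # translation marker imported from nova.i18n, which also pushed the
        # continuation line one level deeper:
        #
        #     LOG.warning(_LW("Host '%(host)s' has a metadata key '%(key)s' "
        #                     "that is not present in the image metadata."),
        #                 {"host": host, "key": key})
        #
        # New style: the marker and its import are dropped, the continuation
        # line is realigned with the opening quote, and the parameters are
        # still passed separately so oslo.log interpolates them lazily, only
        # when the record is actually emitted.
        LOG.warning("Host '%(host)s' has a metadata key '%(key)s' "
                    "that is not present in the image metadata.",
                    {"host": host, "key": key})

Only the _LW()/_LI()/_LE() wrappers and their imports go away; the %(...)s placeholders and the separate parameter dict stay, because pre-formatting the string would defeat oslo.log's deferred interpolation.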

Change-Id: I04fb2726d5218d9f7762e843cfcd64d70c55adcb
Erik Olof Gunnar Andersson 2018-05-09 18:39:51 -07:00
parent e34c8c3f70
commit 3508263f23
16 changed files with 99 additions and 115 deletions

View File

@@ -17,7 +17,6 @@ from oslo_log import log as logging
 import nova.conf
-from nova.i18n import _LW
 from nova.scheduler import filters
 from nova.scheduler.filters import utils
@@ -54,8 +53,8 @@ class AggregateImagePropertiesIsolation(filters.BaseHostFilter):
             try:
                 prop = image_props.get(key)
             except AttributeError:
-                LOG.warning(_LW("Host '%(host)s' has a metadata key '%(key)s' "
-                                "that is not present in the image metadata."),
+                LOG.warning("Host '%(host)s' has a metadata key '%(key)s' "
+                            "that is not present in the image metadata.",
                             {"host": host_state.host, "key": key})
                 continue

View File

@@ -61,7 +61,8 @@ class AggregateInstanceExtraSpecsFilter(filters.BaseHostFilter):
             key = scope[0]
             aggregate_vals = metadata.get(key, None)
             if not aggregate_vals:
-                LOG.debug("%(host_state)s fails instance_type extra_specs "
+                LOG.debug(
+                    "%(host_state)s fails instance_type extra_specs "
                     "requirements. Extra_spec %(key)s is not in aggregate.",
                     {'host_state': host_state, 'key': key})
                 return False
@@ -70,8 +71,8 @@ class AggregateInstanceExtraSpecsFilter(filters.BaseHostFilter):
                     break
             else:
                 LOG.debug("%(host_state)s fails instance_type extra_specs "
-                            "requirements. '%(aggregate_vals)s' do not "
-                            "match '%(req)s'",
+                          "requirements. '%(aggregate_vals)s' do not "
+                          "match '%(req)s'",
                           {'host_state': host_state, 'req': req,
                            'aggregate_vals': aggregate_vals})
                 return False

View File

@@ -15,7 +15,6 @@
 from oslo_log import log as logging
-from nova.i18n import _LW
 from nova.scheduler import filters
 from nova import servicegroup
@@ -43,7 +42,7 @@ class ComputeFilter(filters.BaseHostFilter):
             return False
         else:
             if not self.servicegroup_api.service_is_up(service):
-                LOG.warning(_LW("%(host_state)s has not been heard from in a "
-                                "while"), {'host_state': host_state})
+                LOG.warning("%(host_state)s has not been heard from in a "
+                            "while", {'host_state': host_state})
                 return False
         return True

View File

@@ -17,7 +17,6 @@
 from oslo_log import log as logging
-from nova.i18n import _LW
 from nova.scheduler import filters
 from nova.scheduler.filters import utils
@@ -40,7 +39,7 @@ class BaseCoreFilter(filters.BaseHostFilter):
         """
         if not host_state.vcpus_total:
             # Fail safe
-            LOG.warning(_LW("VCPUs not set; assuming CPU collection broken"))
+            LOG.warning("VCPUs not set; assuming CPU collection broken")
             return True
         instance_vcpus = spec_obj.vcpus
@@ -97,7 +96,7 @@ class AggregateCoreFilter(BaseCoreFilter):
             ratio = utils.validate_num_values(
                 aggregate_vals, host_state.cpu_allocation_ratio, cast_to=float)
         except ValueError as e:
-            LOG.warning(_LW("Could not decode cpu_allocation_ratio: '%s'"), e)
+            LOG.warning("Could not decode cpu_allocation_ratio: '%s'", e)
             ratio = host_state.cpu_allocation_ratio
         return ratio

View File

@@ -15,7 +15,6 @@
 from oslo_log import log as logging
-from nova.i18n import _LW
 from nova.scheduler import filters
 from nova.scheduler.filters import utils
@@ -61,10 +60,10 @@ class DiskFilter(filters.BaseHostFilter):
         if not usable_disk_mb >= requested_disk:
             LOG.debug("%(host_state)s does not have %(requested_disk)s MB "
-                        "usable disk, it only has %(usable_disk_mb)s MB usable "
-                        "disk.", {'host_state': host_state,
-                         'requested_disk': requested_disk,
-                         'usable_disk_mb': usable_disk_mb})
+                      "usable disk, it only has %(usable_disk_mb)s MB usable "
+                      "disk.", {'host_state': host_state,
+                                'requested_disk': requested_disk,
+                                'usable_disk_mb': usable_disk_mb})
             return False
         disk_gb_limit = disk_mb_limit / 1024
@@ -90,7 +89,7 @@ class AggregateDiskFilter(DiskFilter):
                 aggregate_vals, host_state.disk_allocation_ratio,
                 cast_to=float)
         except ValueError as e:
-            LOG.warning(_LW("Could not decode disk_allocation_ratio: '%s'"), e)
+            LOG.warning("Could not decode disk_allocation_ratio: '%s'", e)
             ratio = host_state.disk_allocation_ratio
         return ratio

View File

@@ -61,8 +61,8 @@ class ImagePropertiesFilter(filters.BaseHostFilter):
         # advertised by the host.
         if not supp_instances:
             LOG.debug("Instance contains properties %(image_props)s, "
-                        "but no corresponding supported_instances are "
-                        "advertised by the compute node",
+                      "but no corresponding supported_instances are "
+                      "advertised by the compute node",
                       {'image_props': image_props})
             return False
@@ -87,9 +87,9 @@ class ImagePropertiesFilter(filters.BaseHostFilter):
                 return True
         LOG.debug("Instance contains properties %(image_props)s "
-                    "that are not provided by the compute node "
-                    "supported_instances %(supp_instances)s or "
-                    "hypervisor version %(hypervisor_version)s do not match",
+                  "that are not provided by the compute node "
+                  "supported_instances %(supp_instances)s or "
+                  "hypervisor version %(hypervisor_version)s do not match",
                   {'image_props': image_props,
                    'supp_instances': supp_instances,
                    'hypervisor_version': hypervisor_version})
@@ -106,6 +106,6 @@ class ImagePropertiesFilter(filters.BaseHostFilter):
         if not self._instance_supported(host_state, image_props,
                                         host_state.hypervisor_version):
             LOG.debug("%(host_state)s does not support requested "
-                          "instance_properties", {'host_state': host_state})
+                      "instance_properties", {'host_state': host_state})
             return False
         return True

View File

@@ -16,7 +16,6 @@
 from oslo_log import log as logging
 import nova.conf
-from nova.i18n import _LW
 from nova.scheduler import filters
 from nova.scheduler.filters import utils
@@ -43,9 +42,9 @@ class IoOpsFilter(filters.BaseHostFilter):
         passes = num_io_ops < max_io_ops
         if not passes:
             LOG.debug("%(host_state)s fails I/O ops check: Max IOs per host "
-                        "is set to %(max_io_ops)s",
-                        {'host_state': host_state,
-                         'max_io_ops': max_io_ops})
+                      "is set to %(max_io_ops)s",
+                      {'host_state': host_state,
+                       'max_io_ops': max_io_ops})
         return passes
@@ -65,7 +64,7 @@ class AggregateIoOpsFilter(IoOpsFilter):
             value = utils.validate_num_values(
                 aggregate_vals, max_io_ops_per_host, cast_to=int)
         except ValueError as e:
-            LOG.warning(_LW("Could not decode max_io_ops_per_host: '%s'"), e)
+            LOG.warning("Could not decode max_io_ops_per_host: '%s'", e)
             value = max_io_ops_per_host
         return value

View File

@@ -47,7 +47,7 @@ class MetricsFilter(filters.BaseHostFilter):
         if not self.keys.issubset(metrics_on_host):
             unavail = metrics_on_host - self.keys
             LOG.debug("%(host_state)s does not have the following "
-                        "metrics: %(metrics)s",
+                      "metrics: %(metrics)s",
                       {'host_state': host_state,
                        'metrics': ', '.join(unavail)})
             return False

View File

@@ -16,7 +16,6 @@
 from oslo_log import log as logging
 import nova.conf
-from nova.i18n import _LW
 from nova.scheduler import filters
 from nova.scheduler.filters import utils
@@ -40,9 +39,9 @@ class NumInstancesFilter(filters.BaseHostFilter):
         passes = num_instances < max_instances
         if not passes:
             LOG.debug("%(host_state)s fails num_instances check: Max "
-                        "instances per host is set to %(max_instances)s",
-                        {'host_state': host_state,
-                         'max_instances': max_instances})
+                      "instances per host is set to %(max_instances)s",
+                      {'host_state': host_state,
+                       'max_instances': max_instances})
         return passes
@@ -63,8 +62,7 @@ class AggregateNumInstancesFilter(NumInstancesFilter):
             value = utils.validate_num_values(
                 aggregate_vals, max_instances_per_host, cast_to=int)
         except ValueError as e:
-            LOG.warning(_LW("Could not decode max_instances_per_host: '%s'"),
-                        e)
+            LOG.warning("Could not decode max_instances_per_host: '%s'", e)
             value = max_instances_per_host
         return value

View File

@@ -16,7 +16,6 @@
 from oslo_log import log as logging
-from nova.i18n import _LW
 from nova.scheduler import filters
 from nova.scheduler.filters import utils
@@ -55,10 +54,10 @@ class BaseRamFilter(filters.BaseHostFilter):
         usable_ram = memory_mb_limit - used_ram_mb
         if not usable_ram >= requested_ram:
             LOG.debug("%(host_state)s does not have %(requested_ram)s MB "
-                        "usable ram, it only has %(usable_ram)s MB usable ram.",
-                        {'host_state': host_state,
-                         'requested_ram': requested_ram,
-                         'usable_ram': usable_ram})
+                      "usable ram, it only has %(usable_ram)s MB usable ram.",
+                      {'host_state': host_state,
+                       'requested_ram': requested_ram,
+                       'usable_ram': usable_ram})
             return False
         # save oversubscription limit for compute node to test against:
@@ -88,7 +87,7 @@ class AggregateRamFilter(BaseRamFilter):
             ratio = utils.validate_num_values(
                 aggregate_vals, host_state.ram_allocation_ratio, cast_to=float)
         except ValueError as e:
-            LOG.warning(_LW("Could not decode ram_allocation_ratio: '%s'"), e)
+            LOG.warning("Could not decode ram_allocation_ratio: '%s'", e)
             ratio = host_state.ram_allocation_ratio
         return ratio

View File

@@ -15,7 +15,6 @@
 from oslo_log import log as logging
-from nova.i18n import _LI
 from nova.scheduler import filters
 LOG = logging.getLogger(__name__)
@@ -46,8 +45,8 @@ class RetryFilter(filters.BaseHostFilter):
         passes = host not in hosts
         if not passes:
-            LOG.info(_LI("Host %(host)s fails. Previously tried hosts: "
-                         "%(hosts)s"), {'host': host, 'hosts': hosts})
+            LOG.info("Host %(host)s fails. Previously tried hosts: "
+                     "%(hosts)s", {'host': host, 'hosts': hosts})
         # Host passes if it's not in the list of previously attempted hosts:
         return passes

View File

@@ -19,8 +19,6 @@ import collections
 from oslo_log import log as logging
 import six
-from nova.i18n import _LI
 LOG = logging.getLogger(__name__)
@@ -61,12 +59,12 @@ def validate_num_values(vals, default=None, cast_to=int, based_on=min):
     if num_values > 1:
         if based_on == min:
-            LOG.info(_LI("%(num_values)d values found, "
-                         "of which the minimum value will be used."),
+            LOG.info("%(num_values)d values found, "
+                     "of which the minimum value will be used.",
                      {'num_values': num_values})
         else:
-            LOG.info(_LI("%(num_values)d values found, "
-                         "of which the maximum value will be used."),
+            LOG.info("%(num_values)d values found, "
+                     "of which the maximum value will be used.",
                      {'num_values': num_values})
     return based_on([cast_to(val) for val in vals])

View File

@@ -34,7 +34,6 @@ import six
 import nova.conf
 from nova import context as context_module
 from nova import exception
-from nova.i18n import _LI, _LW
 from nova import objects
 from nova.pci import stats as pci_stats
 from nova.scheduler import filters
@@ -88,8 +87,8 @@ def set_update_time_on_success(function):
             # Ignores exception raised from consume_from_request() so that
             # booting instance would fail in the resource claim of compute
             # node, other suitable node may be chosen during scheduling retry.
-            LOG.warning(_LW("Selected host: %(host)s failed to consume from "
-                            "instance. Error: %(error)s"),
+            LOG.warning("Selected host: %(host)s failed to consume from "
+                        "instance. Error: %(error)s",
                         {'host': self.host, 'error': e})
         else:
             now = timeutils.utcnow()
@@ -205,11 +204,11 @@ class HostState(object):
         if least_gb is not None:
             if least_gb > free_gb:
                 # can occur when an instance in database is not on host
-                LOG.warning(_LW("Host %(hostname)s has more disk space than "
-                                "database expected "
-                                "(%(physical)s GB > %(database)s GB)"),
-                            {'physical': least_gb, 'database': free_gb,
-                             'hostname': compute.hypervisor_hostname})
+                LOG.warning(
+                    "Host %(hostname)s has more disk space than database "
+                    "expected (%(physical)s GB > %(database)s GB)",
+                    {'physical': least_gb, 'database': free_gb,
+                     'hostname': compute.hypervisor_hostname})
             free_gb = min(least_gb, free_gb)
         free_disk_mb = free_gb * 1024
@@ -500,7 +499,7 @@ class HostManager(object):
                         del host_map[(hostname, nodename)]
                         ignored_hosts.append(host)
            ignored_hosts_str = ', '.join(ignored_hosts)
-            LOG.info(_LI('Host filter ignoring hosts: %s'), ignored_hosts_str)
+            LOG.info('Host filter ignoring hosts: %s', ignored_hosts_str)
         def _match_forced_hosts(host_map, hosts_to_force):
             forced_hosts = []
@@ -512,12 +511,12 @@ class HostManager(object):
                     forced_hosts.append(hostname)
             if host_map:
                 forced_hosts_str = ', '.join(forced_hosts)
-                msg = _LI('Host filter forcing available hosts to %s')
+                LOG.info('Host filter forcing available hosts to %s',
+                         forced_hosts_str)
             else:
                 forced_hosts_str = ', '.join(hosts_to_force)
-                msg = _LI("No hosts matched due to not matching "
-                          "'force_hosts' value of '%s'")
-            LOG.info(msg, forced_hosts_str)
+                LOG.info("No hosts matched due to not matching "
+                         "'force_hosts' value of '%s'", forced_hosts_str)
         def _match_forced_nodes(host_map, nodes_to_force):
             forced_nodes = []
@@ -528,12 +527,12 @@ class HostManager(object):
                     forced_nodes.append(nodename)
             if host_map:
                 forced_nodes_str = ', '.join(forced_nodes)
-                msg = _LI('Host filter forcing available nodes to %s')
+                LOG.info('Host filter forcing available nodes to %s',
+                         forced_nodes_str)
             else:
                 forced_nodes_str = ', '.join(nodes_to_force)
-                msg = _LI("No nodes matched due to not matching "
-                          "'force_nodes' value of '%s'")
-            LOG.info(msg, forced_nodes_str)
+                LOG.info("No nodes matched due to not matching "
+                         "'force_nodes' value of '%s'", forced_nodes_str)
         def _get_hosts_matching_request(hosts, requested_destination):
             (host, node) = (requested_destination.host,
@@ -541,14 +540,14 @@
             requested_nodes = [x for x in hosts
                                if x.host == host and x.nodename == node]
             if requested_nodes:
-                LOG.info(_LI('Host filter only checking host %(host)s and '
-                             'node %(node)s'), {'host': host, 'node': node})
+                LOG.info('Host filter only checking host %(host)s and '
+                         'node %(node)s', {'host': host, 'node': node})
             else:
                 # NOTE(sbauza): The API level should prevent the user from
                 # providing a wrong destination but let's make sure a wrong
                 # destination doesn't trample the scheduler still.
-                LOG.info(_LI('No hosts matched due to not matching requested '
-                             'destination (%(host)s, %(node)s)'),
+                LOG.info('No hosts matched due to not matching requested '
+                         'destination (%(host)s, %(node)s)',
                          {'host': host, 'node': node})
             return iter(requested_nodes)
@@ -710,8 +709,8 @@
             service = services.get(compute.host)
             if not service:
-                LOG.warning(_LW(
-                    "No compute service record found for host %(host)s"),
+                LOG.warning(
+                    "No compute service record found for host %(host)s",
                     {'host': compute.host})
                 continue
             host = compute.host
@@ -807,8 +806,8 @@
             host_info["updated"] = True
         else:
             self._recreate_instance_info(context, host_name)
-            LOG.info(_LI("Received an update from an unknown host '%s'. "
-                         "Re-created its InstanceList."), host_name)
+            LOG.info("Received an update from an unknown host '%s'. "
+                     "Re-created its InstanceList.", host_name)
     @utils.synchronized(HOST_INSTANCE_SEMAPHORE)
     def delete_instance_info(self, context, host_name, instance_uuid):
@@ -825,8 +824,8 @@
             host_info["updated"] = True
         else:
             self._recreate_instance_info(context, host_name)
-            LOG.info(_LI("Received a delete update from an unknown host '%s'. "
-                         "Re-created its InstanceList."), host_name)
+            LOG.info("Received a delete update from an unknown host '%s'. "
+                     "Re-created its InstanceList.", host_name)
     @utils.synchronized(HOST_INSTANCE_SEMAPHORE)
     def sync_instance_info(self, context, host_name, instance_uuids):
@@ -843,13 +842,13 @@
             compute_set = set(instance_uuids)
             if not local_set == compute_set:
                 self._recreate_instance_info(context, host_name)
-                LOG.info(_LI("The instance sync for host '%s' did not match. "
-                             "Re-created its InstanceList."), host_name)
+                LOG.info("The instance sync for host '%s' did not match. "
+                         "Re-created its InstanceList.", host_name)
                 return
             host_info["updated"] = True
             LOG.debug("Successfully synced instances from host '%s'.",
                       host_name)
         else:
             self._recreate_instance_info(context, host_name)
-            LOG.info(_LI("Received a sync request from an unknown host '%s'. "
-                         "Re-created its InstanceList."), host_name)
+            LOG.info("Received a sync request from an unknown host '%s'. "
+                     "Re-created its InstanceList.", host_name)

View File

@@ -29,7 +29,6 @@ from stevedore import driver
 import nova.conf
 from nova import exception
-from nova.i18n import _LI
 from nova import manager
 from nova import objects
 from nova.objects import host_mapping as host_mapping_obj
@@ -71,7 +70,7 @@ class SchedulerManager(manager.Manager):
     def _discover_hosts_in_cells(self, context):
         host_mappings = host_mapping_obj.discover_hosts(context)
         if host_mappings:
-            LOG.info(_LI('Discovered %(count)i new hosts: %(hosts)s'),
+            LOG.info('Discovered %(count)i new hosts: %(hosts)s',
                      {'count': len(host_mappings),
                       'hosts': ','.join(['%s:%s' % (hm.cell_mapping.name,
                                                     hm.host)

View File

@@ -28,7 +28,7 @@ from nova.compute import flavors
 from nova.compute import utils as compute_utils
 import nova.conf
 from nova import exception
-from nova.i18n import _, _LE, _LW
+from nova.i18n import _
 from nova import objects
 from nova.objects import base as obj_base
 from nova.objects import instance as obj_instance
@@ -565,12 +565,10 @@ def _log_compute_error(instance_uuid, retry):
         return  # no previously attempted hosts, skip
     last_host, last_node = hosts[-1]
-    LOG.error(_LE('Error from last host: %(last_host)s (node %(last_node)s):'
-                  ' %(exc)s'),
-              {'last_host': last_host,
-               'last_node': last_node,
-               'exc': exc},
-              instance_uuid=instance_uuid)
+    LOG.error(
+        'Error from last host: %(last_host)s (node %(last_node)s): %(exc)s',
+        {'last_host': last_host, 'last_node': last_node, 'exc': exc},
+        instance_uuid=instance_uuid)
 def _add_retry_host(filter_properties, host, node):
@@ -611,10 +609,9 @@ def parse_options(opts, sep='=', converter=str, name=""):
         else:
             bad.append(opt)
     if bad:
-        LOG.warning(_LW("Ignoring the invalid elements of the option "
-                        "%(name)s: %(options)s"),
-                    {'name': name,
-                     'options': ", ".join(bad)})
+        LOG.warning("Ignoring the invalid elements of the option "
+                    "%(name)s: %(options)s",
+                    {'name': name, 'options': ", ".join(bad)})
     return good
@@ -738,11 +735,11 @@ def retry_on_timeout(retries=1):
                 except messaging.MessagingTimeout:
                     attempt += 1
                     if attempt <= retries:
-                        LOG.warning(_LW(
+                        LOG.warning(
                             "Retrying %(name)s after a MessagingTimeout, "
-                            "attempt %(attempt)s of %(retries)s."),
-                              {'attempt': attempt, 'retries': retries,
-                               'name': func.__name__})
+                            "attempt %(attempt)s of %(retries)s.",
+                            {'attempt': attempt, 'retries': retries,
+                             'name': func.__name__})
                     else:
                         raise
         return wrapped

View File

@@ -25,7 +25,6 @@ by preferring the hosts that has less instances from the given group.
 from oslo_config import cfg
 from oslo_log import log as logging
-from nova.i18n import _LW
 from nova.scheduler import weights
 CONF = cfg.CONF
@@ -60,13 +59,13 @@ class ServerGroupSoftAffinityWeigher(_SoftAffinityWeigherBase):
     def weight_multiplier(self):
         if (CONF.filter_scheduler.soft_affinity_weight_multiplier < 0 and
                 not self.warning_sent):
-            LOG.warning(_LW('For the soft_affinity_weight_multiplier only a '
-                            'positive value is meaningful as a negative value '
-                            'would mean that the affinity weigher would '
-                            'prefer non-collocating placement. Future '
-                            'versions of nova will restrict the config '
-                            'option to values >=0. Update your configuration '
-                            'file to mitigate future upgrade issues.'))
+            LOG.warning('For the soft_affinity_weight_multiplier only a '
+                        'positive value is meaningful as a negative value '
+                        'would mean that the affinity weigher would '
+                        'prefer non-collocating placement. Future '
+                        'versions of nova will restrict the config '
+                        'option to values >=0. Update your configuration '
+                        'file to mitigate future upgrade issues.')
             self.warning_sent = True
         return CONF.filter_scheduler.soft_affinity_weight_multiplier
@@ -79,14 +78,14 @@ class ServerGroupSoftAntiAffinityWeigher(_SoftAffinityWeigherBase):
     def weight_multiplier(self):
         if (CONF.filter_scheduler.soft_anti_affinity_weight_multiplier < 0 and
                 not self.warning_sent):
-            LOG.warning(_LW('For the soft_anti_affinity_weight_multiplier '
-                            'only a positive value is meaningful as a '
-                            'negative value would mean that the anti-affinity '
-                            'weigher would prefer collocating placement. '
-                            'Future versions of nova will restrict the '
-                            'config option to values >=0. Update your '
-                            'configuration file to mitigate future upgrade '
-                            'issues.'))
+            LOG.warning('For the soft_anti_affinity_weight_multiplier '
+                        'only a positive value is meaningful as a '
+                        'negative value would mean that the anti-affinity '
+                        'weigher would prefer collocating placement. '
+                        'Future versions of nova will restrict the '
+                        'config option to values >=0. Update your '
+                        'configuration file to mitigate future upgrade '
+                        'issues.')
            self.warning_sent = True
         return CONF.filter_scheduler.soft_anti_affinity_weight_multiplier