Don't translate debug level scheduler logs
Our translation policy
(https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation) calls
for not translating debug level logs, to help prioritize log
translation. Furthermore, translation has a performance overhead even
when the log isn't emitted, since nova doesn't support lazy translation
yet.

Remove unnecessary debug level log translation in the scheduler to
comply with our translation policy. This has the added benefit of
making the scheduler slightly faster. Add a local hacking rule to
enforce this.

Change-Id: Ie1068d2d2c7c37f27c62d0e4e5a64a1a504af9f5
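To make the overhead concrete, here is a minimal standalone sketch (not nova code; '_' is a plain gettext stand-in and the logger and values are illustrative). In the translated form, _() and the % formatting both run before LOG.debug is even called, so the work is done although the record is discarded; the untranslated form defers formatting to the logging module, which skips it when debug output is disabled:

    import gettext
    import logging

    _ = gettext.NullTranslations().gettext  # stand-in for nova's _()

    LOG = logging.getLogger("sketch")
    LOG.setLevel(logging.INFO)  # debug records are discarded

    request_spec = {"num_instances": 2}

    # Eager: _() runs and the string is formatted before LOG.debug is
    # called, even though nothing is emitted.
    LOG.debug(_("Request Spec: %s") % request_spec)

    # Preferred: no translation call, and the logging module defers the
    # % formatting until (and unless) the record is actually emitted.
    LOG.debug("Request Spec: %s", request_spec)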
nova/hacking/checks.py
@@ -193,6 +193,25 @@ def assert_equal_none(logical_line):
                "sentences not allowed")
 
 
+def no_translate_debug_logs(logical_line, filename):
+    """Check for 'LOG.debug(_('
+
+    As per our translation policy,
+    https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
+    we shouldn't translate debug level logs.
+
+    * This check assumes that 'LOG' is a logger.
+    * Use filename so we can start enforcing this in specific folders instead
+      of needing to do so all at once.
+
+    N319
+    """
+    dirs = ["nova/scheduler"]
+    if max([name in filename for name in dirs]):
+        if logical_line.startswith("LOG.debug(_("):
+            yield(0, "N319 Don't translate debug level logs")
+
+
 def factory(register):
     register(import_no_db_in_virt)
     register(no_db_session_in_public_api)
@@ -205,3 +224,4 @@ def factory(register):
     register(assert_true_instance)
     register(assert_equal_type)
     register(assert_equal_none)
+    register(no_translate_debug_logs)
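For context, hacking checks like no_translate_debug_logs above are plain generators: flake8 calls them once per logical line, and every yielded (offset, message) tuple is reported as a violation. A quick illustrative driver (assuming checks.py is importable as nova.hacking.checks, as the tests at the end of this commit do):

    from nova.hacking.checks import no_translate_debug_logs

    # Scheduler file with a translated debug log: one N319 violation.
    print(list(no_translate_debug_logs(
        "LOG.debug(_('foo'))", "nova/scheduler/foo.py")))
    # -> [(0, "N319 Don't translate debug level logs")]

    # Files outside nova/scheduler are ignored for now, so the rule can
    # be rolled out folder by folder.
    print(list(no_translate_debug_logs(
        "LOG.debug(_('foo'))", "nova/compute/api.py")))
    # -> []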
nova/scheduler/filter_scheduler.py
@@ -80,7 +80,7 @@ class FilterScheduler(driver.Scheduler):
                    "uuids: %(instance_uuids)s"),
                  {'num_instances': len(instance_uuids),
                   'instance_uuids': instance_uuids})
-        LOG.debug(_("Request Spec: %s") % request_spec)
+        LOG.debug("Request Spec: %s" % request_spec)
 
         weighed_hosts = self._schedule(context, request_spec,
                                        filter_properties, instance_uuids)
@@ -329,12 +329,12 @@ class FilterScheduler(driver.Scheduler):
                 # Can't get any more locally.
                 break
 
-            LOG.debug(_("Filtered %(hosts)s"), {'hosts': hosts})
+            LOG.debug("Filtered %(hosts)s", {'hosts': hosts})
 
             weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
                     filter_properties)
 
-            LOG.debug(_("Weighed %(hosts)s"), {'hosts': weighed_hosts})
+            LOG.debug("Weighed %(hosts)s", {'hosts': weighed_hosts})
 
             scheduler_host_subset_size = CONF.scheduler_host_subset_size
             if scheduler_host_subset_size > len(weighed_hosts):
nova/scheduler/filters/affinity_filter.py
@@ -19,7 +19,6 @@ import netaddr
 import six
 
 from nova.compute import api as compute
-from nova.openstack.common.gettextutils import _
 from nova.openstack.common import log as logging
 from nova.scheduler import filters
 
@@ -114,8 +113,8 @@ class _GroupAntiAffinityFilter(AffinityFilter):
             return True
 
         group_hosts = filter_properties.get('group_hosts') or []
-        LOG.debug(_("Group anti affinity: check if %(host)s not "
-                    "in %(configured)s"), {'host': host_state.host,
+        LOG.debug("Group anti affinity: check if %(host)s not "
+                  "in %(configured)s", {'host': host_state.host,
                     'configured': group_hosts})
         if group_hosts:
             return not host_state.host in group_hosts
@@ -150,8 +149,8 @@ class _GroupAffinityFilter(AffinityFilter):
             return True
 
         group_hosts = filter_properties.get('group_hosts', [])
-        LOG.debug(_("Group affinity: check if %(host)s in "
-                    "%(configured)s"), {'host': host_state.host,
+        LOG.debug("Group affinity: check if %(host)s in "
+                  "%(configured)s", {'host': host_state.host,
                     'configured': group_hosts})
         if group_hosts:
             return host_state.host in group_hosts
nova/scheduler/filters/aggregate_image_properties_isolation.py
@@ -16,7 +16,6 @@
 from oslo.config import cfg
 
 from nova import db
-from nova.openstack.common.gettextutils import _
 from nova.openstack.common import log as logging
 from nova.scheduler import filters
 
@@ -58,9 +57,9 @@ class AggregateImagePropertiesIsolation(filters.BaseHostFilter):
                 continue
             prop = image_props.get(key)
             if prop and prop not in options:
-                LOG.debug(_("%(host_state)s fails image aggregate properties "
+                LOG.debug("%(host_state)s fails image aggregate properties "
                           "requirements. Property %(prop)s does not "
-                          "match %(options)s."),
+                          "match %(options)s.",
                           {'host_state': host_state,
                            'prop': prop,
                            'options': options})
nova/scheduler/filters/aggregate_instance_extra_specs.py
@@ -15,7 +15,6 @@
 # under the License.
 
 from nova import db
-from nova.openstack.common.gettextutils import _
 from nova.openstack.common import log as logging
 from nova.scheduler import filters
 from nova.scheduler.filters import extra_specs_ops
@@ -56,17 +55,17 @@ class AggregateInstanceExtraSpecsFilter(filters.BaseHostFilter):
                 key = scope[0]
             aggregate_vals = metadata.get(key, None)
             if not aggregate_vals:
-                LOG.debug(_("%(host_state)s fails instance_type extra_specs "
-                    "requirements. Extra_spec %(key)s is not in aggregate."),
+                LOG.debug("%(host_state)s fails instance_type extra_specs "
+                    "requirements. Extra_spec %(key)s is not in aggregate.",
                     {'host_state': host_state, 'key': key})
                 return False
             for aggregate_val in aggregate_vals:
                 if extra_specs_ops.match(aggregate_val, req):
                     break
             else:
-                LOG.debug(_("%(host_state)s fails instance_type extra_specs "
+                LOG.debug("%(host_state)s fails instance_type extra_specs "
                           "requirements. '%(aggregate_vals)s' do not "
-                          "match '%(req)s'"),
+                          "match '%(req)s'",
                           {'host_state': host_state, 'req': req,
                            'aggregate_vals': aggregate_vals})
                 return False
nova/scheduler/filters/aggregate_multitenancy_isolation.py
@@ -14,7 +14,6 @@
 # under the License.
 
 from nova import db
-from nova.openstack.common.gettextutils import _
 from nova.openstack.common import log as logging
 from nova.scheduler import filters
 
@@ -45,6 +44,6 @@ class AggregateMultiTenancyIsolation(filters.BaseHostFilter):
 
         if metadata != {}:
             if tenant_id not in metadata["filter_tenant_id"]:
-                LOG.debug(_("%s fails tenant id on aggregate"), host_state)
+                LOG.debug("%s fails tenant id on aggregate", host_state)
                 return False
         return True
nova/scheduler/filters/compute_capabilities_filter.py
@@ -13,7 +13,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from nova.openstack.common.gettextutils import _
 from nova.openstack.common import log as logging
 from nova.scheduler import filters
 from nova.scheduler.filters import extra_specs_ops
@@ -59,8 +58,8 @@ class ComputeCapabilitiesFilter(filters.BaseHostFilter):
             if cap is None:
                 return False
             if not extra_specs_ops.match(str(cap), req):
-                LOG.debug(_("extra_spec requirement '%(req)s' does not match "
-                            "'%(cap)s'"), {'req': req, 'cap': cap})
+                LOG.debug("extra_spec requirement '%(req)s' does not match "
+                          "'%(cap)s'", {'req': req, 'cap': cap})
                 return False
         return True
 
@@ -69,7 +68,7 @@ class ComputeCapabilitiesFilter(filters.BaseHostFilter):
         instance_type = filter_properties.get('instance_type')
         if not self._satisfies_extra_specs(host_state,
                 instance_type):
-            LOG.debug(_("%(host_state)s fails instance_type extra_specs "
-                        "requirements"), {'host_state': host_state})
+            LOG.debug("%(host_state)s fails instance_type extra_specs "
+                      "requirements", {'host_state': host_state})
             return False
         return True
nova/scheduler/filters/compute_filter.py
@@ -38,7 +38,7 @@ class ComputeFilter(filters.BaseHostFilter):
         """Returns True for only active compute nodes."""
         service = host_state.service
         if service['disabled']:
-            LOG.debug(_("%(host_state)s is disabled, reason: %(reason)s"),
+            LOG.debug("%(host_state)s is disabled, reason: %(reason)s",
                       {'host_state': host_state,
                        'reason': service.get('disabled_reason')})
             return False
nova/scheduler/filters/disk_filter.py
@@ -15,7 +15,6 @@
 
 from oslo.config import cfg
 
-from nova.openstack.common.gettextutils import _
 from nova.openstack.common import log as logging
 from nova.scheduler import filters
 
@@ -46,9 +45,9 @@ class DiskFilter(filters.BaseHostFilter):
         usable_disk_mb = disk_mb_limit - used_disk_mb
 
         if not usable_disk_mb >= requested_disk:
-            LOG.debug(_("%(host_state)s does not have %(requested_disk)s MB "
+            LOG.debug("%(host_state)s does not have %(requested_disk)s MB "
                       "usable disk, it only has %(usable_disk_mb)s MB usable "
-                      "disk."), {'host_state': host_state,
+                      "disk.", {'host_state': host_state,
                        'requested_disk': requested_disk,
                        'usable_disk_mb': usable_disk_mb})
             return False
nova/scheduler/filters/image_props_filter.py
@@ -17,7 +17,6 @@
 from distutils import versionpredicate
 
 from nova.compute import vm_mode
-from nova.openstack.common.gettextutils import _
 from nova.openstack.common import log as logging
 from nova.scheduler import filters
 from nova import utils
@@ -55,9 +54,9 @@ class ImagePropertiesFilter(filters.BaseHostFilter):
             # Not supported if an instance property is requested but nothing
             # advertised by the host.
             if not supp_instances:
-                LOG.debug(_("Instance contains properties %(image_props)s, "
+                LOG.debug("Instance contains properties %(image_props)s, "
                           "but no corresponding supported_instances are "
-                          "advertised by the compute node"),
+                          "advertised by the compute node",
                           {'image_props': image_props})
                 return False
 
@@ -81,10 +80,10 @@ class ImagePropertiesFilter(filters.BaseHostFilter):
             if _compare_product_version(hypervisor_version, image_props):
                 return True
 
-        LOG.debug(_("Instance contains properties %(image_props)s "
+        LOG.debug("Instance contains properties %(image_props)s "
                   "that are not provided by the compute node "
                   "supported_instances %(supp_instances)s or "
-                  "hypervisor version %(hypervisor_version)s do not match"),
+                  "hypervisor version %(hypervisor_version)s do not match",
                   {'image_props': image_props,
                    'supp_instances': supp_instances,
                    'hypervisor_version': hypervisor_version})
@@ -101,7 +100,7 @@ class ImagePropertiesFilter(filters.BaseHostFilter):
 
         if not self._instance_supported(host_state, image_props,
                                         host_state.hypervisor_version):
-            LOG.debug(_("%(host_state)s does not support requested "
-                        "instance_properties"), {'host_state': host_state})
+            LOG.debug("%(host_state)s does not support requested "
+                      "instance_properties", {'host_state': host_state})
             return False
         return True
nova/scheduler/filters/io_ops_filter.py
@@ -15,7 +15,6 @@
 
 from oslo.config import cfg
 
-from nova.openstack.common.gettextutils import _
 from nova.openstack.common import log as logging
 from nova.scheduler import filters
 
@@ -40,8 +39,8 @@ class IoOpsFilter(filters.BaseHostFilter):
         max_io_ops = CONF.max_io_ops_per_host
         passes = num_io_ops < max_io_ops
         if not passes:
-            LOG.debug(_("%(host_state)s fails I/O ops check: Max IOs per host "
-                        "is set to %(max_io_ops)s"),
+            LOG.debug("%(host_state)s fails I/O ops check: Max IOs per host "
+                      "is set to %(max_io_ops)s",
                       {'host_state': host_state,
                        'max_io_ops': max_io_ops})
         return passes
nova/scheduler/filters/metrics_filter.py
@@ -15,7 +15,6 @@
 
 from oslo.config import cfg
 
-from nova.openstack.common.gettextutils import _
 from nova.openstack.common import log as logging
 from nova.scheduler import filters
 from nova.scheduler import utils
@@ -47,8 +46,8 @@ class MetricsFilter(filters.BaseHostFilter):
     def host_passes(self, host_state, filter_properties):
         unavail = [i for i in self.keys if i not in host_state.metrics]
         if unavail:
-            LOG.debug(_("%(host_state)s does not have the following "
-                        "metrics: %(metrics)s"),
+            LOG.debug("%(host_state)s does not have the following "
+                      "metrics: %(metrics)s",
                       {'host_state': host_state,
                        'metrics': ', '.join(unavail)})
         return len(unavail) == 0
nova/scheduler/filters/num_instances_filter.py
@@ -15,7 +15,6 @@
 
 from oslo.config import cfg
 
-from nova.openstack.common.gettextutils import _
 from nova.openstack.common import log as logging
 from nova.scheduler import filters
 
@@ -37,8 +36,8 @@ class NumInstancesFilter(filters.BaseHostFilter):
         max_instances = CONF.max_instances_per_host
         passes = num_instances < max_instances
         if not passes:
-            LOG.debug(_("%(host_state)s fails num_instances check: Max "
-                        "instances per host is set to %(max_instances)s"),
+            LOG.debug("%(host_state)s fails num_instances check: Max "
+                      "instances per host is set to %(max_instances)s",
                       {'host_state': host_state,
                        'max_instances': max_instances})
         return passes
nova/scheduler/filters/ram_filter.py
@@ -53,8 +53,8 @@ class BaseRamFilter(filters.BaseHostFilter):
         used_ram_mb = total_usable_ram_mb - free_ram_mb
         usable_ram = memory_mb_limit - used_ram_mb
         if not usable_ram >= requested_ram:
-            LOG.debug(_("%(host_state)s does not have %(requested_ram)s MB "
-                        "usable ram, it only has %(usable_ram)s MB usable ram."),
+            LOG.debug("%(host_state)s does not have %(requested_ram)s MB "
+                      "usable ram, it only has %(usable_ram)s MB usable ram.",
                       {'host_state': host_state,
                        'requested_ram': requested_ram,
                        'usable_ram': usable_ram})
nova/scheduler/filters/retry_filter.py
@@ -13,7 +13,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from nova.openstack.common.gettextutils import _
 from nova.openstack.common import log as logging
 from nova.scheduler import filters
 
@@ -30,7 +29,7 @@ class RetryFilter(filters.BaseHostFilter):
         retry = filter_properties.get('retry', None)
        if not retry:
             # Re-scheduling is disabled
-            LOG.debug(_("Re-scheduling is disabled"))
+            LOG.debug("Re-scheduling is disabled")
             return True
 
         hosts = retry.get('hosts', [])
@@ -39,8 +38,8 @@ class RetryFilter(filters.BaseHostFilter):
         passes = host not in hosts
 
         if not passes:
-            LOG.debug(_("Host %(host)s fails. Previously tried hosts: "
-                        "%(hosts)s"), {'host': host, 'hosts': hosts})
+            LOG.debug("Host %(host)s fails. Previously tried hosts: "
+                      "%(hosts)s", {'host': host, 'hosts': hosts})
 
         # Host passes if it's not in the list of previously attempted hosts:
         return passes
nova/tests/test_hacking.py
@@ -80,3 +80,13 @@ class HackingTestCase(test.NoDBTestCase):
 
         self.assertEqual(
             len(list(checks.assert_equal_none("self.assertIsNone()"))), 0)
+
+    def test_no_translate_debug_logs(self):
+        self.assertEqual(len(list(checks.no_translate_debug_logs(
+            "LOG.debug(_('foo'))", "nova/scheduler/foo.py"))), 1)
+
+        self.assertEqual(len(list(checks.no_translate_debug_logs(
+            "LOG.debug('foo')", "nova/scheduler/foo.py"))), 0)
+
+        self.assertEqual(len(list(checks.no_translate_debug_logs(
+            "LOG.info(_('foo'))", "nova/scheduler/foo.py"))), 0)