Update oslo log messages with translation domains

Update the incubator code to use different translation domains for
log messages at different levels.

Update the import_exceptions setting for hacking to allow multiple
functions to be imported from gettextutils on one line.

bp log-messages-translation-domain

Change-Id: I6ce0f4a59438612ce74c46b3ee9398bef24c0c19
Doug Hellmann 2014-01-08 19:35:05 +00:00
parent 50948eff45
commit fcf517d72c
34 changed files with 229 additions and 223 deletions
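
The whole commit applies one mechanical pattern. A minimal sketch of
that pattern follows (the messages are taken from the diff below; the
surrounding function and logger setup are illustrative only):

    import logging
    from openstack.common.gettextutils import _, _LE, _LI, _LW

    LOG = logging.getLogger(__name__)

    def _report(pid, delay):
        # Each log level now uses its own translation domain:
        LOG.error(_LE('DB exceeded retry limit.'))
        LOG.warning(_LW('task run outlasted interval by %s sec') % -delay)
        LOG.info(_LI('Started child %d'), pid)
        # Debug messages are no longer marked for translation at all:
        LOG.debug('Got semaphore "%(lock)s"', {'lock': 'name'})
        # User-facing messages (e.g. in exceptions) keep plain _():
        raise ValueError(_('Environment not supported over SSH'))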

View File

@ -25,7 +25,7 @@ import logging
import time
from openstack.common.db import exception
from openstack.common.gettextutils import _ # noqa
from openstack.common.gettextutils import _LE
from openstack.common import importutils
@ -68,11 +68,11 @@ class wrap_db_retry(object):
return f(*args, **kwargs)
except exception.DBConnectionError as e:
if remaining == 0:
LOG.exception(_('DB exceeded retry limit.'))
LOG.exception(_LE('DB exceeded retry limit.'))
raise exception.DBError(e)
if remaining != -1:
remaining -= 1
LOG.exception(_('DB connection error.'))
LOG.exception(_LE('DB connection error.'))
# NOTE(vsergeyev): We are using patched time module, so
# this effectively yields the execution
# context to another green thread.

View File

@ -16,7 +16,7 @@ import os
from openstack.common.db.sqlalchemy import migration
from openstack.common.db.sqlalchemy.migration_cli import ext_base
from openstack.common.db.sqlalchemy import session as db_session
from openstack.common.gettextutils import _ # noqa
from openstack.common.gettextutils import _LE
LOG = logging.getLogger(__name__)
@ -58,9 +58,9 @@ class MigrateExtension(ext_base.MigrationExtensionBase):
init_version=self.init_version)
except ValueError:
LOG.error(
_('Migration number for migrate plugin must be valid '
'integer or empty, if you want to downgrade '
'to initial state')
_LE('Migration number for migrate plugin must be valid '
'integer or empty, if you want to downgrade '
'to initial state')
)
raise

View File

@ -291,7 +291,7 @@ from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
from openstack.common.db import exception
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _, _LE, _LW
from openstack.common import timeutils
@ -442,7 +442,7 @@ def _wrap_db_error(f):
_raise_if_duplicate_entry_error(e, self.bind.dialect.name)
raise exception.DBError(e)
except Exception as e:
LOG.exception(_('DB exception wrapped.'))
LOG.exception(_LE('DB exception wrapped.'))
raise exception.DBError(e)
return _wrap
@ -579,11 +579,11 @@ def create_engine(sql_connection, sqlite_fk=False,
sqlalchemy.event.listen(engine, 'checkout',
_set_mode_traditional)
else:
LOG.warning(_("This application has not enabled MySQL "
"traditional mode, which means silent "
"data corruption may occur. "
"Please encourage the application "
"developers to enable this mode."))
LOG.warning(_LW("This application has not enabled MySQL "
"traditional mode, which means silent "
"data corruption may occur. "
"Please encourage the application "
"developers to enable this mode."))
elif 'sqlite' in connection_dict.drivername:
if not sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect',
@ -603,7 +603,7 @@ def create_engine(sql_connection, sqlite_fk=False,
if remaining == -1:
remaining = 'infinite'
while True:
msg = _('SQL connection failed. %s attempts left.')
msg = _LW('SQL connection failed. %s attempts left.')
LOG.warning(msg % remaining)
if remaining != 'infinite':
remaining -= 1

View File

@ -26,7 +26,7 @@ import sqlalchemy
import sqlalchemy.exc
from openstack.common.db.sqlalchemy import utils
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _LE
from openstack.common import test
LOG = logging.getLogger(__name__)
@ -60,10 +60,10 @@ def _set_db_lock(lock_path=None, lock_prefix=None):
path = lock_path or os.environ.get("OSLO_LOCK_PATH")
lock = lockfile.FileLock(os.path.join(path, lock_prefix))
with lock:
LOG.debug(_('Got lock "%s"') % f.__name__)
LOG.debug('Got lock "%s"' % f.__name__)
return f(*args, **kwargs)
finally:
LOG.debug(_('Lock released "%s"') % f.__name__)
LOG.debug('Lock released "%s"' % f.__name__)
return wrapper
return decorator
@ -264,6 +264,6 @@ class WalkVersionsMixin(object):
if check:
check(engine, data)
except Exception:
LOG.error("Failed to migrate to version %s on engine %s" %
LOG.error(_LE("Failed to migrate to version %s on engine %s") %
(version, engine))
raise

View File

@ -37,7 +37,7 @@ from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _, _LI, _LW
from openstack.common import timeutils
@ -93,7 +93,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check
# the actual primary key, rather than assuming its id
LOG.warning(_('Id not in sort_keys; is sort_keys unique?'))
LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs))
@ -276,8 +276,8 @@ def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
rows_to_delete_select = select([table.c.id]).where(delete_condition)
for row in migrate_engine.execute(rows_to_delete_select).fetchall():
LOG.info(_("Deleting duplicated row with id: %(id)s from table: "
"%(table)s") % dict(id=row[0], table=table_name))
LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
"%(table)s") % dict(id=row[0], table=table_name))
if use_soft_delete:
delete_statement = table.update().\

View File

@ -29,7 +29,7 @@ import eventlet.backdoor
import greenlet
from oslo.config import cfg
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _LI
from openstack.common import log as logging
help_for_backdoor_port = (
@ -137,8 +137,10 @@ def initialize_if_enabled():
# In the case of backdoor port being zero, a port number is assigned by
# listen(). In any case, pull the port number out here.
port = sock.getsockname()[1]
LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
{'port': port, 'pid': os.getpid()})
LOG.info(
_LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
{'port': port, 'pid': os.getpid()}
)
eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
locals=backdoor_locals)
return port

View File

@ -24,7 +24,7 @@ import traceback
import six
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _LE
class save_and_reraise_exception(object):
@ -59,7 +59,7 @@ class save_and_reraise_exception(object):
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
logging.error(_('Original exception being dropped: %s'),
logging.error(_LE('Original exception being dropped: %s'),
traceback.format_exception(self.type_,
self.value,
self.tb))
@ -88,8 +88,8 @@ def forever_retry_uncaught_exceptions(infunc):
if (cur_time - last_log_time > 60 or
this_exc_message != last_exc_message):
logging.exception(
_('Unexpected exception occurred %d time(s)... '
'retrying.') % exc_count)
_LE('Unexpected exception occurred %d time(s)... '
'retrying.') % exc_count)
last_log_time = cur_time
last_exc_message = this_exc_message
exc_count = 0

View File

@ -19,7 +19,6 @@ import os
import tempfile
from openstack.common import excutils
from openstack.common.gettextutils import _
from openstack.common import log as logging
LOG = logging.getLogger(__name__)
@ -59,7 +58,7 @@ def read_cached_file(filename, force_reload=False):
cache_info = _FILE_CACHE.setdefault(filename, {})
if not cache_info or mtime > cache_info.get('mtime', 0):
LOG.debug(_("Reloading cached file %s") % filename)
LOG.debug("Reloading cached file %s" % filename)
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime

View File

@ -46,7 +46,7 @@ import functools
import stevedore
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _, _LE
from openstack.common import log as logging
LOG = logging.getLogger(__name__)
@ -92,7 +92,7 @@ class HookManager(object):
else:
hook_method(*args, **kwargs)
except Exception:
LOG.exception(_('Error during %s-hook') % method_type)
LOG.exception(_LE('Error during %s-hook') % method_type)
def run_pre(self, name, args, kwargs, f=None):
"""Execute optional pre methods of loaded hooks.

View File

@ -28,7 +28,7 @@ import weakref
from oslo.config import cfg
from openstack.common import fileutils
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _, _LE, _LI
from openstack.common import log as logging
@ -79,7 +79,7 @@ class _InterProcessLock(object):
if not os.path.exists(basedir):
fileutils.ensure_tree(basedir)
LOG.info(_('Created lock path: %s'), basedir)
LOG.info(_LI('Created lock path: %s'), basedir)
self.lockfile = open(self.fname, 'w')
@ -90,7 +90,7 @@ class _InterProcessLock(object):
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
LOG.debug(_('Got file lock "%s"'), self.fname)
LOG.debug('Got file lock "%s"', self.fname)
return True
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
@ -114,9 +114,9 @@ class _InterProcessLock(object):
try:
self.unlock()
self.lockfile.close()
LOG.debug(_('Released file lock "%s"'), self.fname)
LOG.debug('Released file lock "%s"', self.fname)
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
LOG.exception(_LE("Could not release the acquired lock `%s`"),
self.fname)
def __exit__(self, exc_type, exc_val, exc_tb):
@ -158,7 +158,7 @@ _semaphores_lock = threading.Lock()
def external_lock(name, lock_file_prefix=None):
with internal_lock(name):
LOG.debug(_('Attempting to grab external lock "%(lock)s"'),
LOG.debug('Attempting to grab external lock "%(lock)s"',
{'lock': name})
# NOTE(mikal): the lock name cannot contain directory
@ -184,7 +184,7 @@ def internal_lock(name):
sem = threading.Semaphore()
_semaphores[name] = sem
LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
LOG.debug('Got semaphore "%(lock)s"', {'lock': name})
return sem
@ -241,11 +241,11 @@ def synchronized(name, lock_file_prefix=None, external=False):
def inner(*args, **kwargs):
try:
with lock(name, lock_file_prefix, external):
LOG.debug(_('Got semaphore / lock "%(function)s"'),
LOG.debug('Got semaphore / lock "%(function)s"',
{'function': f.__name__})
return f(*args, **kwargs)
finally:
LOG.debug(_('Semaphore / lock released "%(function)s"'),
LOG.debug('Semaphore / lock released "%(function)s"',
{'function': f.__name__})
return inner
return wrap

View File

@ -20,7 +20,7 @@ import sys
from eventlet import event
from eventlet import greenthread
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _LE, _LW
from openstack.common import log as logging
from openstack.common import timeutils
@ -79,14 +79,14 @@ class FixedIntervalLoopingCall(LoopingCallBase):
break
delay = interval - timeutils.delta_seconds(start, end)
if delay <= 0:
LOG.warn(_('task run outlasted interval by %s sec') %
LOG.warn(_LW('task run outlasted interval by %s sec') %
-delay)
greenthread.sleep(delay if delay > 0 else 0)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in fixed duration looping call'))
LOG.exception(_LE('in fixed duration looping call'))
done.send_exception(*sys.exc_info())
return
else:
@ -126,14 +126,14 @@ class DynamicLoopingCall(LoopingCallBase):
if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max)
LOG.debug(_('Dynamic looping call sleeping for %.02f '
'seconds'), idle)
LOG.debug('Dynamic looping call sleeping for %.02f '
'seconds', idle)
greenthread.sleep(idle)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in dynamic looping call'))
LOG.exception(_LE('in dynamic looping call'))
done.send_exception(*sys.exc_info())
return
else:

View File

@ -22,7 +22,7 @@ to hide internal errors from API response.
import webob.dec
import webob.exc
from openstack.common.gettextutils import _ # noqa
from openstack.common.gettextutils import _LE
from openstack.common import log as logging
from openstack.common.middleware import base
@ -37,7 +37,7 @@ class CatchErrorsMiddleware(base.Middleware):
try:
response = req.get_response(self.application)
except Exception:
LOG.exception(_('An error occurred during '
'processing the request: %s'))
LOG.exception(_LE('An error occurred during '
'processing the request: %s'))
response = webob.exc.HTTPInternalServerError()
return response

View File

@ -24,7 +24,7 @@ import six
import webob.dec
from openstack.common import context
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _LE
from openstack.common import log as logging
from openstack.common.middleware import base
from openstack.common.notifier import api
@ -37,8 +37,8 @@ def log_and_ignore_error(fn):
try:
return fn(*args, **kwargs)
except Exception as e:
LOG.exception(_('An exception occurred processing '
'the API call: %s ') % e)
LOG.exception(_LE('An exception occurred processing '
'the API call: %s ') % e)
return wrapped

View File

@ -19,7 +19,7 @@ import uuid
from oslo.config import cfg
from openstack.common import context
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _, _LE
from openstack.common import importutils
from openstack.common import jsonutils
from openstack.common import log as logging
@ -142,9 +142,9 @@ def notify(context, publisher_id, event_type, priority, payload):
try:
driver.notify(context, msg)
except Exception as e:
LOG.exception(_("Problem '%(e)s' attempting to "
"send to notification system. "
"Payload=%(payload)s")
LOG.exception(_LE("Problem '%(e)s' attempting to "
"send to notification system. "
"Payload=%(payload)s")
% dict(e=e, payload=payload))
@ -161,8 +161,8 @@ def _get_drivers():
driver = importutils.import_module(notification_driver)
_drivers[notification_driver] = driver
except ImportError:
LOG.exception(_("Failed to load notifier %s. "
"These notifications will not be sent.") %
LOG.exception(_LE("Failed to load notifier %s. "
"These notifications will not be sent.") %
notification_driver)
return _drivers.values()

View File

@ -16,7 +16,7 @@
from oslo.config import cfg
from openstack.common import context as req_context
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _LE
from openstack.common import log as logging
from openstack.common import rpc
@ -42,6 +42,6 @@ def notify(context, message):
try:
rpc.notify(context, topic, message)
except Exception:
LOG.exception(_("Could not send notification to %(topic)s. "
"Payload=%(message)s"),
LOG.exception(_LE("Could not send notification to %(topic)s. "
"Payload=%(message)s"),
{"topic": topic, "message": message})

View File

@ -18,7 +18,7 @@
from oslo.config import cfg
from openstack.common import context as req_context
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _LE
from openstack.common import log as logging
from openstack.common import rpc
@ -48,6 +48,6 @@ def notify(context, message):
try:
rpc.notify(context, topic, message, envelope=True)
except Exception:
LOG.exception(_("Could not send notification to %(topic)s. "
"Payload=%(message)s"),
LOG.exception(_LE("Could not send notification to %(topic)s. "
"Payload=%(message)s"),
{"topic": topic, "message": message})

View File

@ -16,7 +16,7 @@ import time
from oslo.config import cfg
import six
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _, _LE, _LI
from openstack.common import log as logging
@ -122,13 +122,13 @@ class _PeriodicTasksMeta(type):
name = task.__name__
if task._periodic_spacing < 0:
LOG.info(_('Skipping periodic task %(task)s because '
'its interval is negative'),
LOG.info(_LI('Skipping periodic task %(task)s because '
'its interval is negative'),
{'task': name})
continue
if not task._periodic_enabled:
LOG.info(_('Skipping periodic task %(task)s because '
'it is disabled'),
LOG.info(_LI('Skipping periodic task %(task)s because '
'it is disabled'),
{'task': name})
continue
@ -168,7 +168,7 @@ class PeriodicTasks(object):
idle_for = min(idle_for, due - now)
continue
LOG.debug(_("Running periodic task %(full_task_name)s"),
LOG.debug("Running periodic task %(full_task_name)s",
{"full_task_name": full_task_name})
self._periodic_last_run[task_name] = time.time()
@ -177,7 +177,7 @@ class PeriodicTasks(object):
except Exception as e:
if raise_on_error:
raise
LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"),
{"full_task_name": full_task_name, "e": e})
time.sleep(0)

View File

@ -64,7 +64,7 @@ import six.moves.urllib.parse as urlparse
import six.moves.urllib.request as urlrequest
from openstack.common import fileutils
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _, _LE
from openstack.common import jsonutils
from openstack.common import log as logging
@ -209,7 +209,7 @@ class Enforcer(object):
if reloaded or not self.rules:
rules = Rules.load_json(data, self.default_rule)
self.set_rules(rules)
LOG.debug(_("Rules successfully reloaded"))
LOG.debug("Rules successfully reloaded")
def _get_policy_path(self):
"""Locate the policy json data file.
@ -255,7 +255,7 @@ class Enforcer(object):
# NOTE(flaper87): Not logging target or creds to avoid
# potential security issues.
LOG.debug(_("Rule %s will be now enforced") % rule)
LOG.debug("Rule %s will be now enforced" % rule)
self.load_rules()
@ -270,7 +270,7 @@ class Enforcer(object):
# Evaluate the rule
result = self.rules[rule](target, creds, self)
except KeyError:
LOG.debug(_("Rule [%s] doesn't exist") % rule)
LOG.debug("Rule [%s] doesn't exist" % rule)
# If the rule doesn't exist, fail closed
result = False
@ -478,7 +478,7 @@ def _parse_check(rule):
try:
kind, match = rule.split(':', 1)
except Exception:
LOG.exception(_("Failed to understand rule %s") % rule)
LOG.exception(_LE("Failed to understand rule %s") % rule)
# If the rule is invalid, we'll fail closed
return FalseCheck()
@ -488,7 +488,7 @@ def _parse_check(rule):
elif None in _checks:
return _checks[None](kind, match)
else:
LOG.error(_("No handler for matches of kind %s") % kind)
LOG.error(_LE("No handler for matches of kind %s") % kind)
return FalseCheck()
@ -758,7 +758,7 @@ def _parse_text_rule(rule):
return state.result
except ValueError:
# Couldn't parse the rule
LOG.exception(_("Failed to understand rule %r") % rule)
LOG.exception(_LE("Failed to understand rule %r") % rule)
# Fail closed
return FalseCheck()

View File

@ -151,7 +151,8 @@ def execute(*cmd, **kwargs):
while attempts > 0:
attempts -= 1
try:
LOG.log(loglevel, _('Running cmd (subprocess): %s'), ' '.join(cmd))
LOG.log(loglevel, 'Running cmd (subprocess): %s',
' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
if os.name == 'nt':
@ -184,7 +185,7 @@ def execute(*cmd, **kwargs):
break
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
LOG.log(loglevel, _('Result was %s') % _returncode)
LOG.log(loglevel, 'Result was %s' % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=_returncode,
@ -196,7 +197,7 @@ def execute(*cmd, **kwargs):
if not attempts:
raise
else:
LOG.log(loglevel, _('%r failed. Retrying.'), cmd)
LOG.log(loglevel, '%r failed. Retrying.', cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
@ -235,7 +236,7 @@ def trycmd(*args, **kwargs):
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
LOG.debug(_('Running cmd (SSH): %s'), cmd)
LOG.debug('Running cmd (SSH): %s', cmd)
if addl_env:
raise InvalidArgumentError(_('Environment not supported over SSH'))
@ -256,7 +257,7 @@ def ssh_execute(ssh, cmd, process_input=None,
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug(_('Result was %s') % exit_status)
LOG.debug('Result was %s' % exit_status)
if check_exit_code and exit_status != 0:
raise ProcessExecutionError(exit_code=exit_status,
stdout=stdout,

View File

@ -21,7 +21,7 @@ import datetime
from oslo.config import cfg
import six
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _, _LE
from openstack.common import importutils
from openstack.common import log as logging
from openstack.common import timeutils
@ -72,9 +72,9 @@ class QuotaException(Exception):
except Exception:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'))
LOG.exception(_LE('Exception in string format operation'))
for name, value in six.iteritems(kwargs):
LOG.error("%s: %s" % (name, value))
LOG.error(_LE("%s: %s") % (name, value))
# at least get the core message out if something happened
message = self.msg_fmt
super(QuotaException, self).__init__(message)
@ -186,11 +186,12 @@ class DbQuotaDriver(object):
default_quotas = self.db.quota_class_get_default(context)
for resource in resources.values():
if resource.name not in default_quotas:
LOG.deprecated(_("Default quota for resource: %(res)s is set "
"by the default quota flag: quota_%(res)s, "
"it is now deprecated. Please use the "
"the default quota class for default "
"quota.") % {'res': resource.name})
LOG.deprecated(
"Default quota for resource: %(res)s is set "
"by the default quota flag: quota_%(res)s, "
"it is now deprecated. Please use the "
"the default quota class for default "
"quota." % {'res': resource.name})
quotas[resource.name] = default_quotas.get(resource.name,
resource.default)
@ -1065,7 +1066,7 @@ class QuotaEngine(object):
project_id=project_id,
user_id=user_id)
LOG.debug(_("Created reservations %s"), reservations)
LOG.debug("Created reservations %s", reservations)
return reservations
@ -1088,9 +1089,10 @@ class QuotaEngine(object):
# usage resynchronization and the reservation expiration
# mechanisms will resolve the issue. The exception is
# logged, however, because this is less than optimal.
LOG.exception(_("Failed to commit reservations %s"), reservations)
LOG.exception(_LE("Failed to commit reservations %s"),
reservations)
return
LOG.debug(_("Committed reservations %s"), reservations)
LOG.debug("Committed reservations %s", reservations)
def rollback(self, context, reservations, project_id=None, user_id=None):
"""Roll back reservations.
@ -1111,10 +1113,10 @@ class QuotaEngine(object):
# usage resynchronization and the reservation expiration
# mechanisms will resolve the issue. The exception is
# logged, however, because this is less than optimal.
LOG.exception(_("Failed to roll back reservations %s"),
LOG.exception(_LE("Failed to roll back reservations %s"),
reservations)
return
LOG.debug(_("Rolled back reservations %s"), reservations)
LOG.debug("Rolled back reservations %s", reservations)
def usage_reset(self, context, resources):
"""Reset the usage records.

View File

@ -19,7 +19,7 @@ Utilities for linking request ID's across service calls.
import logging
from openstack.common.gettextutils import _ # noqa
from openstack.common.gettextutils import _, _LI
LOG = logging.getLogger(__name__)
@ -73,12 +73,14 @@ def link_request_ids(context, source_id, target_id=None, stage=None,
if target_name or target_id:
arrow = " -> "
LOG.info(_("Request ID Link: %(event_name)s '%(source_id)s'%(arrow)s"
"%(target_name)s%(target_id)s") % {"event_name": event_name,
"source_id": source_id,
"target_name": rtarget_name,
"arrow": arrow,
"target_id": rtarget_id})
LOG.info(_LI("Request ID Link: %(event_name)s "
"'%(source_id)s'%(arrow)s"
"%(target_name)s%(target_id)s") % {
"event_name": event_name,
"source_id": source_id,
"target_name": rtarget_name,
"arrow": arrow,
"target_id": rtarget_id})
if notifier:
payload = {"source_request_id": source_id,

View File

@ -37,7 +37,7 @@ import six
from openstack.common import excutils
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _, _LE
from openstack.common import local
from openstack.common import log as logging
from openstack.common.rpc import common as rpc_common
@ -72,7 +72,7 @@ class Pool(pools.Pool):
# TODO(comstud): Timeout connections not used in a while
def create(self):
LOG.debug(_('Pool creating new connection'))
LOG.debug('Pool creating new connection')
return self.connection_cls(self.conf)
def empty(self):
@ -287,7 +287,7 @@ def unpack_context(conf, msg):
context_dict['reply_q'] = msg.pop('_reply_q', None)
context_dict['conf'] = conf
ctx = RpcContext.from_dict(context_dict)
rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
rpc_common._safe_log(LOG.debug, 'unpacked context: %s', ctx.to_dict())
return ctx
@ -339,7 +339,7 @@ def _add_unique_id(msg):
"""Add unique_id for checking duplicate messages."""
unique_id = uuid.uuid4().hex
msg.update({UNIQUE_ID: unique_id})
LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
LOG.debug('UNIQUE_ID is %s.' % (unique_id))
class _ThreadPoolWithWait(object):
@ -432,7 +432,7 @@ class ProxyCallback(_ThreadPoolWithWait):
# the previous context is stored in local.store.context
if hasattr(local.store, 'context'):
del local.store.context
rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
rpc_common._safe_log(LOG.debug, 'received %s', message_data)
self.msg_id_cache.check_duplicate_message(message_data)
ctxt = unpack_context(self.conf, message_data)
method = message_data.get('method')
@ -469,7 +469,7 @@ class ProxyCallback(_ThreadPoolWithWait):
# This final None tells multicall that it is done.
ctxt.reply(ending=True, connection_pool=self.connection_pool)
except rpc_common.ClientException as e:
LOG.debug(_('Expected exception during message handling (%s)') %
LOG.debug('Expected exception during message handling (%s)' %
e._exc_info[1])
ctxt.reply(None, e._exc_info,
connection_pool=self.connection_pool,
@ -477,7 +477,7 @@ class ProxyCallback(_ThreadPoolWithWait):
except Exception:
# sys.exc_info() is deleted by LOG.exception().
exc_info = sys.exc_info()
LOG.error(_('Exception during message handling'),
LOG.error(_LE('Exception during message handling'),
exc_info=exc_info)
ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
@ -551,10 +551,10 @@ _reply_proxy_create_sem = semaphore.Semaphore()
def multicall(conf, context, topic, msg, timeout, connection_pool):
"""Make a call that returns multiple times."""
LOG.debug(_('Making synchronous call on %s ...'), topic)
LOG.debug('Making synchronous call on %s ...', topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_('MSG_ID is %s') % (msg_id))
LOG.debug('MSG_ID is %s' % (msg_id))
_add_unique_id(msg)
pack_context(msg, context)
@ -580,7 +580,7 @@ def call(conf, context, topic, msg, timeout, connection_pool):
def cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a topic without waiting for a response."""
LOG.debug(_('Making asynchronous cast on %s...'), topic)
LOG.debug('Making asynchronous cast on %s...', topic)
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
@ -589,7 +589,7 @@ def cast(conf, context, topic, msg, connection_pool):
def fanout_cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a fanout exchange without waiting for a response."""
LOG.debug(_('Making asynchronous fanout cast...'))
LOG.debug('Making asynchronous fanout cast...')
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
@ -617,7 +617,7 @@ def fanout_cast_to_server(conf, context, server_params, topic, msg,
def notify(conf, context, topic, msg, connection_pool, envelope):
"""Sends a notification event on a topic."""
LOG.debug(_('Sending %(event_type)s on %(topic)s'),
LOG.debug('Sending %(event_type)s on %(topic)s',
dict(event_type=msg.get('event_type'),
topic=topic))
_add_unique_id(msg)

View File

@ -22,7 +22,7 @@ import traceback
from oslo.config import cfg
import six
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _, _LE
from openstack.common import importutils
from openstack.common import jsonutils
from openstack.common import local
@ -85,7 +85,7 @@ class RPCException(Exception):
except Exception:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'))
LOG.exception(_LE('Exception in string format operation'))
for name, value in six.iteritems(kwargs):
LOG.error("%s: %s" % (name, value))
# at least get the core message out if something happened
@ -289,7 +289,7 @@ def serialize_remote_exception(failure_info, log_failure=True):
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
if log_failure:
LOG.error(_("Returning exception %s to caller"),
LOG.error(_LE("Returning exception %s to caller"),
six.text_type(failure))
LOG.error(tb)

View File

@ -29,7 +29,7 @@ from oslo.config import cfg
import six
from openstack.common import excutils
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _, _LE, _LI
from openstack.common import network_utils
from openstack.common.rpc import amqp as rpc_amqp
from openstack.common.rpc import common as rpc_common
@ -153,12 +153,12 @@ class ConsumerBase(object):
callback(msg)
except Exception:
if self.ack_on_error:
LOG.exception(_("Failed to process message"
" ... skipping it."))
LOG.exception(_LE("Failed to process message"
" ... skipping it."))
message.ack()
else:
LOG.exception(_("Failed to process message"
" ... will requeue."))
LOG.exception(_LE("Failed to process message"
" ... will requeue."))
message.requeue()
else:
message.ack()
@ -492,7 +492,7 @@ class Connection(object):
be handled by the caller.
"""
if self.connection:
LOG.info(_("Reconnecting to AMQP server on "
LOG.info(_LI("Reconnecting to AMQP server on "
"%(hostname)s:%(port)d") % params)
try:
self.connection.release()
@ -514,7 +514,7 @@ class Connection(object):
self.channel._new_queue('ae.undeliver')
for consumer in self.consumers:
consumer.reconnect(self.channel)
LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
LOG.info(_LI('Connected to AMQP server on %(hostname)s:%(port)d') %
params)
def reconnect(self):
@ -565,9 +565,9 @@ class Connection(object):
sleep_time = min(sleep_time, self.interval_max)
log_info['sleep_time'] = sleep_time
LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
'unreachable: %(err_str)s. Trying again in '
'%(sleep_time)d seconds.') % log_info)
LOG.error(_LE('AMQP server on %(hostname)s:%(port)d is '
'unreachable: %(err_str)s. Trying again in '
'%(sleep_time)d seconds.') % log_info)
time.sleep(sleep_time)
def ensure(self, error_callback, method, *args, **kwargs):
@ -619,7 +619,7 @@ class Connection(object):
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
def _declare_consumer():
@ -637,11 +637,11 @@ class Connection(object):
def _error_callback(exc):
if isinstance(exc, socket.timeout):
LOG.debug(_('Timed out waiting for RPC response: %s') %
LOG.debug('Timed out waiting for RPC response: %s' %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
LOG.exception(_LE('Failed to consume message from queue: %s') %
str(exc))
info['do_consume'] = True
@ -680,7 +680,7 @@ class Connection(object):
def _error_callback(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.exception(_("Failed to publish message to topic "
LOG.exception(_LE("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)
def _publish():

View File

@ -23,7 +23,7 @@ from oslo.config import cfg
import six
from openstack.common import excutils
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _, _LE, _LI
from openstack.common import importutils
from openstack.common import jsonutils
from openstack.common import log as logging
@ -188,7 +188,7 @@ class ConsumerBase(object):
msg = rpc_common.deserialize_msg(message.content)
self.callback(msg)
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
LOG.exception(_LE("Failed to process message... skipping it."))
finally:
# TODO(sandy): Need support for optional ack_on_error.
self.session.acknowledge(message)
@ -512,13 +512,13 @@ class Connection(object):
self.connection.open()
except qpid_exceptions.ConnectionError as e:
msg_dict = dict(e=e, delay=delay)
msg = _("Unable to connect to AMQP server: %(e)s. "
"Sleeping %(delay)s seconds") % msg_dict
msg = _LE("Unable to connect to AMQP server: %(e)s. "
"Sleeping %(delay)s seconds") % msg_dict
LOG.error(msg)
time.sleep(delay)
delay = min(2 * delay, 60)
else:
LOG.info(_('Connected to AMQP server on %s'), broker)
LOG.info(_LI('Connected to AMQP server on %s'), broker)
break
self.session = self.connection.session()
@ -531,7 +531,7 @@ class Connection(object):
consumer.reconnect(self.session)
self._register_consumer(consumer)
LOG.debug(_("Re-established AMQP queues"))
LOG.debug("Re-established AMQP queues")
def ensure(self, error_callback, method, *args, **kwargs):
while True:
@ -570,7 +570,7 @@ class Connection(object):
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
def _declare_consumer():
@ -585,11 +585,11 @@ class Connection(object):
def _error_callback(exc):
if isinstance(exc, qpid_exceptions.Empty):
LOG.debug(_('Timed out waiting for RPC response: %s') %
LOG.debug('Timed out waiting for RPC response: %s' %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
LOG.exception(_LE('Failed to consume message from queue: %s') %
str(exc))
def _consume():
@ -597,7 +597,7 @@ class Connection(object):
try:
self._lookup_consumer(nxt_receiver).consume()
except Exception:
LOG.exception(_("Error processing message. Skipping it."))
LOG.exception(_LE("Error processing message. Skipping it."))
for iteration in itertools.count(0):
if limit and iteration >= limit:
@ -624,7 +624,7 @@ class Connection(object):
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.exception(_("Failed to publish message to topic "
LOG.exception(_LE("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)
def _publisher_send():

View File

@ -27,7 +27,7 @@ import six
from six import moves
from openstack.common import excutils
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _, _LE, _LI
from openstack.common import importutils
from openstack.common import jsonutils
from openstack.common.rpc import common as rpc_common
@ -93,12 +93,12 @@ def _serialize(data):
return jsonutils.dumps(data, ensure_ascii=True)
except TypeError:
with excutils.save_and_reraise_exception():
LOG.error(_("JSON serialization failed."))
LOG.error(_LE("JSON serialization failed."))
def _deserialize(data):
"""Deserialization wrapper."""
LOG.debug(_("Deserializing: %s"), data)
LOG.debug("Deserializing: %s", data)
return jsonutils.loads(data)
@ -133,9 +133,9 @@ class ZmqSocket(object):
str_data = {'addr': addr, 'type': self.socket_s(),
'subscribe': subscribe, 'bind': bind}
LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
LOG.debug(_("-> bind: %(bind)s"), str_data)
LOG.debug("Connecting to %(addr)s with %(type)s", str_data)
LOG.debug("-> Subscribed to %(subscribe)s", str_data)
LOG.debug("-> bind: %(bind)s", str_data)
try:
if bind:
@ -155,7 +155,7 @@ class ZmqSocket(object):
"""Subscribe."""
if not self.can_sub:
raise RPCException("Cannot subscribe on this socket.")
LOG.debug(_("Subscribing to %s"), msg_filter)
LOG.debug("Subscribing to %s", msg_filter)
try:
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
@ -192,7 +192,7 @@ class ZmqSocket(object):
# it would be much worse if some of the code calling this
# were to fail. For now, lets log, and later evaluate
# if we can safely raise here.
LOG.error(_("ZeroMQ socket could not be closed."))
LOG.error(_LE("ZeroMQ socket could not be closed."))
self.sock = None
def recv(self, **kwargs):
@ -264,7 +264,7 @@ class InternalContext(object):
def _get_response(self, ctx, proxy, topic, data):
"""Process a curried message and cast the result to topic."""
LOG.debug(_("Running func with context: %s"), ctx.to_dict())
LOG.debug("Running func with context: %s", ctx.to_dict())
data.setdefault('version', None)
data.setdefault('args', {})
@ -277,13 +277,13 @@ class InternalContext(object):
# ignore these since they are just from shutdowns
pass
except rpc_common.ClientException as e:
LOG.debug(_("Expected exception during message handling (%s)") %
LOG.debug("Expected exception during message handling (%s)" %
e._exc_info[1])
return {'exc':
rpc_common.serialize_remote_exception(e._exc_info,
log_failure=False)}
except Exception:
LOG.error(_("Exception during message handling"))
LOG.error(_LE("Exception during message handling"))
return {'exc':
rpc_common.serialize_remote_exception(sys.exc_info())}
@ -302,7 +302,7 @@ class InternalContext(object):
self._get_response(ctx, proxy, topic, payload),
ctx.replies)
LOG.debug(_("Sending reply"))
LOG.debug("Sending reply")
_multi_send(_cast, ctx, topic, {
'method': '-process_reply',
'args': {
@ -336,7 +336,7 @@ class ConsumerBase(object):
# processed internally. (non-valid method name)
method = data.get('method')
if not method:
LOG.error(_("RPC message did not include method."))
LOG.error(_LE("RPC message did not include method."))
return
# Internal method
@ -368,7 +368,7 @@ class ZmqBaseReactor(ConsumerBase):
def register(self, proxy, in_addr, zmq_type_in,
in_bind=True, subscribe=None):
LOG.info(_("Registering reactor"))
LOG.info(_LI("Registering reactor"))
if zmq_type_in not in (zmq.PULL, zmq.SUB):
raise RPCException("Bad input socktype")
@ -380,12 +380,12 @@ class ZmqBaseReactor(ConsumerBase):
self.proxies[inq] = proxy
self.sockets.append(inq)
LOG.info(_("In reactor registered"))
LOG.info(_LI("In reactor registered"))
def consume_in_thread(self):
@excutils.forever_retry_uncaught_exceptions
def _consume(sock):
LOG.info(_("Consuming socket"))
LOG.info(_LI("Consuming socket"))
while True:
self.consume(sock)
@ -435,7 +435,7 @@ class ZmqProxy(ZmqBaseReactor):
if topic not in self.topic_proxy:
def publisher(waiter):
LOG.info(_("Creating proxy for topic: %s"), topic)
LOG.info(_LI("Creating proxy for topic: %s"), topic)
try:
# The topic is received over the network,
@ -473,14 +473,14 @@ class ZmqProxy(ZmqBaseReactor):
try:
wait_sock_creation.wait()
except RPCException:
LOG.error(_("Topic socket file creation failed."))
LOG.error(_LE("Topic socket file creation failed."))
return
try:
self.topic_proxy[topic].put_nowait(data)
except eventlet.queue.Full:
LOG.error(_("Local per-topic backlog buffer full for topic "
"%(topic)s. Dropping message.") % {'topic': topic})
LOG.error(_LE("Local per-topic backlog buffer full for topic "
"%(topic)s. Dropping message.") % {'topic': topic})
def consume_in_thread(self):
"""Runs the ZmqProxy service."""
@ -495,8 +495,8 @@ class ZmqProxy(ZmqBaseReactor):
except os.error:
if not os.path.isdir(ipc_dir):
with excutils.save_and_reraise_exception():
LOG.error(_("Required IPC directory does not exist at"
" %s") % (ipc_dir, ))
LOG.error(_LE("Required IPC directory does not exist at"
" %s") % (ipc_dir, ))
try:
self.register(consumption_proxy,
consume_in,
@ -504,11 +504,11 @@ class ZmqProxy(ZmqBaseReactor):
except zmq.ZMQError:
if os.access(ipc_dir, os.X_OK):
with excutils.save_and_reraise_exception():
LOG.error(_("Permission denied to IPC directory at"
" %s") % (ipc_dir, ))
LOG.error(_LE("Permission denied to IPC directory at"
" %s") % (ipc_dir, ))
with excutils.save_and_reraise_exception():
LOG.error(_("Could not create ZeroMQ receiver daemon. "
"Socket may already be in use."))
LOG.error(_LE("Could not create ZeroMQ receiver daemon. "
"Socket may already be in use."))
super(ZmqProxy, self).consume_in_thread()
@ -541,7 +541,7 @@ class ZmqReactor(ZmqBaseReactor):
def consume(self, sock):
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
LOG.debug("CONSUMER RECEIVED DATA: %s", data)
proxy = self.proxies[sock]
@ -560,7 +560,7 @@ class ZmqReactor(ZmqBaseReactor):
# Unmarshal only after verifying the message.
ctx = RpcContext.unmarshal(data[3])
else:
LOG.error(_("ZMQ Envelope version unsupported or unknown."))
LOG.error(_LE("ZMQ Envelope version unsupported or unknown."))
return
self.pool.spawn_n(self.process, proxy, ctx, request)
@ -588,14 +588,14 @@ class Connection(rpc_common.Connection):
topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
if topic in self.topics:
LOG.info(_("Skipping topic registration. Already registered."))
LOG.info(_LI("Skipping topic registration. Already registered."))
return
# Receive messages from (local) proxy
inaddr = "ipc://%s/zmq_topic_%s" % \
(CONF.rpc_zmq_ipc_dir, topic)
LOG.debug(_("Consumer is a zmq.%s"),
LOG.debug("Consumer is a zmq.%s",
['PULL', 'SUB'][sock_type == zmq.SUB])
self.reactor.register(proxy, inaddr, sock_type,
@ -647,7 +647,7 @@ def _call(addr, context, topic, msg, timeout=None,
# Replies always come into the reply service.
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
LOG.debug(_("Creating payload"))
LOG.debug("Creating payload")
# Curry the original request into a reply method.
mcontext = RpcContext.marshal(context)
payload = {
@ -660,7 +660,7 @@ def _call(addr, context, topic, msg, timeout=None,
}
}
LOG.debug(_("Creating queue socket for reply waiter"))
LOG.debug("Creating queue socket for reply waiter")
# Messages arriving async.
# TODO(ewindisch): have reply consumer with dynamic subscription mgmt
@ -673,14 +673,14 @@ def _call(addr, context, topic, msg, timeout=None,
zmq.SUB, subscribe=msg_id, bind=False
)
LOG.debug(_("Sending cast"))
LOG.debug("Sending cast")
_cast(addr, context, topic, payload, envelope)
LOG.debug(_("Cast sent; Waiting reply"))
LOG.debug("Cast sent; Waiting reply")
# Blocks until receives reply
msg = msg_waiter.recv()
LOG.debug(_("Received message: %s"), msg)
LOG.debug(_("Unpacking response"))
LOG.debug("Received message: %s", msg)
LOG.debug("Unpacking response")
if msg[2] == 'cast': # Legacy version
raw_msg = _deserialize(msg[-1])[-1]
@ -719,10 +719,10 @@ def _multi_send(method, context, topic, msg, timeout=None,
Dispatches to the matchmaker and sends message to all relevant hosts.
"""
conf = CONF
LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
LOG.debug("%(msg)s" % {'msg': ' '.join(map(pformat, (topic, msg)))})
queues = _get_matchmaker().queues(topic)
LOG.debug(_("Sending message(s) to: %s"), queues)
LOG.debug("Sending message(s) to: %s", queues)
# Don't stack if we have no matchmaker results
if not queues:

View File

@ -22,7 +22,7 @@ import contextlib
import eventlet
from oslo.config import cfg
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _, _LI
from openstack.common import log as logging
@ -213,7 +213,7 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
self.hosts.discard(host)
self.backend_unregister(key, '.'.join((key, host)))
LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
LOG.info(_LI("Matchmaker unregistered: %(key)s, %(host)s"),
{'key': key, 'host': host})
def start_heartbeat(self):

View File

@ -22,7 +22,7 @@ import json
from oslo.config import cfg
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _LW
from openstack.common import log as logging
from openstack.common.rpc import matchmaker as mm
@ -72,8 +72,8 @@ class RoundRobinRingExchange(RingExchange):
def run(self, key):
if not self._ring_has(key):
LOG.warn(
_("No key defining hosts for topic '%s', "
"see ringfile") % (key, )
_LW("No key defining hosts for topic '%s', "
"see ringfile") % (key, )
)
return []
host = next(self.ring0[key])
@ -90,8 +90,8 @@ class FanoutRingExchange(RingExchange):
nkey = key.split('fanout~')[1:][0]
if not self._ring_has(nkey):
LOG.warn(
_("No key defining hosts for topic '%s', "
"see ringfile") % (nkey, )
_LW("No key defining hosts for topic '%s', "
"see ringfile") % (nkey, )
)
return []
return map(lambda x: (key + '.' + x, x), self.ring[nkey])

View File

@ -15,7 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from openstack.common.gettextutils import _
from openstack.common import log as logging
from openstack.common import rpc
from openstack.common.rpc import dispatcher as rpc_dispatcher
@ -44,7 +43,7 @@ class Service(service.Service):
super(Service, self).start()
self.conn = rpc.create_connection(new=True)
LOG.debug(_("Creating Consumer connection for Service %s") %
LOG.debug("Creating Consumer connection for Service %s" %
self.topic)
dispatcher = rpc_dispatcher.RpcDispatcher([self.manager],

View File

@ -53,14 +53,14 @@ class BaseFilterHandler(base_handler.BaseHandler):
def get_filtered_objects(self, filter_classes, objs,
filter_properties):
list_objs = list(objs)
LOG.debug(_("Starting with %d host(s)"), len(list_objs))
LOG.debug("Starting with %d host(s)", len(list_objs))
for filter_cls in filter_classes:
cls_name = filter_cls.__name__
filter_class = filter_cls()
objs = filter_class.filter_all(list_objs, filter_properties)
if objs is None:
LOG.debug(_("Filter %(cls_name)s says to stop filtering"),
LOG.debug("Filter %(cls_name)s says to stop filtering",
{'cls_name': cls_name})
return
list_objs = list(objs)

View File

@ -15,7 +15,6 @@
import six
from openstack.common.gettextutils import _ # noqa
from openstack.common import log as logging
from openstack.common.scheduler import filters
from openstack.common.scheduler.filters import extra_specs_ops
@ -51,8 +50,9 @@ class CapabilitiesFilter(filters.BaseHostFilter):
if cap is None:
return False
if not extra_specs_ops.match(cap, req):
LOG.debug(_("extra_spec requirement '%(req)s' does not match "
"'%(cap)s'"), {'req': req, 'cap': cap})
LOG.debug("extra_spec requirement '%(req)s' "
"does not match '%(cap)s'",
{'req': req, 'cap': cap})
return False
return True
@ -64,7 +64,7 @@ class CapabilitiesFilter(filters.BaseHostFilter):
resource_type = filter_properties.get('resource_type')
if not self._satisfies_extra_specs(host_state.capabilities,
resource_type):
LOG.debug(_("%(host_state)s fails resource_type extra_specs "
"requirements"), {'host_state': host_state})
LOG.debug("%(host_state)s fails resource_type extra_specs "
"requirements", {'host_state': host_state})
return False
return True

View File

@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from openstack.common.gettextutils import _ # noqa
from openstack.common import log as logging
from openstack.common.scheduler import filters
@ -40,7 +39,7 @@ class IgnoreAttemptedHostsFilter(filters.BaseHostFilter):
attempted = filter_properties.get('retry', None)
if not attempted:
# Re-scheduling is disabled
LOG.debug(_("Re-scheduling is disabled."))
LOG.debug("Re-scheduling is disabled.")
return True
hosts = attempted.get('hosts', [])
@ -49,8 +48,8 @@ class IgnoreAttemptedHostsFilter(filters.BaseHostFilter):
passes = host not in hosts
pass_msg = "passes" if passes else "fails"
LOG.debug(_("Host %(host)s %(pass_msg)s. Previously tried hosts: "
"%(hosts)s") % {'host': host,
'pass_msg': pass_msg,
'hosts': hosts})
LOG.debug("Host %(host)s %(pass_msg)s. Previously tried hosts: "
"%(hosts)s" % {'host': host,
'pass_msg': pass_msg,
'hosts': hosts})
return passes

View File

@ -38,7 +38,7 @@ from eventlet import event
from oslo.config import cfg
from openstack.common import eventlet_backdoor
from openstack.common.gettextutils import _
from openstack.common.gettextutils import _LE, _LI, _LW
from openstack.common import importutils
from openstack.common import log as logging
from openstack.common import threadgroup
@ -163,7 +163,7 @@ class ServiceLauncher(Launcher):
status = None
signo = 0
LOG.debug(_('Full set of CONF:'))
LOG.debug('Full set of CONF:')
CONF.log_opt_values(LOG, std_logging.DEBUG)
try:
@ -172,7 +172,7 @@ class ServiceLauncher(Launcher):
super(ServiceLauncher, self).wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_('Caught %s, exiting'), signame)
LOG.info(_LI('Caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
@ -184,7 +184,7 @@ class ServiceLauncher(Launcher):
rpc.cleanup()
except Exception:
# We're shutting down, so it doesn't matter at this point.
LOG.exception(_('Exception during rpc cleanup.'))
LOG.exception(_LE('Exception during rpc cleanup.'))
return status, signo
@ -235,7 +235,7 @@ class ProcessLauncher(object):
# dies unexpectedly
self.readpipe.read()
LOG.info(_('Parent process has died unexpectedly, exiting'))
LOG.info(_LI('Parent process has died unexpectedly, exiting'))
sys.exit(1)
@ -266,13 +266,13 @@ class ProcessLauncher(object):
launcher.wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_('Caught %s, exiting'), signame)
LOG.info(_LI('Caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
status = exc.code
except BaseException:
LOG.exception(_('Unhandled exception'))
LOG.exception(_LE('Unhandled exception'))
status = 2
finally:
launcher.stop()
@ -305,7 +305,7 @@ class ProcessLauncher(object):
# start up quickly but ensure we don't fork off children that
# die instantly too quickly.
if time.time() - wrap.forktimes[0] < wrap.workers:
LOG.info(_('Forking too fast, sleeping'))
LOG.info(_LI('Forking too fast, sleeping'))
time.sleep(1)
wrap.forktimes.pop(0)
@ -324,7 +324,7 @@ class ProcessLauncher(object):
os._exit(status)
LOG.info(_('Started child %d'), pid)
LOG.info(_LI('Started child %d'), pid)
wrap.children.add(pid)
self.children[pid] = wrap
@ -334,7 +334,7 @@ class ProcessLauncher(object):
def launch_service(self, service, workers=1):
wrap = ServiceWrapper(service, workers)
LOG.info(_('Starting %d workers'), wrap.workers)
LOG.info(_LI('Starting %d workers'), wrap.workers)
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
@ -351,15 +351,15 @@ class ProcessLauncher(object):
if os.WIFSIGNALED(status):
sig = os.WTERMSIG(status)
LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
dict(pid=pid, sig=sig))
else:
code = os.WEXITSTATUS(status)
LOG.info(_('Child %(pid)s exited with status %(code)d'),
LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
dict(pid=pid, code=code))
if pid not in self.children:
LOG.warning(_('pid %d not in child list'), pid)
LOG.warning(_LW('pid %d not in child list'), pid)
return None
wrap = self.children.pop(pid)
@ -381,7 +381,7 @@ class ProcessLauncher(object):
def wait(self):
"""Loop waiting on children to die and respawning as necessary."""
LOG.debug(_('Full set of CONF:'))
LOG.debug('Full set of CONF:')
CONF.log_opt_values(LOG, std_logging.DEBUG)
while True:
@ -389,7 +389,7 @@ class ProcessLauncher(object):
self._respawn_children()
if self.sigcaught:
signame = _signo_to_signame(self.sigcaught)
LOG.info(_('Caught %s, stopping children'), signame)
LOG.info(_LI('Caught %s, stopping children'), signame)
if not _is_sighup_and_daemon(self.sigcaught):
break
@ -407,7 +407,7 @@ class ProcessLauncher(object):
# Wait for children to die
if self.children:
LOG.info(_('Waiting on %d children to exit'), len(self.children))
LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
while self.children:
self._wait_child()

View File

@ -59,4 +59,6 @@ commands = flake8
[hacking]
import_exceptions = openstack.common.gettextutils._
import_exceptions =
openstack.common.gettextutils
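
With this setting in place, a module may import several translation
markers from gettextutils on one line, for example:

    from openstack.common.gettextutils import _, _LE, _LI, _LW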