Cleaned up logging

* Removed all remaining log translation.
* Always use lazy interpolation when logging.

Logs should no longer be translated (starting with Pike)[1].

Also, according to the OpenStack logging guidelines[2], variables in log
messages should be interpolated by the logger itself rather than pre-formatted
with the % operator.
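
For illustration, the pattern applied throughout this change is roughly the
following (the message text and variable names below are invented for the
example, not taken from the diff):

    from oslo_log import log as logging
    from senlin.common.i18n import _  # translation marker being dropped from log calls

    LOG = logging.getLogger(__name__)
    node_id, status = 'fake-node', 'ERROR'  # placeholder values for the example

    # Old pattern: translated and eagerly interpolated; the message string is
    # built even if WARNING records end up being filtered out.
    LOG.warning(_('Node %(node)s is not active: %(status)s.')
                % {'node': node_id, 'status': status})

    # New pattern: plain format string plus arguments; the logger interpolates
    # lazily, only when the record is actually emitted.
    LOG.warning('Node %(node)s is not active: %(status)s.',
                {'node': node_id, 'status': status})

Besides skipping useless formatting work for suppressed log levels, passing the
arguments separately keeps the raw format string available to log handlers and
aggregation tools.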

[1]: http://lists.openstack.org/pipermail/openstack-dev/2017-March/thread.html#113365
[2]: https://docs.openstack.org/oslo.i18n/latest/user/guidelines.html#adding-variables-to-log-messages

Change-Id: Ice91e70aec959b7433eb7df493ee589f9aeecf2c
Erik Olof Gunnar Andersson 2018-04-02 19:37:43 -07:00
parent 66e524b5e1
commit 98d8cf30a3
14 changed files with 61 additions and 85 deletions

View File

@@ -46,9 +46,8 @@ class VersionNegotiationFilter(wsgi.Middleware):
         If there is a version identifier in the URI, simply return the correct
         API controller, otherwise, if we find an Accept: header, process it
         """
-        msg = ("Processing request: %(m)s %(p)s Accept: %(a)s" %
-               {'m': req.method, 'p': req.path, 'a': req.accept})
-        LOG.debug(msg)
+        LOG.debug("Processing request: %(m)s %(p)s Accept: %(a)s",
+                  {'m': req.method, 'p': req.path, 'a': req.accept})
 
         # If the request is for /versions, just return the versions container
         if req.path_info_peek() in ("versions", ""):
@@ -62,8 +61,8 @@ class VersionNegotiationFilter(wsgi.Middleware):
             self._check_version_request(req, controller)
             major = req.environ['api.major']
             minor = req.environ['api.minor']
-            LOG.debug("Matched versioned URI. Version: %(major)d.%(minor)d"
-                      % {'major': major, 'minor': minor})
+            LOG.debug("Matched versioned URI. Version: %(major)d.%(minor)d",
+                      {'major': major, 'minor': minor})
             # Strip the version from the path
             req.path_info_pop()
             path = req.path_info_peek()

View File

@@ -11,7 +11,6 @@
 #    under the License.
 
 from oslo_log import log as logging
-import six
 from six.moves.urllib import parse as urlparse
 import webob
 
@@ -105,8 +104,7 @@ class WebhookMiddleware(wsgi.Middleware):
         try:
             token = driver_base.SenlinDriver().identity.get_token(**kwargs)
         except Exception as ex:
-            LOG.exception(_('Webhook failed authentication: %s.'),
-                          six.text_type(ex))
+            LOG.exception('Webhook failed authentication: %s.', ex)
             raise exc.Forbidden()
 
         return token

View File

@@ -52,7 +52,7 @@ class SenlinException(Exception):
             # log the issue and the kwargs
             LOG.exception('Exception in string format operation')
             for name, value in kwargs.items():
-                LOG.error("%s: %s" % (name, value))  # noqa
+                LOG.error("%s: %s", name, value)  # noqa
 
             if _FATAL_EXCEPTION_FORMAT_ERRORS:
                 raise

View File

@@ -122,10 +122,9 @@ class SchemaBase(collections.Mapping):
                        'key': key}
                 raise exc.ESchema(message=msg)
             else:
-                msg = _('Warning: %(key)s will be deprecated after version '
-                        '%(version)s!') % {'key': key,
-                                           'version': self.max_version}
-                LOG.warning(msg)
+                LOG.warning('Warning: %(key)s will be deprecated after '
+                            'version %(version)s!',
+                            {'key': key, 'version': self.max_version})
 
     def __getitem__(self, key):
         if key == self.DESCRIPTION:

View File

@@ -1209,8 +1209,7 @@ def action_acquire(context, action_id, owner, timestamp):
            return None
 
        if action.status != consts.ACTION_READY:
-            msg = 'The action is not executable: %s' % action.status
-            LOG.warning(msg)
+            LOG.warning('The action is not executable: %s', action.status)
            return None
 
        action.owner = owner
        action.start_time = timestamp
@@ -1331,8 +1330,7 @@ def action_delete_by_target(context, target, action=None,
                             action_excluded=None, status=None,
                             project_safe=True):
     if action and action_excluded:
-        msg = "action and action_excluded cannot be both specified."
-        LOG.warning(msg)
+        LOG.warning("action and action_excluded cannot be both specified.")
         return None
 
     with session_for_write() as session:

View File

@@ -74,9 +74,7 @@ class LoadBalancerDriver(base.DriverBase):
             try:
                 lb = self.oc().loadbalancer_get(lb_id, ignore_missing=True)
             except exception.InternalError as ex:
-                msg = ('Failed in getting loadbalancer: %s.'
-                       % six.text_type(ex))
-                LOG.exception(msg)
+                LOG.exception('Failed in getting loadbalancer: %s.', ex)
                 return False
             if lb is None:
                 lb_ready = ignore_not_found
@@ -112,7 +110,7 @@ class LoadBalancerDriver(base.DriverBase):
         try:
             subnet = self.nc().subnet_get(vip['subnet'])
         except exception.InternalError as ex:
-            msg = 'Failed in getting subnet: %s.' % six.text_type(ex)
+            msg = 'Failed in getting subnet: %s.' % ex
             LOG.exception(msg)
             return False, msg
         subnet_id = subnet.id
@@ -280,9 +278,8 @@ class LoadBalancerDriver(base.DriverBase):
             net = self.nc().network_get(net_id)
         except exception.InternalError as ex:
             resource = 'subnet' if subnet in ex.message else 'network'
-            msg = ('Failed in getting %(resource)s: %(msg)s.'
-                   % {'resource': resource, 'msg': six.text_type(ex)})
-            LOG.exception(msg)
+            LOG.exception('Failed in getting %(resource)s: %(msg)s.',
+                          {'resource': resource, 'msg': ex})
             return None
 
         net_name = net.name
@@ -291,8 +288,7 @@ class LoadBalancerDriver(base.DriverBase):
         node_detail = node_obj.get_details(ctx)
         addresses = node_detail.get('addresses')
         if net_name not in addresses:
-            msg = 'Node is not in subnet %(subnet)s'
-            LOG.error(msg, {'subnet': subnet})
+            LOG.error('Node is not in subnet %(subnet)s', {'subnet': subnet})
             return None
 
         # Use the first IP address if more than one are found in target network
@@ -312,9 +308,7 @@ class LoadBalancerDriver(base.DriverBase):
             member = self.oc().pool_member_create(pool_id, address, port,
                                                   subnet_obj.id)
         except (exception.InternalError, exception.Error) as ex:
-            msg = ('Failed in creating lb pool member: %s.'
-                   % six.text_type(ex))
-            LOG.exception(msg)
+            LOG.exception('Failed in creating lb pool member: %s.', ex)
             return None
         res = self._wait_for_lb_ready(lb_id)
         if res is False:
@@ -345,10 +339,8 @@ class LoadBalancerDriver(base.DriverBase):
                 raise exception.Error(msg)
             self.oc().pool_member_delete(pool_id, member_id)
         except (exception.InternalError, exception.Error) as ex:
-            msg = ('Failed in removing member %(m)s from pool %(p)s: '
-                   '%(ex)s' % {'m': member_id, 'p': pool_id,
-                               'ex': six.text_type(ex)})
-            LOG.exception(msg)
+            LOG.exception('Failed in removing member %(m)s from pool %(p)s: '
+                          '%(ex)s', {'m': member_id, 'p': pool_id, 'ex': ex})
             return None
         res = self._wait_for_lb_ready(lb_id)
         if res is False:

View File

@@ -195,8 +195,8 @@ class ClusterAction(base.Action):
 
     def _update_nodes(self, profile_id, nodes_obj):
         # Get batching policy data if any
-        fmt = "Updating cluster '%(cluster)s': profile='%(profile)s'."
-        LOG.info(fmt, {'cluster': self.entity.id, 'profile': profile_id})
+        LOG.info("Updating cluster '%(cluster)s': profile='%(profile)s'.",
+                 {'cluster': self.entity.id, 'profile': profile_id})
         plan = []
 
         pd = self.data.get('update', None)
@@ -917,9 +917,9 @@ class ClusterAction(base.Action):
         # desired is checked when strict is True
         curr_size = no.Node.count_by_cluster(self.context, self.target)
         if count > curr_size:
-            msg = ("Triming count (%(count)s) to current "
-                   "cluster size (%(curr)s) for scaling in")
-            LOG.warning(msg, {'count': count, 'curr': curr_size})
+            LOG.warning("Triming count (%(count)s) to current cluster size "
+                        "(%(curr)s) for scaling in",
+                        {'count': count, 'curr': curr_size})
             count = curr_size
         new_size = curr_size - count

View File

@@ -18,7 +18,6 @@ from oslo_utils import uuidutils
 from senlin.common import consts
 from senlin.common import context as senlin_context
 from senlin.common import exception
-from senlin.common.i18n import _
 from senlin.common import utils
 from senlin.drivers import base as driver_base
 from senlin.objects import credential as co
@@ -218,9 +217,7 @@ class Receiver(object):
         try:
             base = kc.get_senlin_endpoint()
         except exception.InternalError as ex:
-            msg = _('Senlin endpoint can not be found: %s.'
-                    ) % ex
-            LOG.warning(msg)
+            LOG.warning('Senlin endpoint can not be found: %s.', ex)
 
         return base

View File

@@ -63,11 +63,11 @@ class Message(base.Receiver):
             # is not provided in configuration file
             base = self._get_base_url()
             if not base:
-                msg = _('Receiver notification host is not specified in '
-                        'configuration file and Senlin service endpoint can '
-                        'not be found, using local hostname (%(host)s) for '
-                        'subscriber url.') % {'host': host}
-                LOG.warning(msg)
+                LOG.warning('Receiver notification host is not specified in '
+                            'configuration file and Senlin service '
+                            'endpoint can not be found, using local'
+                            ' hostname (%(host)s) for subscriber url.',
+                            {'host': host})
                 host = socket.gethostname()
 
         if not base:
@@ -98,11 +98,13 @@ class Message(base.Receiver):
                 self.project,
                 roles)
         except exc.InternalError as ex:
-            msg = _('Can not build trust between user %(user)s and zaqar '
-                    'service user %(zaqar)s for receiver %(receiver)s.'
-                    ) % {'user': self.user, 'zaqar': zaqar_trustee_user_id,
-                         'receiver': self.id}
-            LOG.error(msg)
+            LOG.error('Can not build trust between user %(user)s and zaqar '
+                      'service user %(zaqar)s for receiver %(receiver)s.',
+                      {
+                          'user': self.user,
+                          'zaqar': zaqar_trustee_user_id,
+                          'receiver': self.id
+                      })
             raise exc.EResourceCreation(type='trust',
                                         message=six.text_type(ex))
         return trust.id
@@ -253,7 +255,7 @@ class Message(base.Receiver):
             claim = self.zaqar().claim_create(queue_name)
             messages = claim.messages
         except exc.InternalError as ex:
-            LOG.error(_('Failed in claiming message: %s'), str(ex))
+            LOG.error('Failed in claiming message: %s', ex)
             return
 
         # Build actions
@@ -264,21 +266,17 @@ class Message(base.Receiver):
                     action_id = self._build_action(context, message)
                     actions.append(action_id)
                 except exc.InternalError as ex:
-                    LOG.error(_('Failed in building action: %s'),
-                              ex)
+                    LOG.error('Failed in building action: %s', ex)
 
                 try:
                     self.zaqar().message_delete(queue_name, message['id'],
                                                 claim.id)
                 except exc.InternalError as ex:
-                    LOG.error(_('Failed in deleting message %(id)s: %(reason)s'
-                                ), {'id': message['id'],
-                                    'reason': ex.message})
+                    LOG.error('Failed in deleting message %(id)s: %(reason)s',
+                              {'id': message['id'], 'reason': ex})
 
             self.zaqar().claim_delete(queue_name, claim.id)
 
-        msg = _('Actions %(actions)s were successfully built.'
-                ) % {'actions': actions}
-        LOG.info(msg)
+        LOG.info('Actions %(actions)s were successfully built.',
+                 {'actions': actions})
 
         dispatcher.start_action()

View File

@@ -16,7 +16,6 @@ from oslo_config import cfg
 from oslo_log import log as logging
 from six.moves.urllib import parse
 
-from senlin.common.i18n import _
 from senlin.engine.receivers import base
 
 LOG = logging.getLogger(__name__)
@@ -38,11 +37,11 @@ class Webhook(base.Receiver):
         base = self._get_base_url()
         if not base:
             host = socket.gethostname()
-            msg = _('Webhook host is not specified in configuration '
-                    'file and Senlin service endpoint can not be found,'
-                    'using local hostname (%(host)s) for webhook url.'
-                    ) % {'host': host}
-            LOG.warning(msg)
+            LOG.warning(
+                'Webhook host is not specified in configuration '
+                'file and Senlin service endpoint can not be found,'
+                'using local hostname (%(host)s) for webhook url.',
+                {'host': host})
         elif base.rfind("v1") == -1:
             base = "%s/v1" % base

View File

@@ -97,7 +97,7 @@ class Registry(object):
                         details)
         else:
             msg = 'Registering %(name)s -> %(value)s'
-            LOG.info(msg, {'name': name, 'value': str(info.plugin)})
+            LOG.info(msg, {'name': name, 'value': info.plugin})
 
         info.user_provided = not self.is_global
         registry[name] = info

View File

@@ -196,7 +196,7 @@ def reschedule(action_id, sleep_time=1):
     :param sleep_time: seconds to sleep; if None, no sleep;
     '''
 
     if sleep_time is not None:
-        LOG.debug('Action %s sleep for %s seconds' % (action_id, sleep_time))
+        LOG.debug('Action %s sleep for %s seconds', action_id, sleep_time)
         eventlet.sleep(sleep_time)

View File

@@ -1245,14 +1245,14 @@ class EngineService(service.Service):
         if res:
             raise exception.BadRequest(msg=res)
 
-        fmt = _("Resizing cluster '%(cluster)s': type=%(adj_type)s, "
-                "number=%(number)s, min_size=%(min_size)s, "
-                "max_size=%(max_size)s, min_step=%(min_step)s, "
-                "strict=%(strict)s.")
-        LOG.info(fmt, {'cluster': req.identity, 'adj_type': adj_type,
-                       'number': number, 'min_size': min_size,
-                       'max_size': max_size, 'min_step': min_step,
-                       'strict': strict})
+        LOG.info("Resizing cluster '%(cluster)s': type=%(adj_type)s, "
+                 "number=%(number)s, min_size=%(min_size)s, "
+                 "max_size=%(max_size)s, min_step=%(min_step)s, "
+                 "strict=%(strict)s.",
+                 {'cluster': req.identity, 'adj_type': adj_type,
+                  'number': number, 'min_size': min_size,
+                  'max_size': max_size, 'min_step': min_step,
+                  'strict': strict})
 
         params = {
             'name': 'cluster_resize_%s' % db_cluster.id[:8],

View File

@@ -18,7 +18,6 @@ https://docs.openstack.org/senlin/latest/contributor/policies/affinity_v1.html
 """
 import re
-import six
 
 from oslo_log import log as logging
 
 from senlin.common import constraints
@@ -158,8 +157,7 @@ class AffinityPolicy(base.Policy):
         except exc.InternalError as ex:
             msg = _("Failed in retrieving servergroup '%s'."
                     ) % group_name
-            LOG.exception('%(msg)s: %(ex)s' % {
-                'msg': msg, 'ex': six.text_type(ex)})
+            LOG.exception('%(msg)s: %(ex)s', {'msg': msg, 'ex': ex})
             return False, msg
 
         if server_group:
@@ -185,8 +183,7 @@ class AffinityPolicy(base.Policy):
                     policies=[group.get(self.GROUP_POLICIES)])
             except Exception as ex:
                 msg = _('Failed in creating servergroup.')
-                LOG.exception('%(msg)s: %(ex)s' % {
-                    'msg': msg, 'ex': six.text_type(ex)})
+                LOG.exception('%(msg)s: %(ex)s', {'msg': msg, 'ex': ex})
                 return False, msg
 
             data['servergroup_id'] = server_group.id
@@ -225,8 +222,7 @@ class AffinityPolicy(base.Policy):
             nc.server_group_delete(group_id)
         except Exception as ex:
             msg = _('Failed in deleting servergroup.')
-            LOG.exception('%(msg)s: %(ex)s' % {
-                'msg': msg, 'ex': six.text_type(ex)})
+            LOG.exception('%(msg)s: %(ex)s', {'msg': msg, 'ex': ex})
             return False, msg
 
         return True, reason