Handle log message interpolation by the logger
According to the OpenStack guidelines [1], logged string messages should be interpolated by the logger, not with the `%` operator at the call site. [1]: http://docs.openstack.org/developer/oslo.i18n/guidelines.html#adding-variables-to-log-messages Change-Id: Id829e12411a1a2c578db2cd87fc51e5310fb868c Partial-Bug: #1661262
This commit is contained in:
parent
e56a095df3
commit
c3d06fc4c0
|
@ -84,7 +84,7 @@ class JSONResponseSerializer(object):
|
||||||
return six.text_type(obj)
|
return six.text_type(obj)
|
||||||
|
|
||||||
response = jsonutils.dumps(data, default=sanitizer, sort_keys=True)
|
response = jsonutils.dumps(data, default=sanitizer, sort_keys=True)
|
||||||
LOG.debug("JSON response : %s" % response)
|
LOG.debug("JSON response : %s", response)
|
||||||
return response
|
return response
|
||||||
|
|
||||||
def default(self, response, result):
|
def default(self, response, result):
|
||||||
|
|
|
@ -208,7 +208,7 @@ class Server(object):
|
||||||
self.pool.spawn_n(self._single_run, self.application, self.sock)
|
self.pool.spawn_n(self._single_run, self.application, self.sock)
|
||||||
return
|
return
|
||||||
|
|
||||||
LOG.info(_LI("Starting %d workers") % self.conf.workers)
|
LOG.info(_LI("Starting %d workers"), self.conf.workers)
|
||||||
signal.signal(signal.SIGTERM, self.kill_children)
|
signal.signal(signal.SIGTERM, self.kill_children)
|
||||||
signal.signal(signal.SIGINT, self.kill_children)
|
signal.signal(signal.SIGINT, self.kill_children)
|
||||||
signal.signal(signal.SIGHUP, self.hup)
|
signal.signal(signal.SIGHUP, self.hup)
|
||||||
|
@ -627,7 +627,7 @@ class Resource(object):
|
||||||
action_result = self.dispatch(self.controller, action,
|
action_result = self.dispatch(self.controller, action,
|
||||||
request, **action_args)
|
request, **action_args)
|
||||||
except TypeError as err:
|
except TypeError as err:
|
||||||
LOG.error(_LE('Exception handling resource: %s') % err)
|
LOG.error(_LE('Exception handling resource: %s'), err)
|
||||||
msg = _('The server could not comply with the request since '
|
msg = _('The server could not comply with the request since '
|
||||||
'it is either malformed or otherwise incorrect.')
|
'it is either malformed or otherwise incorrect.')
|
||||||
err = exc.HTTPBadRequest(msg)
|
err = exc.HTTPBadRequest(msg)
|
||||||
|
|
|
@ -293,7 +293,7 @@ class LoadBalancerDriver(base.DriverBase):
|
||||||
return None
|
return None
|
||||||
res = self._wait_for_lb_ready(lb_id)
|
res = self._wait_for_lb_ready(lb_id)
|
||||||
if res is False:
|
if res is False:
|
||||||
LOG.error(_LE('Failed in creating pool member (%s).') % member.id)
|
LOG.error(_LE('Failed in creating pool member (%s).'), member.id)
|
||||||
return None
|
return None
|
||||||
|
|
||||||
return member.id
|
return member.id
|
||||||
|
@ -327,7 +327,7 @@ class LoadBalancerDriver(base.DriverBase):
|
||||||
return None
|
return None
|
||||||
res = self._wait_for_lb_ready(lb_id)
|
res = self._wait_for_lb_ready(lb_id)
|
||||||
if res is False:
|
if res is False:
|
||||||
LOG.error(_LE('Failed in deleting pool member (%s).') % member_id)
|
LOG.error(_LE('Failed in deleting pool member (%s).'), member_id)
|
||||||
return None
|
return None
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
|
|
@ -98,14 +98,14 @@ class NovaNotificationEndpoint(object):
|
||||||
def warn(self, ctxt, publisher_id, event_type, payload, metadata):
|
def warn(self, ctxt, publisher_id, event_type, payload, metadata):
|
||||||
meta = payload.get('metadata', {})
|
meta = payload.get('metadata', {})
|
||||||
if meta.get('cluster_id') == self.cluster_id:
|
if meta.get('cluster_id') == self.cluster_id:
|
||||||
LOG.warning("publisher=%s" % publisher_id)
|
LOG.warning("publisher=%s", publisher_id)
|
||||||
LOG.warning("event_type=%s" % event_type)
|
LOG.warning("event_type=%s", event_type)
|
||||||
|
|
||||||
def debug(self, ctxt, publisher_id, event_type, payload, metadata):
|
def debug(self, ctxt, publisher_id, event_type, payload, metadata):
|
||||||
meta = payload.get('metadata', {})
|
meta = payload.get('metadata', {})
|
||||||
if meta.get('cluster_id') == self.cluster_id:
|
if meta.get('cluster_id') == self.cluster_id:
|
||||||
LOG.debug("publisher=%s" % publisher_id)
|
LOG.debug("publisher=%s", publisher_id)
|
||||||
LOG.debug("event_type=%s" % event_type)
|
LOG.debug("event_type=%s", event_type)
|
||||||
|
|
||||||
|
|
||||||
class HeatNotificationEndpoint(object):
|
class HeatNotificationEndpoint(object):
|
||||||
|
|
|
@ -251,7 +251,7 @@ class Message(base.Receiver):
|
||||||
claim = self.zaqar().claim_create(queue_name)
|
claim = self.zaqar().claim_create(queue_name)
|
||||||
messages = claim.messages
|
messages = claim.messages
|
||||||
except exc.InternalError as ex:
|
except exc.InternalError as ex:
|
||||||
LOG.error(_('Failed in claiming message: %s') % str(ex))
|
LOG.error(_('Failed in claiming message: %s'), str(ex))
|
||||||
return
|
return
|
||||||
|
|
||||||
# Build actions
|
# Build actions
|
||||||
|
@ -262,8 +262,7 @@ class Message(base.Receiver):
|
||||||
action_id = self._build_action(context, message)
|
action_id = self._build_action(context, message)
|
||||||
actions.append(action_id)
|
actions.append(action_id)
|
||||||
except exc.InternalError as ex:
|
except exc.InternalError as ex:
|
||||||
LOG.error(_('Failed in building action: %s'
|
LOG.error(_('Failed in building action: %s'), ex.message)
|
||||||
) % ex.message)
|
|
||||||
try:
|
try:
|
||||||
self.zaqar().message_delete(queue_name, message['id'],
|
self.zaqar().message_delete(queue_name, message['id'],
|
||||||
claim.id)
|
claim.id)
|
||||||
|
|
|
@ -466,6 +466,5 @@ class Profile(object):
|
||||||
return True
|
return True
|
||||||
|
|
||||||
msg = ", ".join(non_updatables)
|
msg = ", ".join(non_updatables)
|
||||||
LOG.error(_LE("The following properties are not updatable: %s."
|
LOG.error(_LE("The following properties are not updatable: %s."), msg)
|
||||||
) % msg)
|
|
||||||
return False
|
return False
|
||||||
|
|
Loading…
Reference in New Issue