Another improvement of info level log messages
String interpolation should be delayed to be handled by the logging code, rather than being done at the point of the logging call. So we should use

    LOG.info(_LI('some message: variable=%s'), variable)

instead of

    LOG.info(_LI('some message: variable=%s') % variable)

Reference: http://docs.openstack.org/developer/oslo.i18n/guidelines.html

Change-Id: I409358d9e35813f1875993b96fce86a0e2bc940b
parent bb9de39180
commit c7eca3fbd8
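As a minimal illustration of the guideline (plain stdlib logging, not code from this patch; in the patched files the format string is additionally wrapped in _LI() for translation):

    import logging

    logging.basicConfig(level=logging.INFO)
    LOG = logging.getLogger(__name__)

    variable = "example-value"

    # Eager interpolation: the message is formatted at the call site even
    # when INFO is disabled, and the translation layer only ever sees the
    # already-expanded string.
    LOG.info("some message: variable=%s" % variable)

    # Deferred interpolation: format string and argument are passed
    # separately; the logging machinery interpolates only if the record
    # is actually emitted.
    LOG.info("some message: variable=%s", variable)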
@@ -111,7 +111,7 @@ def rec2des(rec, zonename):
             rectypes.append(k)
         else:
             LOG.info(_LI("Skipping unknown record type "
-                         "%(type)s in %(name)s") %
+                         "%(type)s in %(name)s"),
                      {'type': k, 'name': name})
 
     desrecs = []
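The hunk above uses named placeholders with a dict argument; a small sketch of that form with deferred interpolation (illustrative values, not the patched module):

    import logging

    LOG = logging.getLogger(__name__)

    k, name = "NAPTR", "example.test."  # illustrative values

    # A single dict argument drives %-style named interpolation inside the
    # logging call, so the string is only built if the record is emitted.
    LOG.info("Skipping unknown record type %(type)s in %(name)s",
             {"type": k, "name": name})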
@@ -182,14 +182,14 @@ def syncipaservers2des(servers, designatereq, designateurl):
     # first - add servers from ipa not already in designate
     for server in servers:
         if server in dservers:
-            LOG.info(_LI("Skipping ipa server %s already in designate")
-                     % server)
+            LOG.info(_LI("Skipping ipa server %s already in designate"),
+                     server)
         else:
             desreq = {"name": server}
             resp = designatereq.post(srvurl, data=json.dumps(desreq))
             LOG.debug("Response: %s" % pprint.pformat(resp.json()))
             if resp.status_code == 200:
-                LOG.info(_LI("Added server %s to designate") % server)
+                LOG.info(_LI("Added server %s to designate"), server)
             else:
                 raise AddServerError("Unable to add %s: %s" %
                                      (server, pprint.pformat(resp.json())))
@@ -199,7 +199,7 @@ def syncipaservers2des(servers, designatereq, designateurl):
         if server not in servers:
             delresp = designatereq.delete(srvurl + "/" + sid)
             if delresp.status_code == 200:
-                LOG.info(_LI("Deleted server %s") % server)
+                LOG.info(_LI("Deleted server %s"), server)
             else:
                 raise DeleteServerError("Unable to delete %s: %s" %
                                         (server,
@@ -293,7 +293,7 @@ def main():
     exc = None
     fakezoneid = None
     if resp.status_code == 200:
-        LOG.info(_LI("Added domain %s") % domname)
+        LOG.info(_LI("Added domain %s"), domname)
         fakezoneid = resp.json()['id']
         delresp = designatereq.delete(domainurl + "/" + fakezoneid)
         if delresp.status_code != 200:
@@ -338,7 +338,7 @@ def main():
         desreq = zone2des(zonerec)
         resp = designatereq.post(domainurl, data=json.dumps(desreq))
         if resp.status_code == 200:
-            LOG.info(_LI("Added domain %s") % desreq['name'])
+            LOG.info(_LI("Added domain %s"), desreq['name'])
         else:
             raise AddDomainError("Unable to add domain %s: %s" %
                                  (desreq['name'], pprint.pformat(resp.json())))
@@ -354,7 +354,7 @@ def main():
             resp = designatereq.post(recurl, data=json.dumps(desreq))
             if resp.status_code == 200:
                 LOG.info(_LI("Added record %(record)s "
-                             "for domain %(domain)s") %
+                             "for domain %(domain)s"),
                          {'record': desreq['name'], 'domain': zonename})
             else:
                 raise AddRecordError("Could not add record %s: %s" %
@@ -48,7 +48,7 @@ if __name__ == '__main__':
 
     domain = client.domains.get(cfg.CONF.domain_id)
 
-    msg = "Creating %s records" % cfg.CONF.records
+    msg = "Creating %s records", cfg.CONF.records
     LOG.info(msg)
     for i in range(0, cfg.CONF.records):
         name = '%s.%s' % (str(uuid.uuid4()), domain.name)
@@ -1056,7 +1056,7 @@ class TempestConfigPrivate(object):
         cfg.CONF([], project='tempest', default_config_files=config_files)
         logging.setup('tempest')
         LOG = logging.getLogger('tempest')
-        # LOG.info("Using tempest config file %s" % path)
+        # LOG.info("Using tempest config file %s", path)
         register_opts()
         self._set_attrs()
         if parse_conf:
@@ -171,7 +171,7 @@ class Extractor:
                                                  allow_include=True)
             except dns.zone.UnknownOrigin:
                 LOG.info(_LI('%(zonefile)s is missing $ORIGIN, '
-                             'inserting %(name)s') %
+                             'inserting %(name)s'),
                          {'zonefile': zonefile, 'name': name})
                 zone_object = dns.zone.from_file(zonefile,
                                                  allow_include=True,
@@ -53,7 +53,7 @@ class RequestHandler(object):
             master = {'host': raw_server[0], 'port': int(raw_server[1])}
             self.masters.append(master)
 
-        LOG.info(_LI("Agent masters: %(masters)s") %
+        LOG.info(_LI("Agent masters: %(masters)s"),
                  {'masters': self.masters})
 
         self.allow_notify = CONF['service:agent'].allow_notify
@@ -62,7 +62,7 @@ def auth_pipeline_factory(loader, global_conf, **local_conf):
     """
     pipeline = local_conf[cfg.CONF['service:api'].auth_strategy]
     pipeline = pipeline.split()
-    LOG.info(_LI('Getting auth pipeline: %s') % pipeline[:-1])
+    LOG.info(_LI('Getting auth pipeline: %s'), pipeline[:-1])
     filters = [loader.get_filter(n) for n in pipeline[:-1]]
     app = loader.get_app(pipeline[-1])
     filters.reverse()
@@ -43,6 +43,6 @@ class Service(service.WSGIService, service.Service):
             msg = 'Unable to determine appropriate api-paste-config file'
             raise exceptions.ConfigurationError(msg)
 
-        LOG.info(_LI('Using api-paste-config found at: %s') % config_paths[0])
+        LOG.info(_LI('Using api-paste-config found at: %s'), config_paths[0])
 
         return deploy.loadapp("config:%s" % config_paths[0], name='osapi_dns')
@@ -336,7 +336,7 @@ def do_axfr(zone_name, servers, timeout=None, source=None):
         to = eventlet.Timeout(timeout)
         log_info = {'name': zone_name, 'host': srv}
         try:
-            LOG.info(_LI("Doing AXFR for %(name)s from %(host)s") % log_info)
+            LOG.info(_LI("Doing AXFR for %(name)s from %(host)s"), log_info)
 
             xfr = dns.query.xfr(srv['host'], zone_name, relativize=False,
                                 timeout=1, port=srv['port'], source=source)
@@ -178,7 +178,7 @@ class DesignateAdapter(object):
 
         except exceptions.InvalidObject:
             LOG.info(_LI("InvalidObject creating %(name)s with "
-                         "values %(values)r") %
+                         "values %(values)r"),
                      {"name": output_object.obj_name(), "values": values})
             raise
 
@@ -69,7 +69,7 @@ def init(default_rule=None):
         msg = 'Unable to determine appropriate policy json file'
         raise exceptions.ConfigurationError(msg)
 
-    LOG.info(_LI('Using policy_file found at: %s') % policy_files[0])
+    LOG.info(_LI('Using policy_file found at: %s'), policy_files[0])
 
     with open(policy_files[0]) as fh:
         policy_string = fh.read()
@@ -98,9 +98,9 @@ def check(rule, ctxt, target=None, do_raise=True, exc=exceptions.Forbidden):
 
     if result:
         LOG.info(_("Policy check succeeded for rule '%(rule)s' "
-                   "on target %(target)s") %
+                   "on target %(target)s"),
                  {'rule': rule, 'target': repr(target)}, extra=extra)
     else:
         LOG.info(_("Policy check failed for rule '%(rule)s' "
-                   "on target %(target)s") %
+                   "on target %(target)s"),
                  {'rule': rule, 'target': repr(target)}, extra=extra)
@@ -62,7 +62,7 @@ class PoolManagerAPI(object):
 
     def create_zone(self, context, zone):
         LOG.info(_LI("create_zone: Calling pool manager for %(zone)s, "
-                     "serial:%(serial)s") %
+                     "serial:%(serial)s"),
                  {'zone': zone.name, 'serial': zone.serial})
 
         # Modifying the topic so it is pool manager instance specific.
@@ -73,7 +73,7 @@ class PoolManagerAPI(object):
 
     def delete_zone(self, context, zone):
         LOG.info(_LI("delete_zone: Calling pool manager for %(zone)s, "
-                     "serial:%(serial)s") %
+                     "serial:%(serial)s"),
                  {'zone': zone.name, 'serial': zone.serial})
 
         # Modifying the topic so it is pool manager instance specific.
@@ -84,7 +84,7 @@ class PoolManagerAPI(object):
 
     def update_zone(self, context, zone):
        LOG.info(_LI("update_zone: Calling pool manager for %(zone)s, "
-                     "serial:%(serial)s") %
+                     "serial:%(serial)s"),
                  {'zone': zone.name, 'serial': zone.serial})
 
         # Modifying the topic so it is pool manager instance specific.
@@ -97,7 +97,7 @@ class PoolManagerAPI(object):
                       actual_serial):
         LOG.info(_LI("update_status: Calling pool manager for %(zone)s : "
                      "%(action)s : %(status)s : %(serial)s on nameserver "
-                     "'%(host)s:%(port)s'") %
+                     "'%(host)s:%(port)s'"),
                  {'zone': zone.name, 'action': zone.action,
                   'status': status, 'serial': actual_serial,
                   'host': nameserver.host, 'port': nameserver.port})
@@ -120,8 +120,8 @@ class Service(service.RPCService, coordination.CoordinationMixin,
         topic = super(Service, self)._rpc_topic
 
         topic = '%s.%s' % (topic, CONF['service:pool_manager'].pool_id)
-        LOG.info(_LI('Using topic %(topic)s for this pool manager instance.')
-                 % {'topic': topic})
+        LOG.info(_LI('Using topic %(topic)s for this pool manager instance.'),
+                 {'topic': topic})
 
         return topic
 
@@ -382,7 +382,7 @@ class Service(service.RPCService, coordination.CoordinationMixin,
         return False
 
     def _update_zone_on_also_notify(self, context, also_notify, zone):
-        LOG.info(_LI('Updating zone %(zone)s on also_notify %(server)s.') %
+        LOG.info(_LI('Updating zone %(zone)s on also_notify %(server)s.'),
                  {'zone': zone.name,
                   'server': self._get_destination(also_notify)})
 
@@ -502,7 +502,7 @@ class Service(service.RPCService, coordination.CoordinationMixin,
         if consensus_serial != 0 and cache_serial <= consensus_serial \
                 and zone.status != 'ACTIVE':
             LOG.info(_LI('For zone %(zone)s '
-                         'the consensus serial is %(consensus_serial)s.') %
+                         'the consensus serial is %(consensus_serial)s.'),
                      {'zone': zone.name,
                       'consensus_serial': consensus_serial})
             self.central_api.update_status(
@@ -87,12 +87,12 @@ class Service(service.Service):
     def start(self):
         super(Service, self).start()
 
-        LOG.info(_('Starting %(name)s service (version: %(version)s)') %
+        LOG.info(_('Starting %(name)s service (version: %(version)s)'),
                  {'name': self.service_name,
                   'version': version.version_info.version_string()})
 
     def stop(self):
-        LOG.info(_('Stopping %(name)s service') % {'name': self.service_name})
+        LOG.info(_('Stopping %(name)s service'), {'name': self.service_name})
 
         super(Service, self).stop()
 
@@ -81,7 +81,7 @@ class Service(service.RPCService, coordination.CoordinationMixin,
             self.tg.add_timer(interval, task)
 
     def _rebalance(self, my_partitions, members, event):
-        LOG.info(_LI("Received rebalance event %s") % event)
+        LOG.info(_LI("Received rebalance event %s"), event)
         self.partition_range = my_partitions
 
     # Begin RPC Implementation
@@ -126,7 +126,7 @@ class DeletedZonePurgeTask(PeriodicTask):
         """
         pstart, pend = self._my_range()
         msg = _LI("Performing deleted zone purging for %(start)s to %(end)s")
-        LOG.info(msg % {"start": pstart, "end": pend})
+        LOG.info(msg, {"start": pstart, "end": pend})
 
         delta = datetime.timedelta(seconds=self.options.time_threshold)
         time_threshold = timeutils.utcnow() - delta
@@ -169,7 +169,7 @@ class PeriodicExistsTask(PeriodicTask):
     def __call__(self):
         pstart, pend = self._my_range()
         msg = _LI("Emitting zone exist events for %(start)s to %(end)s")
-        LOG.info(msg % {"start": pstart, "end": pend})
+        LOG.info(msg, {"start": pstart, "end": pend})
 
         ctxt = context.DesignateContext.get_admin_context()
         ctxt.all_tenants = True
@@ -202,7 +202,7 @@ class PeriodicSecondaryRefreshTask(PeriodicTask):
     def __call__(self):
         pstart, pend = self._my_range()
         msg = _LI("Refreshing zones between for %(start)s to %(end)s")
-        LOG.info(msg % {"start": pstart, "end": pend})
+        LOG.info(msg, {"start": pstart, "end": pend})
 
         ctxt = context.DesignateContext.get_admin_context()
         ctxt.all_tenants = True