Change log statements to meet I18n guidelines

Change-Id: Id76bcb71369302a559d2c2c5bd6046dff05a488e
Implements: blueprint standardize-logging
Jordan Cazamias 2014-06-10 09:36:45 -05:00
parent d9b15425ed
commit a0d9b4cac6
21 changed files with 187 additions and 147 deletions
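For reference, the I18n logging guideline this change implements pairs each log level with a dedicated translation marker from gettextutils: _LI for LOG.info, _LW for LOG.warn, _LE for LOG.error, and _LC for LOG.critical, while plain _() stays reserved for exceptions and other user-facing text. The sketch below illustrates the pattern using the same imports added throughout this diff; the sync_server function and its messages are hypothetical, for illustration only.

# Illustrative sketch of the logging pattern applied in this commit.
# sync_server() and its messages are made up; the imports mirror those
# added in the diff below.
from designate.openstack.common import log as logging
from designate.openstack.common.gettextutils import _LI
from designate.openstack.common.gettextutils import _LW
from designate.openstack.common.gettextutils import _LE
from designate.openstack.common.gettextutils import _LC

LOG = logging.getLogger(__name__)

def sync_server(server):
    # The format string (not the interpolated result) is wrapped in the
    # marker so the message catalog sees a stable, translatable string.
    LOG.info(_LI("Syncing server %s") % server)
    if server is None:
        LOG.warn(_LW("No server supplied, skipping sync"))
        LOG.error(_LE("Unable to sync server %s") % server)
        LOG.critical(_LC("**** DO NOT USE IN PRODUCTION ****"))

Call sites in this change use both interpolation styles: some keep the % operator on the translated string, while others pass the arguments to the logger for deferred interpolation, e.g. LOG.warn(_LW("Failed to delete domain '%s' during sync. Message: %s"), domain['id'], str(e)).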

View File

@ -23,6 +23,9 @@ import json
import copy
from oslo.config import cfg
from designate.backend import impl_ipa
from designate.openstack.common.gettextutils import _LI
from designate.openstack.common.gettextutils import _LW
from designate.openstack.common.gettextutils import _LE
from designate import utils
logging.basicConfig()
@ -104,7 +107,7 @@ def rec2des(rec, zonename):
if k in iparectype2designate:
rectypes.append(k)
else:
LOG.info("Skipping unknown record type %s in %s" %
LOG.info(_LI("Skipping unknown record type %s in %s") %
(k, name))
desrecs = []
@ -170,18 +173,19 @@ def syncipaservers2des(servers, designatereq, designateurl):
for srec in resp.json()['servers']:
dservers[srec['name']] = srec['id']
else:
LOG.warn("No servers in designate")
LOG.warn(_LW("No servers in designate"))
# first - add servers from ipa not already in designate
for server in servers:
if server in dservers:
LOG.info("Skipping ipa server %s already in designate" % server)
LOG.info(_LI("Skipping ipa server %s already in designate")
% server)
else:
desreq = {"name": server}
resp = designatereq.post(srvurl, data=json.dumps(desreq))
LOG.debug("Response: %s" % pprint.pformat(resp.json()))
if resp.status_code == 200:
LOG.info("Added server %s to designate" % server)
LOG.info(_LI("Added server %s to designate") % server)
else:
raise AddServerError("Unable to add %s: %s" %
(server, pprint.pformat(resp.json())))
@ -191,7 +195,7 @@ def syncipaservers2des(servers, designatereq, designateurl):
if server not in servers:
delresp = designatereq.delete(srvurl + "/" + sid)
if delresp.status_code == 200:
LOG.info("Deleted server %s" % server)
LOG.info(_LI("Deleted server %s") % server)
else:
raise DeleteServerError("Unable to delete %s: %s" %
(server,
@ -285,11 +289,11 @@ def main():
exc = None
fakezoneid = None
if resp.status_code == 200:
LOG.info("Added domain %s" % domname)
LOG.info(_LI("Added domain %s") % domname)
fakezoneid = resp.json()['id']
delresp = designatereq.delete(domainurl + "/" + fakezoneid)
if delresp.status_code != 200:
LOG.error("Unable to delete %s: %s" %
LOG.error(_LE("Unable to delete %s: %s") %
(domname, pprint.pformat(delresp.json())))
else:
exc = CannotUseIPABackend(cuiberrorstr)
@ -300,7 +304,7 @@ def main():
iparesp = ipabackend._call_and_handle_error(ipareq)
LOG.debug("Response: %s" % pprint.pformat(iparesp))
if iparesp['error']:
LOG.error(pprint.pformat(iparesp))
LOG.error(_LE(pprint.pformat(iparesp)))
if exc:
raise exc
@ -329,7 +333,7 @@ def main():
desreq = zone2des(zonerec)
resp = designatereq.post(domainurl, data=json.dumps(desreq))
if resp.status_code == 200:
LOG.info("Added domain %s" % desreq['name'])
LOG.info(_LI("Added domain %s") % desreq['name'])
else:
raise AddDomainError("Unable to add domain %s: %s" %
(desreq['name'], pprint.pformat(resp.json())))
@ -344,7 +348,7 @@ def main():
for desreq in desreqs:
resp = designatereq.post(recurl, data=json.dumps(desreq))
if resp.status_code == 200:
LOG.info("Added record %s for domain %s" %
LOG.info(_LI("Added record %s for domain %s") %
(desreq['name'], zonename))
else:
raise AddRecordError("Could not add record %s: %s" %

View File

@ -20,6 +20,8 @@ import os
import dns.zone
import argparse
import logging
from designate.openstack.common.gettextutils import _LI
from designate.openstack.common.gettextutils import _LE
logging.basicConfig()
@ -166,13 +168,13 @@ class Extractor:
zone_object = dns.zone.from_file(zonefile,
allow_include=True)
except dns.zone.UnknownOrigin:
LOG.info('%s is missing $ORIGIN, inserting %s' %
LOG.info(_LI('%s is missing $ORIGIN, inserting %s') %
(zonefile, name))
zone_object = dns.zone.from_file(zonefile,
allow_include=True,
origin=name)
except dns.zone.NoSOA:
LOG.error('%s has no SOA' % zonefile)
LOG.error(_LE('%s has no SOA') % zonefile)
zones.append(Zone(zone_object))
return zones

View File

@ -27,6 +27,9 @@ from designate.openstack.common import jsonutils as json
from designate.openstack.common import local
from designate.openstack.common import log as logging
from designate.openstack.common import strutils
from designate.openstack.common.gettextutils import _LI
from designate.openstack.common.gettextutils import _LE
from designate.openstack.common.gettextutils import _LC
LOG = logging.getLogger(__name__)
@ -42,7 +45,7 @@ class MaintenanceMiddleware(wsgi.Middleware):
def __init__(self, application):
super(MaintenanceMiddleware, self).__init__(application)
LOG.info('Starting designate maintenance middleware')
LOG.info(_LI('Starting designate maintenance middleware'))
self.enabled = cfg.CONF['service:api'].maintenance_mode
self.role = cfg.CONF['service:api'].maintenance_mode_role
@ -71,7 +74,7 @@ def auth_pipeline_factory(loader, global_conf, **local_conf):
"""
pipeline = local_conf[cfg.CONF['service:api'].auth_strategy]
pipeline = pipeline.split()
LOG.info('Getting auth pipeline: %s' % pipeline[:-1])
LOG.info(_LI('Getting auth pipeline: %s') % pipeline[:-1])
filters = [loader.get_filter(n) for n in pipeline[:-1]]
app = loader.get_app(pipeline[-1])
filters.reverse()
@ -97,7 +100,7 @@ class KeystoneContextMiddleware(ContextMiddleware):
def __init__(self, application):
super(KeystoneContextMiddleware, self).__init__(application)
LOG.info('Starting designate keystonecontext middleware')
LOG.info(_LI('Starting designate keystonecontext middleware'))
def process_request(self, request):
headers = request.headers
@ -134,7 +137,7 @@ class NoAuthContextMiddleware(ContextMiddleware):
def __init__(self, application):
super(NoAuthContextMiddleware, self).__init__(application)
LOG.info('Starting designate noauthcontext middleware')
LOG.info(_LI('Starting designate noauthcontext middleware'))
def process_request(self, request):
headers = request.headers
@ -157,8 +160,8 @@ class TestContextMiddleware(ContextMiddleware):
def __init__(self, application, tenant_id=None, user_id=None):
super(TestContextMiddleware, self).__init__(application)
LOG.critical('Starting designate testcontext middleware')
LOG.critical('**** DO NOT USE IN PRODUCTION ****')
LOG.critical(_LC('Starting designate testcontext middleware'))
LOG.critical(_LC('**** DO NOT USE IN PRODUCTION ****'))
self.default_tenant_id = tenant_id
self.default_user_id = user_id
@ -185,7 +188,7 @@ class FaultWrapperMiddleware(wsgi.Middleware):
def __init__(self, application):
super(FaultWrapperMiddleware, self).__init__(application)
LOG.info('Starting designate faultwrapper middleware')
LOG.info(_LI('Starting designate faultwrapper middleware'))
@webob.dec.wsgify
def __call__(self, request):
@ -239,6 +242,7 @@ class FaultWrapperMiddleware(wsgi.Middleware):
if 'type' not in response:
response['type'] = 'unknown'
# Return the new response
if 'context' in request.environ:
response['request_id'] = request.environ['context'].request_id
@ -247,8 +251,7 @@ class FaultWrapperMiddleware(wsgi.Middleware):
else:
#TODO(ekarlso): Remove after verifying that there's actually a
# context always set
LOG.error('Missing context in request, please check.')
LOG.error(_LE('Missing context in request, please check.'))
# Return the new response
return flask.Response(status=status, headers=headers,
response=json.dumps(response))

View File

@ -15,6 +15,7 @@
# under the License.
from paste import deploy
from designate.openstack.common import log as logging
from designate.openstack.common.gettextutils import _LI
from designate.openstack.deprecated import wsgi
from oslo.config import cfg
from designate import exceptions
@ -35,7 +36,7 @@ class Service(wsgi.Service):
msg = 'Unable to determine appropriate api-paste-config file'
raise exceptions.ConfigurationError(msg)
LOG.info('Using api-paste-config found at: %s' % config_paths[0])
LOG.info(_LI('Using api-paste-config found at: %s') % config_paths[0])
policy.init()

View File

@ -15,6 +15,7 @@
# under the License.
import abc
from designate.openstack.common import log as logging
from designate.openstack.common.gettextutils import _LW
from designate import exceptions
from designate.context import DesignateContext
from designate.plugin import DriverPlugin
@ -114,8 +115,8 @@ class Backend(DriverPlugin):
except exceptions.DomainNotFound as e:
# NOTE(Kiall): This means a domain was missing from the backend.
# Good thing we're doing a sync!
LOG.warn("Failed to delete domain '%s' during sync. Message: %s",
domain['id'], str(e))
LOG.warn(_LW("Failed to delete domain '%s' during sync. "
"Message: %s"), domain['id'], str(e))
# Next, re-create the domain in the backend.
self.create_domain(context, domain)
@ -139,8 +140,9 @@ class Backend(DriverPlugin):
except exceptions.RecordNotFound as e:
# NOTE(Kiall): This means a record was missing from the backend.
# Good thing we're doing a sync!
LOG.warn("Failed to delete record '%s' in domain '%s' during sync."
" Message: %s", record['id'], domain['id'], str(e))
LOG.warn(_LW("Failed to delete record '%s' in domain '%s' "
"during sync. Message: %s"), record['id'],
domain['id'], str(e))
# Finally, re-create the record in the backend.
self.create_record(context, domain, recordset, record)

View File

@ -16,6 +16,7 @@
import os
from oslo.config import cfg
from designate.openstack.common import log as logging
from designate.openstack.common.gettextutils import _LW
from designate import utils
from designate.backend import base
import glob
@ -57,8 +58,8 @@ class Bind9Backend(base.Backend):
except utils.processutils.ProcessExecutionError as proc_exec_err:
stderr = proc_exec_err.stderr
if stderr.count("rndc: 'reload' failed: not found") is not 0:
LOG.warn("Domain %s (%s) missing from backend, recreating",
domain['name'], domain['id'])
LOG.warn(_LW("Domain %s (%s) missing from backend, "
"recreating"), domain['name'], domain['id'])
self._sync_domain(domain, new_domain_flag=True)
else:
raise proc_exec_err

View File

@ -24,6 +24,8 @@ from requests.adapters import HTTPAdapter
from designate import exceptions
from designate.backend import base
from designate.openstack.common import log as logging
from designate.openstack.common.gettextutils import _LI
from designate.openstack.common.gettextutils import _LW
LOG = logging.getLogger(__name__)
@ -325,7 +327,7 @@ class DynECTBackend(base.Backend):
timings=cfg.CONF[GROUP].timings)
def create_domain(self, context, domain):
LOG.info('Creating domain %s / %s', domain['id'], domain['name'])
LOG.info(_LI('Creating domain %s / %s'), domain['id'], domain['name'])
url = '/Secondary/%s' % domain['name'].rstrip('.')
data = {
@ -347,15 +349,15 @@ class DynECTBackend(base.Backend):
LOG.debug('Discarding update_domain call, not-applicable')
def delete_domain(self, context, domain):
LOG.info('Deleting domain %s / %s', domain['id'], domain['name'])
LOG.info(_LI('Deleting domain %s / %s'), domain['id'], domain['name'])
url = '/Zone/%s' % domain['name'].rstrip('.')
client = self.get_client()
try:
client.delete(url)
except DynClientError as e:
if e.http_status == 404:
msg = "Attempt to delete %s / %s caused 404, ignoring."
LOG.warn(msg, domain['id'], domain['name'])
LOG.warn(_LW("Attempt to delete %s / %s caused 404, "
"ignoring."), domain['id'], domain['name'])
pass
else:
raise

View File

@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from designate.openstack.common import log as logging
from designate.openstack.common.gettextutils import _LI
from designate.backend import base
LOG = logging.getLogger(__name__)
@ -26,55 +27,58 @@ class FakeBackend(base.Backend):
super(FakeBackend, self).__init__(*args, **kwargs)
def create_tsigkey(self, context, tsigkey):
LOG.info('Create TSIG Key %r' % tsigkey)
LOG.info(_LI('Create TSIG Key %r') % tsigkey)
def update_tsigkey(self, context, tsigkey):
LOG.info('Update TSIG Key %r' % tsigkey)
LOG.info(_LI('Update TSIG Key %r') % tsigkey)
def delete_tsigkey(self, context, tsigkey):
LOG.info('Delete TSIG Key %r' % tsigkey)
LOG.info(_LI('Delete TSIG Key %r') % tsigkey)
def create_server(self, context, server):
LOG.info('Create Server %r' % server)
LOG.info(_LI('Create Server %r') % server)
def update_server(self, context, server):
LOG.info('Update Server %r' % server)
LOG.info(_LI('Update Server %r') % server)
def delete_server(self, context, server):
LOG.info('Delete Server %r' % server)
LOG.info(_LI('Delete Server %r') % server)
def create_domain(self, context, domain):
LOG.info('Create Domain %r' % domain)
LOG.info(_LI('Create Domain %r') % domain)
def update_domain(self, context, domain):
LOG.info('Update Domain %r' % domain)
LOG.info(_LI('Update Domain %r') % domain)
def delete_domain(self, context, domain):
LOG.info('Delete Domain %r' % domain)
LOG.info(_LI('Delete Domain %r') % domain)
def create_recordset(self, context, domain, recordset):
LOG.info('Create RecordSet %r / %r' % (domain, recordset))
LOG.info(_LI('Create RecordSet %r / %r') % (domain, recordset))
def update_recordset(self, context, domain, recordset):
LOG.info('Update RecordSet %r / %r' % (domain, recordset))
LOG.info(_LI('Update RecordSet %r / %r') % (domain, recordset))
def delete_recordset(self, context, domain, recordset):
LOG.info('Delete RecordSet %r / %r' % (domain, recordset))
LOG.info(_LI('Delete RecordSet %r / %r') % (domain, recordset))
def create_record(self, context, domain, recordset, record):
LOG.info('Create Record %r / %r / %r' % (domain, recordset, record))
LOG.info(_LI('Create Record %r / %r / %r') %
(domain, recordset, record))
def update_record(self, context, domain, recordset, record):
LOG.info('Update Record %r / %r / %r' % (domain, recordset, record))
LOG.info(_LI('Update Record %r / %r / %r') %
(domain, recordset, record))
def delete_record(self, context, domain, recordset, record):
LOG.info('Delete Record %r / %r / %r' % (domain, recordset, record))
LOG.info(_LI('Delete Record %r / %r / %r') %
(domain, recordset, record))
def sync_domain(self, context, domain, records):
LOG.info('Sync Domain %r / %r' % (domain, records))
LOG.info(_LI('Sync Domain %r / %r') % (domain, records))
def sync_record(self, context, domain, record):
LOG.info('Sync Record %r / %r' % (domain, record))
LOG.info(_LI('Sync Record %r / %r') % (domain, record))
def ping(self, context):
LOG.info('Ping')
LOG.info(_LI('Ping'))

View File

@ -20,6 +20,7 @@ from designate.openstack.common import log as logging
from designate.openstack.common import importutils
from designate.backend import base
from designate.openstack.common import jsonutils as json
from designate.openstack.common.gettextutils import _LE
import pprint
import time
@ -400,8 +401,8 @@ class IPABackend(base.Backend):
if status_code == 401:
if self.ntries == 0:
# persistent inability to auth
LOG.error("Error: could not authenticate to IPA - "
"please check for correct keytab file")
LOG.error(_LE("Error: could not authenticate to IPA - "
"please check for correct keytab file"))
# reset for next time
self.ntries = cfg.CONF[self.name].ipa_connect_retries
raise IPACommunicationFailure()

View File

@ -19,6 +19,8 @@ import uuid
from requests import auth
import kerberos
from designate.backend.impl_ipa import IPAAuthError
from designate.openstack.common.gettextutils import _LW
from designate.openstack.common.gettextutils import _LE
LOG = logging.getLogger(__name__)
@ -33,7 +35,7 @@ class IPAAuth(auth.AuthBase):
if self.keytab:
os.environ['KRB5_CLIENT_KTNAME'] = self.keytab
else:
LOG.warn('No IPA client kerberos keytab file given')
LOG.warn(_LW('No IPA client kerberos keytab file given'))
def __call__(self, request):
if not self.token:
@ -47,11 +49,11 @@ class IPAAuth(auth.AuthBase):
try:
(_, vc) = kerberos.authGSSClientInit(service, flags)
except kerberos.GSSError as e:
LOG.error("caught kerberos exception %r" % e)
LOG.error(_LE("caught kerberos exception %r") % e)
raise IPAAuthError(str(e))
try:
kerberos.authGSSClientStep(vc, "")
except kerberos.GSSError as e:
LOG.error("caught kerberos exception %r" % e)
LOG.error(_LE("caught kerberos exception %r") % e)
raise IPAAuthError(str(e))
self.token = kerberos.authGSSClientResponse(vc)

View File

@ -17,6 +17,7 @@ from oslo.config import cfg
from oslo import messaging
from designate.openstack.common import log as logging
from designate.openstack.common.gettextutils import _LI
from designate import rpc
@ -51,197 +52,199 @@ class CentralAPI(object):
# Misc Methods
def get_absolute_limits(self, context):
LOG.info("get_absolute_limits: Calling central's get_absolute_limits.")
LOG.info(_LI("get_absolute_limits: "
"Calling central's get_absolute_limits."))
return self.client.call(context, 'get_absolute_limits')
# Quota Methods
def get_quotas(self, context, tenant_id):
LOG.info("get_quotas: Calling central's get_quotas.")
LOG.info(_LI("get_quotas: Calling central's get_quotas."))
return self.client.call(context, 'get_quotas', tenant_id=tenant_id)
def get_quota(self, context, tenant_id, resource):
LOG.info("get_quota: Calling central's get_quota.")
LOG.info(_LI("get_quota: Calling central's get_quota."))
return self.client.call(context, 'get_quota', tenant_id=tenant_id,
resource=resource)
def set_quota(self, context, tenant_id, resource, hard_limit):
LOG.info("set_quota: Calling central's set_quota.")
LOG.info(_LI("set_quota: Calling central's set_quota."))
return self.client.call(context, 'set_quota', tenant_id=tenant_id,
resource=resource, hard_limit=hard_limit)
def reset_quotas(self, context, tenant_id):
LOG.info("reset_quotas: Calling central's reset_quotas.")
LOG.info(_LI("reset_quotas: Calling central's reset_quotas."))
return self.client.call(context, 'reset_quotas', tenant_id=tenant_id)
# Server Methods
def create_server(self, context, server):
LOG.info("create_server: Calling central's create_server.")
LOG.info(_LI("create_server: Calling central's create_server."))
return self.client.call(context, 'create_server', server=server)
def find_servers(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info("find_servers: Calling central's find_servers.")
LOG.info(_LI("find_servers: Calling central's find_servers."))
return self.client.call(context, 'find_servers', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def get_server(self, context, server_id):
LOG.info("get_server: Calling central's get_server.")
LOG.info(_LI("get_server: Calling central's get_server."))
return self.client.call(context, 'get_server', server_id=server_id)
def update_server(self, context, server_id, values):
LOG.info("update_server: Calling central's update_server.")
LOG.info(_LI("update_server: Calling central's update_server."))
return self.client.call(context, 'update_server', server_id=server_id,
values=values)
def delete_server(self, context, server_id):
LOG.info("delete_server: Calling central's delete_server.")
LOG.info(_LI("delete_server: Calling central's delete_server."))
return self.client.call(context, 'delete_server', server_id=server_id)
# TSIG Key Methods
def create_tsigkey(self, context, tsigkey):
LOG.info("create_tsigkey: Calling central's create_tsigkey.")
LOG.info(_LI("create_tsigkey: Calling central's create_tsigkey."))
return self.client.call(context, 'create_tsigkey', tsigkey=tsigkey)
def find_tsigkeys(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info("find_tsigkeys: Calling central's find_tsigkeys.")
LOG.info(_LI("find_tsigkeys: Calling central's find_tsigkeys."))
return self.client.call(context, 'find_tsigkeys', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def get_tsigkey(self, context, tsigkey_id):
LOG.info("get_tsigkey: Calling central's get_tsigkey.")
LOG.info(_LI("get_tsigkey: Calling central's get_tsigkey."))
return self.client.call(context, 'get_tsigkey', tsigkey_id=tsigkey_id)
def update_tsigkey(self, context, tsigkey_id, values):
LOG.info("update_tsigkey: Calling central's update_tsigkey.")
LOG.info(_LI("update_tsigkey: Calling central's update_tsigkey."))
return self.client.call(context, 'update_tsigkey',
tsigkey_id=tsigkey_id,
values=values)
def delete_tsigkey(self, context, tsigkey_id):
LOG.info("delete_tsigkey: Calling central's delete_tsigkey.")
LOG.info(_LI("delete_tsigkey: Calling central's delete_tsigkey."))
return self.client.call(context, 'delete_tsigkey',
tsigkey_id=tsigkey_id)
# Tenant Methods
def find_tenants(self, context):
LOG.info("find_tenants: Calling central's find_tenants.")
LOG.info(_LI("find_tenants: Calling central's find_tenants."))
return self.client.call(context, 'find_tenants')
def get_tenant(self, context, tenant_id):
LOG.info("get_tenant: Calling central's get_tenant.")
LOG.info(_LI("get_tenant: Calling central's get_tenant."))
return self.client.call(context, 'get_tenant', tenant_id=tenant_id)
def count_tenants(self, context):
LOG.info("count_tenants: Calling central's count_tenants.")
LOG.info(_LI("count_tenants: Calling central's count_tenants."))
return self.client.call(context, 'count_tenants')
# Domain Methods
def create_domain(self, context, domain):
LOG.info("create_domain: Calling central's create_domain.")
LOG.info(_LI("create_domain: Calling central's create_domain."))
return self.client.call(context, 'create_domain', domain=domain)
def get_domain(self, context, domain_id):
LOG.info("get_domain: Calling central's get_domain.")
LOG.info(_LI("get_domain: Calling central's get_domain."))
return self.client.call(context, 'get_domain', domain_id=domain_id)
def get_domain_servers(self, context, domain_id):
LOG.info("get_domain_servers: Calling central's get_domain_servers.")
LOG.info(_LI("get_domain_servers: "
"Calling central's get_domain_servers."))
return self.client.call(context, 'get_domain_servers',
domain_id=domain_id)
def find_domains(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info("find_domains: Calling central's find_domains.")
LOG.info(_LI("find_domains: Calling central's find_domains."))
return self.client.call(context, 'find_domains', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def find_domain(self, context, criterion=None):
LOG.info("find_domain: Calling central's find_domain.")
LOG.info(_LI("find_domain: Calling central's find_domain."))
return self.client.call(context, 'find_domain', criterion=criterion)
def update_domain(self, context, domain_id, values, increment_serial=True):
LOG.info("update_domain: Calling central's update_domain.")
LOG.info(_LI("update_domain: Calling central's update_domain."))
return self.client.call(
context, 'update_domain', domain_id=domain_id,
values=values, increment_serial=increment_serial)
def delete_domain(self, context, domain_id):
LOG.info("delete_domain: Calling central's delete_domain.")
LOG.info(_LI("delete_domain: Calling central's delete_domain."))
return self.client.call(context, 'delete_domain', domain_id=domain_id)
def count_domains(self, context, criterion=None):
LOG.info("count_domains: Calling central's count_domains.")
LOG.info(_LI("count_domains: Calling central's count_domains."))
return self.client.call(context, 'count_domains', criterion=criterion)
def touch_domain(self, context, domain_id):
LOG.info("touch_domain: Calling central's touch_domain.")
LOG.info(_LI("touch_domain: Calling central's touch_domain."))
return self.client.call(context, 'touch_domain', domain_id=domain_id)
# TLD Methods
def create_tld(self, context, tld):
LOG.info("create_tld: Calling central's create_tld.")
LOG.info(_LI("create_tld: Calling central's create_tld."))
return self.client.call(context, 'create_tld', tld=tld)
def find_tlds(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info("find_tlds: Calling central's find_tlds.")
LOG.info(_LI("find_tlds: Calling central's find_tlds."))
return self.client.call(context, 'find_tlds', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def get_tld(self, context, tld_id):
LOG.info("get_tld: Calling central's get_tld.")
LOG.info(_LI("get_tld: Calling central's get_tld."))
return self.client.call(context, 'get_tld', tld_id=tld_id)
def update_tld(self, context, tld_id, values):
LOG.info("update_tld: Calling central's update_tld.")
LOG.info(_LI("update_tld: Calling central's update_tld."))
return self.client.call(context, 'update_tld', tld_id=tld_id,
values=values)
def delete_tld(self, context, tld_id):
LOG.info("delete_tld: Calling central's delete_tld.")
LOG.info(_LI("delete_tld: Calling central's delete_tld."))
return self.client.call(context, 'delete_tld', tld_id=tld_id)
# RecordSet Methods
def create_recordset(self, context, domain_id, recordset):
LOG.info("create_recordset: Calling central's create_recordset.")
LOG.info(_LI("create_recordset: Calling central's create_recordset."))
return self.client.call(context, 'create_recordset',
domain_id=domain_id, recordset=recordset)
def get_recordset(self, context, domain_id, recordset_id):
LOG.info("get_recordset: Calling central's get_recordset.")
LOG.info(_LI("get_recordset: Calling central's get_recordset."))
return self.client.call(context, 'get_recordset', domain_id=domain_id,
recordset_id=recordset_id)
def find_recordsets(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info("find_recordsets: Calling central's find_recordsets.")
LOG.info(_LI("find_recordsets: Calling central's find_recordsets."))
return self.client.call(context, 'find_recordsets',
criterion=criterion, marker=marker,
limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def find_recordset(self, context, criterion=None):
LOG.info("find_recordset: Calling central's find_recordset.")
LOG.info(_LI("find_recordset: Calling central's find_recordset."))
return self.client.call(context, 'find_recordset', criterion=criterion)
def update_recordset(self, context, domain_id, recordset_id, values,
increment_serial=True):
LOG.info("update_recordset: Calling central's update_recordset.")
LOG.info(_LI("update_recordset: Calling central's update_recordset."))
return self.client.call(context, 'update_recordset',
domain_id=domain_id,
recordset_id=recordset_id,
@ -250,21 +253,21 @@ class CentralAPI(object):
def delete_recordset(self, context, domain_id, recordset_id,
increment_serial=True):
LOG.info("delete_recordset: Calling central's delete_recordset.")
LOG.info(_LI("delete_recordset: Calling central's delete_recordset."))
return self.client.call(context, 'delete_recordset',
domain_id=domain_id,
recordset_id=recordset_id,
increment_serial=increment_serial)
def count_recordsets(self, context, criterion=None):
LOG.info("count_recordsets: Calling central's count_recordsets.")
LOG.info(_LI("count_recordsets: Calling central's count_recordsets."))
return self.client.call(context, 'count_recordsets',
criterion=criterion)
# Record Methods
def create_record(self, context, domain_id, recordset_id, record,
increment_serial=True):
LOG.info("create_record: Calling central's create_record.")
LOG.info(_LI("create_record: Calling central's create_record."))
return self.client.call(context, 'create_record',
domain_id=domain_id,
recordset_id=recordset_id,
@ -272,7 +275,7 @@ class CentralAPI(object):
increment_serial=increment_serial)
def get_record(self, context, domain_id, recordset_id, record_id):
LOG.info("get_record: Calling central's get_record.")
LOG.info(_LI("get_record: Calling central's get_record."))
return self.client.call(context, 'get_record',
domain_id=domain_id,
recordset_id=recordset_id,
@ -280,18 +283,18 @@ class CentralAPI(object):
def find_records(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info("find_records: Calling central's find_records.")
LOG.info(_LI("find_records: Calling central's find_records."))
return self.client.call(context, 'find_records', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def find_record(self, context, criterion=None):
LOG.info("find_record: Calling central's find_record.")
LOG.info(_LI("find_record: Calling central's find_record."))
return self.client.call(context, 'find_record', criterion=criterion)
def update_record(self, context, domain_id, recordset_id, record_id,
values, increment_serial=True):
LOG.info("update_record: Calling central's update_record.")
LOG.info(_LI("update_record: Calling central's update_record."))
return self.client.call(context, 'update_record',
domain_id=domain_id,
recordset_id=recordset_id,
@ -301,7 +304,7 @@ class CentralAPI(object):
def delete_record(self, context, domain_id, recordset_id, record_id,
increment_serial=True):
LOG.info("delete_record: Calling central's delete_record.")
LOG.info(_LI("delete_record: Calling central's delete_record."))
return self.client.call(context, 'delete_record',
domain_id=domain_id,
recordset_id=recordset_id,
@ -309,67 +312,68 @@ class CentralAPI(object):
increment_serial=increment_serial)
def count_records(self, context, criterion=None):
LOG.info("count_records: Calling central's count_records.")
LOG.info(_LI("count_records: Calling central's count_records."))
return self.client.call(context, 'count_records', criterion=criterion)
# Sync Methods
def sync_domains(self, context):
LOG.info("sync_domains: Calling central's sync_domains.")
LOG.info(_LI("sync_domains: Calling central's sync_domains."))
return self.client.call(context, 'sync_domains')
def sync_domain(self, context, domain_id):
LOG.info("sync_domain: Calling central's sync_domains.")
LOG.info(_LI("sync_domain: Calling central's sync_domains."))
return self.client.call(context, 'sync_domain', domain_id=domain_id)
def sync_record(self, context, domain_id, recordset_id, record_id):
LOG.info("sync_record: Calling central's sync_record.")
LOG.info(_LI("sync_record: Calling central's sync_record."))
return self.client.call(context, 'sync_record',
domain_id=domain_id,
recordset_id=recordset_id,
record_id=record_id)
def list_floatingips(self, context):
LOG.info("list_floatingips: Calling central's list_floatingips.")
LOG.info(_LI("list_floatingips: Calling central's list_floatingips."))
return self.client.call(context, 'list_floatingips')
def get_floatingip(self, context, region, floatingip_id):
LOG.info("get_floatingip: Calling central's get_floatingip.")
LOG.info(_LI("get_floatingip: Calling central's get_floatingip."))
return self.client.call(context, 'get_floatingip', region=region,
floatingip_id=floatingip_id)
def update_floatingip(self, context, region, floatingip_id, values):
LOG.info("update_floatingip: Calling central's update_floatingip.")
LOG.info(_LI("update_floatingip: "
"Calling central's update_floatingip."))
return self.client.call(context, 'update_floatingip', region=region,
floatingip_id=floatingip_id, values=values)
# Blacklisted Domain Methods
def create_blacklist(self, context, blacklist):
LOG.info("create_blacklist: Calling central's create_blacklist")
LOG.info(_LI("create_blacklist: Calling central's create_blacklist"))
return self.client.call(context, 'create_blacklist',
blacklist=blacklist)
def get_blacklist(self, context, blacklist_id):
LOG.info("get_blacklist: Calling central's get_blacklist.")
LOG.info(_LI("get_blacklist: Calling central's get_blacklist."))
return self.client.call(context, 'get_blacklist',
blacklist_id=blacklist_id)
def find_blacklists(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info("find_blacklists: Calling central's find_blacklists.")
LOG.info(_LI("find_blacklists: Calling central's find_blacklists."))
return self.client.call(
context, 'find_blacklists', criterion=criterion, marker=marker,
limit=limit, sort_key=sort_key, sort_dir=sort_dir)
def find_blacklist(self, context, criterion):
LOG.info("find_blacklist: Calling central's find_blacklist.")
LOG.info(_LI("find_blacklist: Calling central's find_blacklist."))
return self.client.call(context, 'find_blacklist', criterion=criterion)
def update_blacklist(self, context, blacklist_id, values):
LOG.info("update_blacklist: Calling central's update_blacklist.")
LOG.info(_LI("update_blacklist: Calling central's update_blacklist."))
return self.client.call(context, 'update_blacklist',
blacklist_id=blacklist_id, values=values)
def delete_blacklist(self, context, blacklist_id):
LOG.info("delete_blacklist: Calling central's delete blacklist.")
LOG.info(_LI("delete_blacklist: Calling central's delete blacklist."))
return self.client.call(context, 'delete_blacklist',
blacklist_id=blacklist_id)

View File

@ -21,6 +21,7 @@ from oslo.config import cfg
from oslo import messaging
from designate.openstack.common import log as logging
from designate.openstack.common.gettextutils import _LI
from designate import backend
from designate import exceptions
from designate import network_api
@ -74,10 +75,10 @@ class Service(service.Service):
tlds = self.storage_api.find_tlds({})
if tlds:
self.check_for_tlds = True
LOG.info("Checking for TLDs")
LOG.info(_LI("Checking for TLDs"))
else:
self.check_for_tlds = False
LOG.info("NOT checking for TLDs")
LOG.info(_LI("NOT checking for TLDs"))
self.backend.start()
@ -1269,7 +1270,7 @@ class Service(service.Service):
zone = self.storage_api.find_domain(
elevated_context, {'name': zone_name})
except exceptions.DomainNotFound:
msg = 'Creating zone for %s:%s - %s zone %s' % \
msg = _LI('Creating zone for %s:%s - %s zone %s') % \
(floatingip_id, region, fip['address'], zone_name)
LOG.info(msg)

View File

@ -16,7 +16,7 @@
import itertools
from designate.openstack.common import context
from designate.openstack.common import log as logging
from designate.openstack.common.gettextutils import _
from designate.openstack.common.gettextutils import _LW
LOG = logging.getLogger(__name__)
@ -28,7 +28,7 @@ class DesignateContext(context.RequestContext):
instance_uuid=None, roles=[], service_catalog=None,
all_tenants=False, **kwargs):
if kwargs:
LOG.warn(_('Arguments dropped when creating context: %s') %
LOG.warn(_LW('Arguments dropped when creating context: %s') %
str(kwargs))
super(DesignateContext, self).__init__(
auth_token=auth_token,

View File

@ -19,6 +19,7 @@ from migrate.exceptions import (DatabaseAlreadyControlledError,
DatabaseNotControlledError)
from migrate.versioning import api as versioning_api
from designate.openstack.common import log as logging
from designate.openstack.common.gettextutils import _LI
from oslo.config import cfg
from designate.manage import base
@ -36,9 +37,9 @@ class DatabaseCommands(base.Commands):
url = cfg.CONF['storage:sqlalchemy'].database_connection
try:
LOG.info('Attempting to initialize database')
LOG.info(_LI('Attempting to initialize database'))
versioning_api.version_control(url=url, repository=REPOSITORY)
LOG.info('Database initialized successfully')
LOG.info(_LI('Database initialized successfully'))
except DatabaseAlreadyControlledError:
raise Exception('Database already initialized')
@ -57,8 +58,8 @@ class DatabaseCommands(base.Commands):
except DatabaseNotControlledError:
raise Exception('Database not yet initialized')
LOG.info("Attempting to synchronize database from version "
"'%s' to '%s'",
LOG.info(_LI("Attempting to synchronize database from version "
"'%s' to '%s'"),
current_version,
target_version if target_version is not None else "latest")
@ -69,7 +70,7 @@ class DatabaseCommands(base.Commands):
versioning_api.upgrade(url=url, repository=REPOSITORY,
version=version)
LOG.info('Database synchronized successfully')
LOG.info(_LI('Database synchronized successfully'))
def version(self):
url = cfg.CONF['storage:sqlalchemy'].database_connection

View File

@ -18,6 +18,7 @@ from migrate.exceptions import (DatabaseAlreadyControlledError,
DatabaseNotControlledError)
from migrate.versioning import api as versioning_api
from designate.openstack.common import log as logging
from designate.openstack.common.gettextutils import _LI
from oslo.config import cfg
from designate.manage import base
@ -38,9 +39,9 @@ class DatabaseCommands(base.Commands):
raise Exception('Migration Repository Not Found')
try:
LOG.info('Attempting to initialize PowerDNS database')
LOG.info(_LI('Attempting to initialize PowerDNS database'))
versioning_api.version_control(url=url, repository=REPOSITORY)
LOG.info('PowerDNS database initialized successfully')
LOG.info(_LI('PowerDNS database initialized successfully'))
except DatabaseAlreadyControlledError:
raise Exception('PowerDNS Database already initialized')
@ -59,8 +60,8 @@ class DatabaseCommands(base.Commands):
except DatabaseNotControlledError:
raise Exception('PowerDNS database not yet initialized')
LOG.info("Attempting to synchronize PowerDNS database from version "
"'%s' to '%s'",
LOG.info(_LI("Attempting to synchronize PowerDNS database "
"from version '%s' to '%s'"),
current_version,
target_version if target_version is not None else "latest")
@ -71,7 +72,7 @@ class DatabaseCommands(base.Commands):
versioning_api.upgrade(url=url, repository=REPOSITORY,
version=version)
LOG.info('PowerDNS database synchronized successfully')
LOG.info(_LI('PowerDNS database synchronized successfully'))
def version(self):
url = cfg.CONF['backend:powerdns'].database_connection

View File

@ -21,6 +21,8 @@ from designate import exceptions
from designate import rpc
from designate.central import rpcapi as central_rpcapi
from designate.openstack.common import log as logging
from designate.openstack.common.gettextutils import _LI
from designate.openstack.common.gettextutils import _LE
from designate.manage import base
from designate.schema import format
@ -108,7 +110,7 @@ class TLDCommands(base.Commands):
if not os.path.exists(input_file):
raise Exception('TLD Input file Not Found')
LOG.info("Importing TLDs from %s", input_file)
LOG.info(_LI("Importing TLDs from %s"), input_file)
error_lines = []
tlds_added = 0
@ -128,11 +130,11 @@ class TLDCommands(base.Commands):
tlds_added += self._validate_and_create_tld(line,
error_lines)
LOG.info("Number of tlds added: %d", tlds_added)
LOG.info(_LI("Number of tlds added: %d"), tlds_added)
errors = len(error_lines)
if errors > 0:
LOG.error("Number of errors: %d", errors)
LOG.error(_LE("Number of errors: %d"), errors)
# Sorting the errors and printing them so that it is easier to
# read the errors
LOG.error("Error Lines:\n%s", '\n'.join(sorted(error_lines)))
LOG.error(_LE("Error Lines:\n%s"), '\n'.join(sorted(error_lines)))

View File

@ -25,6 +25,8 @@ from designate import exceptions
from designate.openstack.common import log as logging
from designate.openstack.common import threadgroup
from designate.openstack.common.gettextutils import _LW
from designate.openstack.common.gettextutils import _LE
from designate.network_api.base import NetworkAPI
@ -117,12 +119,13 @@ class NeutronNetworkAPI(NetworkAPI):
# NOTE: 401 might be that the user doesn't have neutron
# activated in a particular region, we'll just log the failure
# and go on with our lives.
msg = "Calling Neutron resulted in a 401, please investigate."
LOG.warning(msg)
LOG.warning(_LW("Calling Neutron resulted in a 401, "
"please investigate."))
LOG.exception(e)
return
except Exception as e:
LOG.error('Failed calling Neutron %s - %s', region, endpoint)
LOG.error(_LE('Failed calling Neutron %s - %s'),
region, endpoint)
LOG.exception(e)
failed.append((e, endpoint, region))
return

View File

@ -16,6 +16,8 @@
from oslo.config import cfg
from designate.openstack.common import log as logging
from designate.openstack.common import policy
from designate.openstack.common.gettextutils import _
from designate.openstack.common.gettextutils import _LI
from designate import utils
from designate import exceptions
@ -60,7 +62,7 @@ def init(default_rule=None):
msg = 'Unable to determine appropriate policy json file'
raise exceptions.ConfigurationError(msg)
LOG.info('Using policy_file found at: %s' % policy_files[0])
LOG.info(_LI('Using policy_file found at: %s') % policy_files[0])
with open(policy_files[0]) as fh:
policy_string = fh.read()
@ -89,8 +91,10 @@ def check(rule, ctxt, target=None, do_raise=True, exc=exceptions.Forbidden):
extra = {'policy': {'rule': rule, 'target': target}}
if result:
LOG.audit("Policy check succeeded for rule '%s' on target %s",
rule, repr(target), extra=extra)
LOG.audit(_("Policy check succeeded for rule '%(rule)s' "
"on target %(target)s") %
{'rule': rule, 'target': repr(target)}, extra=extra)
else:
LOG.audit("Policy check failed for rule '%s' on target: %s",
rule, repr(target), extra=extra)
LOG.audit(_("Policy check failed for rule '%(rule)s' "
"on target %(target)s") %
{'rule': rule, 'target': repr(target)}, extra=extra)
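The audit calls above also move from positional logger arguments to named %(...)s placeholders interpolated into the translated string, which is the usual recommendation when a marked string carries more than one substitution, since translators may need to reorder them. A small hypothetical sketch of the same pattern:

# Hypothetical example of named placeholders in a translated message;
# the rule and target values are invented for illustration.
from designate.openstack.common.gettextutils import _

rule = 'find_domains'
target = {'tenant_id': 'demo'}
# Named keys keep each substitution unambiguous even if a translation
# reorders them.
message = _("Policy check succeeded for rule '%(rule)s' "
            "on target %(target)s") % {'rule': rule, 'target': repr(target)}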

View File

@ -26,7 +26,7 @@ from sqlalchemy.pool import NullPool, StaticPool
from oslo.config import cfg
from designate.openstack.common import log as logging
from designate.openstack.common.gettextutils import _
from designate.openstack.common.gettextutils import _LW
LOG = logging.getLogger(__name__)
@ -100,7 +100,7 @@ def ping_listener(dbapi_conn, connection_rec, connection_proxy):
dbapi_conn.cursor().execute('select 1')
except dbapi_conn.OperationalError as ex:
if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
LOG.warn('Got mysql server has gone away: %s', ex)
LOG.warn(_LW('Got mysql server has gone away: %s'), ex)
raise DisconnectionError("Database server went away")
else:
raise
@ -178,8 +178,8 @@ def get_engine(config_group):
if remaining == -1:
remaining = 'infinite'
while True:
msg = _('SQL connection failed. %s attempts left.')
LOG.warn(msg % remaining)
LOG.warn(_LW('SQL connection failed. %s attempts left.') %
remaining)
if remaining != 'infinite':
remaining -= 1
time.sleep(cfg.CONF[config_group].retry_interval)

View File

@ -15,6 +15,7 @@
# under the License.
from designate.openstack.common import timeutils
from designate.openstack.common import log as logging
from designate.openstack.common.gettextutils import _LW
from sqlalchemy import MetaData, Table, Column, Integer
LOG = logging.getLogger(__name__)
@ -34,8 +35,8 @@ def upgrade(migrate_engine):
domain_count = domains_table.count().execute().first()[0]
if domain_count > 0:
LOG.warn('A sync-domains is now required in order for the API '
'provided, and backend provided serial numbers to align')
LOG.warn(_LW('A sync-domains is now required in order for the API '
'provided, and backend provided serial numbers to align'))
def downgrade(migrate_engine):

View File

@ -18,6 +18,7 @@ from sqlalchemy.exc import IntegrityError
from sqlalchemy.schema import Table, Column, MetaData
from sqlalchemy.types import String
from designate.openstack.common import log as logging
from designate.openstack.common.gettextutils import _LW
LOG = logging.getLogger(__name__)
meta = MetaData()
@ -54,7 +55,7 @@ def upgrade(migrate_engine):
except IntegrityError:
if record.domain_id not in sync_domains:
sync_domains.append(record.domain_id)
LOG.warn("Domain '%s' needs to be synchronised" %
LOG.warn(_LW("Domain '%s' needs to be synchronised") %
record.domain_id)
records_table.delete()\