Merge "Remove log translations in keystone"

Authored by Jenkins on 2017-03-27 17:35:03 +00:00; committed by Gerrit Code Review
commit af4e98c770
41 changed files with 334 additions and 565 deletions
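The pattern of the change is uniform across the 41 files below: the per-level translation markers (_LI, _LW, _LE, _LC) are dropped from log calls, while the primary _ helper is kept for user-facing exception messages. A minimal, hypothetical before/after sketch of that pattern (the function and its body are illustrative, not taken from any one file in this diff):

    from oslo_log import log

    from keystone import exception
    from keystone.i18n import _   # kept: user-facing exception messages stay translated

    LOG = log.getLogger(__name__)

    def authenticate(user_id):
        # Before this change: LOG.warning(_LW('RBAC: Bypassing authorization'))
        # After: log messages are plain strings passed straight to oslo.log.
        LOG.warning('RBAC: Bypassing authorization')
        if user_id is None:
            # Exceptions are still translated with the primary '_' helper.
            raise exception.Unauthorized(
                _('Cannot Authenticate due to internal error.'))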

View File

@ -25,7 +25,7 @@ from keystone.common import driver_hints
from keystone.common import manager
import keystone.conf
from keystone import exception
from keystone.i18n import _, _LI, _LE
from keystone.i18n import _
from keystone import notifications
@ -145,16 +145,16 @@ class Manager(manager.Manager):
try:
self.role_api.get_role(CONF.member_role_id)
except exception.RoleNotFound:
LOG.info(_LI("Creating the default role %s "
"because it does not exist."),
LOG.info("Creating the default role %s "
"because it does not exist.",
CONF.member_role_id)
role = {'id': CONF.member_role_id,
'name': CONF.member_role_name}
try:
self.role_api.create_role(CONF.member_role_id, role)
except exception.Conflict:
LOG.info(_LI("Creating the default role %s failed because it "
"was already created"),
LOG.info("Creating the default role %s failed because it "
"was already created",
CONF.member_role_id)
def add_user_to_project(self, tenant_id, user_id):
@ -644,14 +644,14 @@ class Manager(manager.Manager):
_make_implied_ref_copy(
next_ref, implied_role['implied_role_id']))
if implied_ref in checked_role_refs:
msg = _LE('Circular reference found '
'role inference rules - %(prior_role_id)s.')
msg = ('Circular reference found '
'role inference rules - %(prior_role_id)s.')
LOG.error(msg, {'prior_role_id': next_ref['role_id']})
else:
ref_results.append(implied_ref)
role_refs_to_check.append(implied_ref)
except exception.NotImplemented:
LOG.error(_LE('Role driver does not support implied roles.'))
LOG.error('Role driver does not support implied roles.')
return ref_results

View File

@ -27,7 +27,7 @@ from keystone.common import wsgi
import keystone.conf
from keystone import exception
from keystone.federation import constants
from keystone.i18n import _, _LW, _LE
from keystone.i18n import _
from keystone.resource import controllers as resource_controllers
@ -194,26 +194,26 @@ class Auth(controller.V3Controller):
user_ref['id'], default_project_id):
auth_info.set_scope(project_id=default_project_id)
else:
msg = _LW("User %(user_id)s doesn't have access to"
" default project %(project_id)s. The token"
" will be unscoped rather than scoped to the"
" project.")
msg = ("User %(user_id)s doesn't have access to"
" default project %(project_id)s. The token"
" will be unscoped rather than scoped to the"
" project.")
LOG.warning(msg,
{'user_id': user_ref['id'],
'project_id': default_project_id})
else:
msg = _LW("User %(user_id)s's default project %(project_id)s"
" is disabled. The token will be unscoped rather"
" than scoped to the project.")
msg = ("User %(user_id)s's default project %(project_id)s"
" is disabled. The token will be unscoped rather"
" than scoped to the project.")
LOG.warning(msg,
{'user_id': user_ref['id'],
'project_id': default_project_id})
except (exception.ProjectNotFound, exception.DomainNotFound):
# default project or default project domain doesn't exist,
# will issue unscoped token instead
msg = _LW("User %(user_id)s's default project %(project_id)s not"
" found. The token will be unscoped rather than"
" scoped to the project.")
msg = ("User %(user_id)s's default project %(project_id)s not"
" found. The token will be unscoped rather than"
" scoped to the project.")
LOG.warning(msg, {'user_id': user_ref['id'],
'project_id': default_project_id})
@ -227,11 +227,11 @@ class Auth(controller.V3Controller):
if not isinstance(auth_context, core.AuthContext):
LOG.error(
_LE('`auth_context` passed to the Auth controller '
'`authenticate` method is not of type '
'`keystone.auth.controllers.AuthContext`. For security '
'purposes this is required. This is likely a programming '
'error. Received object of type `%s`'), type(auth_context))
'`auth_context` passed to the Auth controller '
'`authenticate` method is not of type '
'`keystone.auth.controllers.AuthContext`. For security '
'purposes this is required. This is likely a programming '
'error. Received object of type `%s`', type(auth_context))
raise exception.Unauthorized(
_('Cannot Authenticate due to internal error.'))
# The 'external' method allows any 'REMOTE_USER' based authentication

View File

@ -22,7 +22,7 @@ from keystone.common import dependency
from keystone.common import utils
import keystone.conf
from keystone import exception
from keystone.i18n import _, _LI, _LE
from keystone.i18n import _
from keystone.identity.backends import resource_options as ro
@ -102,9 +102,9 @@ class AuthContext(dict):
# special treatment for 'expires_at', we are going to take
# the earliest expiration instead.
if existing_val != val:
LOG.info(_LI('"expires_at" has conflicting values '
'%(existing)s and %(new)s. Will use the '
'earliest value.'),
LOG.info('"expires_at" has conflicting values '
'%(existing)s and %(new)s. Will use the '
'earliest value.',
{'existing': existing_val, 'new': val})
if existing_val is None or val is None:
val = existing_val or val
@ -434,8 +434,8 @@ class UserMFARulesValidator(object):
# processing.
rule_set = []
if not isinstance(rules, list):
LOG.error(_LE('Corrupt rule data structure for user %(user_id)s, '
'no rules loaded.'),
LOG.error('Corrupt rule data structure for user %(user_id)s, '
'no rules loaded.',
{'user_id': user_id})
# Corrupt Data means no rules. Auth success > MFA rules in this
# case.
@ -448,8 +448,8 @@ class UserMFARulesValidator(object):
if not isinstance(r_list, list):
# Rule was not a list, it is invalid, drop the rule from
# being considered.
LOG.info(_LI('Ignoring Rule %(rule)r; rule must be a list of '
'strings.'),
LOG.info('Ignoring Rule %(rule)r; rule must be a list of '
'strings.',
{'type': type(r_list)})
continue
@ -460,8 +460,8 @@ class UserMFARulesValidator(object):
if not isinstance(item, six.string_types):
# Rules may only contain strings for method names
# Reject a rule with non-string values
LOG.info(_LI('Ignoring Rule %(rule)r; rule contains '
'non-string values.'),
LOG.info('Ignoring Rule %(rule)r; rule contains '
'non-string values.',
{'rule': r_list})
# Rule is known to be bad, drop it from consideration.
_ok_rule = False

View File

@ -23,7 +23,7 @@ from keystone.common import dependency
from keystone import exception
from keystone.federation import constants as federation_constants
from keystone.federation import utils
from keystone.i18n import _, _LE, _LI
from keystone.i18n import _
from keystone.models import token_model
from keystone import notifications
@ -123,9 +123,9 @@ def handle_unscoped_token(request, auth_payload, resource_api, federation_api,
# be useful.
if shadow_role['name'] not in existing_roles:
LOG.error(
_LE('Role %s was specified in the mapping but does '
'not exist. All roles specified in a mapping must '
'exist before assignment.'),
'Role %s was specified in the mapping but does '
'not exist. All roles specified in a mapping must '
'exist before assignment.',
shadow_role['name']
)
# NOTE(lbragstad): The RoleNotFound exception usually
@ -136,8 +136,8 @@ def handle_unscoped_token(request, auth_payload, resource_api, federation_api,
if (role['domain_id'] is not None and
role['domain_id'] != idp_domain_id):
LOG.error(
_LE('Role %(role)s is a domain-specific role and '
'cannot be assigned within %(domain)s.'),
'Role %(role)s is a domain-specific role and '
'cannot be assigned within %(domain)s.',
{'role': shadow_role['name'], 'domain': idp_domain_id}
)
raise exception.DomainSpecificRoleNotWithinIdPDomain(
@ -157,8 +157,8 @@ def handle_unscoped_token(request, auth_payload, resource_api, federation_api,
)
except exception.ProjectNotFound:
LOG.info(
_LI('Project %(project_name)s does not exist. It will be '
'automatically provisioning for user %(user_id)s.'),
'Project %(project_name)s does not exist. It will be '
'automatically provisioning for user %(user_id)s.',
{'project_name': shadow_project['name'],
'user_id': user['id']}
)

View File

@ -21,7 +21,6 @@ from keystone.catalog.backends import base
from keystone.common import utils
import keystone.conf
from keystone import exception
from keystone.i18n import _LC
LOG = log.getLogger(__name__)
@ -103,7 +102,7 @@ class Catalog(base.CatalogDriverBase):
with open(template_file) as f:
self.templates = parse_templates(f)
except IOError:
LOG.critical(_LC('Unable to open template file %s'), template_file)
LOG.critical('Unable to open template file %s', template_file)
raise
# region crud

View File

@ -39,7 +39,7 @@ from keystone.credential.providers import fernet as credential_fernet
from keystone import exception
from keystone.federation import idp
from keystone.federation import utils as mapping_engine
from keystone.i18n import _, _LE, _LI, _LW
from keystone.i18n import _
from keystone.server import backends
from keystone import token
@ -190,10 +190,10 @@ class BootStrap(BaseApp):
self.resource_manager.create_domain(
domain_id=default_domain['id'],
domain=default_domain)
LOG.info(_LI('Created domain %s'), default_domain['id'])
LOG.info('Created domain %s', default_domain['id'])
except exception.Conflict:
# NOTE(morganfainberg): Domain already exists, continue on.
LOG.info(_LI('Domain %s already exists, skipping creation.'),
LOG.info('Domain %s already exists, skipping creation.',
default_domain['id'])
try:
@ -206,9 +206,9 @@ class BootStrap(BaseApp):
'the cloud.',
'name': self.project_name}
)
LOG.info(_LI('Created project %s'), self.project_name)
LOG.info('Created project %s', self.project_name)
except exception.Conflict:
LOG.info(_LI('Project %s already exists, skipping creation.'),
LOG.info('Project %s already exists, skipping creation.',
self.project_name)
project = self.resource_manager.get_project_by_name(
self.project_name, default_domain['id'])
@ -218,7 +218,7 @@ class BootStrap(BaseApp):
try:
user = self.identity_manager.get_user_by_name(self.username,
default_domain['id'])
LOG.info(_LI('User %s already exists, skipping creation.'),
LOG.info('User %s already exists, skipping creation.',
self.username)
# If the user is not enabled, re-enable them. This also helps
@ -246,12 +246,12 @@ class BootStrap(BaseApp):
# as a recovery tool, without having to create a new user.
if update:
user = self.identity_manager.update_user(user['id'], update)
LOG.info(_LI('Reset password for user %s.'), self.username)
LOG.info('Reset password for user %s.', self.username)
if not enabled and user['enabled']:
# Although we always try to enable the user, this log
# message only makes sense if we know that the user was
# previously disabled.
LOG.info(_LI('Enabled user %s.'), self.username)
LOG.info('Enabled user %s.', self.username)
except exception.UserNotFound:
user = self.identity_manager.create_user(
user_ref={'name': self.username,
@ -260,7 +260,7 @@ class BootStrap(BaseApp):
'password': self.password
}
)
LOG.info(_LI('Created user %s'), self.username)
LOG.info('Created user %s', self.username)
# NOTE(morganfainberg): Do not create the role if it already exists.
try:
@ -269,9 +269,9 @@ class BootStrap(BaseApp):
role={'name': self.role_name,
'id': self.role_id},
)
LOG.info(_LI('Created role %s'), self.role_name)
LOG.info('Created role %s', self.role_name)
except exception.Conflict:
LOG.info(_LI('Role %s exists, skipping creation.'), self.role_name)
LOG.info('Role %s exists, skipping creation.', self.role_name)
# NOTE(davechen): There is no backend method to get the role
# by name, so build the hints to list the roles and filter by
# name instead.
@ -288,14 +288,14 @@ class BootStrap(BaseApp):
tenant_id=self.project_id,
role_id=self.role_id
)
LOG.info(_LI('Granted %(role)s on %(project)s to user'
' %(username)s.'),
LOG.info('Granted %(role)s on %(project)s to user'
' %(username)s.',
{'role': self.role_name,
'project': self.project_name,
'username': self.username})
except exception.Conflict:
LOG.info(_LI('User %(username)s already has %(role)s on '
'%(project)s.'),
LOG.info('User %(username)s already has %(role)s on '
'%(project)s.',
{'username': self.username,
'role': self.role_name,
'project': self.project_name})
@ -305,9 +305,9 @@ class BootStrap(BaseApp):
self.catalog_manager.create_region(
region_ref={'id': self.region_id}
)
LOG.info(_LI('Created region %s'), self.region_id)
LOG.info('Created region %s', self.region_id)
except exception.Conflict:
LOG.info(_LI('Region %s exists, skipping creation.'),
LOG.info('Region %s exists, skipping creation.',
self.region_id)
if self.public_url or self.admin_url or self.internal_url:
@ -364,12 +364,12 @@ class BootStrap(BaseApp):
endpoint_id=endpoint_ref['id'],
endpoint_ref=endpoint_ref)
LOG.info(_LI('Created %(interface)s endpoint %(url)s'),
LOG.info('Created %(interface)s endpoint %(url)s',
{'interface': interface, 'url': url})
else:
# NOTE(jamielennox): electing not to update existing
# endpoints here. There may be call to do so in future.
LOG.info(_LI('Skipping %s endpoint as already created'),
LOG.info('Skipping %s endpoint as already created',
interface)
self.endpoints[interface] = endpoint_ref['id']
@ -464,10 +464,10 @@ class DbSync(BaseApp):
repo='data_migration_repo')
contract_version = upgrades.get_db_version(repo='contract_repo')
except migration.exception.DbMigrationError:
LOG.info(_LI('Your database is not currently under version '
'control or the database is already controlled. Your '
'first step is to run `keystone-manage db_sync '
'--expand`.'))
LOG.info('Your database is not currently under version '
'control or the database is already controlled. Your '
'first step is to run `keystone-manage db_sync '
'--expand`.')
return 2
repo = migrate.versioning.repository.Repository(
@ -476,34 +476,33 @@ class DbSync(BaseApp):
if (contract_version > migrate_version or migrate_version >
expand_version):
LOG.info(_LI('Your database is out of sync. For more information '
'refer to https://docs.openstack.org/developer/'
'keystone/upgrading.html'))
LOG.info('Your database is out of sync. For more information '
'refer to https://docs.openstack.org/developer/'
'keystone/upgrading.html')
status = 1
elif migration_script_version > expand_version:
LOG.info(_LI('Your database is not up to date. Your first step is '
'to run `keystone-manage db_sync --expand`.'))
LOG.info('Your database is not up to date. Your first step is '
'to run `keystone-manage db_sync --expand`.')
status = 2
elif expand_version > migrate_version:
LOG.info(_LI('Expand version is ahead of migrate. Your next step '
'is to run `keystone-manage db_sync --migrate`.'))
LOG.info('Expand version is ahead of migrate. Your next step '
'is to run `keystone-manage db_sync --migrate`.')
status = 3
elif migrate_version > contract_version:
LOG.info(_LI('Migrate version is ahead of contract. Your next '
'step is to run `keystone-manage db_sync --contract`.'
))
LOG.info('Migrate version is ahead of contract. Your next '
'step is to run `keystone-manage db_sync --contract`.')
status = 4
elif (migration_script_version == expand_version == migrate_version ==
contract_version):
LOG.info(_LI('All db_sync commands are upgraded to the same '
'version and up-to-date.'))
LOG.info(_LI('The latest installed migration script version is: '
'%(script)d.\nCurrent repository versions:\nExpand: '
'%(expand)d \nMigrate: %(migrate)d\nContract: '
'%(contract)d') % {'script': migration_script_version,
'expand': expand_version,
'migrate': migrate_version,
'contract': contract_version})
LOG.info('All db_sync commands are upgraded to the same '
'version and up-to-date.')
LOG.info('The latest installed migration script version is: '
'%(script)d.\nCurrent repository versions:\nExpand: '
'%(expand)d \nMigrate: %(migrate)d\nContract: '
'%(contract)d' % {'script': migration_script_version,
'expand': expand_version,
'migrate': migrate_version,
'contract': contract_version})
return status
@staticmethod
@ -610,11 +609,11 @@ class PKISetup(BaseCertificateSetup):
def main(cls):
versionutils.report_deprecated_feature(
LOG,
_LW("keystone-manage pki_setup is deprecated as of Mitaka in "
"favor of not using PKI tokens and may be removed in 'O' "
"release."))
LOG.warning(_LW('keystone-manage pki_setup is not recommended for '
'production use.'))
"keystone-manage pki_setup is deprecated as of Mitaka in "
"favor of not using PKI tokens and may be removed in 'O' "
"release.")
LOG.warning('keystone-manage pki_setup is not recommended for '
'production use.')
keystone_user_id, keystone_group_id = cls.get_user_group()
conf_pki = openssl.ConfigurePKI(keystone_user_id, keystone_group_id,
rebuild=CONF.command.rebuild)
@ -848,8 +847,8 @@ class TokenFlush(BaseApp):
except exception.NotImplemented:
# NOTE(ravelar159): Stop NotImplemented from unsupported token
# driver when using token_flush and print out warning instead
LOG.warning(_LW('Token driver %s does not support token_flush. '
'The token_flush command had no effect.'),
LOG.warning('Token driver %s does not support token_flush. '
'The token_flush command had no effect.',
CONF.token.driver)
@ -945,7 +944,7 @@ def _domain_config_finder(conf_dir):
:returns: generator yielding (filename, domain_name) tuples
"""
LOG.info(_LI('Scanning %r for domain config files'), conf_dir)
LOG.info('Scanning %r for domain config files', conf_dir)
for r, d, f in os.walk(conf_dir):
for fname in f:
if (fname.startswith(DOMAIN_CONF_FHEAD) and
@ -956,8 +955,8 @@ def _domain_config_finder(conf_dir):
yield (os.path.join(r, fname), domain_name)
continue
LOG.warning(_LW('Ignoring file (%s) while scanning '
'domain config directory'), fname)
LOG.warning('Ignoring file (%s) while scanning '
'domain config directory', fname)
class DomainConfigUploadFiles(object):
@ -1045,8 +1044,8 @@ class DomainConfigUploadFiles(object):
sections)
return True
except Exception as e:
msg = _LE('Error processing config file for domain: '
'%(domain_name)s, file: %(filename)s, error: %(error)s')
msg = ('Error processing config file for domain: '
'%(domain_name)s, file: %(filename)s, error: %(error)s')
LOG.error(msg,
{'domain_name': domain_name,
'filename': file_name,
@ -1082,13 +1081,13 @@ class DomainConfigUploadFiles(object):
for filename, domain_name in self._domain_config_finder(conf_dir):
if self._upload_config_to_database(filename, domain_name):
success_cnt += 1
LOG.info(_LI('Successfully uploaded domain config %r'),
LOG.info('Successfully uploaded domain config %r',
filename)
else:
failure_cnt += 1
if success_cnt == 0:
LOG.warning(_LW('No domain configs uploaded from %r'), conf_dir)
LOG.warning('No domain configs uploaded from %r', conf_dir)
if failure_cnt:
return False
@ -1136,9 +1135,9 @@ class DomainConfigUpload(BaseApp):
def main():
versionutils.report_deprecated_feature(
LOG,
_LW("keystone-manage domain_config_upload is deprecated as of "
"Newton in favor of setting domain config options via the API "
"and may be removed in 'P' release."))
"keystone-manage domain_config_upload is deprecated as of "
"Newton in favor of setting domain config options via the API "
"and may be removed in 'P' release.")
dcu = DomainConfigUploadFiles()
status = dcu.run()
if status is not None:
@ -1367,6 +1366,6 @@ def main(argv=None, config_files=None):
usage='%(prog)s [' + '|'.join([cmd.name for cmd in CMDS]) + ']',
default_config_files=config_files)
if not CONF.default_config_files:
LOG.warning(_LW('Config file not found, using default configs.'))
LOG.warning('Config file not found, using default configs.')
keystone.conf.setup_logging()
CONF.command.cmd_class.main()

View File

@ -19,7 +19,7 @@
from oslo_log import log
from keystone import exception
from keystone.i18n import _, _LW
from keystone.i18n import _
from keystone.models import token_model
@ -66,7 +66,7 @@ def token_to_auth_context(token):
try:
auth_context['user_id'] = token.user_id
except KeyError:
LOG.warning(_LW('RBAC: Invalid user data in token'))
LOG.warning('RBAC: Invalid user data in token')
raise exception.Unauthorized(_('No user_id in token'))
auth_context['user_domain_id'] = token.user_domain_id

View File

@ -28,7 +28,7 @@ from keystone.common import utils
from keystone.common import wsgi
import keystone.conf
from keystone import exception
from keystone.i18n import _, _LW
from keystone.i18n import _
from keystone.models import token_model
@ -100,7 +100,7 @@ def protected(callback=None):
request.assert_authenticated()
if request.context.is_admin:
LOG.warning(_LW('RBAC: Bypassing authorization'))
LOG.warning('RBAC: Bypassing authorization')
elif callback is not None:
prep_info = {'f_name': f.__name__,
'input_attr': kwargs}
@ -228,7 +228,7 @@ def filterprotected(*filters, **callback):
LOG.debug('RBAC: Authorization granted')
else:
LOG.warning(_LW('RBAC: Bypassing authorization'))
LOG.warning('RBAC: Bypassing authorization')
return f(self, request, filters, **kwargs)
return wrapper
return _filterprotected
@ -723,12 +723,12 @@ class V3Controller(wsgi.Application):
# issue a deprecation warning.
versionutils.report_deprecated_feature(
LOG,
_LW('Not specifying a domain during a create user, group or '
'project call, and relying on falling back to the '
'default domain, is deprecated as of Liberty. There is no '
'plan to remove this compatibility, however, future API '
'versions may remove this, so please specify the domain '
'explicitly or use a domain-scoped token.'))
'Not specifying a domain during a create user, group or '
'project call, and relying on falling back to the '
'default domain, is deprecated as of Liberty. There is no '
'plan to remove this compatibility, however, future API '
'versions may remove this, so please specify the domain '
'explicitly or use a domain-scoped token.')
return CONF.identity.default_domain_id
def _normalize_domain_id(self, request, ref):
@ -752,7 +752,7 @@ class V3Controller(wsgi.Application):
"""
if request.context.is_admin:
LOG.warning(_LW('RBAC: Bypassing authorization'))
LOG.warning('RBAC: Bypassing authorization')
else:
action = 'identity:%s' % prep_info['f_name']
# TODO(henry-nash) need to log the target attributes as well

View File

@ -18,7 +18,6 @@ from cryptography import fernet
from oslo_log import log
import keystone.conf
from keystone.i18n import _LE, _LW, _LI
LOG = log.getLogger(__name__)
@ -56,9 +55,9 @@ class FernetUtils(object):
if not is_valid:
LOG.error(
_LE('Either [%(config_group)s] key_repository does not exist '
'or Keystone does not have sufficient permission to '
'access it: %(key_repo)s'),
'Either [%(config_group)s] key_repository does not exist '
'or Keystone does not have sufficient permission to '
'access it: %(key_repo)s',
{'key_repo': self.key_repository,
'config_group': self.config_group})
else:
@ -66,8 +65,8 @@ class FernetUtils(object):
stat_info = os.stat(self.key_repository)
if(stat_info.st_mode & stat.S_IROTH or
stat_info.st_mode & stat.S_IXOTH):
LOG.warning(_LW(
'key_repository is world readable: %s'),
LOG.warning(
'key_repository is world readable: %s',
self.key_repository)
return is_valid
@ -76,17 +75,17 @@ class FernetUtils(object):
keystone_group_id=None):
"""Attempt to create the key directory if it doesn't exist."""
if not os.access(self.key_repository, os.F_OK):
LOG.info(_LI(
LOG.info(
'key_repository does not appear to exist; attempting to '
'create it'))
'create it')
try:
os.makedirs(self.key_repository, 0o700)
except OSError:
LOG.error(_LE(
LOG.error(
'Failed to create key_repository: either it already '
'exists or you don\'t have sufficient permissions to '
'create it'))
'create it')
if keystone_user_id and keystone_group_id:
os.chown(
@ -94,10 +93,10 @@ class FernetUtils(object):
keystone_user_id,
keystone_group_id)
elif keystone_user_id or keystone_group_id:
LOG.warning(_LW(
LOG.warning(
'Unable to change the ownership of key_repository without '
'a keystone user ID and keystone group ID both being '
'provided: %s') % self.key_repository)
'provided: %s' % self.key_repository)
def _create_new_key(self, keystone_user_id, keystone_group_id):
"""Securely create a new encryption key.
@ -126,10 +125,10 @@ class FernetUtils(object):
os.setegid(keystone_group_id)
os.seteuid(keystone_user_id)
elif keystone_user_id or keystone_group_id:
LOG.warning(_LW(
LOG.warning(
'Unable to change the ownership of the new key without a '
'keystone user ID and keystone group ID both being provided: '
'%s') %
'%s' %
self.key_repository)
# Determine the file name of the new key
key_file = os.path.join(self.key_repository, '0.tmp')
@ -141,7 +140,7 @@ class FernetUtils(object):
f.flush()
create_success = True
except IOError:
LOG.error(_LE('Failed to create new temporary key: %s'), key_file)
LOG.error('Failed to create new temporary key: %s', key_file)
raise
finally:
# After writing the key, set the umask back to it's original value.
@ -155,7 +154,7 @@ class FernetUtils(object):
if not create_success and os.access(key_file, os.F_OK):
os.remove(key_file)
LOG.info(_LI('Created a new temporary key: %s'), key_file)
LOG.info('Created a new temporary key: %s', key_file)
def _become_valid_new_key(self):
"""Make the tmp new key a valid new key.
@ -167,7 +166,7 @@ class FernetUtils(object):
os.rename(tmp_key_file, valid_key_file)
LOG.info(_LI('Become a valid new key: %s'), valid_key_file)
LOG.info('Become a valid new key: %s', valid_key_file)
def initialize_key_repository(self, keystone_user_id=None,
keystone_group_id=None):
@ -180,7 +179,7 @@ class FernetUtils(object):
# make sure we have work to do before proceeding
if os.access(os.path.join(self.key_repository, '0'),
os.F_OK):
LOG.info(_LI('Key repository is already initialized; aborting.'))
LOG.info('Key repository is already initialized; aborting.')
return
# bootstrap an existing key
@ -221,19 +220,19 @@ class FernetUtils(object):
else:
key_files[key_id] = path
LOG.info(_LI('Starting key rotation with %(count)s key files: '
'%(list)s'), {
'count': len(key_files),
'list': list(key_files.values())})
LOG.info('Starting key rotation with %(count)s key files: '
'%(list)s', {
'count': len(key_files),
'list': list(key_files.values())})
# add a tmp new key to the rotation, which will be the *next* primary
self._create_tmp_new_key(keystone_user_id, keystone_group_id)
# determine the number of the new primary key
current_primary_key = max(key_files.keys())
LOG.info(_LI('Current primary key is: %s'), current_primary_key)
LOG.info('Current primary key is: %s', current_primary_key)
new_primary_key = current_primary_key + 1
LOG.info(_LI('Next primary key will be: %s'), new_primary_key)
LOG.info('Next primary key will be: %s', new_primary_key)
# promote the next primary key to be the primary
os.rename(
@ -244,7 +243,7 @@ class FernetUtils(object):
key_files[new_primary_key] = os.path.join(
self.key_repository,
str(new_primary_key))
LOG.info(_LI('Promoted key 0 to be the primary: %s'), new_primary_key)
LOG.info('Promoted key 0 to be the primary: %s', new_primary_key)
# rename the tmp key to the real staged key
self._become_valid_new_key()
@ -259,7 +258,7 @@ class FernetUtils(object):
while len(keys) > (max_active_keys - 1):
index_to_purge = keys.pop()
key_to_purge = key_files[index_to_purge]
LOG.info(_LI('Excess key to purge: %s'), key_to_purge)
LOG.info('Excess key to purge: %s', key_to_purge)
os.remove(key_to_purge)
def load_keys(self, use_null_key=False):

View File

@ -20,7 +20,6 @@ from oslo_log import log
from keystone.common import utils
import keystone.conf
from keystone.i18n import _LI, _LE, _LW
LOG = log.getLogger(__name__)
CONF = keystone.conf.CONF
@ -71,13 +70,13 @@ class BaseCertificateConfigure(object):
if b'OpenSSL 0.' in openssl_ver:
self.ssl_dictionary['default_md'] = 'sha1'
except subprocess.CalledProcessError:
LOG.warning(_LW('Failed to invoke ``openssl version``, '
'assuming is v1.0 or newer'))
LOG.warning('Failed to invoke ``openssl version``, '
'assuming is v1.0 or newer')
self.ssl_dictionary.update(kwargs)
def exec_command(self, command):
to_exec = [part % self.ssl_dictionary for part in command]
LOG.info(_LI('Running command - %s'), ' '.join(to_exec))
LOG.info('Running command - %s', ' '.join(to_exec))
try:
# NOTE(shaleh): use check_output instead of the simpler
# `check_call()` in order to log any output from an error.
@ -87,8 +86,8 @@ class BaseCertificateConfigure(object):
to_exec,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
msg = _LE("Command %(to_exec)s exited with %(retcode)s - "
"%(output)s)")
msg = ("Command %(to_exec)s exited with %(retcode)s - "
"%(output)s)")
LOG.error(msg,
{'to_exec': to_exec,
'retcode': e.returncode,
@ -112,8 +111,8 @@ class BaseCertificateConfigure(object):
try:
os.remove(file_path)
except OSError as exc:
msg = _LE("Failed to remove file %(file_path)r: "
"%(error)s")
msg = ("Failed to remove file %(file_path)r: "
"%(error)s")
LOG.error(msg,
{'file_path': file_path,
'error': exc.strerror})

View File

@ -14,7 +14,6 @@ from oslo_log import log
import osprofiler.initializer
import keystone.conf
from keystone.i18n import _LI
CONF = keystone.conf.CONF
@ -37,11 +36,11 @@ def setup(name, host='0.0.0.0'): # nosec
service=name,
host=host
)
LOG.info(_LI("OSProfiler is enabled.\n"
"Traces provided from the profiler "
"can only be subscribed to using the same HMAC keys that "
"are configured in Keystone's configuration file "
"under the [profiler] section. \n To disable OSprofiler "
"set in /etc/keystone/keystone.conf:\n"
"[profiler]\n"
"enabled=false"))
LOG.info("OSProfiler is enabled.\n"
"Traces provided from the profiler "
"can only be subscribed to using the same HMAC keys that "
"are configured in Keystone's configuration file "
"under the [profiler] section. \n To disable OSprofiler "
"set in /etc/keystone/keystone.conf:\n"
"[profiler]\n"
"enabled=false")

View File

@ -17,7 +17,6 @@ import sqlalchemy as sql
from keystone.assignment.backends import sql as assignment_sql
from keystone.common import sql as ks_sql
from keystone.i18n import _LE
from keystone.identity.mapping_backends import mapping as mapping_backend
@ -245,7 +244,7 @@ def upgrade(migrate_engine):
try:
table.create()
except Exception:
LOG.exception(_LE('Exception while creating table: %r'), table)
LOG.exception('Exception while creating table: %r', table)
raise
# Unique Constraints

View File

@ -37,7 +37,7 @@ from six import moves
from keystone.common import authorization
import keystone.conf
from keystone import exception
from keystone.i18n import _, _LE, _LW
from keystone.i18n import _
CONF = keystone.conf.CONF
@ -103,7 +103,7 @@ def verify_length_and_trunc_password(password):
if CONF.strict_password_check:
raise exception.PasswordVerificationError(size=max_length)
else:
msg = _LW("Truncating user password to %d characters.")
msg = "Truncating user password to %d characters."
LOG.warning(msg, max_length)
return password[:max_length]
else:
@ -241,10 +241,10 @@ def setup_remote_pydev_debug():
stderrToServer=True)
return True
except Exception:
LOG.exception(_LE(
LOG.exception(
'Error setting up the debug environment. Verify that the '
'option --debug-url has the format <host>:<port> and that a '
'debugger processes is listening on that port.'))
'debugger processes is listening on that port.')
raise
@ -606,25 +606,24 @@ def format_url(url, substitutions, silent_keyerror_failures=None):
try:
result = url.replace('$(', '%(') % substitutions
except AttributeError:
msg = _LE("Malformed endpoint - %(url)r is not a string")
msg = "Malformed endpoint - %(url)r is not a string"
LOG.error(msg, {"url": url})
raise exception.MalformedEndpoint(endpoint=url)
except KeyError as e:
if not e.args or e.args[0] not in allow_keyerror:
msg = _LE("Malformed endpoint %(url)s - unknown key "
"%(keyerror)s")
msg = "Malformed endpoint %(url)s - unknown key %(keyerror)s"
LOG.error(msg, {"url": url, "keyerror": e})
raise exception.MalformedEndpoint(endpoint=url)
else:
result = None
except TypeError as e:
msg = _LE("Malformed endpoint '%(url)s'. The following type error "
"occurred during string substitution: %(typeerror)s")
msg = ("Malformed endpoint '%(url)s'. The following type error "
"occurred during string substitution: %(typeerror)s")
LOG.error(msg, {"url": url, "typeerror": e})
raise exception.MalformedEndpoint(endpoint=url)
except ValueError:
msg = _LE("Malformed endpoint %s - incomplete format "
"(are you missing a type notifier ?)")
msg = ("Malformed endpoint %s - incomplete format "
"(are you missing a type notifier ?)")
LOG.error(msg, url)
raise exception.MalformedEndpoint(endpoint=url)
return result

View File

@ -19,7 +19,7 @@ from oslo_log import log
import six
from keystone import exception
from keystone.i18n import _, _LE
from keystone.i18n import _
CONF = cfg.CONF
@ -40,8 +40,8 @@ def validate_password(password):
raise exception.PasswordRequirementsValidationError(
detail=pattern_desc)
except re.error:
msg = _LE("Unable to validate password due to invalid regular "
"expression - password_regex: ")
msg = ("Unable to validate password due to invalid regular "
"expression - password_regex: ")
LOG.error(msg, pattern)
detail = _("Unable to validate password due to invalid "
"configuration")

View File

@ -42,7 +42,7 @@ from keystone.common import request as request_mod
from keystone.common import utils
import keystone.conf
from keystone import exception
from keystone.i18n import _, _LI, _LW
from keystone.i18n import _
from keystone.models import token_model
@ -105,7 +105,7 @@ def validate_token_bind(context, token_ref):
LOG.info(msg)
raise exception.Unauthorized(msg)
LOG.info(_LI('Kerberos bind authentication successful'))
LOG.info('Kerberos bind authentication successful')
elif bind_mode == 'permissive':
LOG.debug(("Ignoring unknown bind (due to permissive mode): "
@ -229,8 +229,8 @@ class Application(BaseApplication):
result = method(req, **params)
except exception.Unauthorized as e:
LOG.warning(
_LW("Authorization failed. %(exception)s from "
"%(remote_addr)s"),
"Authorization failed. %(exception)s from "
"%(remote_addr)s",
{'exception': e, 'remote_addr': req.environ['REMOTE_ADDR']})
return render_exception(e,
context=req.context_dict,

View File

@ -20,7 +20,7 @@ from keystone.common import fernet_utils
import keystone.conf
from keystone.credential.providers import core
from keystone import exception
from keystone.i18n import _, _LW
from keystone.i18n import _
CONF = keystone.conf.CONF
@ -70,11 +70,11 @@ class Provider(core.Provider):
crypto, keys = get_multi_fernet_keys()
if keys[0] == fernet_utils.NULL_KEY:
LOG.warning(_LW(
LOG.warning(
'Encrypting credentials with the null key. Please properly '
'encrypt credentials using `keystone-manage credential_setup`,'
' `keystone-manage credential_migrate`, and `keystone-manage '
'credential_rotate`'))
'credential_rotate`')
try:
return (

View File

@ -18,7 +18,7 @@ from keystone.common import dependency
from keystone.common import manager
import keystone.conf
from keystone import exception
from keystone.i18n import _, _LE, _LW
from keystone.i18n import _
CONF = keystone.conf.CONF
@ -88,8 +88,8 @@ class Manager(manager.Manager):
try:
return self.catalog_api.get_endpoint(endpoint_id)
except exception.EndpointNotFound:
msg = _LW('Endpoint %(endpoint_id)s referenced in '
'association for policy %(policy_id)s not found.')
msg = ('Endpoint %(endpoint_id)s referenced in '
'association for policy %(policy_id)s not found.')
LOG.warning(msg, {'policy_id': policy_id,
'endpoint_id': endpoint_id})
raise
@ -125,8 +125,8 @@ class Manager(manager.Manager):
"""
if region_id in regions_examined:
msg = _LE('Circular reference or a repeated entry found '
'in region tree - %(region_id)s.')
msg = ('Circular reference or a repeated entry found '
'in region tree - %(region_id)s.')
LOG.error(msg, {'region_id': ref.region_id})
return
@ -175,9 +175,9 @@ class Manager(manager.Manager):
endpoints, regions))
continue
msg = _LW('Unsupported policy association found - '
'Policy %(policy_id)s, Endpoint %(endpoint_id)s, '
'Service %(service_id)s, Region %(region_id)s, ')
msg = ('Unsupported policy association found - '
'Policy %(policy_id)s, Endpoint %(endpoint_id)s, '
'Service %(service_id)s, Region %(region_id)s, ')
LOG.warning(msg, {'policy_id': policy_id,
'endpoint_id': ref['endpoint_id'],
'service_id': ref['service_id'],
@ -191,8 +191,8 @@ class Manager(manager.Manager):
try:
return self.policy_api.get_policy(policy_id)
except exception.PolicyNotFound:
msg = _LW('Policy %(policy_id)s referenced in association '
'for endpoint %(endpoint_id)s not found.')
msg = ('Policy %(policy_id)s referenced in association '
'for endpoint %(endpoint_id)s not found.')
LOG.warning(msg, {'policy_id': policy_id,
'endpoint_id': endpoint_id})
raise
@ -225,8 +225,8 @@ class Manager(manager.Manager):
if region.get('parent_region_id') is not None:
region_id = region['parent_region_id']
if region_id in regions_examined:
msg = _LE('Circular reference or a repeated entry '
'found in region tree - %(region_id)s.')
msg = ('Circular reference or a repeated entry '
'found in region tree - %(region_id)s.')
LOG.error(msg, {'region_id': region_id})
break

View File

@ -18,7 +18,7 @@ import six
from six.moves import http_client
import keystone.conf
from keystone.i18n import _, _LW
from keystone.i18n import _
CONF = keystone.conf.CONF
@ -64,7 +64,7 @@ class Error(Exception):
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise
else:
LOG.warning(_LW('missing exception kwargs (programmer error)'))
LOG.warning('missing exception kwargs (programmer error)')
message = self.message_format
super(Error, self).__init__(message)

View File

@ -34,7 +34,7 @@ if not xmldsig:
from keystone.common import utils
import keystone.conf
from keystone import exception
from keystone.i18n import _, _LE
from keystone.i18n import _
LOG = log.getLogger(__name__)
@ -405,9 +405,9 @@ def _sign_assertion(assertion):
# explode like a thousand fiery supernovas made entirely of unsigned SAML.
for option in ('keyfile', 'certfile'):
if ',' in getattr(CONF.saml, option, ''):
raise exception.UnexpectedError(_LE(
raise exception.UnexpectedError(
'The configuration value in `keystone.conf [saml] %s` cannot '
'contain a comma (`,`). Please fix your configuration.') %
'contain a comma (`,`). Please fix your configuration.' %
option)
# xmlsec1 --sign --privkey-pem privkey,cert --id-attr:ID <tag> <file>
@ -443,7 +443,7 @@ def _sign_assertion(assertion):
# parsing.
stderr=subprocess.STDOUT)
except Exception as e:
msg = _LE('Error when signing assertion, reason: %(reason)s%(output)s')
msg = ('Error when signing assertion, reason: %(reason)s%(output)s')
LOG.error(msg,
{'reason': e,
'output': ' ' + e.output if hasattr(e, 'output') else ''})

View File

@ -23,7 +23,7 @@ import six
import keystone.conf
from keystone import exception
from keystone.i18n import _, _LW
from keystone.i18n import _
CONF = keystone.conf.CONF
@ -616,7 +616,7 @@ class RuleProcessor(object):
if 'user' in identity_value:
# if a mapping outputs more than one user name, log it
if user:
LOG.warning(_LW('Ignoring user name'))
LOG.warning('Ignoring user name')
else:
user = identity_value.get('user')
if 'group' in identity_value:

View File

@ -25,13 +25,3 @@ _translators = oslo_i18n.TranslatorFactory(domain='keystone')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
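With the log-level helpers removed, keystone/i18n.py is reduced to roughly the following (a sketch reconstructed from the lines kept in this hunk, not a verbatim copy of the file):

    import oslo_i18n

    _translators = oslo_i18n.TranslatorFactory(domain='keystone')

    # The primary translation function using the well-known name "_".
    # The per-log-level helpers (_LI, _LW, _LE, _LC) no longer exist.
    _ = _translators.primary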

View File

@ -30,7 +30,7 @@ from six.moves import map, zip
from keystone.common import driver_hints
from keystone import exception
from keystone.i18n import _, _LW
from keystone.i18n import _
LOG = log.getLogger(__name__)
@ -1037,9 +1037,9 @@ class KeystoneLDAPHandler(LDAPHandler):
# Exit condition no more data on server
break
else:
LOG.warning(_LW('LDAP Server does not support paging. '
'Disable paging in keystone.conf to '
'avoid this message.'))
LOG.warning('LDAP Server does not support paging. '
'Disable paging in keystone.conf to '
'avoid this message.')
self._disable_paging()
break
return res
@ -1222,9 +1222,9 @@ class BaseLdap(object):
try:
ldap_attr, attr_map = item.split(':')
except ValueError:
LOG.warning(_LW(
LOG.warning(
'Invalid additional attribute mapping: "%s". '
'Format must be <ldap_attribute>:<keystone_attribute>'),
'Format must be <ldap_attribute>:<keystone_attribute>',
item)
continue
mapping[ldap_attr] = attr_map
@ -1334,11 +1334,10 @@ class BaseLdap(object):
# logic here so it does not potentially break existing
# deployments. We need to fix our read-write LDAP logic so
# it does not get the ID from DN.
message = _LW('ID attribute %(id_attr)s for LDAP object %(dn)s '
'has multiple values and therefore cannot be used '
'as an ID. Will get the ID from DN instead') % (
{'id_attr': self.id_attr,
'dn': res[0]})
message = ('ID attribute %(id_attr)s for LDAP object %(dn)s '
'has multiple values and therefore cannot be used '
'as an ID. Will get the ID from DN instead') % (
{'id_attr': self.id_attr, 'dn': res[0]})
LOG.warning(message)
id_val = self._dn_to_id(res[0])
else:
@ -1641,9 +1640,9 @@ class BaseLdap(object):
not_deleted_nodes.append(node_dn)
if not_deleted_nodes:
msg = _LW('When deleting entries for %(search_base)s, '
'could not delete nonexistent entries '
'%(entries)s%(dots)s')
msg = ('When deleting entries for %(search_base)s, '
'could not delete nonexistent entries '
'%(entries)s%(dots)s')
LOG.warning(msg,
{'search_base': search_base,
'entries': not_deleted_nodes[:3],

View File

@ -21,7 +21,7 @@ import six
import keystone.conf
from keystone import exception
from keystone.i18n import _, _LW
from keystone.i18n import _
from keystone.identity.backends import base
from keystone.identity.backends.ldap import common as common_ldap
from keystone.identity.backends.ldap import models
@ -229,8 +229,8 @@ class Identity(base.IdentityDriverBase):
try:
super(GroupApi, self.group).remove_member(user_dn, group_dn)
except ldap.NO_SUCH_ATTRIBUTE:
msg = _LW('User %(user)s was not removed from group %(group)s '
'because the relationship was not found')
msg = ('User %(user)s was not removed from group %(group)s '
'because the relationship was not found')
LOG.warning(msg, {'user': user_id, 'group': group['id']})
if hasattr(user, 'tenant_id'):

View File

@ -21,7 +21,7 @@ from keystone.common import dependency
from keystone.common import validation
import keystone.conf
from keystone import exception
from keystone.i18n import _, _LW
from keystone.i18n import _
from keystone.identity import schema
@ -134,8 +134,7 @@ class User(controller.V2Controller):
# old tenant. This could occur if roles aren't found
# or if the project is invalid or if there are no roles
# for the user on that project.
msg = _LW('Unable to remove user %(user)s from '
'%(tenant)s.')
msg = 'Unable to remove user %(user)s from %(tenant)s.'
LOG.warning(msg, {'user': user_id,
'tenant': old_user_ref['tenantId']})
@ -153,7 +152,7 @@ class User(controller.V2Controller):
# that the project is invalid or roles are some how
# incorrect. This shouldn't prevent the return of the
# new ref.
msg = _LW('Unable to add user %(user)s to %(tenant)s.')
msg = 'Unable to add user %(user)s to %(tenant)s.'
LOG.warning(msg, {'user': user_id,
'tenant': user_ref['tenantId']})

View File

@ -33,7 +33,7 @@ from keystone.common import manager
from keystone.common.validation import validators
import keystone.conf
from keystone import exception
from keystone.i18n import _, _LW
from keystone.i18n import _
from keystone.identity.mapping_backends import mapping
from keystone import notifications
from oslo_utils import timeutils
@ -109,7 +109,7 @@ class DomainConfigs(dict):
domain_ref = resource_api.get_domain_by_name(domain_name)
except exception.DomainNotFound:
LOG.warning(
_LW('Invalid domain name (%s) found in config file name'),
('Invalid domain name (%s) found in config file name'),
domain_name)
return
@ -146,7 +146,7 @@ class DomainConfigs(dict):
"""
conf_dir = CONF.identity.domain_config_dir
if not os.path.exists(conf_dir):
LOG.warning(_LW('Unable to locate domain config directory: %s'),
LOG.warning('Unable to locate domain config directory: %s',
conf_dir)
return
@ -736,10 +736,10 @@ class Manager(manager.Manager):
if (not driver.is_domain_aware() and driver == self.driver and
domain_id != CONF.identity.default_domain_id and
domain_id is not None):
LOG.warning(_LW('Found multiple domains being mapped to a '
'driver that does not support that (e.g. '
'LDAP) - Domain ID: %(domain)s, '
'Default Driver: %(driver)s'),
LOG.warning('Found multiple domains being mapped to a '
'driver that does not support that (e.g. '
'LDAP) - Domain ID: %(domain)s, '
'Default Driver: %(driver)s',
{'domain': domain_id,
'driver': (driver == self.driver)})
raise exception.DomainNotFound(domain_id=domain_id)

View File

@ -22,7 +22,7 @@ import keystone.conf
from keystone import exception
from keystone.federation import constants as federation_constants
from keystone.federation import utils
from keystone.i18n import _, _LI, _LW
from keystone.i18n import _
from keystone.middleware import core
from keystone.models import token_model
from keystone.token.providers import common
@ -118,16 +118,16 @@ class AuthContextMiddleware(auth_token.BaseAuthProtocol):
issuer = request.environ.get(CONF.tokenless_auth.issuer_attribute)
if not issuer:
msg = _LI('Cannot find client issuer in env by the '
'issuer attribute - %s.')
msg = ('Cannot find client issuer in env by the '
'issuer attribute - %s.')
LOG.info(msg, CONF.tokenless_auth.issuer_attribute)
return False
if issuer in CONF.tokenless_auth.trusted_issuer:
return True
msg = _LI('The client issuer %(client_issuer)s does not match with '
'the trusted issuer %(trusted_issuer)s')
msg = ('The client issuer %(client_issuer)s does not match with '
'the trusted issuer %(trusted_issuer)s')
LOG.info(
msg, {'client_issuer': issuer,
'trusted_issuer': CONF.tokenless_auth.trusted_issuer})
@ -145,11 +145,11 @@ class AuthContextMiddleware(auth_token.BaseAuthProtocol):
if CONF.admin_token and (token == CONF.admin_token):
context_env['is_admin'] = True
LOG.warning(
_LW("The use of the '[DEFAULT] admin_token' configuration"
"option presents a significant security risk and should "
"not be set. This option is deprecated in favor of using "
"'keystone-manage bootstrap' and will be removed in a "
"future release."))
"The use of the '[DEFAULT] admin_token' configuration"
"option presents a significant security risk and should "
"not be set. This option is deprecated in favor of using "
"'keystone-manage bootstrap' and will be removed in a "
"future release.")
request.environ[core.CONTEXT_ENV] = context_env
if not context_env.get('is_admin', False):
@ -174,9 +174,9 @@ class AuthContextMiddleware(auth_token.BaseAuthProtocol):
request.environ[context.REQUEST_CONTEXT_ENV] = request_context
if authorization.AUTH_CONTEXT_ENV in request.environ:
msg = _LW('Auth context already exists in the request '
'environment; it will be used for authorization '
'instead of creating a new one.')
msg = ('Auth context already exists in the request '
'environment; it will be used for authorization '
'instead of creating a new one.')
LOG.warning(msg)
return

View File

@ -19,7 +19,6 @@ from oslo_serialization import jsonutils
from keystone.common import wsgi
import keystone.conf
from keystone import exception
from keystone.i18n import _LE
CONF = keystone.conf.CONF
@ -64,14 +63,14 @@ class AdminTokenAuthMiddleware(wsgi.Middleware):
# NOTE(notmorgan): This is deprecated and emits a significant error
# message to make sure deployers update their deployments so in the
# future release upgrade the deployment does not break.
LOG.error(_LE('The admin_token_auth middleware functionality has been '
'merged into the main auth middleware '
'(keystone.middleware.auth.AuthContextMiddleware). '
'`admin_token_auth` must be removed from the '
'[pipeline:api_v3], [pipeline:admin_api], and '
'[pipeline:public_api] sections of your paste ini '
'file. The [filter:admin_token_auth] block will also '
'need to be removed from your paste ini file. '))
LOG.error('The admin_token_auth middleware functionality has been '
'merged into the main auth middleware '
'(keystone.middleware.auth.AuthContextMiddleware). '
'`admin_token_auth` must be removed from the '
'[pipeline:api_v3], [pipeline:admin_api], and '
'[pipeline:public_api] sections of your paste ini '
'file. The [filter:admin_token_auth] block will also '
'need to be removed from your paste ini file. ')
class JsonBodyMiddleware(wsgi.Middleware):

View File

@ -30,11 +30,12 @@ from pycadf import eventfactory
from pycadf import reason
from pycadf import resource
from keystone import exception
from keystone.i18n import _, _LE
from keystone.common import dependency
from keystone.common import utils
import keystone.conf
from keystone import exception
from keystone.i18n import _
_CATALOG_HELPER_OBJ = None
@ -314,7 +315,7 @@ def _get_notifier():
_notifier = oslo_messaging.Notifier(transport,
"identity.%s" % host)
except Exception:
LOG.exception(_LE("Failed to construct notifier"))
LOG.exception("Failed to construct notifier")
_notifier = False
return _notifier
@ -419,8 +420,8 @@ def _send_notification(operation, resource_type, resource_id, actor_dict=None,
try:
notifier.info(context, event_type, payload)
except Exception:
LOG.exception(_LE(
'Failed to send %(res_id)s %(event_type)s notification'),
LOG.exception(
'Failed to send %(res_id)s %(event_type)s notification',
{'res_id': resource_id, 'event_type': event_type})
@ -716,8 +717,8 @@ def _send_audit_notification(action, initiator, outcome, target,
except Exception:
# diaper defense: any exception that occurs while emitting the
# notification should not interfere with the API request
LOG.exception(_LE(
'Failed to send %(action)s %(event_type)s notification'),
LOG.exception(
'Failed to send %(action)s %(event_type)s notification',
{'action': action, 'event_type': event_type})

View File

@ -27,7 +27,7 @@ from keystone.common import extension
from keystone.common import manager
import keystone.conf
from keystone import exception
from keystone.i18n import _, _LE
from keystone.i18n import _
from keystone import notifications
@ -96,7 +96,7 @@ def get_oauth_headers(headers):
parameters.update(dict(params))
return parameters
else:
msg = _LE('Cannot retrieve Authorization headers')
msg = 'Cannot retrieve Authorization headers'
LOG.error(msg)
raise exception.OAuthHeadersMissingError()

View File

@ -15,7 +15,6 @@ from oslo_log import log
from keystone.common import driver_hints
from keystone.common import sql
from keystone import exception
from keystone.i18n import _LE, _LW
from keystone.resource.backends import base
@ -133,9 +132,9 @@ class Resource(base.ResourceDriverBase):
children_ids = set()
for ref in children:
if ref['id'] in examined:
msg = _LE('Circular reference or a repeated '
'entry found in projects hierarchy - '
'%(project_id)s.')
msg = ('Circular reference or a repeated '
'entry found in projects hierarchy - '
'%(project_id)s.')
LOG.error(msg, {'project_id': ref['id']})
return
children_ids.add(ref['id'])
@ -152,9 +151,9 @@ class Resource(base.ResourceDriverBase):
examined = set()
while project.get('parent_id') is not None:
if project['id'] in examined:
msg = _LE('Circular reference or a repeated '
'entry found in projects hierarchy - '
'%(project_id)s.')
msg = ('Circular reference or a repeated '
'entry found in projects hierarchy - '
'%(project_id)s.')
LOG.error(msg, {'project_id': project['id']})
return
@ -214,8 +213,8 @@ class Resource(base.ResourceDriverBase):
for project_id in project_ids:
if (project_id not in project_ids_from_bd or
project_id == base.NULL_DOMAIN_ID):
LOG.warning(_LW('Project %s does not exist and was not '
'deleted.') % project_id)
LOG.warning('Project %s does not exist and was not '
'deleted.' % project_id)
query.delete(synchronize_session=False)

View File

@ -24,7 +24,7 @@ from keystone.common import manager
from keystone.common import utils
import keystone.conf
from keystone import exception
from keystone.i18n import _, _LE, _LW
from keystone.i18n import _
from keystone import notifications
from keystone.resource.backends import base
from keystone.token import provider as token_provider
@ -686,9 +686,9 @@ class Manager(manager.Manager):
result can be returned in response to a domain API call.
"""
if not project_ref['is_domain']:
LOG.error(_LE('Asked to convert a non-domain project into a '
'domain - Domain: %(domain_id)s, Project ID: '
'%(id)s, Project Name: %(project_name)s'),
LOG.error('Asked to convert a non-domain project into a '
'domain - Domain: %(domain_id)s, Project ID: '
'%(id)s, Project Name: %(project_name)s',
{'domain_id': project_ref['domain_id'],
'id': project_ref['id'],
'project_name': project_ref['name']})
@ -780,8 +780,8 @@ class Manager(manager.Manager):
"""
def _delete_projects(project, projects, examined):
if project['id'] in examined:
msg = _LE('Circular reference or a repeated entry found '
'projects hierarchy - %(project_id)s.')
msg = ('Circular reference or a repeated entry found '
'projects hierarchy - %(project_id)s.')
LOG.error(msg, {'project_id': project['id']})
return
@ -845,15 +845,15 @@ class Manager(manager.Manager):
}
self.create_domain(CONF.identity.default_domain_id,
default_domain_attrs)
LOG.warning(_LW(
LOG.warning(
'The default domain was created automatically to contain V2 '
'resources. This is deprecated in the M release and will not '
'be supported in the O release. Create the default domain '
'manually or use the keystone-manage bootstrap command.'))
'manually or use the keystone-manage bootstrap command.')
except exception.Conflict:
LOG.debug('The default domain already exists.')
except Exception:
LOG.error(_LE('Failed to create the default domain.'))
LOG.error('Failed to create the default domain.')
raise
def _require_matching_domain_id(self, new_ref, orig_ref):
@ -1028,9 +1028,9 @@ class DomainConfigManager(manager.Manager):
# there is only one option in the answer (and that it's the right
# one) - if not, something has gone wrong and we raise an error
if len(the_list) > 1 or the_list[0]['option'] != req_option:
LOG.error(_LE('Unexpected results in response for domain '
'config - %(count)s responses, first option is '
'%(option)s, expected option %(expected)s'),
LOG.error('Unexpected results in response for domain '
'config - %(count)s responses, first option is '
'%(option)s, expected option %(expected)s',
{'count': len(the_list), 'option': list[0]['option'],
'expected': req_option})
raise exception.UnexpectedError(
@ -1334,14 +1334,14 @@ class DomainConfigManager(manager.Manager):
each_whitelisted['value'] = (
each_whitelisted['value'] % sensitive_dict)
except KeyError:
warning_msg = _LW(
warning_msg = (
'Found what looks like an unmatched config option '
'substitution reference - domain: %(domain)s, group: '
'%(group)s, option: %(option)s, value: %(value)s. Perhaps '
'the config option to which it refers has yet to be '
'added?')
except (ValueError, TypeError):
warning_msg = _LW(
warning_msg = (
'Found what looks like an incorrectly constructed '
'config option substitution reference - domain: '
'%(domain)s, group: %(group)s, option: %(option)s, '

View File

@ -17,7 +17,6 @@ from oslo_log import log
from keystone.common import dependency
from keystone.common import sql
import keystone.conf
from keystone.i18n import _LW
from keystone.server import backends
@ -38,9 +37,9 @@ def configure(version=None, config_files=None,
keystone.conf.setup_logging()
if CONF.insecure_debug:
LOG.warning(_LW(
LOG.warning(
'insecure_debug is enabled so responses may include sensitive '
'information.'))
'information.')
def setup_backends(load_extra_backends_fn=lambda: {},

View File

@ -146,19 +146,13 @@ class CheckForAssertingNoneEquality(BaseASTChecker):
super(CheckForAssertingNoneEquality, self).generic_visit(node)
class CheckForLoggingIssues(BaseASTChecker):
class CheckForTranslationIssues(BaseASTChecker):
DEBUG_CHECK_DESC = 'K005 Using translated string in debug logging'
NONDEBUG_CHECK_DESC = 'K006 Not using translating helper for logging'
EXCESS_HELPER_CHECK_DESC = 'K007 Using hints when _ is necessary'
LOGGING_CHECK_DESC = 'K005 Using translated string in logging'
USING_DEPRECATED_WARN = 'K009 Using the deprecated Logger.warn'
LOG_MODULES = ('logging', 'oslo_log.log')
I18N_MODULES = (
'keystone.i18n._',
'keystone.i18n._LI',
'keystone.i18n._LW',
'keystone.i18n._LE',
'keystone.i18n._LC',
)
TRANS_HELPER_MAP = {
'debug': None,
@ -170,7 +164,7 @@ class CheckForLoggingIssues(BaseASTChecker):
}
def __init__(self, tree, filename):
super(CheckForLoggingIssues, self).__init__(tree, filename)
super(CheckForTranslationIssues, self).__init__(tree, filename)
self.logger_names = []
self.logger_module_names = []
@ -202,13 +196,13 @@ class CheckForLoggingIssues(BaseASTChecker):
def visit_Import(self, node):
for alias in node.names:
self._filter_imports(alias.name, alias)
return super(CheckForLoggingIssues, self).generic_visit(node)
return super(CheckForTranslationIssues, self).generic_visit(node)
def visit_ImportFrom(self, node):
for alias in node.names:
full_name = '%s.%s' % (node.module, alias.name)
self._filter_imports(full_name, alias)
return super(CheckForLoggingIssues, self).generic_visit(node)
return super(CheckForTranslationIssues, self).generic_visit(node)
def _find_name(self, node):
"""Return the fully qualified name or a Name or Attribute."""
@ -249,7 +243,7 @@ class CheckForLoggingIssues(BaseASTChecker):
if (len(node.targets) != 1
or not isinstance(node.targets[0], attr_node_types)):
# say no to: "x, y = ..."
return super(CheckForLoggingIssues, self).generic_visit(node)
return super(CheckForTranslationIssues, self).generic_visit(node)
target_name = self._find_name(node.targets[0])
@ -265,19 +259,19 @@ class CheckForLoggingIssues(BaseASTChecker):
if not isinstance(node.value, ast.Call):
# node.value must be a call to getLogger
self.assignments.pop(target_name, None)
return super(CheckForLoggingIssues, self).generic_visit(node)
return super(CheckForTranslationIssues, self).generic_visit(node)
# is this a call to an i18n function?
if (isinstance(node.value.func, ast.Name)
and node.value.func.id in self.i18n_names):
self.assignments[target_name] = node.value.func.id
return super(CheckForLoggingIssues, self).generic_visit(node)
return super(CheckForTranslationIssues, self).generic_visit(node)
if (not isinstance(node.value.func, ast.Attribute)
or not isinstance(node.value.func.value, attr_node_types)):
# function must be an attribute on an object like
# logging.getLogger
return super(CheckForLoggingIssues, self).generic_visit(node)
return super(CheckForTranslationIssues, self).generic_visit(node)
object_name = self._find_name(node.value.func.value)
func_name = node.value.func.attr
@ -286,7 +280,7 @@ class CheckForLoggingIssues(BaseASTChecker):
and func_name == 'getLogger'):
self.logger_names.append(target_name)
return super(CheckForLoggingIssues, self).generic_visit(node)
return super(CheckForTranslationIssues, self).generic_visit(node)
def visit_Call(self, node):
"""Look for the 'LOG.*' calls."""
@ -299,7 +293,8 @@ class CheckForLoggingIssues(BaseASTChecker):
obj_name = self._find_name(node.func.value)
method_name = node.func.attr
else: # could be Subscript, Call or many more
return super(CheckForLoggingIssues, self).generic_visit(node)
return (super(CheckForTranslationIssues, self)
.generic_visit(node))
# if dealing with a logger the method can't be "warn"
if obj_name in self.logger_names and method_name == 'warn':
@ -309,81 +304,32 @@ class CheckForLoggingIssues(BaseASTChecker):
# must be a logger instance and one of the support logging methods
if (obj_name not in self.logger_names
or method_name not in self.TRANS_HELPER_MAP):
return super(CheckForLoggingIssues, self).generic_visit(node)
return (super(CheckForTranslationIssues, self)
.generic_visit(node))
# the call must have arguments
if not node.args:
return super(CheckForLoggingIssues, self).generic_visit(node)
return (super(CheckForTranslationIssues, self)
.generic_visit(node))
if method_name == 'debug':
self._process_debug(node)
elif method_name in self.TRANS_HELPER_MAP:
self._process_non_debug(node, method_name)
self._process_log_messages(node)
return super(CheckForLoggingIssues, self).generic_visit(node)
return super(CheckForTranslationIssues, self).generic_visit(node)
def _process_debug(self, node):
def _process_log_messages(self, node):
msg = node.args[0] # first arg to a logging method is the msg
# if first arg is a call to a i18n name
if (isinstance(msg, ast.Call)
and isinstance(msg.func, ast.Name)
and msg.func.id in self.i18n_names):
self.add_error(msg, message=self.DEBUG_CHECK_DESC)
self.add_error(msg, message=self.LOGGING_CHECK_DESC)
# if the first arg is a reference to a i18n call
elif (isinstance(msg, ast.Name)
and msg.id in self.assignments
and not self._is_raised_later(node, msg.id)):
self.add_error(msg, message=self.DEBUG_CHECK_DESC)
def _process_non_debug(self, node, method_name):
msg = node.args[0] # first arg to a logging method is the msg
# if first arg is a call to a i18n name
if isinstance(msg, ast.Call):
try:
func_name = msg.func.id
except AttributeError:
# in the case of logging only an exception, the msg function
# will not have an id associated with it, for instance:
# LOG.warning(six.text_type(e))
return
# the function name is the correct translation helper
# for the logging method
if func_name == self.TRANS_HELPER_MAP[method_name]:
return
# the function name is an alias for the correct translation
# helper for the logging method
if (self.i18n_names[func_name] ==
self.TRANS_HELPER_MAP[method_name]):
return
self.add_error(msg, message=self.NONDEBUG_CHECK_DESC)
# if the first arg is not a reference to the correct i18n hint
elif isinstance(msg, ast.Name):
# FIXME(dstanek): to make sure more robust we should be checking
# all names passed into a logging method. we can't right now
# because:
# 1. We have code like this that we'll fix when dealing with the %:
# msg = _('....') % {}
# LOG.warning(msg)
# 2. We also do LOG.exception(e) in several places. I'm not sure
# exactly what we should be doing about that.
if msg.id not in self.assignments:
return
helper_method_name = self.TRANS_HELPER_MAP[method_name]
if (self.assignments[msg.id] != helper_method_name
and not self._is_raised_later(node, msg.id)):
self.add_error(msg, message=self.NONDEBUG_CHECK_DESC)
elif (self.assignments[msg.id] == helper_method_name
and self._is_raised_later(node, msg.id)):
self.add_error(msg, message=self.EXCESS_HELPER_CHECK_DESC)
self.add_error(msg, message=self.LOGGING_CHECK_DESC)
def _is_raised_later(self, node, name):
@ -454,5 +400,5 @@ def factory(register):
register(CheckForMutableDefaultArgs)
register(block_comments_begin_with_a_space)
register(CheckForAssertingNoneEquality)
register(CheckForLoggingIssues)
register(CheckForTranslationIssues)
register(dict_constructor_with_sequence_copy)
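
For context, a hedged sketch, not part of the patch, of the calls the renamed check is meant to flag once only K005 and K009 remain, assuming the standard keystone.i18n and oslo_log imports:

from keystone.i18n import _
from oslo_log import log

LOG = log.getLogger(__name__)

def report_failure(reason):
    # K005: passing a translated string to any logging method is flagged.
    LOG.error(_('operation failed: %s'), reason)

    # K009: Logger.warn is deprecated in favor of Logger.warning.
    LOG.warn('operation failed: %s', reason)

    # Acceptable: plain strings for log messages, translation reserved
    # for the message handed back to the caller.
    LOG.warning('operation failed: %s', reason)
    raise ValueError(_('operation failed: %s') % reason)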

View File

@ -137,18 +137,20 @@ class HackingCode(fixtures.Fixture):
]}
class HackingLogging(fixtures.Fixture):
class HackingTranslations(fixtures.Fixture):
"""Fixtures for checking translation rules.
1. Exception messages should be translated
2. Logging messages should not be translated
3. If a message is used for both an exception and logging it
should be translated
"""
shared_imports = """
import logging
import logging as stlib_logging
from keystone.i18n import _
from keystone.i18n import _ as oslo_i18n
from keystone.i18n import _LC
from keystone.i18n import _LE
from keystone.i18n import _LE as error_hint
from keystone.i18n import _LI
from keystone.i18n import _LW
from oslo_log import log
from oslo_log import log as oslo_logging
"""
@ -162,11 +164,10 @@ class HackingLogging(fixtures.Fixture):
class C:
def __init__(self):
LOG.warning(oslo_i18n('text', {}))
LOG.warning(_LW('text', {}))
""",
'expected_errors': [
(3, 9, 'K006'),
(6, 20, 'K006'),
(3, 9, 'K005'),
(6, 20, 'K005'),
],
},
{
@ -179,12 +180,9 @@ class HackingLogging(fixtures.Fixture):
self.L.warning(
_('text'), {}
)
self.L.warning(
_LW('text'), {}
)
""",
'expected_errors': [
(7, 12, 'K006'),
(7, 12, 'K005'),
],
},
{
@ -192,10 +190,9 @@ class HackingLogging(fixtures.Fixture):
# oslo logging and specifying a logger
L = log.getLogger(__name__)
L.error(oslo_i18n('text'))
L.error(error_hint('text'))
""",
'expected_errors': [
(3, 8, 'K006'),
(3, 8, 'K005'),
],
},
{
@ -205,10 +202,9 @@ class HackingLogging(fixtures.Fixture):
def __init__(self):
self.LOG = oslo_logging.getLogger()
self.LOG.critical(_('text'))
self.LOG.critical(_LC('text'))
""",
'expected_errors': [
(5, 26, 'K006'),
(5, 26, 'K005'),
],
},
{
@ -217,26 +213,9 @@ class HackingLogging(fixtures.Fixture):
# translation on a separate line
msg = _('text')
LOG.exception(msg)
msg = _LE('text')
LOG.exception(msg)
""",
'expected_errors': [
(4, 14, 'K006'),
],
},
{
'code': """
LOG = logging.getLogger()
# ensure the correct helper is being used
LOG.warning(_LI('this should cause an error'))
# debug should not allow any helpers either
LOG.debug(_LI('this should cause an error'))
""",
'expected_errors': [
(4, 12, 'K006'),
(7, 10, 'K005'),
(4, 14, 'K005'),
],
},
{
@ -269,7 +248,7 @@ class HackingLogging(fixtures.Fixture):
raise Exception('some other message')
""",
'expected_errors': [
(4, 16, 'K006'),
(4, 16, 'K005'),
],
},
{
@ -295,18 +274,7 @@ class HackingLogging(fixtures.Fixture):
LOG.warning(msg)
""",
'expected_errors': [
(6, 12, 'K006'),
],
},
{
'code': """
LOG = log.getLogger(__name__)
msg = _LW('text')
LOG.warning(msg)
raise Exception(msg)
""",
'expected_errors': [
(3, 12, 'K007'),
(6, 12, 'K005'),
],
},
{
@ -322,22 +290,13 @@ class HackingLogging(fixtures.Fixture):
{
'code': """
LOG = log.getLogger(__name__)
msg = _LW('hello %s') % 'world'
msg = _('hello %s') % 'world'
LOG.warning(msg)
raise Exception(msg)
""",
'expected_errors': [
(3, 12, 'K007'),
(3, 12, 'K005'),
],
},
{
'code': """
LOG = log.getLogger(__name__)
msg = _LW('hello %s') % 'world'
LOG.warning(msg)
""",
'expected_errors': [],
},
{
'code': """
# this should not be an error
@ -363,92 +322,4 @@ class HackingLogging(fixtures.Fixture):
""",
'expected_errors': [],
},
{
'code': """
# this should error since we are using _LW instead of _
LOG = log.getLogger(__name__)
try:
pass
except AssertionError as e:
msg = _LW('some message')
LOG.warning(msg)
raise exception.Unauthorized(message=msg)
""",
'expected_errors': [
(7, 16, 'K007'),
],
},
]
assert_not_using_deprecated_warn = {
'code': """
# Logger.warn has been deprecated in Python3 in favor of
# Logger.warning
LOG = log.getLogger(__name__)
LOG.warn(_LW('text'))
""",
'expected_errors': [
(4, 9, 'K009'),
],
}
assert_no_translations_for_debug_logging = {
'code': """
# stdlib logging
L0 = logging.getLogger()
L0.debug(_('text'))
class C:
def __init__(self):
L0.debug(oslo_i18n('text', {}))
# stdlib logging w/ alias and specifying a logger
class C:
def __init__(self):
self.L1 = logging.getLogger(__name__)
def m(self):
self.L1.debug(
_('text'), {}
)
# oslo logging and specifying a logger
L2 = logging.getLogger(__name__)
L2.debug(oslo_i18n('text'))
# oslo logging w/ alias
class C:
def __init__(self):
self.L3 = oslo_logging.getLogger()
self.L3.debug(_('text'))
# translation on a separate line
msg = _('text')
L2.debug(msg)
# this should not fail
if True:
msg = _('message %s') % X
L2.error(msg)
raise TypeError(msg)
if True:
msg = 'message'
L2.debug(msg)
# this should not fail
if True:
if True:
msg = _('message')
else:
msg = _('message')
L2.debug(msg)
raise Exception(msg)
""",
'expected_errors': [
(3, 9, 'K005'),
(6, 17, 'K005'),
(14, 12, 'K005'),
(19, 9, 'K005'),
(25, 22, 'K005'),
(29, 9, 'K005'),
]
}
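
The fixtures above exercise the rules in the HackingTranslations docstring: exception messages are translated, log messages are not, and a message used for both keeps its translation. A minimal compliant sketch, assuming a dict-like backend and using a builtin exception purely for illustration:

from keystone.i18n import _
from oslo_log import log

LOG = log.getLogger(__name__)

def lookup(resource_id, backend):
    try:
        return backend[resource_id]
    except KeyError:
        # Log messages stay plain and untranslated (rule 2).
        LOG.warning('resource %s not found in backend', resource_id)
        # The message raised to the caller is translated (rules 1 and 3),
        # even though the same event was also logged.
        raise LookupError(_('resource %s not found') % resource_id)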

View File

@ -23,7 +23,6 @@ import six
from keystone.common import wsgi
import keystone.conf
from keystone import exception
from keystone.i18n import _LE
from keystone.tests import unit
@ -293,17 +292,17 @@ class TestSecurityErrorTranslation(unit.BaseTestCase):
setattr, exception, '_FATAL_EXCEPTION_FORMAT_ERRORS', True)
class CustomSecurityError(exception.SecurityError):
message_format = _LE('We had a failure in the %(place)r')
message_format = 'We had a failure in the %(place)r'
class CustomError(exception.Error):
message_format = _LE('We had a failure in the %(place)r')
message_format = 'We had a failure in the %(place)r'
def test_nested_translation_of_SecurityErrors(self):
e = self.CustomSecurityError(place='code')
_LE('Admiral found this in the log: %s') % e
('Admiral found this in the log: %s') % e
self.assertNotIn('programmer error', self.warning_log.output)
def test_that_regular_Errors_can_be_deep_copied(self):
e = self.CustomError(place='code')
_LE('Admiral found this in the log: %s') % e
('Admiral found this in the log: %s') % e
self.assertNotIn('programmer error', self.warning_log.output)

View File

@ -91,16 +91,15 @@ class TestAssertingNoneEquality(BaseStyleCheck):
self.assert_has_errors(code, expected_errors=errors)
class BaseLoggingCheck(BaseStyleCheck):
class TestTranslationChecks(BaseStyleCheck):
def get_checker(self):
return checks.CheckForLoggingIssues
return checks.CheckForTranslationIssues
def get_fixture(self):
return hacking_fixtures.HackingLogging()
return hacking_fixtures.HackingTranslations()
def assert_has_errors(self, code, expected_errors=None):
# pull out the parts of the error that we'll match against
actual_errors = (e[:3] for e in self.run_check(code))
# adjust line numbers to make the fixture data more readable.
@ -109,27 +108,6 @@ class BaseLoggingCheck(BaseStyleCheck):
for e in actual_errors]
self.assertEqual(expected_errors or [], actual_errors)
class TestCheckForDebugLoggingIssues(BaseLoggingCheck):
def test_for_translations(self):
fixture = self.code_ex.assert_no_translations_for_debug_logging
code = self.code_ex.shared_imports + fixture['code']
errors = fixture['expected_errors']
self.assert_has_errors(code, expected_errors=errors)
class TestLoggingWithWarn(BaseLoggingCheck):
def test(self):
data = self.code_ex.assert_not_using_deprecated_warn
code = self.code_ex.shared_imports + data['code']
errors = data['expected_errors']
self.assert_has_errors(code, expected_errors=errors)
class TestCheckForNonDebugLoggingIssues(BaseLoggingCheck):
def test_for_translations(self):
for example in self.code_ex.examples:
code = self.code_ex.shared_imports + example['code']

View File

@ -22,7 +22,6 @@ from oslo_utils import timeutils
from keystone.common import sql
import keystone.conf
from keystone import exception
from keystone.i18n import _LI
from keystone import token
from keystone.token.providers import common
@ -290,4 +289,4 @@ class Token(token.persistence.TokenDriverBase):
LOG.debug('Removed %d total expired tokens', total_removed)
session.flush()
LOG.info(_LI('Total expired tokens removed: %d'), total_removed)
LOG.info('Total expired tokens removed: %d', total_removed)

View File

@ -25,7 +25,6 @@ from keystone.common import dependency
from keystone.common import manager
import keystone.conf
from keystone import exception
from keystone.i18n import _LW
CONF = keystone.conf.CONF
@ -192,9 +191,9 @@ class Manager(object):
raise AttributeError()
f = getattr(self.token_provider_api._persistence, item)
LOG.warning(_LW('`token_api.%s` is deprecated as of Juno in favor of '
'utilizing methods on `token_provider_api` and may be '
'removed in Kilo.'), item)
LOG.warning('`token_api.%s` is deprecated as of Juno in favor of '
'utilizing methods on `token_provider_api` and may be '
'removed in Kilo.', item)
setattr(self, item, f)
return f

View File

@ -26,7 +26,7 @@ from keystone.common import dependency
from keystone.common import manager
import keystone.conf
from keystone import exception
from keystone.i18n import _, _LE
from keystone.i18n import _
from keystone.models import token_model
from keystone import notifications
from keystone.token import persistence
@ -201,8 +201,8 @@ class Manager(manager.Manager):
expiry += datetime.timedelta(seconds=window_seconds)
except Exception:
LOG.exception(_LE('Unexpected error or malformed token '
'determining token expiry: %s'), token)
LOG.exception('Unexpected error or malformed token '
'determining token expiry: %s', token)
raise exception.TokenNotFound(_('Failed to validate token'))
if current_time < expiry:

View File

@ -27,7 +27,7 @@ from keystone.common import fernet_utils as utils
from keystone.common import utils as ks_utils
import keystone.conf
from keystone import exception
from keystone.i18n import _, _LI
from keystone.i18n import _
CONF = keystone.conf.CONF
@ -163,8 +163,8 @@ class TokenFormatter(object):
# anywhere, we can't say it isn't being stored somewhere else with
# those kind of backend constraints.
if len(token) > 255:
LOG.info(_LI('Fernet token created with length of %d '
'characters, which exceeds 255 characters'),
LOG.info('Fernet token created with length of %d '
'characters, which exceeds 255 characters',
len(token))
return token

View File

@ -27,7 +27,6 @@ import keystone.conf
from keystone.credential import routers as credential_routers
from keystone.endpoint_policy import routers as endpoint_policy_routers
from keystone.federation import routers as federation_routers
from keystone.i18n import _LW
from keystone.identity import routers as identity_routers
from keystone.oauth1 import routers as oauth1_routers
from keystone.policy import routers as policy_routers
@ -74,8 +73,7 @@ def warn_local_conf(f):
@functools.wraps(f)
def wrapper(*args, **local_conf):
if local_conf:
LOG.warning(_LW('\'local conf\' from PasteDeploy INI is being '
'ignored.'))
LOG.warning("'local conf' from PasteDeploy INI is being ignored.")
return f(*args, **local_conf)
return wrapper