Convert files to use _LE and friends
LOG.warn etc. should be translated separately and thus messages need to be marked with _LW for LOG.warn, _LI for LOG.info and _LE for LOG.error and LOG.exception. Mark all LOG invocations with proper translation marker. Use ',' instead of '%' when adding variables to log messages to allow lazy evaluation. Add new hacking checks for these. Change-Id: I31d3ee50f30c63d7d647b1c2b1eae50bf96f0c74
This commit is contained in:
parent
6c84cf095c
commit
15641fba88
33
HACKING.rst
33
HACKING.rst
@ -12,9 +12,40 @@ Manila Specific Commandments
|
||||
- [M319] Validate that debug level logs are not translated.
|
||||
- [M323] Ensure that the _() function is explicitly imported to ensure proper translations.
|
||||
- [M325] str() cannot be used on an exception. Remove use or use six.text_type()
|
||||
- [M326] Translated messages cannot be concatenated. String should be included in translated message.
|
||||
- [M326] Translated messages cannot be concatenated. String should be
|
||||
included in translated message.
|
||||
- [M327] LOG.critical messages require translations _LC()!
|
||||
- [M328] LOG.error and LOG.exception messages require translations _LE()!
|
||||
- [M329] LOG.info messages require translations _LI()!
|
||||
- [M330] LOG.warning messages require translations _LW()!
|
||||
- [M331] Log messages require translations!
|
||||
|
||||
|
||||
LOG Translations
|
||||
----------------
|
||||
|
||||
LOG.debug messages will not get translated. Use ``_LI()`` for
|
||||
``LOG.info``, ``_LW`` for ``LOG.warning``, ``_LE`` for ``LOG.error``
|
||||
and ``LOG.exception``, and ``_LC()`` for ``LOG.critical``.
|
||||
|
||||
``_()`` is preferred for any user facing message, even if it is also
|
||||
going to a log file. This ensures that the translated version of the
|
||||
message will be available to the user.
|
||||
|
||||
The log marker functions (``_LI()``, ``_LW()``, ``_LE()``, and ``_LC()``)
|
||||
must only be used when the message is only sent directly to the log.
|
||||
Anytime that the message will be passed outside of the current context
|
||||
(for example as part of an exception) the ``_()`` marker function
|
||||
must be used.
|
||||
|
||||
A common pattern is to define a single message object and use it more
|
||||
than once, for the log call and the exception. In that case, ``_()``
|
||||
must be used because the message is going to appear in an exception that
|
||||
may be presented to the user.
|
||||
|
||||
For more details about translations, see
|
||||
http://docs.openstack.org/developer/oslo.i18n/guidelines.html
|
||||
|
||||
Creating Unit Tests
|
||||
-------------------
|
||||
For every new feature, unit tests should be created that both test and
|
||||
|
@ -43,7 +43,7 @@ from manila import i18n
|
||||
i18n.enable_lazy()
|
||||
|
||||
from manila.common import config # Need to register global_opts # noqa
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.openstack.common import log as logging
|
||||
from manila import service
|
||||
from manila import utils
|
||||
@ -63,12 +63,12 @@ if __name__ == '__main__':
|
||||
try:
|
||||
servers.append(service.WSGIService('osapi_share'))
|
||||
except (Exception, SystemExit):
|
||||
LOG.exception(_('Failed to load osapi_share'))
|
||||
LOG.exception(_LE('Failed to load osapi_share'))
|
||||
|
||||
for binary in ['manila-share', 'manila-scheduler', 'manila-api']:
|
||||
try:
|
||||
servers.append(service.Service.create(binary=binary))
|
||||
except (Exception, SystemExit):
|
||||
LOG.exception(_('Failed to load %s'), binary)
|
||||
LOG.exception(_LE('Failed to load %s'), binary)
|
||||
service.serve(*servers)
|
||||
service.wait()
|
||||
|
@ -15,7 +15,7 @@
|
||||
# under the License.
|
||||
|
||||
from manila.api.middleware import auth
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LW
|
||||
from manila.openstack.common import log as logging
|
||||
|
||||
|
||||
@ -24,7 +24,7 @@ LOG = logging.getLogger(__name__)
|
||||
|
||||
class ManilaKeystoneContext(auth.ManilaKeystoneContext):
|
||||
def __init__(self, application):
|
||||
LOG.warn(_('manila.api.auth:ManilaKeystoneContext is deprecated. '
|
||||
LOG.warn(_LW('manila.api.auth:ManilaKeystoneContext is deprecated. '
|
||||
'Please use '
|
||||
'manila.api.middleware.auth:ManilaKeystoneContext '
|
||||
'instead.'))
|
||||
@ -32,6 +32,6 @@ class ManilaKeystoneContext(auth.ManilaKeystoneContext):
|
||||
|
||||
|
||||
def pipeline_factory(loader, global_conf, **local_conf):
|
||||
LOG.warn(_('manila.api.auth:pipeline_factory is deprecated. Please use '
|
||||
LOG.warn(_LW('manila.api.auth:pipeline_factory is deprecated. Please use '
|
||||
'manila.api.middleware.auth:pipeline_factory instead.'))
|
||||
auth.pipeline_factory(loader, global_conf, **local_conf)
|
||||
|
@ -26,7 +26,9 @@ import manila.api.openstack
|
||||
from manila.api.openstack import wsgi
|
||||
from manila.api import xmlutil
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.i18n import _LI
|
||||
from manila.i18n import _LW
|
||||
from manila.openstack.common import log as logging
|
||||
import manila.policy
|
||||
|
||||
@ -180,7 +182,7 @@ class ExtensionManager(object):
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
LOG.info(_('Initializing extension manager.'))
|
||||
LOG.info(_LI('Initializing extension manager.'))
|
||||
|
||||
self.cls_list = CONF.osapi_share_extension
|
||||
|
||||
@ -196,7 +198,7 @@ class ExtensionManager(object):
|
||||
return
|
||||
|
||||
alias = ext.alias
|
||||
LOG.info(_('Loaded extension: %s'), alias)
|
||||
LOG.info(_LI('Loaded extension: %s'), alias)
|
||||
|
||||
if alias in self.extensions:
|
||||
raise exception.Error("Found duplicate extension: %s" % alias)
|
||||
@ -241,7 +243,7 @@ class ExtensionManager(object):
|
||||
LOG.debug('Ext namespace: %s', extension.namespace)
|
||||
LOG.debug('Ext updated: %s', extension.updated)
|
||||
except AttributeError as ex:
|
||||
LOG.exception(_("Exception loading extension: %s"),
|
||||
LOG.exception(_LE("Exception loading extension: %s"),
|
||||
six.text_type(ex))
|
||||
return False
|
||||
|
||||
@ -276,9 +278,10 @@ class ExtensionManager(object):
|
||||
'standard_extensions')
|
||||
new_contrib_path = 'manila.api.contrib.standard_extensions'
|
||||
if old_contrib_path in extensions:
|
||||
LOG.warn(_('osapi_share_extension is set to deprecated path: %s'),
|
||||
LOG.warn(_LW('osapi_share_extension is set to deprecated path: '
|
||||
'%s'),
|
||||
old_contrib_path)
|
||||
LOG.warn(_('Please set your flag or manila.conf settings for '
|
||||
LOG.warn(_LW('Please set your flag or manila.conf settings for '
|
||||
'osapi_share_extension to: %s'), new_contrib_path)
|
||||
extensions = [e.replace(old_contrib_path, new_contrib_path)
|
||||
for e in extensions]
|
||||
@ -287,7 +290,7 @@ class ExtensionManager(object):
|
||||
try:
|
||||
self.load_extension(ext_factory)
|
||||
except Exception as exc:
|
||||
LOG.warn(_('Failed to load extension %(ext_factory)s: '
|
||||
LOG.warn(_LW('Failed to load extension %(ext_factory)s: '
|
||||
'%(exc)s'),
|
||||
{"ext_factory": ext_factory, "exc": exc})
|
||||
|
||||
@ -356,9 +359,9 @@ def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
|
||||
try:
|
||||
ext_mgr.load_extension(classpath)
|
||||
except Exception as exc:
|
||||
logger.warn(_('Failed to load extension %(classpath)s: '
|
||||
'%(exc)s')
|
||||
% {"classpath": classpath, "exc": exc})
|
||||
logger.warn(_LW('Failed to load extension %(classpath)s: '
|
||||
'%(exc)s'),
|
||||
{"classpath": classpath, "exc": exc})
|
||||
|
||||
# Now, let's consider any subdirectories we may have...
|
||||
subdirs = []
|
||||
@ -381,8 +384,8 @@ def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
|
||||
try:
|
||||
ext(ext_mgr)
|
||||
except Exception as exc:
|
||||
logger.warn(_('Failed to load extension %(ext_name)s: '
|
||||
'%(exc)s') %
|
||||
logger.warn(_LW('Failed to load extension %(ext_name)s: '
|
||||
'%(exc)s'),
|
||||
{"ext_name": ext_name, "exc": exc})
|
||||
|
||||
# Update the list of directories we'll explore...
|
||||
|
@ -19,7 +19,8 @@ import webob.dec
|
||||
import webob.exc
|
||||
|
||||
from manila.api.openstack import wsgi
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.i18n import _LI
|
||||
from manila.openstack.common import log as logging
|
||||
from manila import utils
|
||||
from manila import wsgi as base_wsgi
|
||||
@ -42,7 +43,7 @@ class FaultWrapper(base_wsgi.Middleware):
|
||||
status, webob.exc.HTTPInternalServerError)()
|
||||
|
||||
def _error(self, inner, req):
|
||||
LOG.exception(_("Caught error: %s"), six.text_type(inner))
|
||||
LOG.exception(_LE("Caught error: %s"), six.text_type(inner))
|
||||
|
||||
safe = getattr(inner, 'safe', False)
|
||||
headers = getattr(inner, 'headers', None)
|
||||
@ -51,7 +52,7 @@ class FaultWrapper(base_wsgi.Middleware):
|
||||
status = 500
|
||||
|
||||
msg_dict = dict(url=req.url, status=status)
|
||||
LOG.info(_("%(url)s returned with HTTP %(status)d"), msg_dict)
|
||||
LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict)
|
||||
outer = self.status_to_type(status)
|
||||
if headers:
|
||||
outer.headers = headers
|
||||
|
@ -23,6 +23,7 @@ import routes
|
||||
from manila.api.openstack import wsgi
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LW
|
||||
from manila import utils
|
||||
from manila import wsgi as base_wsgi
|
||||
|
||||
@ -107,7 +108,7 @@ class APIRouter(base_wsgi.Router):
|
||||
controller = extension.controller
|
||||
|
||||
if collection not in self.resources:
|
||||
LOG.warning(_('Extension %(ext_name)s: Cannot extend '
|
||||
LOG.warning(_LW('Extension %(ext_name)s: Cannot extend '
|
||||
'resource %(collection)s: No such resource'),
|
||||
{'ext_name': ext_name, 'collection': collection})
|
||||
continue
|
||||
@ -126,7 +127,7 @@ class APIRouter(base_wsgi.Router):
|
||||
|
||||
class FaultWrapper(base_wsgi.Middleware):
|
||||
def __init__(self, application):
|
||||
LOG.warn(_('manila.api.openstack:FaultWrapper is deprecated. Please '
|
||||
LOG.warn(_LW('manila.api.openstack:FaultWrapper is deprecated. Please '
|
||||
'use manila.api.middleware.fault:FaultWrapper instead.'))
|
||||
# Avoid circular imports from here.
|
||||
from manila.api.middleware import fault
|
||||
|
@ -16,13 +16,13 @@
|
||||
|
||||
from manila.api import urlmap
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LW
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def urlmap_factory(loader, global_conf, **local_conf):
|
||||
LOG.warn(_('manila.api.openstack.urlmap:urlmap_factory is deprecated. '
|
||||
LOG.warn(_LW('manila.api.openstack.urlmap:urlmap_factory is deprecated. '
|
||||
'Please use manila.api.urlmap:urlmap_factory instead.'))
|
||||
urlmap.urlmap_factory(loader, global_conf, **local_conf)
|
||||
|
@ -15,7 +15,7 @@
|
||||
# under the License.
|
||||
|
||||
from manila.api.v1.router import APIRouter as v1_router
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LW
|
||||
from manila.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
@ -23,6 +23,6 @@ LOG = logging.getLogger(__name__)
|
||||
|
||||
class APIRouter(v1_router):
|
||||
def __init__(self, ext_mgr=None):
|
||||
LOG.warn(_('manila.api.openstack.volume:APIRouter is deprecated. '
|
||||
LOG.warn(_LW('manila.api.openstack.volume:APIRouter is deprecated. '
|
||||
'Please use manila.api.v1.router:APIRouter instead.'))
|
||||
super(APIRouter, self).__init__(ext_mgr)
|
||||
|
@ -22,6 +22,8 @@ import webob
|
||||
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.i18n import _LI
|
||||
from manila.openstack.common import log as logging
|
||||
from manila import utils
|
||||
from manila import wsgi
|
||||
@ -578,14 +580,14 @@ class ResourceExceptionHandler(object):
|
||||
code=ex_value.code, explanation=six.text_type(ex_value)))
|
||||
elif isinstance(ex_value, TypeError):
|
||||
exc_info = (ex_type, ex_value, ex_traceback)
|
||||
LOG.error(_('Exception handling resource: %s'),
|
||||
LOG.error(_LE('Exception handling resource: %s'),
|
||||
ex_value, exc_info=exc_info)
|
||||
raise Fault(webob.exc.HTTPBadRequest())
|
||||
elif isinstance(ex_value, Fault):
|
||||
LOG.info(_("Fault thrown: %s"), six.text_type(ex_value))
|
||||
LOG.info(_LI("Fault thrown: %s"), six.text_type(ex_value))
|
||||
raise ex_value
|
||||
elif isinstance(ex_value, webob.exc.HTTPException):
|
||||
LOG.info(_("HTTP exception thrown: %s"), six.text_type(ex_value))
|
||||
LOG.info(_LI("HTTP exception thrown: %s"), six.text_type(ex_value))
|
||||
raise Fault(ex_value)
|
||||
|
||||
# We didn't handle the exception
|
||||
|
@ -15,7 +15,7 @@
|
||||
# under the License.
|
||||
|
||||
from manila.api.middleware import sizelimit
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LW
|
||||
from manila.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
@ -23,7 +23,7 @@ LOG = logging.getLogger(__name__)
|
||||
|
||||
class RequestBodySizeLimiter(sizelimit.RequestBodySizeLimiter):
|
||||
def __init__(self, *args, **kwargs):
|
||||
LOG.warn(_('manila.api.sizelimit:RequestBodySizeLimiter is '
|
||||
'deprecated. Please use manila.api.middleware.sizelimit:'
|
||||
LOG.warn(_LW('manila.api.sizelimit:RequestBodySizeLimiter is '
|
||||
'deprecated. Please use manila.api.middleware.sizelimit: '
|
||||
'RequestBodySizeLimiter instead'))
|
||||
super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)
|
||||
|
@ -27,6 +27,7 @@ from manila.common import constants
|
||||
from manila import db
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LI
|
||||
from manila.openstack.common import log as logging
|
||||
from manila import policy
|
||||
|
||||
@ -81,7 +82,7 @@ class SecurityServiceController(wsgi.Controller):
|
||||
"""Delete a security service."""
|
||||
context = req.environ['manila.context']
|
||||
|
||||
LOG.info(_("Delete security service with id: %s"),
|
||||
LOG.info(_LI("Delete security service with id: %s"),
|
||||
id, context=context)
|
||||
|
||||
try:
|
||||
|
@ -28,6 +28,8 @@ from manila.api import xmlutil
|
||||
from manila.db import api as db_api
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.i18n import _LW
|
||||
from manila.openstack.common import log as logging
|
||||
from manila import policy
|
||||
from manila import quota
|
||||
@ -117,8 +119,8 @@ class ShareNetworkController(wsgi.Controller):
|
||||
context, project_id=share_network['project_id'],
|
||||
share_networks=-1)
|
||||
except Exception:
|
||||
msg = _("Failed to update usages deleting share-network.")
|
||||
LOG.exception(msg)
|
||||
LOG.exception(_LE("Failed to update usages deleting "
|
||||
"share-network."))
|
||||
else:
|
||||
QUOTAS.commit(context, reservations,
|
||||
project_id=share_network['project_id'])
|
||||
@ -264,10 +266,10 @@ class ShareNetworkController(wsgi.Controller):
|
||||
return (usages[name]['reserved'] + usages[name]['in_use'])
|
||||
|
||||
if 'share_networks' in overs:
|
||||
msg = _("Quota exceeded for %(s_pid)s, tried to create "
|
||||
LOG.warn(_LW("Quota exceeded for %(s_pid)s, tried to create "
|
||||
"share-network (%(d_consumed)d of %(d_quota)d "
|
||||
"already consumed)")
|
||||
LOG.warn(msg, {'s_pid': context.project_id,
|
||||
"already consumed)"), {
|
||||
's_pid': context.project_id,
|
||||
'd_consumed': _consumed('share_networks'),
|
||||
'd_quota': quotas['share_networks']})
|
||||
raise exception.ShareNetworksLimitExceeded(
|
||||
|
@ -24,7 +24,7 @@ from manila.api.openstack import wsgi
|
||||
from manila.api.views import share_snapshots as snapshot_views
|
||||
from manila.api import xmlutil
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LI
|
||||
from manila.openstack.common import log as logging
|
||||
from manila import share
|
||||
|
||||
@ -82,7 +82,7 @@ class ShareSnapshotsController(wsgi.Controller):
|
||||
"""Delete a snapshot."""
|
||||
context = req.environ['manila.context']
|
||||
|
||||
LOG.info(_("Delete snapshot with id: %s"), id, context=context)
|
||||
LOG.info(_LI("Delete snapshot with id: %s"), id, context=context)
|
||||
|
||||
try:
|
||||
snapshot = self.share_api.get_snapshot(context, id)
|
||||
@ -181,8 +181,8 @@ class ShareSnapshotsController(wsgi.Controller):
|
||||
|
||||
share_id = snapshot['share_id']
|
||||
share = self.share_api.get(context, share_id)
|
||||
msg = _("Create snapshot from share %s")
|
||||
LOG.info(msg, share_id, context=context)
|
||||
LOG.info(_LI("Create snapshot from share %s"),
|
||||
share_id, context=context)
|
||||
|
||||
# NOTE(rushiagr): v2 API allows name instead of display_name
|
||||
if 'name' in snapshot:
|
||||
|
@ -27,6 +27,7 @@ from manila.api.views import shares as share_views
|
||||
from manila.api import xmlutil
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LI
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.openstack.common import uuidutils
|
||||
from manila import share
|
||||
@ -84,7 +85,7 @@ class ShareController(wsgi.Controller):
|
||||
"""Delete a share."""
|
||||
context = req.environ['manila.context']
|
||||
|
||||
LOG.info(_("Delete share with id: %s"), id, context=context)
|
||||
LOG.info(_LI("Delete share with id: %s"), id, context=context)
|
||||
|
||||
try:
|
||||
share = self.share_api.get(context, id)
|
||||
@ -212,7 +213,7 @@ class ShareController(wsgi.Controller):
|
||||
size = share['size']
|
||||
share_proto = share['share_proto'].upper()
|
||||
|
||||
msg = (_("Create %(share_proto)s share of %(size)s GB") %
|
||||
msg = (_LI("Create %(share_proto)s share of %(size)s GB") %
|
||||
{'share_proto': share_proto, 'size': size})
|
||||
LOG.info(msg, context=context)
|
||||
|
||||
|
@ -23,6 +23,7 @@ from oslo.utils import timeutils
|
||||
import six
|
||||
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LW
|
||||
from manila.openstack.common import context as common_context
|
||||
from manila.openstack.common import local
|
||||
from manila.openstack.common import log as logging
|
||||
@ -56,7 +57,7 @@ class RequestContext(object):
|
||||
because they possibly came in from older rpc messages.
|
||||
"""
|
||||
if kwargs:
|
||||
LOG.warn(_('Arguments dropped when creating context: %s'),
|
||||
LOG.warn(_LW('Arguments dropped when creating context: %s'),
|
||||
str(kwargs))
|
||||
|
||||
self.user_id = user_id
|
||||
|
@ -28,7 +28,7 @@ from alembic import op
|
||||
from sqlalchemy import Boolean, Column, DateTime, ForeignKey
|
||||
from sqlalchemy import Integer, MetaData, String, Table, UniqueConstraint
|
||||
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.openstack.common import log as logging
|
||||
|
||||
|
||||
@ -400,7 +400,7 @@ def upgrade():
|
||||
table.create()
|
||||
except Exception:
|
||||
LOG.info(repr(table))
|
||||
LOG.exception(_('Exception while creating table.'))
|
||||
LOG.exception(_LE('Exception while creating table.'))
|
||||
raise
|
||||
|
||||
if migrate_engine.name == "mysql":
|
||||
|
@ -37,6 +37,8 @@ from manila.common import constants
|
||||
from manila.db.sqlalchemy import models
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.i18n import _LW
|
||||
from manila.openstack.common import log as logging
|
||||
|
||||
|
||||
@ -945,7 +947,7 @@ def quota_reserve(context, resources, project_quotas, user_quotas, deltas,
|
||||
session.add(usage_ref)
|
||||
|
||||
if unders:
|
||||
LOG.warning(_("Change will make usage less than 0 for the following "
|
||||
LOG.warning(_LW("Change will make usage less than 0 for the following "
|
||||
"resources: %s"), unders)
|
||||
if overs:
|
||||
if project_quotas == user_quotas:
|
||||
@ -2115,8 +2117,8 @@ def volume_type_destroy(context, id):
|
||||
results = model_query(context, models.Share, session=session). \
|
||||
filter_by(volume_type_id=id).all()
|
||||
if results:
|
||||
msg = _('VolumeType %s deletion failed, VolumeType in use.') % id
|
||||
LOG.error(msg)
|
||||
LOG.error(_LE('VolumeType %s deletion failed, VolumeType in use.'),
|
||||
id)
|
||||
raise exception.VolumeTypeInUse(volume_type_id=id)
|
||||
model_query(context, models.VolumeTypeExtraSpecs, session=session).\
|
||||
filter_by(volume_type_id=id).update(
|
||||
|
@ -27,6 +27,7 @@ import six
|
||||
import webob.exc
|
||||
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.openstack.common import processutils
|
||||
|
||||
@ -89,9 +90,10 @@ class ManilaException(Exception):
|
||||
except Exception as e:
|
||||
# kwargs doesn't match a variable in the message
|
||||
# log the issue and the kwargs
|
||||
LOG.exception(_('Exception in string format operation.'))
|
||||
LOG.exception(_LE('Exception in string format operation.'))
|
||||
for name, value in six.iteritems(kwargs):
|
||||
LOG.error("%s: %s" % (name, value))
|
||||
LOG.error(_LE("%(name)s: %(value)s"), {
|
||||
'name': name, 'value': value})
|
||||
if CONF.fatal_exception_format_errors:
|
||||
raise e
|
||||
else:
|
||||
|
@ -16,6 +16,8 @@
|
||||
import ast
|
||||
import re
|
||||
|
||||
import pep8
|
||||
|
||||
|
||||
"""
|
||||
Guidelines for writing new hacking checks
|
||||
@ -33,6 +35,16 @@ Guidelines for writing new hacking checks
|
||||
|
||||
UNDERSCORE_IMPORT_FILES = []
|
||||
|
||||
log_translation = re.compile(
|
||||
r"(.)*LOG\.(audit|error|info|critical|exception)\(\s*('|\")")
|
||||
log_translation_LC = re.compile(
|
||||
r"(.)*LOG\.(critical)\(\s*(_\(|'|\")")
|
||||
log_translation_LE = re.compile(
|
||||
r"(.)*LOG\.(error|exception)\(\s*(_\(|'|\")")
|
||||
log_translation_LI = re.compile(
|
||||
r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
|
||||
log_translation_LW = re.compile(
|
||||
r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")")
|
||||
translated_log = re.compile(
|
||||
r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)"
|
||||
"\(\s*_\(\s*('|\")")
|
||||
@ -104,6 +116,32 @@ def no_translate_debug_logs(logical_line, filename):
|
||||
yield(0, "M319 Don't translate debug level logs")
|
||||
|
||||
|
||||
def validate_log_translations(logical_line, physical_line, filename):
|
||||
# Translations are not required in the test and tempest
|
||||
# directories.
|
||||
if ("manila/tests" in filename or
|
||||
"contrib/tempest" in filename):
|
||||
return
|
||||
if pep8.noqa(physical_line):
|
||||
return
|
||||
msg = "M327: LOG.critical messages require translations `_LC()`!"
|
||||
if log_translation_LC.match(logical_line):
|
||||
yield (0, msg)
|
||||
msg = ("M328: LOG.error and LOG.exception messages require translations "
|
||||
"`_LE()`!")
|
||||
if log_translation_LE.match(logical_line):
|
||||
yield (0, msg)
|
||||
msg = "M329: LOG.info messages require translations `_LI()`!"
|
||||
if log_translation_LI.match(logical_line):
|
||||
yield (0, msg)
|
||||
msg = "M330: LOG.warning messages require translations `_LW()`!"
|
||||
if log_translation_LW.match(logical_line):
|
||||
yield (0, msg)
|
||||
msg = "M331: Log messages require translations!"
|
||||
if log_translation.match(logical_line):
|
||||
yield (0, msg)
|
||||
|
||||
|
||||
def check_explicit_underscore_import(logical_line, filename):
|
||||
"""Check for explicit import of the _ function
|
||||
|
||||
@ -184,6 +222,7 @@ class CheckForTransAdd(BaseASTChecker):
|
||||
|
||||
|
||||
def factory(register):
|
||||
register(validate_log_translations)
|
||||
register(check_explicit_underscore_import)
|
||||
register(no_translate_debug_logs)
|
||||
register(CheckForStrExc)
|
||||
|
@ -55,7 +55,7 @@ from oslo.config import cfg
|
||||
import six
|
||||
|
||||
from manila.db import base
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.scheduler import rpcapi as scheduler_rpcapi
|
||||
from manila import version
|
||||
@ -160,8 +160,8 @@ class Manager(base.Base):
|
||||
except Exception as e:
|
||||
if raise_on_error:
|
||||
raise
|
||||
LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
|
||||
locals())
|
||||
LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"),
|
||||
{'full_task_name': full_task_name, 'e': e})
|
||||
|
||||
def init_host(self):
|
||||
"""Handle initialization if this is a standalone service.
|
||||
|
@ -21,6 +21,8 @@ import six
|
||||
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.i18n import _LW
|
||||
from manila.network.linux import ip_lib
|
||||
from manila.network.linux import ovs_lib
|
||||
from manila.openstack.common import log as logging
|
||||
@ -154,7 +156,7 @@ class OVSInterfaceDriver(LinuxInterfaceDriver):
|
||||
namespace_obj.add_device_to_namespace(ns_dev)
|
||||
|
||||
else:
|
||||
LOG.warn(_("Device %s already exists"), device_name)
|
||||
LOG.warn(_LW("Device %s already exists"), device_name)
|
||||
ns_dev.link.set_up()
|
||||
|
||||
@device_name_synchronized
|
||||
@ -170,7 +172,7 @@ class OVSInterfaceDriver(LinuxInterfaceDriver):
|
||||
try:
|
||||
ovs.delete_port(tap_name)
|
||||
except RuntimeError:
|
||||
LOG.error(_("Failed unplugging interface '%s'"),
|
||||
LOG.error(_LE("Failed unplugging interface '%s'"),
|
||||
device_name)
|
||||
|
||||
|
||||
@ -199,7 +201,7 @@ class BridgeInterfaceDriver(LinuxInterfaceDriver):
|
||||
else:
|
||||
ns_veth = ip.device(device_name)
|
||||
root_veth = ip.device(tap_name)
|
||||
LOG.warn(_("Device %s already exists"), device_name)
|
||||
LOG.warn(_LW("Device %s already exists"), device_name)
|
||||
|
||||
root_veth.link.set_up()
|
||||
ns_veth.link.set_up()
|
||||
@ -212,5 +214,5 @@ class BridgeInterfaceDriver(LinuxInterfaceDriver):
|
||||
device.link.delete()
|
||||
LOG.debug("Unplugged interface '%s'", device_name)
|
||||
except RuntimeError:
|
||||
LOG.error(_("Failed unplugging interface '%s'"),
|
||||
LOG.error(_LE("Failed unplugging interface '%s'"),
|
||||
device_name)
|
||||
|
@ -15,7 +15,7 @@
|
||||
|
||||
import re
|
||||
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.openstack.common import log as logging
|
||||
from manila import utils
|
||||
|
||||
@ -45,7 +45,8 @@ class OVSBridge:
|
||||
try:
|
||||
return utils.execute(*full_args, run_as_root=True)
|
||||
except Exception as e:
|
||||
LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"),
|
||||
LOG.error(_LE("Unable to execute %(cmd)s. Exception: "
|
||||
"%(exception)s"),
|
||||
{'cmd': full_args, 'exception': e})
|
||||
|
||||
def reset_bridge(self):
|
||||
|
@ -19,7 +19,7 @@ from oslo.config import cfg
|
||||
from manila import context
|
||||
from manila.db import base
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.network import neutron
|
||||
from manila.network.neutron import constants as neutron_constants
|
||||
from manila.openstack.common import log as logging
|
||||
@ -129,7 +129,7 @@ class API(base.Base):
|
||||
port = self.client.create_port(port_req_body).get('port', {})
|
||||
return port
|
||||
except neutron_client_exc.NeutronClientException as e:
|
||||
LOG.exception(_('Neutron error creating port on network %s') %
|
||||
LOG.exception(_LE('Neutron error creating port on network %s'),
|
||||
network_id)
|
||||
if e.status_code == 409:
|
||||
raise exception.PortLimitExceeded()
|
||||
|
@ -25,7 +25,7 @@ import six
|
||||
|
||||
from manila import db
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
@ -980,7 +980,8 @@ class QuotaEngine(object):
|
||||
# usage resynchronization and the reservation expiration
|
||||
# mechanisms will resolve the issue. The exception is
|
||||
# logged, however, because this is less than optimal.
|
||||
LOG.exception(_("Failed to commit reservations %s"), reservations)
|
||||
LOG.exception(_LE("Failed to commit reservations %s"),
|
||||
reservations)
|
||||
return
|
||||
LOG.debug("Committed reservations %s", reservations)
|
||||
|
||||
@ -1003,7 +1004,7 @@ class QuotaEngine(object):
|
||||
# usage resynchronization and the reservation expiration
|
||||
# mechanisms will resolve the issue. The exception is
|
||||
# logged, however, because this is less than optimal.
|
||||
LOG.exception(_("Failed to roll back reservations %s"),
|
||||
LOG.exception(_LE("Failed to roll back reservations %s"),
|
||||
reservations)
|
||||
return
|
||||
LOG.debug("Rolled back reservations %s", reservations)
|
||||
|
@ -24,6 +24,7 @@ from oslo.config import cfg
|
||||
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.scheduler import driver
|
||||
from manila.scheduler import scheduler_options
|
||||
@ -208,13 +209,12 @@ class FilterScheduler(driver.Scheduler):
|
||||
return # no previously attempted hosts, skip
|
||||
|
||||
last_host = hosts[-1]
|
||||
msg = _("Error scheduling %(share_id)s from last share-service: "
|
||||
"%(last_host)s : %(exc)s") % {
|
||||
LOG.error(_LE("Error scheduling %(share_id)s from last share-service: "
|
||||
"%(last_host)s : %(exc)s"), {
|
||||
"share_id": share_id,
|
||||
"last_host": last_host,
|
||||
"exc": "exc"
|
||||
}
|
||||
LOG.error(msg)
|
||||
})
|
||||
|
||||
def populate_filter_properties_share(self, request_spec,
|
||||
filter_properties):
|
||||
|
@ -18,7 +18,8 @@
|
||||
|
||||
import math
|
||||
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.i18n import _LW
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.openstack.common.scheduler import filters
|
||||
|
||||
@ -35,7 +36,7 @@ class CapacityFilter(filters.BaseHostFilter):
|
||||
|
||||
if host_state.free_capacity_gb is None:
|
||||
# Fail Safe
|
||||
LOG.error(_("Free capacity not set: "
|
||||
LOG.error(_LE("Free capacity not set: "
|
||||
"volume node info collection broken."))
|
||||
return False
|
||||
|
||||
@ -49,7 +50,7 @@ class CapacityFilter(filters.BaseHostFilter):
|
||||
reserved = float(host_state.reserved_percentage) / 100
|
||||
free = math.floor(free_space * (1 - reserved))
|
||||
if free < volume_size:
|
||||
LOG.warning(_("Insufficient free space for volume creation "
|
||||
LOG.warning(_LW("Insufficient free space for volume creation "
|
||||
"(requested / avail): "
|
||||
"%(requested)s/%(available)s"),
|
||||
{'requested': volume_size,
|
||||
|
@ -25,7 +25,7 @@ import six
|
||||
|
||||
from manila import db
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LW
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.openstack.common.scheduler import filters
|
||||
from manila.openstack.common.scheduler import weights
|
||||
@ -267,7 +267,7 @@ class HostManager(object):
|
||||
share_services = db.service_get_all_by_topic(context, topic)
|
||||
for service in share_services:
|
||||
if not utils.service_is_up(service) or service['disabled']:
|
||||
LOG.warn(_("service is down or disabled."))
|
||||
LOG.warn(_LW("service is down or disabled."))
|
||||
continue
|
||||
host = service['host']
|
||||
capabilities = self.service_states.get(host, None)
|
||||
|
@ -26,7 +26,7 @@ from oslo.utils import importutils
|
||||
from manila import context
|
||||
from manila import db
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LW
|
||||
from manila import manager
|
||||
from manila.openstack.common import log as logging
|
||||
from manila import rpc
|
||||
@ -90,7 +90,7 @@ class SchedulerManager(manager.Manager):
|
||||
|
||||
def _set_share_error_state_and_notify(self, method, context, ex,
|
||||
request_spec):
|
||||
LOG.warning(_("Failed to schedule_%(method)s: %(ex)s"),
|
||||
LOG.warning(_LW("Failed to schedule_%(method)s: %(ex)s"),
|
||||
{"method": method, "ex": ex})
|
||||
|
||||
share_state = {'status': 'error'}
|
||||
|
@ -27,7 +27,7 @@ from oslo.config import cfg
|
||||
from oslo.serialization import jsonutils
|
||||
from oslo.utils import timeutils
|
||||
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.openstack.common import log as logging
|
||||
|
||||
scheduler_json_config_location_opt = cfg.StrOpt(
|
||||
@ -65,7 +65,7 @@ class SchedulerOptions(object):
|
||||
try:
|
||||
return os.path.getmtime(filename)
|
||||
except os.error as e:
|
||||
LOG.exception(_("Could not stat scheduler options file "
|
||||
LOG.exception(_LE("Could not stat scheduler options file "
|
||||
"%(filename)s: '%(e)s'"),
|
||||
{"filename": filename, "e": e})
|
||||
raise
|
||||
@ -75,7 +75,7 @@ class SchedulerOptions(object):
|
||||
try:
|
||||
return jsonutils.load(handle)
|
||||
except ValueError as e:
|
||||
LOG.exception(_("Could not decode scheduler options: "
|
||||
LOG.exception(_LE("Could not decode scheduler options: "
|
||||
"'%(e)s'"), {"e": e})
|
||||
return {}
|
||||
|
||||
|
@ -34,7 +34,9 @@ from oslo.utils import importutils
|
||||
from manila import context
|
||||
from manila import db
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.i18n import _LI
|
||||
from manila.i18n import _LW
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.openstack.common import loopingcall
|
||||
from manila import rpc
|
||||
@ -120,7 +122,7 @@ class Launcher(object):
|
||||
|
||||
"""
|
||||
def sigterm(sig, frame):
|
||||
LOG.info(_("SIGTERM received"))
|
||||
LOG.info(_LI("SIGTERM received"))
|
||||
# NOTE(jk0): Raise a ^C which is caught by the caller and cleanly
|
||||
# shuts down the service. This does not yet handle eventlet
|
||||
# threads.
|
||||
@ -170,7 +172,7 @@ class ProcessLauncher(object):
|
||||
# dies unexpectedly
|
||||
self.readpipe.read()
|
||||
|
||||
LOG.info(_('Parent process has died unexpectedly, exiting'))
|
||||
LOG.info(_LI('Parent process has died unexpectedly, exiting'))
|
||||
|
||||
sys.exit(1)
|
||||
|
||||
@ -209,7 +211,7 @@ class ProcessLauncher(object):
|
||||
# start up quickly but ensure we don't fork off children that
|
||||
# die instantly too quickly.
|
||||
if time.time() - wrap.forktimes[0] < wrap.workers:
|
||||
LOG.info(_('Forking too fast, sleeping'))
|
||||
LOG.info(_LI('Forking too fast, sleeping'))
|
||||
time.sleep(1)
|
||||
|
||||
wrap.forktimes.pop(0)
|
||||
@ -227,19 +229,19 @@ class ProcessLauncher(object):
|
||||
except SignalExit as exc:
|
||||
signame = {signal.SIGTERM: 'SIGTERM',
|
||||
signal.SIGINT: 'SIGINT'}[exc.signo]
|
||||
LOG.info(_('Caught %s, exiting'), signame)
|
||||
LOG.info(_LI('Caught %s, exiting'), signame)
|
||||
status = exc.code
|
||||
except SystemExit as exc:
|
||||
status = exc.code
|
||||
except BaseException:
|
||||
LOG.exception(_('Unhandled exception'))
|
||||
LOG.exception(_LE('Unhandled exception'))
|
||||
status = 2
|
||||
finally:
|
||||
wrap.server.stop()
|
||||
|
||||
os._exit(status)
|
||||
|
||||
LOG.info(_('Started child %d'), pid)
|
||||
LOG.info(_LI('Started child %d'), pid)
|
||||
|
||||
wrap.children.add(pid)
|
||||
self.children[pid] = wrap
|
||||
@ -249,7 +251,7 @@ class ProcessLauncher(object):
|
||||
def launch_server(self, server, workers=1):
|
||||
wrap = ServerWrapper(server, workers)
|
||||
self.totalwrap = self.totalwrap + 1
|
||||
LOG.info(_('Starting %d workers'), wrap.workers)
|
||||
LOG.info(_LI('Starting %d workers'), wrap.workers)
|
||||
while (self.running and len(wrap.children) < wrap.workers
|
||||
and not wrap.failed):
|
||||
self._start_child(wrap)
|
||||
@ -268,13 +270,15 @@ class ProcessLauncher(object):
|
||||
code = 0
|
||||
if os.WIFSIGNALED(status):
|
||||
sig = os.WTERMSIG(status)
|
||||
LOG.info(_('Child %(pid)d killed by signal %(sig)d'), locals())
|
||||
LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
|
||||
{'pid': pid, 'sig': sig})
|
||||
else:
|
||||
code = os.WEXITSTATUS(status)
|
||||
LOG.info(_('Child %(pid)d exited with status %(code)d'), locals())
|
||||
LOG.info(_LI('Child %(pid)d exited with status %(code)d'),
|
||||
{'pid': pid, 'code': code})
|
||||
|
||||
if pid not in self.children:
|
||||
LOG.warning(_('pid %d not in child list'), pid)
|
||||
LOG.warning(_LW('pid %d not in child list'), pid)
|
||||
return None
|
||||
|
||||
wrap = self.children.pop(pid)
|
||||
@ -282,7 +286,7 @@ class ProcessLauncher(object):
|
||||
if 2 == code:
|
||||
wrap.failed = True
|
||||
self.failedwrap = self.failedwrap + 1
|
||||
LOG.info(_('_wait_child %d'), self.failedwrap)
|
||||
LOG.info(_LI('_wait_child %d'), self.failedwrap)
|
||||
if self.failedwrap == self.totalwrap:
|
||||
self.running = False
|
||||
return wrap
|
||||
@ -298,7 +302,7 @@ class ProcessLauncher(object):
|
||||
eventlet.greenthread.sleep(.01)
|
||||
continue
|
||||
|
||||
LOG.info(_('wait wrap.failed %s'), wrap.failed)
|
||||
LOG.info(_LI('wait wrap.failed %s'), wrap.failed)
|
||||
while (self.running and len(wrap.children) < wrap.workers
|
||||
and not wrap.failed):
|
||||
self._start_child(wrap)
|
||||
@ -306,7 +310,7 @@ class ProcessLauncher(object):
|
||||
if self.sigcaught:
|
||||
signame = {signal.SIGTERM: 'SIGTERM',
|
||||
signal.SIGINT: 'SIGINT'}[self.sigcaught]
|
||||
LOG.info(_('Caught %s, stopping children'), signame)
|
||||
LOG.info(_LI('Caught %s, stopping children'), signame)
|
||||
|
||||
for pid in self.children:
|
||||
try:
|
||||
@ -317,7 +321,7 @@ class ProcessLauncher(object):
|
||||
|
||||
# Wait for children to die
|
||||
if self.children:
|
||||
LOG.info(_('Waiting on %d children to exit'), len(self.children))
|
||||
LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
|
||||
while self.children:
|
||||
self._wait_child()
|
||||
|
||||
@ -352,7 +356,7 @@ class Service(object):
|
||||
|
||||
def start(self):
|
||||
version_string = version.version_string()
|
||||
LOG.info(_('Starting %(topic)s node (version %(version_string)s)'),
|
||||
LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'),
|
||||
{'topic': self.topic, 'version_string': version_string})
|
||||
self.model_disconnected = False
|
||||
ctxt = context.get_admin_context()
|
||||
@ -449,7 +453,7 @@ class Service(object):
|
||||
try:
|
||||
db.service_destroy(context.get_admin_context(), self.service_id)
|
||||
except exception.NotFound:
|
||||
LOG.warn(_('Service killed that has no database entry'))
|
||||
LOG.warn(_LW('Service killed that has no database entry'))
|
||||
|
||||
def stop(self):
|
||||
# Try to shut the connection down, but if we get any sort of
|
||||
@ -501,13 +505,13 @@ class Service(object):
|
||||
# TODO(termie): make this pattern be more elegant.
|
||||
if getattr(self, 'model_disconnected', False):
|
||||
self.model_disconnected = False
|
||||
LOG.error(_('Recovered model server connection!'))
|
||||
LOG.error(_LE('Recovered model server connection!'))
|
||||
|
||||
# TODO(vish): this should probably only catch connection errors
|
||||
except Exception: # pylint: disable=W0702
|
||||
if not getattr(self, 'model_disconnected', False):
|
||||
self.model_disconnected = True
|
||||
LOG.exception(_('model server went away'))
|
||||
LOG.exception(_LE('model server went away'))
|
||||
|
||||
|
||||
class WSGIService(object):
|
||||
|
@ -27,6 +27,8 @@ from manila.api import extensions
|
||||
from manila.db import base
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.i18n import _LW
|
||||
from manila.openstack.common import log as logging
|
||||
from manila import policy
|
||||
from manila import quota
|
||||
@ -120,19 +122,19 @@ class API(base.Base):
|
||||
return (usages[name]['reserved'] + usages[name]['in_use'])
|
||||
|
||||
if 'gigabytes' in overs:
|
||||
msg = _("Quota exceeded for %(s_pid)s, tried to create "
|
||||
"%(s_size)sG share (%(d_consumed)dG of %(d_quota)dG "
|
||||
"already consumed)")
|
||||
LOG.warn(msg % {'s_pid': context.project_id,
|
||||
LOG.warn(_LW("Quota exceeded for %(s_pid)s, tried to create "
|
||||
"%(s_size)sG share (%(d_consumed)dG of "
|
||||
"%(d_quota)dG already consumed)"), {
|
||||
's_pid': context.project_id,
|
||||
's_size': size,
|
||||
'd_consumed': _consumed('gigabytes'),
|
||||
'd_quota': quotas['gigabytes']})
|
||||
raise exception.ShareSizeExceedsAvailableQuota()
|
||||
elif 'shares' in overs:
|
||||
msg = _("Quota exceeded for %(s_pid)s, tried to create "
|
||||
LOG.warn(_LW("Quota exceeded for %(s_pid)s, tried to create "
|
||||
"share (%(d_consumed)d shares "
|
||||
"already consumed)")
|
||||
LOG.warn(msg % {'s_pid': context.project_id,
|
||||
"already consumed)"), {
|
||||
's_pid': context.project_id,
|
||||
'd_consumed': _consumed('shares')})
|
||||
raise exception.ShareLimitExceeded(allowed=quotas['shares'])
|
||||
|
||||
@ -218,7 +220,7 @@ class API(base.Base):
|
||||
gigabytes=-share['size'])
|
||||
except Exception:
|
||||
reservations = None
|
||||
LOG.exception(_("Failed to update quota for deleting share"))
|
||||
LOG.exception(_LE("Failed to update quota for deleting share"))
|
||||
self.db.share_delete(context.elevated(), share_id)
|
||||
|
||||
if reservations:
|
||||
@ -274,19 +276,19 @@ class API(base.Base):
|
||||
return (usages[name]['reserved'] + usages[name]['in_use'])
|
||||
|
||||
if 'gigabytes' in overs:
|
||||
msg = _("Quota exceeded for %(s_pid)s, tried to create "
|
||||
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
|
||||
"%(s_size)sG snapshot (%(d_consumed)dG of "
|
||||
"%(d_quota)dG already consumed)")
|
||||
LOG.warn(msg % {'s_pid': context.project_id,
|
||||
LOG.warn(msg, {'s_pid': context.project_id,
|
||||
's_size': size,
|
||||
'd_consumed': _consumed('gigabytes'),
|
||||
'd_quota': quotas['gigabytes']})
|
||||
raise exception.ShareSizeExceedsAvailableQuota()
|
||||
elif 'snapshots' in overs:
|
||||
msg = _("Quota exceeded for %(s_pid)s, tried to create "
|
||||
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
|
||||
"snapshot (%(d_consumed)d snapshots "
|
||||
"already consumed)")
|
||||
LOG.warn(msg % {'s_pid': context.project_id,
|
||||
LOG.warn(msg, {'s_pid': context.project_id,
|
||||
'd_consumed': _consumed('snapshots')})
|
||||
raise exception.SnapshotLimitExceeded(
|
||||
allowed=quotas['snapshots'])
|
||||
|
@ -20,7 +20,7 @@ Drivers for shares.
|
||||
import time
|
||||
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.openstack.common import log as logging
|
||||
from manila import utils
|
||||
|
||||
@ -73,7 +73,7 @@ class ExecuteMixin(object):
|
||||
tries += 1
|
||||
if tries >= self.configuration.num_shell_tries:
|
||||
raise
|
||||
LOG.exception(_("Recovering from a failed execute. "
|
||||
LOG.exception(_LE("Recovering from a failed execute. "
|
||||
"Try number %s"), tries)
|
||||
time.sleep(tries ** 2)
|
||||
|
||||
|
@ -20,6 +20,8 @@ import six
|
||||
from manila import db as manila_db
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.i18n import _LW
|
||||
from manila.openstack.common import log
|
||||
from manila.share.drivers.emc.plugins import base as driver
|
||||
import manila.share.drivers.emc.plugins.registry
|
||||
@ -190,8 +192,8 @@ class VNXStorageConnection(driver.StorageConnection):
|
||||
share_server=None):
|
||||
"""Is called to remove share."""
|
||||
if share_server is None:
|
||||
LOG.warn(_("Driver does not support share deletion without share "
|
||||
"network specified. "
|
||||
LOG.warn(_LW("Driver does not support share deletion without "
|
||||
"share network specified. "
|
||||
"Return directly because there is nothing to clean"))
|
||||
return
|
||||
|
||||
@ -212,7 +214,7 @@ class VNXStorageConnection(driver.StorageConnection):
|
||||
mover_id = self._get_vdm_id(share_server)
|
||||
status, share_obj = self._XMLAPI_helper.get_cifs_share_by_name(name)
|
||||
if constants.STATUS_NOT_FOUND == status:
|
||||
LOG.warn(_("CIFS share %s not found. Skip the deletion"), name)
|
||||
LOG.warn(_LW("CIFS share %s not found. Skip the deletion"), name)
|
||||
else:
|
||||
mover_id = share_obj['mover']
|
||||
# Delete CIFS export
|
||||
@ -245,7 +247,7 @@ class VNXStorageConnection(driver.StorageConnection):
|
||||
path,
|
||||
mover_name)
|
||||
if constants.STATUS_NOT_FOUND == status:
|
||||
LOG.warn(_("NFS share %s not found. Skip the deletion"), name)
|
||||
LOG.warn(_LW("NFS share %s not found. Skip the deletion"), name)
|
||||
else:
|
||||
# Delete NFS export if it is present
|
||||
status, out = self._NASCmd_helper.delete_nfs_share(
|
||||
@ -278,17 +280,16 @@ class VNXStorageConnection(driver.StorageConnection):
|
||||
'true')
|
||||
if constants.STATUS_OK != status:
|
||||
if self._XMLAPI_helper._is_mount_point_unexist_error(out):
|
||||
LOG.warn(_("Mount point %(path)s on %(vdm)s not found."),
|
||||
LOG.warn(_LW("Mount point %(path)s on %(vdm)s not found."),
|
||||
{'path': path, 'vdm': vdm_name})
|
||||
else:
|
||||
msg = (_("Deleting mount point %(path)s on "
|
||||
"%(mover_name)s failed. Reason: %(err)s")
|
||||
% {'path': path,
|
||||
LOG.warn(_LW("Deleting mount point %(path)s on "
|
||||
"%(mover_name)s failed. Reason: %(err)s"),
|
||||
{'path': path,
|
||||
'mover_name': vdm_name,
|
||||
'err': out})
|
||||
LOG.warn(msg)
|
||||
else:
|
||||
LOG.warn(_("Failed to find the VDM. Try to "
|
||||
LOG.warn(_LW("Failed to find the VDM. Try to "
|
||||
"delete the file system"))
|
||||
|
||||
self._delete_filesystem(name)
|
||||
@ -299,7 +300,7 @@ class VNXStorageConnection(driver.StorageConnection):
|
||||
allow_absence=True)
|
||||
|
||||
if not filesystem:
|
||||
LOG.warn(_("File system %s not found. Skip the deletion"), name)
|
||||
LOG.warn(_LW("File system %s not found. Skip the deletion"), name)
|
||||
return
|
||||
|
||||
# Delete file system
|
||||
@ -320,14 +321,12 @@ class VNXStorageConnection(driver.StorageConnection):
|
||||
|
||||
status, ckpt = self._XMLAPI_helper.get_check_point_by_name(ckpt_name)
|
||||
if constants.STATUS_OK != status:
|
||||
message = _("Check point not found. Reason: %s.") % status
|
||||
LOG.warn(message)
|
||||
LOG.warn(_LW("Check point not found. Reason: %s."), status)
|
||||
return
|
||||
|
||||
if ckpt['id'] == '':
|
||||
message = _("Snapshot: %(name)s not found. "
|
||||
"Skip the deletion.") % {'name': snapshot['name']}
|
||||
LOG.warn(message)
|
||||
LOG.warn(_LW("Snapshot: %(name)s not found. "
|
||||
"Skip the deletion.") % {'name': snapshot['name']})
|
||||
return
|
||||
|
||||
status, out = self._XMLAPI_helper.delete_check_point(ckpt['id'])
|
||||
@ -566,8 +565,7 @@ class VNXStorageConnection(driver.StorageConnection):
|
||||
|
||||
except Exception as ex:
|
||||
with excutils.save_and_reraise_exception():
|
||||
message = _('Could not setup server. Reason: %s.') % ex
|
||||
LOG.error(message)
|
||||
LOG.error(_LE('Could not setup server. Reason: %s.'), ex)
|
||||
server_details = self._contruct_backend_details(
|
||||
vdm_name, vdmRef, interface_info)
|
||||
self.teardown_server(None, server_details, sec_services)
|
||||
@ -689,9 +687,8 @@ class VNXStorageConnection(driver.StorageConnection):
|
||||
status, cifs_servers = self._XMLAPI_helper.get_cifs_servers(
|
||||
vdm_id)
|
||||
if constants.STATUS_OK != status:
|
||||
message = (_('Could not find CIFS server by name: %s.')
|
||||
% vdm_name)
|
||||
LOG.error(message)
|
||||
LOG.error(_LE('Could not find CIFS server by name: %s.'),
|
||||
vdm_name)
|
||||
|
||||
for server in cifs_servers:
|
||||
# Unjoin CIFS Server from domain
|
||||
@ -735,8 +732,7 @@ class VNXStorageConnection(driver.StorageConnection):
|
||||
|
||||
status, out = self._XMLAPI_helper.list_storage_pool()
|
||||
if constants.STATUS_OK != status:
|
||||
message = _("Could not get storage pool list.")
|
||||
LOG.error(message)
|
||||
LOG.error(_LE("Could not get storage pool list."))
|
||||
|
||||
for pool in out:
|
||||
if name == pool['name']:
|
||||
@ -863,9 +859,7 @@ class VNXStorageConnection(driver.StorageConnection):
|
||||
def _get_mount_point_by_filesystem(self, filesystem, mover):
|
||||
status, out = self._XMLAPI_helper.get_mount_point(mover['id'])
|
||||
if constants.STATUS_OK != status:
|
||||
message = (_("Could not get mount point. Reason: %s.") % out)
|
||||
|
||||
LOG.error(message)
|
||||
LOG.error(_LE("Could not get mount point. Reason: %s."), out)
|
||||
|
||||
for mount in out:
|
||||
if mount['fs_id'] == filesystem['id']:
|
||||
|
@ -23,6 +23,9 @@ from six.moves.urllib import request as url_request # pylint: disable=E0611
|
||||
|
||||
import manila.exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.i18n import _LI
|
||||
from manila.i18n import _LW
|
||||
from manila.openstack.common import lockutils
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.share.drivers.emc.plugins.vnx import constants
|
||||
@ -84,7 +87,7 @@ class XMLAPIConnector(object):
|
||||
headers = six.text_type(resp.headers).replace('\n', '\\n')
|
||||
if failed_req:
|
||||
LOG.error(
|
||||
_('REQ: [%(method)s] %(url)s %(req_hdrs)s\n'
|
||||
_LE('REQ: [%(method)s] %(url)s %(req_hdrs)s\n'
|
||||
'REQ BODY: %(req_b)s\n'
|
||||
'RESP: [%(code)s] %(resp_hdrs)s\n'
|
||||
'RESP BODY: %(resp_b)s\n'),
|
||||
@ -171,7 +174,7 @@ class XMLAPIHelper(object):
|
||||
else:
|
||||
status = constants.STATUS_ERROR
|
||||
|
||||
LOG.warn(_("Translated status from %(old)s to %(new)s. "
|
||||
LOG.warn(_LW("Translated status from %(old)s to %(new)s. "
|
||||
"Message: %(info)s."),
|
||||
{'old': status_before,
|
||||
'new': status,
|
||||
@ -1154,7 +1157,7 @@ class SSHConnector(object):
|
||||
greenthread.sleep(random.randint(20, 500) / 100.0)
|
||||
|
||||
except Exception:
|
||||
LOG.error(_("Error running SSH command: %s"), command)
|
||||
LOG.error(_LE("Error running SSH command: %s"), command)
|
||||
|
||||
return stdout, stderr
|
||||
|
||||
@ -1187,7 +1190,7 @@ class NASCommandHelper(object):
|
||||
lines = out.strip().split('\n')
|
||||
for line in lines:
|
||||
if line.strip().split() == header:
|
||||
LOG.info(_('Found the header of the command '
|
||||
LOG.info(_LI('Found the header of the command '
|
||||
'/nas/bin/nas_cel -interconnect -l'))
|
||||
else:
|
||||
interconn = line.strip().split()
|
||||
|
@ -15,7 +15,7 @@
|
||||
import types
|
||||
import xml.dom.minidom
|
||||
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LW
|
||||
from manila.openstack.common import log
|
||||
|
||||
|
||||
@ -821,17 +821,16 @@ def one_child(tt, acceptable):
|
||||
k = kids(tt)
|
||||
|
||||
if len(k) != 1:
|
||||
message = (_('Expected just one %(item)s, got %(more)s.')
|
||||
% {'item': acceptable,
|
||||
LOG.warn(_LW('Expected just one %(item)s, got %(more)s.'),
|
||||
{'item': acceptable,
|
||||
'more': " ".join([t[0] for t in k])})
|
||||
LOG.warn(message)
|
||||
|
||||
child = k[0]
|
||||
|
||||
if name(child) not in acceptable:
|
||||
message = (_('Expected one of %(item)s, got %(child)s '
|
||||
'under %(parent)s.')
|
||||
% {'item': acceptable,
|
||||
LOG.warn(_LW('Expected one of %(item)s, got %(child)s '
|
||||
'under %(parent)s.'),
|
||||
{'item': acceptable,
|
||||
'child': name(child),
|
||||
'parent': name(tt)})
|
||||
|
||||
@ -852,8 +851,7 @@ def parse_any(tt):
|
||||
fn_name = 'parse_' + node_name.lower()
|
||||
fn = globals().get(fn_name)
|
||||
if fn is None:
|
||||
message = _('No parser for node type %s.') % name(tt)
|
||||
LOG.warn(message)
|
||||
LOG.warn(_LW('No parser for node type %s.'), name(tt))
|
||||
else:
|
||||
return fn(tt)
|
||||
|
||||
@ -880,9 +878,8 @@ def check_node(tt, nodename, required_attrs=None, optional_attrs=None,
|
||||
required_attrs = []
|
||||
|
||||
if name(tt) != nodename:
|
||||
message = (_('Expected node type %(expected)s, not %(actual)s.')
|
||||
% {'expected': nodename, 'actual': name(tt)})
|
||||
LOG.warn(message)
|
||||
LOG.warn(_LW('Expected node type %(expected)s, not %(actual)s.'),
|
||||
{'expected': nodename, 'actual': name(tt)})
|
||||
|
||||
# Check we have all the required attributes, and no unexpected ones
|
||||
tt_attrs = {}
|
||||
@ -891,12 +888,11 @@ def check_node(tt, nodename, required_attrs=None, optional_attrs=None,
|
||||
|
||||
for attr in required_attrs:
|
||||
if attr not in tt_attrs:
|
||||
message = (_('Expected %(attr)s attribute on %(node)s node,'
|
||||
' but only have %(attrs)s.')
|
||||
% {'attr': attr,
|
||||
LOG.warn(_LW('Expected %(attr)s attribute on %(node)s node,'
|
||||
' but only have %(attrs)s.'),
|
||||
{'attr': attr,
|
||||
'node': name(tt),
|
||||
'attrs': attrs(tt).keys()})
|
||||
LOG.warn(message)
|
||||
else:
|
||||
del tt_attrs[attr]
|
||||
|
||||
@ -905,28 +901,25 @@ def check_node(tt, nodename, required_attrs=None, optional_attrs=None,
|
||||
del tt_attrs[attr]
|
||||
|
||||
if len(tt_attrs.keys()) > 0:
|
||||
message = _('Invalid extra attributes %s.') % tt_attrs.keys()
|
||||
LOG.warn(message)
|
||||
LOG.warn(_LW('Invalid extra attributes %s.'), tt_attrs.keys())
|
||||
|
||||
if allowed_children is not None:
|
||||
for c in kids(tt):
|
||||
if name(c) not in allowed_children:
|
||||
message = (_('Unexpected node %(node)s under %(parent)s;'
|
||||
' wanted %(expected)s.')
|
||||
% {'node': name(c),
|
||||
LOG.warn(_LW('Unexpected node %(node)s under %(parent)s;'
|
||||
' wanted %(expected)s.'),
|
||||
{'node': name(c),
|
||||
'parent': name(tt),
|
||||
'expected': allowed_children})
|
||||
LOG.warn(message)
|
||||
|
||||
if not allow_pcdata:
|
||||
for c in tt[2]:
|
||||
if isinstance(c, types.StringTypes):
|
||||
if c.lstrip(' \t\n') != '':
|
||||
message = (_('Unexpected non-blank pcdata node %(node)s'
|
||||
' under %(parent)s.')
|
||||
% {'node': repr(c),
|
||||
LOG.warn(_LW('Unexpected non-blank pcdata node %(node)s'
|
||||
' under %(parent)s.'),
|
||||
{'node': repr(c),
|
||||
'parent': name(tt)})
|
||||
LOG.warn(message)
|
||||
|
||||
|
||||
def optional_child(tt, allowed):
|
||||
@ -935,10 +928,9 @@ def optional_child(tt, allowed):
|
||||
k = kids(tt)
|
||||
|
||||
if len(k) > 1:
|
||||
message = (_('Expected either zero or one of %(node)s '
|
||||
'under %(parent)s.') % {'node': allowed,
|
||||
LOG.warn(_LW('Expected either zero or one of %(node)s '
|
||||
'under %(parent)s.'), {'node': allowed,
|
||||
'parent': tt})
|
||||
LOG.warn(message)
|
||||
elif len(k) == 1:
|
||||
return one_child(tt, allowed)
|
||||
else:
|
||||
@ -956,12 +948,11 @@ def list_of_various(tt, acceptable):
|
||||
|
||||
for child in kids(tt):
|
||||
if name(child) not in acceptable:
|
||||
message = (_('Expected one of %(expected)s under'
|
||||
' %(parent)s, got %(actual)s.')
|
||||
% {'expected': acceptable,
|
||||
LOG.warn(_LW('Expected one of %(expected)s under'
|
||||
' %(parent)s, got %(actual)s.'),
|
||||
{'expected': acceptable,
|
||||
'parent': name(tt),
|
||||
'actual': repr(name(child))})
|
||||
LOG.warn(message)
|
||||
result = parse_any(child)
|
||||
if result is not None:
|
||||
r.append(result)
|
||||
|
@ -29,6 +29,8 @@ from manila import compute
|
||||
from manila import context
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.i18n import _LW
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.openstack.common import processutils
|
||||
from manila.share import driver
|
||||
@ -221,14 +223,14 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
['sudo', 'cp', const.MOUNT_FILE_TEMP, const.MOUNT_FILE],
|
||||
)
|
||||
except exception.ProcessExecutionError as e:
|
||||
LOG.error(_("Failed to sync mount files on server '%s'."),
|
||||
LOG.error(_LE("Failed to sync mount files on server '%s'."),
|
||||
server_details['instance_id'])
|
||||
raise exception.ShareBackendException(msg=six.text_type(e))
|
||||
try:
|
||||
# Remount it to avoid postponed point of failure
|
||||
self._ssh_exec(server_details, ['sudo', 'mount', '-a'])
|
||||
except exception.ProcessExecutionError as e:
|
||||
LOG.error(_("Failed to mount all shares on server '%s'."),
|
||||
LOG.error(_LE("Failed to mount all shares on server '%s'."),
|
||||
server_details['instance_id'])
|
||||
raise exception.ShareBackendException(msg=six.text_type(e))
|
||||
|
||||
@ -261,7 +263,7 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
# Add mount permanently
|
||||
self._sync_mount_temp_and_perm_files(server_details)
|
||||
else:
|
||||
LOG.warning(_("Mount point '%(path)s' already exists on "
|
||||
LOG.warning(_LW("Mount point '%(path)s' already exists on "
|
||||
"server '%(server)s'."), log_data)
|
||||
except exception.ProcessExecutionError as e:
|
||||
raise exception.ShareBackendException(msg=six.text_type(e))
|
||||
@ -287,7 +289,7 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
# Remove mount permanently
|
||||
self._sync_mount_temp_and_perm_files(server_details)
|
||||
else:
|
||||
LOG.warning(_("Mount point '%(path)s' does not exist on "
|
||||
LOG.warning(_LW("Mount point '%(path)s' does not exist on "
|
||||
"server '%(server)s'."), log_data)
|
||||
return _unmount_device_with_lock()
|
||||
|
||||
@ -600,8 +602,8 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
|
||||
def teardown_server(self, server_details, security_services=None):
|
||||
instance_id = server_details.get("instance_id")
|
||||
msg = "Removing share infrastructure for service instance '%s'."
|
||||
LOG.debug(msg % instance_id)
|
||||
LOG.debug("Removing share infrastructure for service instance '%s'.",
|
||||
instance_id)
|
||||
try:
|
||||
self.service_instance_manager.delete_service_instance(
|
||||
self.admin_context,
|
||||
@ -786,7 +788,7 @@ class CIFSHelper(NASHelperBase):
|
||||
self._ssh_exec(
|
||||
server, ['sudo', 'net', 'conf', 'delshare', share_name])
|
||||
except exception.ProcessExecutionError as e:
|
||||
LOG.warning(_("Caught error trying delete share: %(error)s, try"
|
||||
LOG.warning(_LW("Caught error trying delete share: %(error)s, try"
|
||||
"ing delete it forcibly."), {'error': e.stderr})
|
||||
self._ssh_exec(server, ['sudo', 'smbcontrol', 'all', 'close-share',
|
||||
share_name])
|
||||
|
@ -31,6 +31,8 @@ import xml.etree.cElementTree as etree
|
||||
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.i18n import _LW
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.share import driver
|
||||
|
||||
@ -127,7 +129,7 @@ class GlusterfsShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
try:
|
||||
self._execute(*args, **kw)
|
||||
except exception.ProcessExecutionError as exc:
|
||||
LOG.error(_("Error in gluster volume set: %s"), exc.stderr)
|
||||
LOG.error(_LE("Error in gluster volume set: %s"), exc.stderr)
|
||||
raise
|
||||
|
||||
def check_for_setup_error(self):
|
||||
@ -148,7 +150,7 @@ class GlusterfsShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
self._execute(*cmd, run_as_root=True)
|
||||
except exception.ProcessExecutionError as exc:
|
||||
if ensure and 'already mounted' in exc.stderr:
|
||||
LOG.warn(_("%s is already mounted"),
|
||||
LOG.warn(_LW("%s is already mounted"),
|
||||
self.gluster_address.export)
|
||||
else:
|
||||
raise exception.GlusterfsException(
|
||||
@ -173,7 +175,7 @@ class GlusterfsShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
)
|
||||
out, err = self._execute(*args, **kw)
|
||||
except exception.ProcessExecutionError as exc:
|
||||
LOG.error(_("Error retrieving volume info: %s"), exc.stderr)
|
||||
LOG.error(_LE("Error retrieving volume info: %s"), exc.stderr)
|
||||
raise
|
||||
|
||||
if not out:
|
||||
@ -212,7 +214,7 @@ class GlusterfsShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
try:
|
||||
self._mount_gluster_vol(mount_path, ensure=True)
|
||||
except exception.GlusterfsException:
|
||||
LOG.error('Could not mount the Gluster volume %s',
|
||||
LOG.error(_LE('Could not mount the Gluster volume %s'),
|
||||
self.gluster_address.volume)
|
||||
raise
|
||||
|
||||
@ -270,7 +272,7 @@ class GlusterfsShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
try:
|
||||
self._execute(*cmd, run_as_root=True)
|
||||
except exception.ProcessExecutionError:
|
||||
LOG.error('Unable to create share %s', share['name'])
|
||||
LOG.error(_LE('Unable to create share %s'), share['name'])
|
||||
raise
|
||||
|
||||
export_location = os.path.join(self.gluster_address.qualified,
|
||||
@ -284,7 +286,7 @@ class GlusterfsShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
try:
|
||||
self._execute(*cmd, run_as_root=True)
|
||||
except exception.ProcessExecutionError:
|
||||
LOG.error('Unable to delete share %s', share['name'])
|
||||
LOG.error(_LE('Unable to delete share %s'), share['name'])
|
||||
raise
|
||||
|
||||
def create_snapshot(self, context, snapshot, share_server=None):
|
||||
@ -343,7 +345,7 @@ class GlusterfsShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
try:
|
||||
self._execute(*args, **kw)
|
||||
except exception.ProcessExecutionError as exc:
|
||||
LOG.error(_("Error in gluster volume set: %s"), exc.stderr)
|
||||
LOG.error(_LE("Error in gluster volume set: %s"), exc.stderr)
|
||||
raise
|
||||
|
||||
def allow_access(self, context, share, access, share_server=None):
|
||||
|
@ -35,6 +35,7 @@ import six
|
||||
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LI
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.share import driver
|
||||
from manila.share.drivers import glusterfs
|
||||
@ -99,7 +100,7 @@ class GlusterfsNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
LOG.error(msg)
|
||||
raise exception.GlusterfsException(msg)
|
||||
|
||||
LOG.info(_("Number of gluster volumes read from config: "
|
||||
LOG.info(_LI("Number of gluster volumes read from config: "
|
||||
"%(numvols)s"),
|
||||
{'numvols': len(self.configuration.glusterfs_targets)})
|
||||
|
||||
@ -126,9 +127,9 @@ class GlusterfsNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
"with in the glusterfs_targets configuration parameter."))
|
||||
LOG.warn(msg)
|
||||
else:
|
||||
LOG.info(_("Number of gluster volumes in use: %(inuse-numvols)s. "
|
||||
"Number of gluster volumes available for use as share: "
|
||||
"%(unused-numvols)s"),
|
||||
LOG.info(_LI("Number of gluster volumes in use: "
|
||||
"%(inuse-numvols)s. Number of gluster volumes "
|
||||
"available for use as share: %(unused-numvols)s"),
|
||||
{'inuse-numvols': len(self.gluster_used_vols_dict),
|
||||
'unused-numvols': len(self.gluster_unused_vols_dict)})
|
||||
|
||||
@ -390,7 +391,7 @@ class GlusterfsNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
|
||||
# For native protocol, the export_location should be of the form:
|
||||
# server:/volname
|
||||
LOG.info(_("export_location sent back from create_share: %s"),
|
||||
LOG.info(_LI("export_location sent back from create_share: %s"),
|
||||
(export_location,))
|
||||
return export_location
|
||||
|
||||
|
@ -31,6 +31,8 @@ import six
|
||||
from manila import context
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.i18n import _LI
|
||||
from manila.openstack.common import log
|
||||
from manila.share import driver
|
||||
from manila.share.drivers.netapp import api as naapi
|
||||
@ -163,7 +165,7 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
|
||||
try:
|
||||
licenses = self._client.send_request('license-v2-list-info')
|
||||
except naapi.NaApiError as e:
|
||||
LOG.error(_("Could not get licenses list. %s."), e)
|
||||
LOG.error(_LE("Could not get licenses list. %s."), e)
|
||||
else:
|
||||
self._licenses = sorted([
|
||||
l.get_child_content('package').lower()
|
||||
@ -173,7 +175,7 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
|
||||
'backend': self.backend_name,
|
||||
'licenses': ', '.join(self._licenses),
|
||||
}
|
||||
LOG.info(_("Available licenses on '%(backend)s' "
|
||||
LOG.info(_LI("Available licenses on '%(backend)s' "
|
||||
"are %(licenses)s."), log_data)
|
||||
return self._licenses
|
||||
|
||||
@ -407,7 +409,7 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
|
||||
ip, netmask, vserver_client)
|
||||
except naapi.NaApiError:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error(_("Failed to create network interface"))
|
||||
LOG.error(_LE("Failed to create network interface"))
|
||||
self._delete_vserver(vserver_name, vserver_client)
|
||||
|
||||
self._enable_nfs(vserver_client)
|
||||
@ -488,7 +490,7 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
|
||||
vserver_client.send_request('net-dns-create', args)
|
||||
except naapi.NaApiError as e:
|
||||
if e.code == '13130':
|
||||
LOG.error(_("DNS exists for vserver."))
|
||||
LOG.error(_LE("DNS exists for vserver."))
|
||||
else:
|
||||
raise exception.NetAppException(
|
||||
_("Failed to configure DNS. %s") % e.message)
|
||||
@ -709,7 +711,7 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
|
||||
self._remove_export(share, vserver_client)
|
||||
self._deallocate_container(share, vserver_client)
|
||||
else:
|
||||
LOG.info(_("Share %s does not exist."), share['id'])
|
||||
LOG.info(_LI("Share %s does not exist."), share['id'])
|
||||
|
||||
def _create_export(self, share, vserver, vserver_client):
|
||||
"""Creates NAS storage."""
|
||||
@ -819,7 +821,7 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
|
||||
Deletes vserver.
|
||||
"""
|
||||
if not self._vserver_exists(vserver_name):
|
||||
LOG.error(_("Vserver %s does not exist."), vserver_name)
|
||||
LOG.error(_LE("Vserver %s does not exist."), vserver_name)
|
||||
return
|
||||
volumes_data = vserver_client.send_request('volume-get-iter')
|
||||
volumes_count = int(volumes_data.get_child_content('num-records'))
|
||||
@ -830,7 +832,7 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
|
||||
{'name': self.configuration.netapp_root_volume_name})
|
||||
except naapi.NaApiError as e:
|
||||
if e.code == '13042':
|
||||
LOG.error(_("Volume %s is already offline."),
|
||||
LOG.error(_LE("Volume %s is already offline."),
|
||||
self.configuration.netapp_root_volume_name)
|
||||
else:
|
||||
raise e
|
||||
@ -854,7 +856,7 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
|
||||
args)
|
||||
except naapi.NaApiError as e:
|
||||
if e.code == "15661":
|
||||
LOG.error(_("CIFS server does not exist for"
|
||||
LOG.error(_LE("CIFS server does not exist for"
|
||||
" vserver %s"), vserver_name)
|
||||
else:
|
||||
vserver_client.send_request('cifs-server-delete')
|
||||
@ -1182,9 +1184,9 @@ class NetAppClusteredCIFSHelper(NetAppNASHelperBase):
|
||||
self._restrict_access(user, share_name)
|
||||
except naapi.NaApiError as e:
|
||||
if e.code == "22":
|
||||
LOG.error(_("User %s does not exist."), user)
|
||||
LOG.error(_LE("User %s does not exist."), user)
|
||||
elif e.code == "15661":
|
||||
LOG.error(_("Rule %s does not exist."), user)
|
||||
LOG.error(_LE("Rule %s does not exist."), user)
|
||||
else:
|
||||
raise e
|
||||
|
||||
|
@ -30,6 +30,7 @@ from manila import compute
|
||||
from manila import context
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LW
|
||||
from manila.network.linux import ip_lib
|
||||
from manila.network.neutron import api as neutron
|
||||
from manila.openstack.common import log as logging
|
||||
@ -220,7 +221,7 @@ class ServiceInstanceManager(object):
|
||||
name = name or self.get_config_option(
|
||||
"service_instance_security_group")
|
||||
if not name:
|
||||
LOG.warning(_("Name for service instance security group is not "
|
||||
LOG.warning(_LW("Name for service instance security group is not "
|
||||
"provided. Skipping security group step."))
|
||||
return None
|
||||
s_groups = [s for s in self.compute_api.security_group_list(context)
|
||||
@ -255,7 +256,7 @@ class ServiceInstanceManager(object):
|
||||
inst = self.compute_api.server_get(self.admin_context,
|
||||
server['instance_id'])
|
||||
except exception.InstanceNotFound:
|
||||
LOG.warning(_("Service instance %s does not exist."),
|
||||
LOG.warning(_LW("Service instance %s does not exist."),
|
||||
server['instance_id'])
|
||||
return False
|
||||
if inst['status'] == 'ACTIVE':
|
||||
@ -378,12 +379,13 @@ class ServiceInstanceManager(object):
|
||||
'private_path': self.path_to_private_key,
|
||||
'public_path': self.path_to_public_key,
|
||||
}
|
||||
LOG.warning(_('No key path is available. May be non-existent '
|
||||
'key path is provided. Check path_to_private_key'
|
||||
' (current value %(private_path)s) and '
|
||||
LOG.warning(_LW('No key path is available. May be '
|
||||
'non-existent key path is provided. Check '
|
||||
'path_to_private_key (current value '
|
||||
'%(private_path)s) and '
|
||||
'path_to_public_key (current value '
|
||||
'%(public_path)s) in manila '
|
||||
'configuration file.') % str_params)
|
||||
'configuration file.'), str_params)
|
||||
|
||||
security_group = self._get_or_create_security_group(context)
|
||||
network_data = self._setup_network_for_instance(neutron_net_id,
|
||||
|
@ -28,7 +28,8 @@ import six
|
||||
from manila.common import constants
|
||||
from manila import context
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.i18n import _LI
|
||||
from manila import manager
|
||||
from manila import network
|
||||
from manila.openstack.common import log as logging
|
||||
@ -88,7 +89,7 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
ctxt, share, share_server=share_server)
|
||||
except Exception as e:
|
||||
LOG.error(
|
||||
_("Caught exception trying ensure share '%(s_id)s'. "
|
||||
_LE("Caught exception trying ensure share '%(s_id)s'. "
|
||||
"Exception: \n%(e)s."),
|
||||
{'s_id': share['id'], 'e': six.text_type(e)},
|
||||
)
|
||||
@ -105,7 +106,7 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
pass
|
||||
except Exception as e:
|
||||
LOG.error(
|
||||
_("Unexpected exception during share access"
|
||||
_LE("Unexpected exception during share access"
|
||||
" allow operation. Share id is '%(s_id)s'"
|
||||
", access rule type is '%(ar_type)s', "
|
||||
"access rule id is '%(ar_id)s', exception"
|
||||
@ -117,7 +118,7 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
)
|
||||
else:
|
||||
LOG.info(
|
||||
_("Share %(name)s: skipping export, because it has "
|
||||
_LI("Share %(name)s: skipping export, because it has "
|
||||
"'%(status)s' status."),
|
||||
{'name': share['name'], 'status': share['status']},
|
||||
)
|
||||
@ -171,10 +172,11 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
if not exist:
|
||||
# Create share server on backend with data from db
|
||||
share_server = self._setup_server(context, share_server)
|
||||
LOG.info(_("Share server created successfully."))
|
||||
LOG.info(_LI("Share server created successfully."))
|
||||
else:
|
||||
LOG.info(_("Used already existed share server '%(share_server"
|
||||
"_id)s'"), {'share_server_id': share_server['id']})
|
||||
LOG.info(_LI("Used already existed share server "
|
||||
"'%(share_server_id)s'"),
|
||||
{'share_server_id': share_server['id']})
|
||||
return share_server, share_ref
|
||||
|
||||
return _provide_share_server_for_share()
|
||||
@ -213,7 +215,7 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
context, share_id, {'share_server_id': share_server['id']})
|
||||
except exception.ShareServerNotFound:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error(_("Share server %s does not exist."),
|
||||
LOG.error(_LE("Share server %s does not exist."),
|
||||
parent_share_server_id)
|
||||
self.db.share_update(context, share_id,
|
||||
{'status': 'error'})
|
||||
@ -223,7 +225,7 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
context, share_network_id, share_id)
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error(_("Failed to get share server"
|
||||
LOG.error(_LE("Failed to get share server"
|
||||
" for share creation."))
|
||||
self.db.share_update(context, share_id,
|
||||
{'status': 'error'})
|
||||
@ -242,10 +244,10 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
{'export_location': export_location})
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error(_("Share %s failed on creation."), share_id)
|
||||
LOG.error(_LE("Share %s failed on creation."), share_id)
|
||||
self.db.share_update(context, share_id, {'status': 'error'})
|
||||
else:
|
||||
LOG.info(_("Share created successfully."))
|
||||
LOG.info(_LI("Share created successfully."))
|
||||
self.db.share_update(context, share_id,
|
||||
{'status': 'available',
|
||||
'launched_at': timeutils.utcnow()})
|
||||
@ -277,10 +279,10 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
gigabytes=-share_ref['size'])
|
||||
except Exception:
|
||||
reservations = None
|
||||
LOG.exception(_("Failed to update usages deleting share"))
|
||||
LOG.exception(_LE("Failed to update usages deleting share"))
|
||||
|
||||
self.db.share_delete(context, share_id)
|
||||
LOG.info(_("Share %s: deleted successfully."), share_ref['name'])
|
||||
LOG.info(_LI("Share %s: deleted successfully."), share_ref['name'])
|
||||
|
||||
if reservations:
|
||||
QUOTAS.commit(context, reservations, project_id=project_id)
|
||||
@ -351,7 +353,7 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
gigabytes=-snapshot_ref['size'])
|
||||
except Exception:
|
||||
reservations = None
|
||||
LOG.exception(_("Failed to update usages deleting snapshot"))
|
||||
LOG.exception(_LE("Failed to update usages deleting snapshot"))
|
||||
|
||||
if reservations:
|
||||
QUOTAS.commit(context, reservations, project_id=project_id)
|
||||
@ -394,7 +396,7 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
|
||||
@manager.periodic_task
|
||||
def _report_driver_status(self, context):
|
||||
LOG.info(_('Updating share status'))
|
||||
LOG.info(_LI('Updating share status'))
|
||||
share_stats = self.driver.get_share_stats(refresh=True)
|
||||
if share_stats:
|
||||
self.update_service_capabilities(share_stats)
|
||||
@ -480,7 +482,7 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
security_services=sec_services)
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error(_("Share server %s failed on deletion."),
|
||||
LOG.error(_LE("Share server %s failed on deletion."),
|
||||
share_server['id'])
|
||||
self.db.share_server_update(
|
||||
context, share_server['id'],
|
||||
@ -489,5 +491,5 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
self.db.share_server_delete(context, share_server['id'])
|
||||
|
||||
_teardown_server()
|
||||
LOG.info(_("Share server deleted successfully."))
|
||||
LOG.info(_LI("Share server deleted successfully."))
|
||||
self.network_api.deallocate_network(context, share_server)
|
||||
|
@ -23,6 +23,7 @@ from manila import context
|
||||
from manila import db
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.openstack.common import log as logging
|
||||
|
||||
CONF = cfg.CONF
|
||||
@ -36,7 +37,7 @@ def create(context, name, extra_specs={}):
|
||||
dict(name=name,
|
||||
extra_specs=extra_specs))
|
||||
except db_exception.DBError as e:
|
||||
LOG.exception(_('DB error: %s'), e)
|
||||
LOG.exception(_LE('DB error: %s'), e)
|
||||
raise exception.VolumeTypeCreateFailed(name=name,
|
||||
extra_specs=extra_specs)
|
||||
return type_ref
|
||||
@ -122,8 +123,9 @@ def get_default_volume_type():
|
||||
# Couldn't find volume type with the name in default_volume_type
|
||||
# flag, record this issue and move on
|
||||
# TODO(zhiteng) consider add notification to warn admin
|
||||
LOG.exception(_('Default volume type is not found, '
|
||||
'please check default_volume_type config: %s'), e)
|
||||
LOG.exception(_LE('Default volume type is not found, '
|
||||
'please check default_volume_type config: %s'),
|
||||
e)
|
||||
|
||||
return vol_type
|
||||
|
||||
|
@ -38,6 +38,7 @@ import webob.exc
|
||||
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LI
|
||||
from manila.openstack.common import log as logging
|
||||
|
||||
socket_opts = [
|
||||
@ -216,7 +217,7 @@ class Server(object):
|
||||
backlog=backlog)
|
||||
self._server = eventlet.spawn(self._start)
|
||||
(self._host, self._port) = self._socket.getsockname()[0:2]
|
||||
LOG.info(_("Started %(name)s on %(_host)s:%(_port)s"), self.__dict__)
|
||||
LOG.info(_LI("Started %(name)s on %(_host)s:%(_port)s"), self.__dict__)
|
||||
|
||||
@property
|
||||
def host(self):
|
||||
@ -235,7 +236,7 @@ class Server(object):
|
||||
:returns: None
|
||||
|
||||
"""
|
||||
LOG.info(_("Stopping WSGI server."))
|
||||
LOG.info(_LI("Stopping WSGI server."))
|
||||
self._server.kill()
|
||||
|
||||
def wait(self):
|
||||
@ -249,7 +250,7 @@ class Server(object):
|
||||
try:
|
||||
self._server.wait()
|
||||
except greenlet.GreenletExit:
|
||||
LOG.info(_("WSGI server has stopped."))
|
||||
LOG.info(_LI("WSGI server has stopped."))
|
||||
|
||||
|
||||
class Request(webob.Request):
|
||||
|
Loading…
Reference in New Issue
Block a user