
Remove log translations

Log messages are no longer being translated. This removes all use of
the _LE, _LI, and _LW translation markers to simplify logging and to
avoid confusion with new contributions.

See:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html

Change-Id: I4c96f3590d46205c45d12ee4ead8c208e11c52c5
changes/76/443976/5
Sean McGinnis authored 4 years ago, committed by Sean McGinnis
commit a55a6b5c71
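
The change applies one mechanical pattern across every file below: drop the
marker wrapper, pass the plain string straight to the logger, and trim the
now-unused import. A representative before/after sketch (the call site is
modeled on the backups.py hunk shown further down):

    # before: message wrapped in a lazy-translation marker
    from cinder.i18n import _LI
    LOG.info(_LI('Delete backup with id: %s'), id)

    # after: plain string; the _LI wrapper and its import are removed
    LOG.info('Delete backup with id: %s', id)

Only the log-level markers (_LE, _LI, _LW) disappear; the _() helper from
cinder.i18n stays wherever user-facing strings (e.g. exception messages) are
still translated, as the import hunks below show.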
Files changed (number of changed lines in parentheses):

  1. cinder/api/__init__.py (8)
  2. cinder/api/contrib/backups.py (10)
  3. cinder/api/contrib/cgsnapshots.py (6)
  4. cinder/api/contrib/consistencygroups.py (20)
  5. cinder/api/contrib/hosts.py (4)
  6. cinder/api/contrib/snapshot_actions.py (4)
  7. cinder/api/contrib/snapshot_unmanage.py (3)
  8. cinder/api/contrib/types_extra_specs.py (10)
  9. cinder/api/contrib/volume_transfer.py (8)
  10. cinder/api/contrib/volume_unmanage.py (3)
  11. cinder/api/extensions.py (19)
  12. cinder/api/middleware/fault.py (6)
  13. cinder/api/openstack/__init__.py (6)
  14. cinder/api/openstack/wsgi.py (19)
  15. cinder/api/v2/snapshots.py (7)
  16. cinder/api/v2/volumes.py (6)
  17. cinder/api/v3/group_snapshots.py (6)
  18. cinder/api/v3/groups.py (20)
  19. cinder/api/v3/volumes.py (8)
  20. cinder/backup/api.py (10)
  21. cinder/backup/chunkeddriver.py (16)
  22. cinder/backup/driver.py (22)
  23. cinder/backup/drivers/ceph.py (58)
  24. cinder/backup/drivers/swift.py (5)
  25. cinder/backup/drivers/tsm.py (12)
  26. cinder/backup/manager.py (110)
  27. cinder/brick/local_dev/lvm.py (83)
  28. cinder/cmd/volume.py (8)
  29. cinder/cmd/volume_usage_audit.py (26)
  30. cinder/common/sqlalchemyutils.py (4)
  31. cinder/consistencygroup/api.py (65)
  32. cinder/context.py (6)
  33. cinder/coordination.py (22)
  34. cinder/db/sqlalchemy/api.py (33)
  35. cinder/exception.py (6)
  36. cinder/group/api.py (75)
  37. cinder/i18n.py (10)
  38. cinder/image/cache.py (5)
  39. cinder/image/glance.py (8)
  40. cinder/image/image_utils.py (22)
  41. cinder/keymgr/__init__.py (10)
  42. cinder/keymgr/conf_key_mgr.py (8)
  43. cinder/manager.py (22)
  44. cinder/message/api.py (7)
  45. cinder/objects/qos_specs.py (4)
  46. cinder/quota.py (8)
  47. cinder/quota_utils.py (14)
  48. cinder/rpc.py (12)
  49. cinder/scheduler/base_filter.py (25)
  50. cinder/scheduler/filter_scheduler.py (10)
  51. cinder/scheduler/filters/capacity_filter.py (29)
  52. cinder/scheduler/filters/driver_filter.py (5)
  53. cinder/scheduler/filters/instance_locality_filter.py (14)
  54. cinder/scheduler/flows/create_volume.py (7)
  55. cinder/scheduler/host_manager.py (12)
  56. cinder/scheduler/manager.py (30)
  57. cinder/scheduler/scheduler_options.py (8)
  58. cinder/scheduler/weights/goodness.py (17)
  59. cinder/service.py (56)
  60. cinder/ssh_utils.py (6)
  61. cinder/tests/unit/scheduler/test_base_filter.py (31)
  62. cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py (9)
  63. cinder/transfer/api.py (15)
  64. cinder/utils.py (14)
  65. cinder/volume/api.py (162)
  66. cinder/volume/driver.py (75)
  67. cinder/volume/driver_utils.py (13)
  68. cinder/volume/drivers/block_device.py (12)
  69. cinder/volume/drivers/coprhd/common.py (84)
  70. cinder/volume/drivers/coprhd/scaleio.py (9)
  71. cinder/volume/drivers/datera/datera_api2.py (35)
  72. cinder/volume/drivers/datera/datera_api21.py (33)
  73. cinder/volume/drivers/datera/datera_common.py (8)
  74. cinder/volume/drivers/dell/dell_storagecenter_api.py (234)
  75. cinder/volume/drivers/dell/dell_storagecenter_common.py (126)
  76. cinder/volume/drivers/dell/dell_storagecenter_fc.py (12)
  77. cinder/volume/drivers/dell/dell_storagecenter_iscsi.py (16)
  78. cinder/volume/drivers/dell_emc/ps.py (73)
  79. cinder/volume/drivers/dell_emc/scaleio/driver.py (205)
  80. cinder/volume/drivers/dell_emc/unity/adapter.py (22)
  81. cinder/volume/drivers/dell_emc/unity/client.py (14)
  82. cinder/volume/drivers/dell_emc/unity/utils.py (16)
  83. cinder/volume/drivers/dell_emc/vmax/common.py (370)
  84. cinder/volume/drivers/dell_emc/vmax/fast.py (64)
  85. cinder/volume/drivers/dell_emc/vmax/fc.py (3)
  86. cinder/volume/drivers/dell_emc/vmax/https.py (6)
  87. cinder/volume/drivers/dell_emc/vmax/iscsi.py (19)
  88. cinder/volume/drivers/dell_emc/vmax/masking.py (185)
  89. cinder/volume/drivers/dell_emc/vmax/provision_v3.py (10)
  90. cinder/volume/drivers/dell_emc/vmax/utils.py (96)
  91. cinder/volume/drivers/dell_emc/vnx/adapter.py (110)
  92. cinder/volume/drivers/dell_emc/vnx/client.py (71)
  93. cinder/volume/drivers/dell_emc/vnx/common.py (12)
  94. cinder/volume/drivers/dell_emc/vnx/taskflows.py (40)
  95. cinder/volume/drivers/dell_emc/vnx/utils.py (16)
  96. cinder/volume/drivers/dell_emc/xtremio.py (42)
  97. cinder/volume/drivers/dothill/dothill_client.py (46)
  98. cinder/volume/drivers/dothill/dothill_common.py (38)
  99. cinder/volume/drivers/drbdmanagedrv.py (18)
  100. cinder/volume/drivers/falconstor/fc.py (6)

cinder/api/__init__.py (8)

@@ -19,8 +19,6 @@ from oslo_config import cfg
 from oslo_log import log as logging
 import paste.urlmap
-from cinder.i18n import _LW
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
@@ -28,7 +26,7 @@ LOG = logging.getLogger(__name__)
 def root_app_factory(loader, global_conf, **local_conf):
     if CONF.enable_v1_api:
-        LOG.warning(_LW('The v1 api is deprecated and is not under active '
-                        'development. You should set enable_v1_api=false '
-                        'and enable_v3_api=true in your cinder.conf file.'))
+        LOG.warning('The v1 api is deprecated and is not under active '
+                    'development. You should set enable_v1_api=false '
+                    'and enable_v3_api=true in your cinder.conf file.')
     return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf)

cinder/api/contrib/backups.py (10)

@@ -28,7 +28,7 @@ from cinder.api.openstack import wsgi
 from cinder.api.views import backups as backup_views
 from cinder import backup as backupAPI
 from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder import utils
 LOG = logging.getLogger(__name__)
@@ -59,7 +59,7 @@ class BackupsController(wsgi.Controller):
         LOG.debug('Delete called for member %s.', id)
         context = req.environ['cinder.context']
-        LOG.info(_LI('Delete backup with id: %s'), id)
+        LOG.info('Delete backup with id: %s', id)
         try:
             backup = self.backup_api.get(context, id)
@@ -141,8 +141,8 @@ class BackupsController(wsgi.Controller):
         incremental = backup.get('incremental', False)
         force = backup.get('force', False)
         snapshot_id = backup.get('snapshot_id', None)
-        LOG.info(_LI("Creating backup of volume %(volume_id)s in container"
-                     " %(container)s"),
+        LOG.info("Creating backup of volume %(volume_id)s in container"
+                 " %(container)s",
                  {'volume_id': volume_id, 'container': container},
                  context=context)
@@ -173,7 +173,7 @@ class BackupsController(wsgi.Controller):
         volume_id = restore.get('volume_id', None)
         name = restore.get('name', None)
-        LOG.info(_LI("Restoring backup %(backup_id)s to volume %(volume_id)s"),
+        LOG.info("Restoring backup %(backup_id)s to volume %(volume_id)s",
                  {'backup_id': id, 'volume_id': volume_id},
                  context=context)

cinder/api/contrib/cgsnapshots.py (6)

@@ -28,7 +28,7 @@ from cinder.api.views import cgsnapshots as cgsnapshot_views
 from cinder import consistencygroup as consistencygroup_api
 from cinder import exception
 from cinder import group as group_api
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder.objects import cgsnapshot as cgsnap_obj
 from cinder.objects import consistencygroup as cg_obj
 from cinder.objects import group as grp_obj
@@ -62,7 +62,7 @@ class CgsnapshotsController(wsgi.Controller):
         LOG.debug('delete called for member %s', id)
         context = req.environ['cinder.context']
-        LOG.info(_LI('Delete cgsnapshot with id: %s'), id)
+        LOG.info('Delete cgsnapshot with id: %s', id)
         try:
             cgsnapshot = self._get_cgsnapshot(context, id)
@@ -167,7 +167,7 @@ class CgsnapshotsController(wsgi.Controller):
         name = cgsnapshot.get('name', None)
         description = cgsnapshot.get('description', None)
-        LOG.info(_LI("Creating cgsnapshot %(name)s."),
+        LOG.info("Creating cgsnapshot %(name)s.",
                  {'name': name},
                  context=context)

cinder/api/contrib/consistencygroups.py (20)

@@ -28,7 +28,7 @@ from cinder.api.views import consistencygroups as consistencygroup_views
 from cinder import consistencygroup as consistencygroup_api
 from cinder import exception
 from cinder import group as group_api
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder.objects import cgsnapshot as cgsnap_obj
 from cinder.objects import consistencygroup as cg_obj
 from cinder.objects import group as grp_obj
@@ -77,7 +77,7 @@ class ConsistencyGroupsController(wsgi.Controller):
             msg = _("Invalid value '%s' for force.") % force
             raise exc.HTTPBadRequest(explanation=msg)
-        LOG.info(_LI('Delete consistency group with id: %s'), id)
+        LOG.info('Delete consistency group with id: %s', id)
         try:
             group = self._get(context, id)
@@ -181,7 +181,7 @@ class ConsistencyGroupsController(wsgi.Controller):
                    group_types.DEFAULT_CGSNAPSHOT_TYPE)
             raise exc.HTTPBadRequest(explanation=msg)
-        LOG.info(_LI("Creating consistency group %(name)s."),
+        LOG.info("Creating consistency group %(name)s.",
                  {'name': name})
         try:
@@ -232,12 +232,12 @@ class ConsistencyGroupsController(wsgi.Controller):
             raise exc.HTTPBadRequest(explanation=msg)
         if cgsnapshot_id:
-            LOG.info(_LI("Creating consistency group %(name)s from "
-                         "cgsnapshot %(snap)s."),
+            LOG.info("Creating consistency group %(name)s from "
+                     "cgsnapshot %(snap)s.",
                      {'name': name, 'snap': cgsnapshot_id})
         elif source_cgid:
-            LOG.info(_LI("Creating consistency group %(name)s from "
-                         "source consistency group %(source_cgid)s."),
+            LOG.info("Creating consistency group %(name)s from "
+                     "source consistency group %(source_cgid)s.",
                      {'name': name, 'source_cgid': source_cgid})
         try:
@@ -282,9 +282,9 @@ class ConsistencyGroupsController(wsgi.Controller):
     def _update(self, context, id, name, description, add_volumes,
                 remove_volumes,
                 allow_empty=False):
-        LOG.info(_LI("Updating consistency group %(id)s with name %(name)s "
-                     "description: %(description)s add_volumes: "
-                     "%(add_volumes)s remove_volumes: %(remove_volumes)s."),
+        LOG.info("Updating consistency group %(id)s with name %(name)s "
+                 "description: %(description)s add_volumes: "
+                 "%(add_volumes)s remove_volumes: %(remove_volumes)s.",
                  {'id': id,
                   'name': name,
                   'description': description,

cinder/api/contrib/hosts.py (4)

@@ -25,7 +25,7 @@ from cinder.api.openstack import wsgi
 from cinder.common import constants
 from cinder import db
 from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder import objects
 from cinder.volume import api as volume_api
@@ -120,7 +120,7 @@ class HostController(wsgi.Controller):
         """Sets the specified host's ability to accept new volumes."""
         context = req.environ['cinder.context']
         state = "enabled" if enabled else "disabled"
-        LOG.info(_LI("Setting host %(host)s to %(state)s."),
+        LOG.info("Setting host %(host)s to %(state)s.",
                  {'host': host, 'state': state})
         result = self.api.set_host_enabled(context,
                                            host=host,

cinder/api/contrib/snapshot_actions.py (4)

@@ -18,7 +18,7 @@ import webob
 from cinder.api import extensions
 from cinder.api.openstack import wsgi
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder import objects
 from cinder.objects import fields
@@ -94,7 +94,7 @@ class SnapshotActionsController(wsgi.Controller):
             update_dict.update({'progress': progress})
-        LOG.info(_LI("Updating snapshot %(id)s with info %(dict)s"),
+        LOG.info("Updating snapshot %(id)s with info %(dict)s",
                  {'id': id, 'dict': update_dict})
         current_snapshot.update(update_dict)

cinder/api/contrib/snapshot_unmanage.py (3)

@@ -20,7 +20,6 @@ from webob import exc
 from cinder.api import extensions
 from cinder.api.openstack import wsgi
 from cinder import exception
-from cinder.i18n import _LI
 from cinder import volume
 LOG = logging.getLogger(__name__)
@@ -49,7+48,7 @@ class SnapshotUnmanageController(wsgi.Controller):
         context = req.environ['cinder.context']
         authorize(context)
-        LOG.info(_LI("Unmanage snapshot with id: %s"), id)
+        LOG.info("Unmanage snapshot with id: %s", id)
         try:
             snapshot = self.volume_api.get_snapshot(context, id)

cinder/api/contrib/types_extra_specs.py (10)

@@ -27,7 +27,7 @@ from cinder.api.openstack import wsgi
 from cinder import context as ctxt
 from cinder import db
 from cinder import exception
-from cinder.i18n import _, _LW
+from cinder.i18n import _
 from cinder import rpc
 from cinder import utils
 from cinder.volume import volume_types
@@ -80,10 +80,10 @@ class VolumeTypeExtraSpecsController(wsgi.Controller):
                 expl = _('Volume Type is currently in use.')
                 raise webob.exc.HTTPBadRequest(explanation=expl)
             else:
-                msg = _LW("The option 'allow_inuse_volume_type_modification' "
-                          "is deprecated and will be removed in a future "
-                          "release. The default behavior going forward will "
-                          "be to disallow modificaton of in-use types.")
+                msg = ("The option 'allow_inuse_volume_type_modification' "
+                       "is deprecated and will be removed in a future "
+                       "release. The default behavior going forward will "
+                       "be to disallow modificaton of in-use types.")
                 versionutils.report_deprecated_feature(LOG, msg)
         return

cinder/api/contrib/volume_transfer.py (8)

@@ -23,7 +23,7 @@ from cinder.api import extensions
 from cinder.api.openstack import wsgi
 from cinder.api.views import transfers as transfer_view
 from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder import transfer as transferAPI
 LOG = logging.getLogger(__name__)
@@ -95,7 +95,7 @@ class VolumeTransferController(wsgi.Controller):
                                     remove_whitespaces=True)
         name = name.strip()
-        LOG.info(_LI("Creating transfer of volume %s"),
+        LOG.info("Creating transfer of volume %s",
                  volume_id)
         try:
@@ -124,7 +124,7 @@ class VolumeTransferController(wsgi.Controller):
             msg = _("Incorrect request body format")
             raise exc.HTTPBadRequest(explanation=msg)
-        LOG.info(_LI("Accepting transfer %s"), transfer_id)
+        LOG.info("Accepting transfer %s", transfer_id)
         try:
             accepted_transfer = self.transfer_api.accept(context, transfer_id,
@@ -144,7 +144,7 @@ class VolumeTransferController(wsgi.Controller):
         """Delete a transfer."""
         context = req.environ['cinder.context']
-        LOG.info(_LI("Delete transfer with id: %s"), id)
+        LOG.info("Delete transfer with id: %s", id)
         # Not found exception will be handled at the wsgi level
         self.transfer_api.delete(context, transfer_id=id)

cinder/api/contrib/volume_unmanage.py (3)

@@ -18,7 +18,6 @@ import webob
 from cinder.api import extensions
 from cinder.api.openstack import wsgi
-from cinder.i18n import _LI
 from cinder import volume
 LOG = logging.getLogger(__name__)
@@ -50,7 +49,7 @@ class VolumeUnmanageController(wsgi.Controller):
         context = req.environ['cinder.context']
         authorize(context)
-        LOG.info(_LI("Unmanage volume with id: %s"), id)
+        LOG.info("Unmanage volume with id: %s", id)
         # Not found exception will be handled at the wsgi level
         vol = self.volume_api.get(context, id)

cinder/api/extensions.py (19)

@@ -25,7 +25,6 @@ import webob.exc
 import cinder.api.openstack
 from cinder.api.openstack import wsgi
 from cinder import exception
-from cinder.i18n import _LE, _LI, _LW
 import cinder.policy
@@ -123,7 +122,7 @@ class ExtensionManager(object):
     """
     def __init__(self):
-        LOG.info(_LI('Initializing extension manager.'))
+        LOG.info('Initializing extension manager.')
         self.cls_list = CONF.osapi_volume_extension
         self.extensions = {}
@@ -138,7 +137,7 @@ class ExtensionManager(object):
             return
         alias = ext.alias
-        LOG.info(_LI('Loaded extension: %s'), alias)
+        LOG.info('Loaded extension: %s', alias)
         if alias in self.extensions:
             raise exception.Error("Found duplicate extension: %s" % alias)
@@ -182,7 +181,7 @@ class ExtensionManager(object):
                       ' '.join(extension.__doc__.strip().split()))
             LOG.debug('Ext updated: %s', extension.updated)
         except AttributeError:
-            LOG.exception(_LE("Exception loading extension."))
+            LOG.exception("Exception loading extension.")
             return False
         return True
@@ -214,8 +213,8 @@ class ExtensionManager(object):
             try:
                 self.load_extension(ext_factory)
             except Exception as exc:
-                LOG.warning(_LW('Failed to load extension %(ext_factory)s: '
-                                '%(exc)s'),
+                LOG.warning('Failed to load extension %(ext_factory)s: '
+                            '%(exc)s',
                             {'ext_factory': ext_factory, 'exc': exc})
@@ -288,8 +287,8 @@ def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
            try:
                ext_mgr.load_extension(classpath)
            except Exception as exc:
-               logger.warning(_LW('Failed to load extension %(classpath)s: '
-                                  '%(exc)s'),
+               logger.warning('Failed to load extension %(classpath)s: '
+                              '%(exc)s',
                               {'classpath': classpath, 'exc': exc})
     # Now, let's consider any subdirectories we may have...
@@ -313,8 +312,8 @@ def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
                 try:
                     ext(ext_mgr)
                 except Exception as exc:
-                    logger.warning(_LW('Failed to load extension '
-                                       '%(ext_name)s: %(exc)s'),
+                    logger.warning('Failed to load extension '
+                                   '%(ext_name)s: %(exc)s',
                                    {'ext_name': ext_name, 'exc': exc})
     # Update the list of directories we'll explore...

cinder/api/middleware/fault.py (6)

@@ -21,7 +21,7 @@ import webob.exc
 from cinder.api.openstack import wsgi
 from cinder import exception
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _
 from cinder import utils
 from cinder.wsgi import common as base_wsgi
@@ -44,7 +44,7 @@ class FaultWrapper(base_wsgi.Middleware):
     def _error(self, inner, req):
         if not isinstance(inner, exception.QuotaError):
-            LOG.exception(_LE("Caught error: %(type)s %(error)s"),
+            LOG.exception("Caught error: %(type)s %(error)s",
                           {'type': type(inner),
                            'error': inner})
         safe = getattr(inner, 'safe', False)
@@ -54,7 +54,7 @@ class FaultWrapper(base_wsgi.Middleware):
             status = 500
         msg_dict = dict(url=req.url, status=status)
-        LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict)
+        LOG.info("%(url)s returned with HTTP %(status)d", msg_dict)
         outer = self.status_to_type(status)
         if headers:
             outer.headers = headers

cinder/api/openstack/__init__.py (6)

@@ -23,7 +23,7 @@ from oslo_service import wsgi as base_wsgi
 import routes
 from cinder.api.openstack import wsgi
-from cinder.i18n import _, _LW
+from cinder.i18n import _
 LOG = logging.getLogger(__name__)
@@ -111,8 +111,8 @@ class APIRouter(base_wsgi.Router):
             controller = extension.controller
             if collection not in self.resources:
-                LOG.warning(_LW('Extension %(ext_name)s: Cannot extend '
-                                'resource %(collection)s: No such resource'),
+                LOG.warning('Extension %(ext_name)s: Cannot extend '
+                            'resource %(collection)s: No such resource',
                             {'ext_name': extension.extension.name,
                              'collection': collection})
                 continue

cinder/api/openstack/wsgi.py (19)

@@ -32,7 +32,7 @@ from cinder.api.openstack import api_version_request as api_version
 from cinder.api.openstack import versioned_method
 from cinder import exception
 from cinder import i18n
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _
 from cinder import policy
 from cinder import utils
 from cinder.wsgi import common as wsgi
@@ -602,15 +602,14 @@ class ResourceExceptionHandler(object):
                 code=ex_value.code, explanation=six.text_type(ex_value)))
         elif isinstance(ex_value, TypeError):
             exc_info = (ex_type, ex_value, ex_traceback)
-            LOG.error(_LE(
-                'Exception handling resource: %s'),
-                ex_value, exc_info=exc_info)
+            LOG.error('Exception handling resource: %s',
+                      ex_value, exc_info=exc_info)
             raise Fault(webob.exc.HTTPBadRequest())
         elif isinstance(ex_value, Fault):
-            LOG.info(_LI("Fault thrown: %s"), ex_value)
+            LOG.info("Fault thrown: %s", ex_value)
             raise ex_value
         elif isinstance(ex_value, webob.exc.HTTPException):
-            LOG.info(_LI("HTTP exception thrown: %s"), ex_value)
+            LOG.info("HTTP exception thrown: %s", ex_value)
             raise Fault(ex_value)
         # We didn't handle the exception
@@ -812,7 +811,7 @@ class Resource(wsgi.Application):
     def __call__(self, request):
         """WSGI method that controls (de)serialization and method dispatch."""
-        LOG.info(_LI("%(method)s %(url)s"),
+        LOG.info("%(method)s %(url)s",
                  {"method": request.method,
                   "url": request.url})
@@ -934,10 +933,10 @@ class Resource(wsgi.Application):
         try:
             msg_dict = dict(url=request.url, status=response.status_int)
-            msg = _LI("%(url)s returned with HTTP %(status)d")
+            msg = "%(url)s returned with HTTP %(status)d"
         except AttributeError as e:
             msg_dict = dict(url=request.url, e=e)
-            msg = _LI("%(url)s returned a fault: %(e)s")
+            msg = "%(url)s returned a fault: %(e)s"
         LOG.info(msg, msg_dict)
@@ -972,7 +971,7 @@ class Resource(wsgi.Application):
                                    'create',
                                    'delete',
                                    'update']):
-                    LOG.exception(_LE('Get method error.'))
+                    LOG.exception('Get method error.')
                 else:
                     ctxt.reraise = False
         else:

cinder/api/v2/snapshots.py (7)

@@ -25,7 +25,7 @@ from cinder.api import common
 from cinder.api.openstack import wsgi
 from cinder.api.views import snapshots as snapshot_views
 from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder import utils
 from cinder import volume
 from cinder.volume import utils as volume_utils
@@ -58,7 +58,7 @@ class SnapshotsController(wsgi.Controller):
         """Delete a snapshot."""
         context = req.environ['cinder.context']
-        LOG.info(_LI("Delete snapshot with id: %s"), id)
+        LOG.info("Delete snapshot with id: %s", id)
         # Not found exception will be handled at the wsgi level
         snapshot = self.volume_api.get_snapshot(context, id)
@@ -127,8 +127,7 @@ class SnapshotsController(wsgi.Controller):
         volume = self.volume_api.get(context, volume_id)
         force = snapshot.get('force', False)
-        msg = _LI("Create snapshot from volume %s")
-        LOG.info(msg, volume_id)
+        LOG.info("Create snapshot from volume %s", volume_id)
         self.validate_name_and_description(snapshot)
         # NOTE(thingee): v2 API allows name instead of display_name

cinder/api/v2/volumes.py (6)

@@ -28,7 +28,7 @@ from cinder.api.v2.views import volumes as volume_views
 from cinder import consistencygroup as consistencygroupAPI
 from cinder import exception
 from cinder import group as group_api
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder.image import glance
 from cinder import objects
 from cinder import utils
@@ -70,7 +70,7 @@ class VolumeController(wsgi.Controller):
         cascade = utils.get_bool_param('cascade', req.params)
-        LOG.info(_LI("Delete volume with id: %s"), id)
+        LOG.info("Delete volume with id: %s", id)
         # Not found exception will be handled at the wsgi level
         volume = self.volume_api.get(context, id)
@@ -257,7 +257,7 @@ class VolumeController(wsgi.Controller):
         elif size is None and kwargs['source_replica'] is not None:
             size = kwargs['source_replica']['size']
-        LOG.info(_LI("Create volume of %s GB"), size)
+        LOG.info("Create volume of %s GB", size)
         if self.ext_mgr.is_loaded('os-image-create'):
             image_ref = volume.get('imageRef')

cinder/api/v3/group_snapshots.py (6)

@@ -26,7 +26,7 @@ from cinder.api.openstack import wsgi
 from cinder.api.v3.views import group_snapshots as group_snapshot_views
 from cinder import exception
 from cinder import group as group_api
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder import rpc
 from cinder.volume import group_types
@@ -72,7 +72,7 @@ class GroupSnapshotsController(wsgi.Controller):
         LOG.debug('delete called for member %s', id)
         context = req.environ['cinder.context']
-        LOG.info(_LI('Delete group_snapshot with id: %s'), id, context=context)
+        LOG.info('Delete group_snapshot with id: %s', id, context=context)
         try:
             group_snapshot = self.group_snapshot_api.get_group_snapshot(
@@ -160,7 +160,7 @@ class GroupSnapshotsController(wsgi.Controller):
         name = group_snapshot.get('name', None)
         description = group_snapshot.get('description', None)
-        LOG.info(_LI("Creating group_snapshot %(name)s."),
+        LOG.info("Creating group_snapshot %(name)s.",
                  {'name': name},
                  context=context)

cinder/api/v3/groups.py (20)

@@ -26,7 +26,7 @@ from cinder.api.openstack import wsgi
 from cinder.api.v3.views import groups as views_groups
 from cinder import exception
 from cinder import group as group_api
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder import rpc
 from cinder.volume import group_types
@@ -134,7 +134,7 @@ class GroupsController(wsgi.Controller):
                    % del_vol)
             raise exc.HTTPBadRequest(explanation=msg)
-        LOG.info(_LI('Delete group with id: %s'), id,
+        LOG.info('Delete group with id: %s', id,
                  context=context)
         try:
@@ -217,7 +217,7 @@ class GroupsController(wsgi.Controller):
             raise exc.HTTPBadRequest(explanation=msg)
         availability_zone = group.get('availability_zone')
-        LOG.info(_LI("Creating group %(name)s."),
+        LOG.info("Creating group %(name)s.",
                  {'name': name},
                  context=context)
@@ -268,16 +268,16 @@ class GroupsController(wsgi.Controller):
         group_type_id = None
         if group_snapshot_id:
-            LOG.info(_LI("Creating group %(name)s from group_snapshot "
-                         "%(snap)s."),
+            LOG.info("Creating group %(name)s from group_snapshot "
+                     "%(snap)s.",
                      {'name': name, 'snap': group_snapshot_id},
                      context=context)
             grp_snap = self.group_api.get_group_snapshot(context,
                                                          group_snapshot_id)
             group_type_id = grp_snap.group_type_id
         elif source_group_id:
-            LOG.info(_LI("Creating group %(name)s from "
-                         "source group %(source_group_id)s."),
+            LOG.info("Creating group %(name)s from "
+                     "source group %(source_group_id)s.",
                      {'name': name, 'source_group_id': source_group_id},
                      context=context)
             source_group = self.group_api.get(context, source_group_id)
@@ -341,9 +341,9 @@ class GroupsController(wsgi.Controller):
                     "can not be all empty in the request body.")
             raise exc.HTTPBadRequest(explanation=msg)
-        LOG.info(_LI("Updating group %(id)s with name %(name)s "
-                     "description: %(description)s add_volumes: "
-                     "%(add_volumes)s remove_volumes: %(remove_volumes)s."),
+        LOG.info("Updating group %(id)s with name %(name)s "
+                 "description: %(description)s add_volumes: "
+                 "%(add_volumes)s remove_volumes: %(remove_volumes)s.",
                  {'id': id, 'name': name,
                   'description': description,
                   'add_volumes': add_volumes,

cinder/api/v3/volumes.py (8)

@@ -25,8 +25,8 @@ from cinder.api.v2 import volumes as volumes_v2
 from cinder.api.v3.views import volumes as volume_views_v3
 from cinder import exception
 from cinder import group as group_api
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder import objects
 import cinder.policy
 from cinder import utils
@@ -70,8 +70,8 @@ class VolumeController(volumes_v2.VolumeController):
         params = "(cascade: %(c)s, force: %(f)s)" % {'c': cascade,
                                                      'f': force}
-        msg = _LI("Delete volume with id: %(id)s %(params)s")
-        LOG.info(msg, {'id': id, 'params': params}, context=context)
+        LOG.info("Delete volume with id: %(id)s %(params)s",
+                 {'id': id, 'params': params}, context=context)
         if force:
             check_policy(context, 'force_delete')
@@ -264,7 +264,7 @@ class VolumeController(volumes_v2.VolumeController):
         elif size is None and kwargs['source_replica'] is not None:
             size = kwargs['source_replica']['size']
-        LOG.info(_LI("Create volume of %s GB"), size)
+        LOG.info("Create volume of %s GB", size)
         if self.ext_mgr.is_loaded('os-image-create'):
             image_ref = volume.get('imageRef')

cinder/backup/api.py (10)

@@ -33,7 +33,7 @@ from cinder.common import constants
 from cinder import context
 from cinder.db import base
 from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _
 from cinder import objects
 from cinder.objects import fields
 import cinder.policy
@@ -354,8 +354,8 @@ class API(base.Base):
             description = 'auto-created_from_restore_from_backup'
-            LOG.info(_LI("Creating volume of %(size)s GB for restore of "
-                         "backup %(backup_id)s."),
+            LOG.info("Creating volume of %(size)s GB for restore of "
+                     "backup %(backup_id)s.",
                      {'size': size, 'backup_id': backup_id})
             volume = self.volume_api.create(context, size, name, description)
             volume_id = volume['id']
@@ -380,8 +380,8 @@ class API(base.Base):
                    {'volume_size': volume['size'], 'size': size})
             raise exception.InvalidVolume(reason=msg)
-        LOG.info(_LI("Overwriting volume %(volume_id)s with restore of "
-                     "backup %(backup_id)s"),
+        LOG.info("Overwriting volume %(volume_id)s with restore of "
+                 "backup %(backup_id)s",
                  {'volume_id': volume_id, 'backup_id': backup_id})
         # Setting the status here rather than setting at start and unrolling

cinder/backup/chunkeddriver.py (16)

@@ -36,7 +36,7 @@ import six
 from cinder.backup import driver
 from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
+from cinder.i18n import _
 from cinder import objects
 from cinder.objects import fields
 from cinder.volume import utils as volume_utils
@@ -572,10 +572,9 @@ class ChunkedBackupDriver(driver.BackupDriver):
         try:
             self._backup_metadata(backup, object_meta)
         # Whatever goes wrong, we want to log, cleanup, and re-raise.
-        except Exception as err:
+        except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Backup volume metadata failed: %s."),
-                              err)
+                LOG.exception("Backup volume metadata failed.")
                 self.delete(backup)
         self._finalize_backup(backup, container, object_meta, object_sha256)
@@ -635,9 +634,8 @@ class ChunkedBackupDriver(driver.BackupDriver):
             try:
                 fileno = volume_file.fileno()
             except IOError:
-                LOG.info(_LI("volume_file does not support "
-                             "fileno() so skipping "
-                             "fsync()"))
+                LOG.info("volume_file does not support fileno() so skipping "
+                         "fsync()")
             else:
                 os.fsync(fileno)
@@ -722,8 +720,8 @@ class ChunkedBackupDriver(driver.BackupDriver):
         try:
             object_names = self._generate_object_names(backup)
         except Exception:
-            LOG.warning(_LW('Error while listing objects, continuing'
-                            ' with delete.'))
+            LOG.warning('Error while listing objects, continuing'
+                        ' with delete.')
         for object_name in object_names:
             self.delete_object(container, object_name)

cinder/backup/driver.py (22)

@@ -24,7 +24,7 @@ import six
 from cinder.db import base
 from cinder import exception
-from cinder.i18n import _, _LI, _LW
+from cinder.i18n import _
 from cinder import keymgr as key_manager
 service_opts = [
@@ -64,7 +64,7 @@ class BackupMetadataAPI(base.Base):
         try:
             jsonutils.dumps(value)
         except TypeError:
-            LOG.info(_LI("Value with type=%s is not serializable"),
+            LOG.info("Value with type=%s is not serializable",
                      type(value))
             return False
@@ -84,8 +84,8 @@ class BackupMetadataAPI(base.Base):
         for key, value in meta:
             # Exclude fields that are "not JSON serializable"
             if not self._is_serializable(value):
-                LOG.info(_LI("Unable to serialize field '%s' - excluding "
-                             "from backup"), key)
+                LOG.info("Unable to serialize field '%s' - excluding "
+                         "from backup", key)
                 continue
             # Copy the encryption key uuid for backup
             if key is 'encryption_key_id' and value is not None:
@@ -112,8 +112,8 @@ class BackupMetadataAPI(base.Base):
         for entry in meta:
             # Exclude fields that are "not JSON serializable"
             if not self._is_serializable(meta[entry]):
-                LOG.info(_LI("Unable to serialize field '%s' - excluding "
-                             "from backup"), entry)
+                LOG.info("Unable to serialize field '%s' - excluding "
+                         "from backup", entry)
                 continue
             container[type_tag][entry] = meta[entry]
@@ -136,8 +136,8 @@ class BackupMetadataAPI(base.Base):
         for entry in meta:
             # Exclude fields that are "not JSON serializable"
             if not self._is_serializable(entry.value):
-                LOG.info(_LI("Unable to serialize field '%s' - "
-                             "excluding from backup"), entry)
+                LOG.info("Unable to serialize field '%s' - "
+                         "excluding from backup", entry)
                 continue
             container[type_tag][entry.key] = entry.value
@@ -234,9 +234,9 @@ class BackupMetadataAPI(base.Base):
             else:
                 # Volume type id's do not match, and destination volume
                 # has a volume type. Throw exception.
-                LOG.warning(_LW("Destination volume type is different from "
-                                "source volume type for an encrypted volume. "
-                                "Encrypted backup restore has failed."))
+                LOG.warning("Destination volume type is different from "
+                            "source volume type for an encrypted volume. "
+                            "Encrypted backup restore has failed.")
                 msg = (_("The source volume type '%(src)s' is different "
                          "than the destination volume type '%(dest)s'.") %
                        {'src': src_volume_type_id,

cinder/backup/drivers/ceph.py (58)

@@ -58,7 +58,7 @@ from six.moves import range
 from cinder.backup import driver
 from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
+from cinder.i18n import _
 from cinder import interface
 from cinder import utils
 import cinder.volume.drivers.rbd as rbd_driver
@@ -181,8 +181,8 @@ class CephBackupDriver(driver.BackupDriver):
             self.rbd_stripe_unit = CONF.backup_ceph_stripe_unit
             self.rbd_stripe_count = CONF.backup_ceph_stripe_count
         else:
-            LOG.info(_LI("RBD striping not supported - ignoring configuration "
-                         "settings for rbd striping"))
+            LOG.info("RBD striping not supported - ignoring configuration "
+                     "settings for rbd striping.")
             self.rbd_stripe_count = 0
             self.rbd_stripe_unit = 0
@@ -258,8 +258,8 @@ class CephBackupDriver(driver.BackupDriver):
             # moved to the driver's initialization so that it can stop
             # the service from starting when the underyling RBD does not
             # support the requested features.
-            LOG.error(_LE("RBD journaling not supported - unable to "
-                          "support per image mirroring in backup pool"))
+            LOG.error("RBD journaling not supported - unable to "
+                      "support per image mirroring in backup pool")
             raise exception.BackupInvalidCephArgs(
                 _("Image Journaling set but RBD backend does "
                   "not support journaling")
@@ -468,14 +468,14 @@ class CephBackupDriver(driver.BackupDriver):
                                              backup.id)
             if rem:
                 LOG.info(
-                    _LI("Backup base image of volume %(volume)s still "
-                        "has %(snapshots)s snapshots so skipping base "
-                        "image delete."),
+                    "Backup base image of volume %(volume)s still "
+                    "has %(snapshots)s snapshots so skipping base "
+                    "image delete.",
                     {'snapshots': rem, 'volume': volume_id})
                 return
-            LOG.info(_LI("Deleting backup base image='%(basename)s' of "
-                         "volume %(volume)s."),
+            LOG.info("Deleting backup base image='%(basename)s' of "
+                     "volume %(volume)s.",
                      {'basename': base_name, 'volume': volume_id})
             # Delete base if no more snapshots
             try:
@@ -483,17 +483,16 @@ class CephBackupDriver(driver.BackupDriver):
             except self.rbd.ImageBusy:
                 # Allow a retry if the image is busy
                 if retries > 0:
-                    LOG.info(_LI("Backup image of volume %(volume)s is "
-                                 "busy, retrying %(retries)s more time(s) "
-                                 "in %(delay)ss."),
+                    LOG.info("Backup image of volume %(volume)s is "
+                             "busy, retrying %(retries)s more time(s) "
+                             "in %(delay)ss.",
                              {'retries': retries,
                               'delay': delay,
                               'volume': volume_id})
                     eventlet.sleep(delay)
                 else:
-                    LOG.error(_LE("Max retries reached deleting backup "
-                                  "%(basename)s image of volume "
-                                  "%(volume)s."),
+                    LOG.error("Max retries reached deleting backup "
+                              "%(basename)s image of volume %(volume)s.",
                               {'volume': volume_id,
                                'basename': base_name})
                     raise
@@ -527,7 +526,7 @@ class CephBackupDriver(driver.BackupDriver):
             p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
         except OSError as e:
-            LOG.error(_LE("Pipe1 failed - %s "), e)
+            LOG.error("Pipe1 failed - %s ", e)
             raise
         # NOTE(dosaboy): ensure that the pipe is blocking. This is to work
@@ -541,7 +540,7 @@ class CephBackupDriver(driver.BackupDriver):
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
         except OSError as e:
-            LOG.error(_LE("Pipe2 failed - %s "), e)
+            LOG.error("Pipe2 failed - %s ", e)
             raise
         p1.stdout.close()
@@ -1005,8 +1004,7 @@ class CephBackupDriver(driver.BackupDriver):
                                   dest_user=rbd_user, dest_conf=rbd_conf,
                                   src_snap=restore_point)
         except exception.BackupRBDOperationFailed:
-            LOG.exception(_LE("Differential restore failed, trying full "
-                              "restore"))
+            LOG.exception("Differential restore failed, trying full restore")
             raise
         # If the volume we are restoring to is larger than the backup volume,
@@ -1108,10 +1106,9 @@ class CephBackupDriver(driver.BackupDriver):
             else:
                 LOG.debug("Volume file is NOT RBD.")
         else:
-            LOG.info(_LI("No restore point found for backup="
-                         "'%(backup)s' of volume %(volume)s "
-                         "although base image is found - "
-                         "forcing full copy."),
+            LOG.info("No restore point found for backup='%(backup)s' of "
+                     "volume %(volume)s although base image is found - "
+                     "forcing full copy.",
                      {'backup': backup.id,
                       'volume': backup.volume_id})
         return False, restore_point
@@ -1196,8 +1193,8 @@ class CephBackupDriver(driver.BackupDriver):
             LOG.debug('Restore to volume %s finished successfully.',
                       volume_id)
         except exception.BackupOperationError as e:
-            LOG.error(_LE('Restore to volume %(volume)s finished with error - '
-                          '%(error)s.'), {'error': e, 'volume': volume_id})
+            LOG.error('Restore to volume %(volume)s finished with error - '
+                      '%(error)s.', {'error': e, 'volume': volume_id})
             raise
     def delete(self, backup):
@@ -1209,8 +1206,8 @@ class CephBackupDriver(driver.BackupDriver):
             self._try_delete_base_image(backup)
         except self.rbd.ImageNotFound:
             LOG.warning(
-                _LW("RBD image for backup %(backup)s of volume %(volume)s "
-                    "not found. Deleting backup metadata."),
+                "RBD image for backup %(backup)s of volume %(volume)s "
+                "not found. Deleting backup metadata.",
                 {'backup': backup.id, 'volume': backup.volume_id})
             delete_failed = True
@@ -1218,9 +1215,8 @@ class CephBackupDriver(driver.BackupDriver):
             VolumeMetadataBackup(client, backup.id).remove_if_exists()
         if delete_failed:
-            LOG.info(_LI("Delete of backup '%(backup)s' "
-                         "for volume '%(volume)s' "
-                         "finished with warning."),
+            LOG.info("Delete of backup '%(backup)s' for volume '%(volume)s' "
+                     "finished with warning.",
                      {'backup': backup.id, 'volume': backup.volume_id})
         else:
             LOG.debug("Delete of backup '%(backup)s' for volume "

cinder/backup/drivers/swift.py (5)

@@ -55,7 +55,6 @@ from swiftclient import client as swift
 from cinder.backup import chunkeddriver
 from cinder import exception
 from cinder.i18n import _
-from cinder.i18n import _LE
 from cinder import interface
 LOG = logging.getLogger(__name__)
@@ -215,8 +214,8 @@ class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver):
         self.backup_swift_auth_insecure = CONF.backup_swift_auth_insecure
         if CONF.backup_swift_auth == 'single_user':
             if CONF.backup_swift_user is None:
-                LOG.error(_LE("single_user auth mode enabled, "
-                              "but %(param)s not set"),
+                LOG.error("single_user auth mode enabled, "
+                          "but %(param)s not set",
                           {'param': 'backup_swift_user'})
                 raise exception.ParameterNotFound(param='backup_swift_user')
             os_options = {}

cinder/backup/drivers/tsm.py (12)

@@ -35,7 +35,7 @@ from oslo_log import log as logging
 from cinder.backup import driver
 from cinder import exception
-from cinder.i18n import _LE, _
+from cinder.i18n import _
 from cinder import interface
 from cinder import utils
@@ -250,9 +250,9 @@ def _cleanup_device_hardlink(hardlink_path, volume_path, volume_id):
                       hardlink_path,
                       run_as_root=True)
     except processutils.ProcessExecutionError as exc:
-        LOG.error(_LE('backup: %(vol_id)s failed to remove backup hardlink '
-                      'from %(vpath)s to %(bpath)s.\n'
-                      'stdout: %(out)s\n stderr: %(err)s.'),
+        LOG.error('backup: %(vol_id)s failed to remove backup hardlink '
+                  'from %(vpath)s to %(bpath)s.\n'
+                  'stdout: %(out)s\n stderr: %(err)s.',
                   {'vol_id': volume_id,
                    'vpath': volume_path,
                    'bpath': hardlink_path,
@@ -523,8 +523,8 @@ class TSMBackupDriver(driver.BackupDriver):
             # log error if tsm cannot delete the backup object
             # but do not raise exception so that cinder backup
             # object can be removed.
-            LOG.error(_LE('delete: %(vol_id)s failed with '
-                          'stdout: %(out)s\n stderr: %(err)s'),
+            LOG.error('delete: %(vol_id)s failed with '
+                      'stdout: %(out)s\n stderr: %(err)s',
                       {'vol_id': backup.volume_id,
                        'out': out,
                        'err': err})

cinder/backup/manager.py (110)

@@ -42,7 +42,7 @@ from cinder.backup import driver
 from cinder.backup import rpcapi as backup_rpcapi
 from cinder import context
 from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
+from cinder.i18n import _
 from cinder import manager
 from cinder import objects
 from cinder.objects import fields
@@ -117,7 +117,7 @@ class BackupManager(manager.ThreadPoolManager):
                 LOG.debug("Got backend '%s'.", backend)
                 return backend
-        LOG.info(_LI("Backend not found in hostname (%s) so using default."),
+        LOG.info("Backend not found in hostname (%s) so using default.",
                  host)
         if 'default' not in self.volume_managers:
@@ -168,15 +168,15 @@ class BackupManager(manager.ThreadPoolManager):
         self.volume_managers['default'] = default
     def _init_volume_driver(self, ctxt, driver):
-        LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)."),
+        LOG.info("Starting volume driver %(driver_name)s (%(version)s).",
                  {'driver_name': driver.__class__.__name__,
                   'version': driver.get_version()})
         try:
             driver.do_setup(ctxt)
             driver.check_for_setup_error()
         except Exception:
-            LOG.exception(_LE("Error encountered during initialization of "
-                              "driver: %(name)s."),
+            LOG.exception("Error encountered during initialization of "
+                          "driver: %(name)s.",
                           {'name': driver.__class__.__name__})
             # we don't want to continue since we failed
             # to initialize the driver correctly.
@@ -213,8 +213,7 @@ class BackupManager(manager.ThreadPoolManager):
             self._cleanup_incomplete_backup_operations(ctxt)
         except Exception:
             # Don't block startup of the backup service.
-            LOG.exception(_LE("Problem cleaning incomplete backup "
-                              "operations."))
+            LOG.exception("Problem cleaning incomplete backup operations.")
     def reset(self):
         super(BackupManager, self).reset()
@@ -222,7 +221,7 @@ class BackupManager(manager.ThreadPoolManager):
         self.volume_rpcapi = volume_rpcapi.VolumeAPI()
     def _cleanup_incomplete_backup_operations(self, ctxt):
-        LOG.info(_LI("Cleaning up incomplete backup operations."))
+        LOG.info("Cleaning up incomplete backup operations.")
         # TODO(smulcahy) implement full resume of backup and restore
         # operations on restart (rather than simply resetting)
@@ -231,35 +230,35 @@ class BackupManager(manager.ThreadPoolManager):
             try:
                 self._cleanup_one_backup(ctxt, backup)
             except Exception:
-                LOG.exception(_LE("Problem cleaning up backup %(bkup)s."),
+                LOG.exception("Problem cleaning up backup %(bkup)s.",
                               {'bkup': backup['id']})
             try:
                 self._cleanup_temp_volumes_snapshots_for_one_backup(ctxt,
                                                                     backup)
             except Exception:
-                LOG.exception(_LE("Problem cleaning temp volumes and "
-                                  "snapshots for backup %(bkup)s."),
+                LOG.exception("Problem cleaning temp volumes and "
+                              "snapshots for backup %(bkup)s.",
                               {'bkup': backup['id']})
     def _cleanup_one_volume(self, ctxt, volume):
         if volume['status'] == 'backing-up':
             self._detach_all_attachments(ctxt, volume)
-            LOG.info(_LI('Resetting volume %(vol_id)s to previous '
-                         'status %(status)s (was backing-up).'),
+            LOG.info('Resetting volume %(vol_id)s to previous '
+                     'status %(status)s (was backing-up).',
                      {'vol_id': volume['id'],
                      'status': volume['previous_status']})
             self.db.volume_update(ctxt, volume['id'],
                                   {'status': volume['previous_status']})
         elif volume['status'] == 'restoring-backup':
             self._detach_all_attachments(ctxt, volume)
-            LOG.info(_LI('setting volume %s to error_restoring '
-                         '(was restoring-backup).'), volume['id'])
+            LOG.info('Setting volume %s to error_restoring '
+                     '(was restoring-backup).', volume['id'])
            self.db.volume_update(ctxt, volume['id'],
                                  {'status': 'error_restoring'})
     def _cleanup_one_backup(self, ctxt, backup):
         if backup['status'] == fields.BackupStatus.CREATING:
-            LOG.info(_LI('Resetting backup %s to error (was creating).'),
+            LOG.info('Resetting backup %s to error (was creating).',
                      backup['id'])
             volume = objects.Volume.get_by_id(ctxt, backup.volume_id)
@@ -268,8 +267,8 @@ class BackupManager(manager.ThreadPoolManager):
             err = 'incomplete backup reset on manager restart'
             self._update_backup_error(backup, err)
         elif backup['status'] == fields.BackupStatus.RESTORING:
-            LOG.info(_LI('Resetting backup %s to '
-                         'available (was restoring).'),
+            LOG.info('Resetting backup %s to '
+                     'available (was restoring).',
                      backup['id'])
             volume = objects.Volume.get_by_id(ctxt, backup.restore_volume_id)
             self._cleanup_one_volume(ctxt, volume)
@@ -277,7 +276,7 @@ class BackupManager(manager.ThreadPoolManager):
             backup.status = fields.BackupStatus.AVAILABLE
             backup.save()
         elif backup['status'] == fields.BackupStatus.DELETING:
-            LOG.info(_LI('Resuming delete on backup: %s.'), backup['id'])
+            LOG.info('Resuming delete on backup: %s.', backup['id'])
             if CONF.backup_service_inithost_offload:
                 # Offload all the pending backup delete operations to the
                 # threadpool to prevent the main backup service thread
@@ -296,8 +295,7 @@ class BackupManager(manager.ThreadPoolManager):