Remove log translations

Log messages are no longer being translated. This removes all use of
the _LE, _LI, and _LW translation markers to simplify logging and to
avoid confusion with new contributions.

See:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html

Change-Id: I4c96f3590d46205c45d12ee4ead8c208e11c52c5
Sean McGinnis 2017-03-09 15:49:01 -06:00 committed by Sean McGinnis
parent 3a6c184d52
commit a55a6b5c71
233 changed files with 4768 additions and 5071 deletions
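The change is mechanical and repeats throughout the diff below: the translation marker is dropped from the logging call, the format arguments stay separate so oslo.log still performs lazy %-interpolation, and the plain _() marker is kept for user-facing strings such as exception messages. A minimal sketch of the before/after pattern (the delete_volume() wrapper and volume_id are illustrative stand-ins, not code from this commit):

    from oslo_log import log as logging
    from cinder.i18n import _  # still imported: user-facing messages remain translated

    LOG = logging.getLogger(__name__)

    def delete_volume(volume_id):
        # Before: LOG.info(_LI("Delete volume with id: %s"), volume_id)
        # After: the _LI marker is gone; the format argument is still passed
        # separately so interpolation only happens if the record is emitted.
        LOG.info("Delete volume with id: %s", volume_id)
        if volume_id is None:
            # _() is unchanged for messages returned to API users.
            raise ValueError(_("volume_id is required"))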

View File

@@ -19,8 +19,6 @@ from oslo_config import cfg
from oslo_log import log as logging
import paste.urlmap
-from cinder.i18n import _LW
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -28,7 +26,7 @@ LOG = logging.getLogger(__name__)
def root_app_factory(loader, global_conf, **local_conf):
if CONF.enable_v1_api:
-LOG.warning(_LW('The v1 api is deprecated and is not under active '
+LOG.warning('The v1 api is deprecated and is not under active '
'development. You should set enable_v1_api=false '
-'and enable_v3_api=true in your cinder.conf file.'))
+'and enable_v3_api=true in your cinder.conf file.')
return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf)

View File

@@ -28,7 +28,7 @@ from cinder.api.openstack import wsgi
from cinder.api.views import backups as backup_views
from cinder import backup as backupAPI
from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _
from cinder import utils
LOG = logging.getLogger(__name__)
@@ -59,7 +59,7 @@ class BackupsController(wsgi.Controller):
LOG.debug('Delete called for member %s.', id)
context = req.environ['cinder.context']
-LOG.info(_LI('Delete backup with id: %s'), id)
+LOG.info('Delete backup with id: %s', id)
try:
backup = self.backup_api.get(context, id)
@@ -141,8 +141,8 @@ class BackupsController(wsgi.Controller):
incremental = backup.get('incremental', False)
force = backup.get('force', False)
snapshot_id = backup.get('snapshot_id', None)
LOG.info(_LI("Creating backup of volume %(volume_id)s in container"
" %(container)s"),
LOG.info("Creating backup of volume %(volume_id)s in container"
" %(container)s",
{'volume_id': volume_id, 'container': container},
context=context)
@@ -173,7 +173,7 @@ class BackupsController(wsgi.Controller):
volume_id = restore.get('volume_id', None)
name = restore.get('name', None)
LOG.info(_LI("Restoring backup %(backup_id)s to volume %(volume_id)s"),
LOG.info("Restoring backup %(backup_id)s to volume %(volume_id)s",
{'backup_id': id, 'volume_id': volume_id},
context=context)

View File

@@ -28,7 +28,7 @@ from cinder.api.views import cgsnapshots as cgsnapshot_views
from cinder import consistencygroup as consistencygroup_api
from cinder import exception
from cinder import group as group_api
-from cinder.i18n import _, _LI
+from cinder.i18n import _
from cinder.objects import cgsnapshot as cgsnap_obj
from cinder.objects import consistencygroup as cg_obj
from cinder.objects import group as grp_obj
@@ -62,7 +62,7 @@ class CgsnapshotsController(wsgi.Controller):
LOG.debug('delete called for member %s', id)
context = req.environ['cinder.context']
-LOG.info(_LI('Delete cgsnapshot with id: %s'), id)
+LOG.info('Delete cgsnapshot with id: %s', id)
try:
cgsnapshot = self._get_cgsnapshot(context, id)
@@ -167,7 +167,7 @@ class CgsnapshotsController(wsgi.Controller):
name = cgsnapshot.get('name', None)
description = cgsnapshot.get('description', None)
LOG.info(_LI("Creating cgsnapshot %(name)s."),
LOG.info("Creating cgsnapshot %(name)s.",
{'name': name},
context=context)

View File

@@ -28,7 +28,7 @@ from cinder.api.views import consistencygroups as consistencygroup_views
from cinder import consistencygroup as consistencygroup_api
from cinder import exception
from cinder import group as group_api
-from cinder.i18n import _, _LI
+from cinder.i18n import _
from cinder.objects import cgsnapshot as cgsnap_obj
from cinder.objects import consistencygroup as cg_obj
from cinder.objects import group as grp_obj
@@ -77,7 +77,7 @@ class ConsistencyGroupsController(wsgi.Controller):
msg = _("Invalid value '%s' for force.") % force
raise exc.HTTPBadRequest(explanation=msg)
-LOG.info(_LI('Delete consistency group with id: %s'), id)
+LOG.info('Delete consistency group with id: %s', id)
try:
group = self._get(context, id)
@@ -181,7 +181,7 @@ class ConsistencyGroupsController(wsgi.Controller):
group_types.DEFAULT_CGSNAPSHOT_TYPE)
raise exc.HTTPBadRequest(explanation=msg)
LOG.info(_LI("Creating consistency group %(name)s."),
LOG.info("Creating consistency group %(name)s.",
{'name': name})
try:
@@ -232,12 +232,12 @@ class ConsistencyGroupsController(wsgi.Controller):
raise exc.HTTPBadRequest(explanation=msg)
if cgsnapshot_id:
LOG.info(_LI("Creating consistency group %(name)s from "
"cgsnapshot %(snap)s."),
LOG.info("Creating consistency group %(name)s from "
"cgsnapshot %(snap)s.",
{'name': name, 'snap': cgsnapshot_id})
elif source_cgid:
LOG.info(_LI("Creating consistency group %(name)s from "
"source consistency group %(source_cgid)s."),
LOG.info("Creating consistency group %(name)s from "
"source consistency group %(source_cgid)s.",
{'name': name, 'source_cgid': source_cgid})
try:
@@ -282,9 +282,9 @@ class ConsistencyGroupsController(wsgi.Controller):
def _update(self, context, id, name, description, add_volumes,
remove_volumes,
allow_empty=False):
LOG.info(_LI("Updating consistency group %(id)s with name %(name)s "
LOG.info("Updating consistency group %(id)s with name %(name)s "
"description: %(description)s add_volumes: "
"%(add_volumes)s remove_volumes: %(remove_volumes)s."),
"%(add_volumes)s remove_volumes: %(remove_volumes)s.",
{'id': id,
'name': name,
'description': description,

View File

@@ -25,7 +25,7 @@ from cinder.api.openstack import wsgi
from cinder.common import constants
from cinder import db
from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _
from cinder import objects
from cinder.volume import api as volume_api
@@ -120,7 +120,7 @@ class HostController(wsgi.Controller):
"""Sets the specified host's ability to accept new volumes."""
context = req.environ['cinder.context']
state = "enabled" if enabled else "disabled"
LOG.info(_LI("Setting host %(host)s to %(state)s."),
LOG.info("Setting host %(host)s to %(state)s.",
{'host': host, 'state': state})
result = self.api.set_host_enabled(context,
host=host,

View File

@@ -18,7 +18,7 @@ import webob
from cinder.api import extensions
from cinder.api.openstack import wsgi
-from cinder.i18n import _, _LI
+from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
@@ -94,7 +94,7 @@ class SnapshotActionsController(wsgi.Controller):
update_dict.update({'progress': progress})
LOG.info(_LI("Updating snapshot %(id)s with info %(dict)s"),
LOG.info("Updating snapshot %(id)s with info %(dict)s",
{'id': id, 'dict': update_dict})
current_snapshot.update(update_dict)

View File

@@ -20,7 +20,6 @@ from webob import exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import exception
-from cinder.i18n import _LI
from cinder import volume
LOG = logging.getLogger(__name__)
@@ -49,7 +48,7 @@ class SnapshotUnmanageController(wsgi.Controller):
context = req.environ['cinder.context']
authorize(context)
LOG.info(_LI("Unmanage snapshot with id: %s"), id)
LOG.info("Unmanage snapshot with id: %s", id)
try:
snapshot = self.volume_api.get_snapshot(context, id)

View File

@@ -27,7 +27,7 @@ from cinder.api.openstack import wsgi
from cinder import context as ctxt
from cinder import db
from cinder import exception
-from cinder.i18n import _, _LW
+from cinder.i18n import _
from cinder import rpc
from cinder import utils
from cinder.volume import volume_types
@@ -80,7 +80,7 @@ class VolumeTypeExtraSpecsController(wsgi.Controller):
expl = _('Volume Type is currently in use.')
raise webob.exc.HTTPBadRequest(explanation=expl)
else:
msg = _LW("The option 'allow_inuse_volume_type_modification' "
msg = ("The option 'allow_inuse_volume_type_modification' "
"is deprecated and will be removed in a future "
"release. The default behavior going forward will "
"be to disallow modificaton of in-use types.")

View File

@@ -23,7 +23,7 @@ from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import transfers as transfer_view
from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _
from cinder import transfer as transferAPI
LOG = logging.getLogger(__name__)
@@ -95,7 +95,7 @@ class VolumeTransferController(wsgi.Controller):
remove_whitespaces=True)
name = name.strip()
LOG.info(_LI("Creating transfer of volume %s"),
LOG.info("Creating transfer of volume %s",
volume_id)
try:
@@ -124,7 +124,7 @@ class VolumeTransferController(wsgi.Controller):
msg = _("Incorrect request body format")
raise exc.HTTPBadRequest(explanation=msg)
LOG.info(_LI("Accepting transfer %s"), transfer_id)
LOG.info("Accepting transfer %s", transfer_id)
try:
accepted_transfer = self.transfer_api.accept(context, transfer_id,
@@ -144,7 +144,7 @@ class VolumeTransferController(wsgi.Controller):
"""Delete a transfer."""
context = req.environ['cinder.context']
LOG.info(_LI("Delete transfer with id: %s"), id)
LOG.info("Delete transfer with id: %s", id)
# Not found exception will be handled at the wsgi level
self.transfer_api.delete(context, transfer_id=id)

View File

@@ -18,7 +18,6 @@ import webob
from cinder.api import extensions
from cinder.api.openstack import wsgi
-from cinder.i18n import _LI
from cinder import volume
LOG = logging.getLogger(__name__)
@@ -50,7 +49,7 @@ class VolumeUnmanageController(wsgi.Controller):
context = req.environ['cinder.context']
authorize(context)
LOG.info(_LI("Unmanage volume with id: %s"), id)
LOG.info("Unmanage volume with id: %s", id)
# Not found exception will be handled at the wsgi level
vol = self.volume_api.get(context, id)

View File

@@ -25,7 +25,6 @@ import webob.exc
import cinder.api.openstack
from cinder.api.openstack import wsgi
from cinder import exception
-from cinder.i18n import _LE, _LI, _LW
import cinder.policy
@@ -123,7 +122,7 @@ class ExtensionManager(object):
"""
def __init__(self):
-LOG.info(_LI('Initializing extension manager.'))
+LOG.info('Initializing extension manager.')
self.cls_list = CONF.osapi_volume_extension
self.extensions = {}
@@ -138,7 +137,7 @@ class ExtensionManager(object):
return
alias = ext.alias
-LOG.info(_LI('Loaded extension: %s'), alias)
+LOG.info('Loaded extension: %s', alias)
if alias in self.extensions:
raise exception.Error("Found duplicate extension: %s" % alias)
@@ -182,7 +181,7 @@ class ExtensionManager(object):
' '.join(extension.__doc__.strip().split()))
LOG.debug('Ext updated: %s', extension.updated)
except AttributeError:
LOG.exception(_LE("Exception loading extension."))
LOG.exception("Exception loading extension.")
return False
return True
@@ -214,8 +213,8 @@ class ExtensionManager(object):
try:
self.load_extension(ext_factory)
except Exception as exc:
-LOG.warning(_LW('Failed to load extension %(ext_factory)s: '
-'%(exc)s'),
+LOG.warning('Failed to load extension %(ext_factory)s: '
+'%(exc)s',
{'ext_factory': ext_factory, 'exc': exc})
@@ -288,8 +287,8 @@ def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
try:
ext_mgr.load_extension(classpath)
except Exception as exc:
-logger.warning(_LW('Failed to load extension %(classpath)s: '
-'%(exc)s'),
+logger.warning('Failed to load extension %(classpath)s: '
+'%(exc)s',
{'classpath': classpath, 'exc': exc})
# Now, let's consider any subdirectories we may have...
@@ -313,8 +312,8 @@ def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
try:
ext(ext_mgr)
except Exception as exc:
-logger.warning(_LW('Failed to load extension '
-'%(ext_name)s: %(exc)s'),
+logger.warning('Failed to load extension '
+'%(ext_name)s: %(exc)s',
{'ext_name': ext_name, 'exc': exc})
# Update the list of directories we'll explore...

View File

@@ -21,7 +21,7 @@ import webob.exc
from cinder.api.openstack import wsgi
from cinder import exception
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _
from cinder import utils
from cinder.wsgi import common as base_wsgi
@@ -44,7 +44,7 @@ class FaultWrapper(base_wsgi.Middleware):
def _error(self, inner, req):
if not isinstance(inner, exception.QuotaError):
LOG.exception(_LE("Caught error: %(type)s %(error)s"),
LOG.exception("Caught error: %(type)s %(error)s",
{'type': type(inner),
'error': inner})
safe = getattr(inner, 'safe', False)
@@ -54,7 +54,7 @@ class FaultWrapper(base_wsgi.Middleware):
status = 500
msg_dict = dict(url=req.url, status=status)
LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict)
LOG.info("%(url)s returned with HTTP %(status)d", msg_dict)
outer = self.status_to_type(status)
if headers:
outer.headers = headers

View File

@@ -23,7 +23,7 @@ from oslo_service import wsgi as base_wsgi
import routes
from cinder.api.openstack import wsgi
-from cinder.i18n import _, _LW
+from cinder.i18n import _
LOG = logging.getLogger(__name__)
@@ -111,8 +111,8 @@ class APIRouter(base_wsgi.Router):
controller = extension.controller
if collection not in self.resources:
-LOG.warning(_LW('Extension %(ext_name)s: Cannot extend '
-'resource %(collection)s: No such resource'),
+LOG.warning('Extension %(ext_name)s: Cannot extend '
+'resource %(collection)s: No such resource',
{'ext_name': extension.extension.name,
'collection': collection})
continue

View File

@@ -32,7 +32,7 @@ from cinder.api.openstack import api_version_request as api_version
from cinder.api.openstack import versioned_method
from cinder import exception
from cinder import i18n
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _
from cinder import policy
from cinder import utils
from cinder.wsgi import common as wsgi
@@ -602,15 +602,14 @@ class ResourceExceptionHandler(object):
code=ex_value.code, explanation=six.text_type(ex_value)))
elif isinstance(ex_value, TypeError):
exc_info = (ex_type, ex_value, ex_traceback)
-LOG.error(_LE(
-'Exception handling resource: %s'),
+LOG.error('Exception handling resource: %s',
ex_value, exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest())
elif isinstance(ex_value, Fault):
LOG.info(_LI("Fault thrown: %s"), ex_value)
LOG.info("Fault thrown: %s", ex_value)
raise ex_value
elif isinstance(ex_value, webob.exc.HTTPException):
LOG.info(_LI("HTTP exception thrown: %s"), ex_value)
LOG.info("HTTP exception thrown: %s", ex_value)
raise Fault(ex_value)
# We didn't handle the exception
@@ -812,7 +811,7 @@ class Resource(wsgi.Application):
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
LOG.info(_LI("%(method)s %(url)s"),
LOG.info("%(method)s %(url)s",
{"method": request.method,
"url": request.url})
@@ -934,10 +933,10 @@ class Resource(wsgi.Application):
try:
msg_dict = dict(url=request.url, status=response.status_int)
msg = _LI("%(url)s returned with HTTP %(status)d")
msg = "%(url)s returned with HTTP %(status)d"
except AttributeError as e:
msg_dict = dict(url=request.url, e=e)
msg = _LI("%(url)s returned a fault: %(e)s")
msg = "%(url)s returned a fault: %(e)s"
LOG.info(msg, msg_dict)
@@ -972,7 +971,7 @@ class Resource(wsgi.Application):
'create',
'delete',
'update']):
-LOG.exception(_LE('Get method error.'))
+LOG.exception('Get method error.')
else:
ctxt.reraise = False
else:

View File

@@ -25,7 +25,7 @@ from cinder.api import common
from cinder.api.openstack import wsgi
from cinder.api.views import snapshots as snapshot_views
from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _
from cinder import utils
from cinder import volume
from cinder.volume import utils as volume_utils
@@ -58,7 +58,7 @@ class SnapshotsController(wsgi.Controller):
"""Delete a snapshot."""
context = req.environ['cinder.context']
LOG.info(_LI("Delete snapshot with id: %s"), id)
LOG.info("Delete snapshot with id: %s", id)
# Not found exception will be handled at the wsgi level
snapshot = self.volume_api.get_snapshot(context, id)
@@ -127,8 +127,7 @@ class SnapshotsController(wsgi.Controller):
volume = self.volume_api.get(context, volume_id)
force = snapshot.get('force', False)
msg = _LI("Create snapshot from volume %s")
LOG.info(msg, volume_id)
LOG.info("Create snapshot from volume %s", volume_id)
self.validate_name_and_description(snapshot)
# NOTE(thingee): v2 API allows name instead of display_name

View File

@@ -28,7 +28,7 @@ from cinder.api.v2.views import volumes as volume_views
from cinder import consistencygroup as consistencygroupAPI
from cinder import exception
from cinder import group as group_api
-from cinder.i18n import _, _LI
+from cinder.i18n import _
from cinder.image import glance
from cinder import objects
from cinder import utils
@@ -70,7 +70,7 @@ class VolumeController(wsgi.Controller):
cascade = utils.get_bool_param('cascade', req.params)
LOG.info(_LI("Delete volume with id: %s"), id)
LOG.info("Delete volume with id: %s", id)
# Not found exception will be handled at the wsgi level
volume = self.volume_api.get(context, id)
@@ -257,7 +257,7 @@ class VolumeController(wsgi.Controller):
elif size is None and kwargs['source_replica'] is not None:
size = kwargs['source_replica']['size']
LOG.info(_LI("Create volume of %s GB"), size)
LOG.info("Create volume of %s GB", size)
if self.ext_mgr.is_loaded('os-image-create'):
image_ref = volume.get('imageRef')

View File

@@ -26,7 +26,7 @@ from cinder.api.openstack import wsgi
from cinder.api.v3.views import group_snapshots as group_snapshot_views
from cinder import exception
from cinder import group as group_api
-from cinder.i18n import _, _LI
+from cinder.i18n import _
from cinder import rpc
from cinder.volume import group_types
@@ -72,7 +72,7 @@ class GroupSnapshotsController(wsgi.Controller):
LOG.debug('delete called for member %s', id)
context = req.environ['cinder.context']
-LOG.info(_LI('Delete group_snapshot with id: %s'), id, context=context)
+LOG.info('Delete group_snapshot with id: %s', id, context=context)
try:
group_snapshot = self.group_snapshot_api.get_group_snapshot(
@@ -160,7 +160,7 @@ class GroupSnapshotsController(wsgi.Controller):
name = group_snapshot.get('name', None)
description = group_snapshot.get('description', None)
LOG.info(_LI("Creating group_snapshot %(name)s."),
LOG.info("Creating group_snapshot %(name)s.",
{'name': name},
context=context)

View File

@@ -26,7 +26,7 @@ from cinder.api.openstack import wsgi
from cinder.api.v3.views import groups as views_groups
from cinder import exception
from cinder import group as group_api
-from cinder.i18n import _, _LI
+from cinder.i18n import _
from cinder import rpc
from cinder.volume import group_types
@@ -134,7 +134,7 @@ class GroupsController(wsgi.Controller):
% del_vol)
raise exc.HTTPBadRequest(explanation=msg)
-LOG.info(_LI('Delete group with id: %s'), id,
+LOG.info('Delete group with id: %s', id,
context=context)
try:
@@ -217,7 +217,7 @@ class GroupsController(wsgi.Controller):
raise exc.HTTPBadRequest(explanation=msg)
availability_zone = group.get('availability_zone')
LOG.info(_LI("Creating group %(name)s."),
LOG.info("Creating group %(name)s.",
{'name': name},
context=context)
@@ -268,16 +268,16 @@ class GroupsController(wsgi.Controller):
group_type_id = None
if group_snapshot_id:
LOG.info(_LI("Creating group %(name)s from group_snapshot "
"%(snap)s."),
LOG.info("Creating group %(name)s from group_snapshot "
"%(snap)s.",
{'name': name, 'snap': group_snapshot_id},
context=context)
grp_snap = self.group_api.get_group_snapshot(context,
group_snapshot_id)
group_type_id = grp_snap.group_type_id
elif source_group_id:
LOG.info(_LI("Creating group %(name)s from "
"source group %(source_group_id)s."),
LOG.info("Creating group %(name)s from "
"source group %(source_group_id)s.",
{'name': name, 'source_group_id': source_group_id},
context=context)
source_group = self.group_api.get(context, source_group_id)
@@ -341,9 +341,9 @@ class GroupsController(wsgi.Controller):
"can not be all empty in the request body.")
raise exc.HTTPBadRequest(explanation=msg)
LOG.info(_LI("Updating group %(id)s with name %(name)s "
LOG.info("Updating group %(id)s with name %(name)s "
"description: %(description)s add_volumes: "
"%(add_volumes)s remove_volumes: %(remove_volumes)s."),
"%(add_volumes)s remove_volumes: %(remove_volumes)s.",
{'id': id, 'name': name,
'description': description,
'add_volumes': add_volumes,

View File

@@ -25,8 +25,8 @@ from cinder.api.v2 import volumes as volumes_v2
from cinder.api.v3.views import volumes as volume_views_v3
from cinder import exception
from cinder import group as group_api
+from cinder.i18n import _
from cinder import objects
-from cinder.i18n import _, _LI
import cinder.policy
from cinder import utils
@@ -70,8 +70,8 @@ class VolumeController(volumes_v2.VolumeController):
params = "(cascade: %(c)s, force: %(f)s)" % {'c': cascade,
'f': force}
msg = _LI("Delete volume with id: %(id)s %(params)s")
LOG.info(msg, {'id': id, 'params': params}, context=context)
LOG.info("Delete volume with id: %(id)s %(params)s",
{'id': id, 'params': params}, context=context)
if force:
check_policy(context, 'force_delete')
@@ -264,7 +264,7 @@ class VolumeController(volumes_v2.VolumeController):
elif size is None and kwargs['source_replica'] is not None:
size = kwargs['source_replica']['size']
LOG.info(_LI("Create volume of %s GB"), size)
LOG.info("Create volume of %s GB", size)
if self.ext_mgr.is_loaded('os-image-create'):
image_ref = volume.get('imageRef')

View File

@@ -33,7 +33,7 @@ from cinder.common import constants
from cinder import context
from cinder.db import base
from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
import cinder.policy
@@ -354,8 +354,8 @@ class API(base.Base):
description = 'auto-created_from_restore_from_backup'
LOG.info(_LI("Creating volume of %(size)s GB for restore of "
"backup %(backup_id)s."),
LOG.info("Creating volume of %(size)s GB for restore of "
"backup %(backup_id)s.",
{'size': size, 'backup_id': backup_id})
volume = self.volume_api.create(context, size, name, description)
volume_id = volume['id']
@@ -380,8 +380,8 @@ class API(base.Base):
{'volume_size': volume['size'], 'size': size})
raise exception.InvalidVolume(reason=msg)
LOG.info(_LI("Overwriting volume %(volume_id)s with restore of "
"backup %(backup_id)s"),
LOG.info("Overwriting volume %(volume_id)s with restore of "
"backup %(backup_id)s",
{'volume_id': volume_id, 'backup_id': backup_id})
# Setting the status here rather than setting at start and unrolling

View File

@@ -36,7 +36,7 @@ import six
from cinder.backup import driver
from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
+from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder.volume import utils as volume_utils
@@ -572,10 +572,9 @@ class ChunkedBackupDriver(driver.BackupDriver):
try:
self._backup_metadata(backup, object_meta)
# Whatever goes wrong, we want to log, cleanup, and re-raise.
-except Exception as err:
+except Exception:
with excutils.save_and_reraise_exception():
-LOG.exception(_LE("Backup volume metadata failed: %s."),
-err)
+LOG.exception("Backup volume metadata failed.")
self.delete(backup)
self._finalize_backup(backup, container, object_meta, object_sha256)
@@ -635,9 +634,8 @@ class ChunkedBackupDriver(driver.BackupDriver):
try:
fileno = volume_file.fileno()
except IOError:
LOG.info(_LI("volume_file does not support "
"fileno() so skipping "
"fsync()"))
LOG.info("volume_file does not support fileno() so skipping "
"fsync()")
else:
os.fsync(fileno)
@@ -722,8 +720,8 @@ class ChunkedBackupDriver(driver.BackupDriver):
try:
object_names = self._generate_object_names(backup)
except Exception:
-LOG.warning(_LW('Error while listing objects, continuing'
-' with delete.'))
+LOG.warning('Error while listing objects, continuing'
+' with delete.')
for object_name in object_names:
self.delete_object(container, object_name)

View File

@@ -24,7 +24,7 @@ import six
from cinder.db import base
from cinder import exception
-from cinder.i18n import _, _LI, _LW
+from cinder.i18n import _
from cinder import keymgr as key_manager
service_opts = [
@@ -64,7 +64,7 @@ class BackupMetadataAPI(base.Base):
try:
jsonutils.dumps(value)
except TypeError:
LOG.info(_LI("Value with type=%s is not serializable"),
LOG.info("Value with type=%s is not serializable",
type(value))
return False
@@ -84,8 +84,8 @@ class BackupMetadataAPI(base.Base):
for key, value in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(value):
LOG.info(_LI("Unable to serialize field '%s' - excluding "
"from backup"), key)
LOG.info("Unable to serialize field '%s' - excluding "
"from backup", key)
continue
# Copy the encryption key uuid for backup
if key is 'encryption_key_id' and value is not None:
@@ -112,8 +112,8 @@ class BackupMetadataAPI(base.Base):
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(meta[entry]):
LOG.info(_LI("Unable to serialize field '%s' - excluding "
"from backup"), entry)
LOG.info("Unable to serialize field '%s' - excluding "
"from backup", entry)
continue
container[type_tag][entry] = meta[entry]
@@ -136,8 +136,8 @@ class BackupMetadataAPI(base.Base):
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(entry.value):
LOG.info(_LI("Unable to serialize field '%s' - "
"excluding from backup"), entry)
LOG.info("Unable to serialize field '%s' - "
"excluding from backup", entry)
continue
container[type_tag][entry.key] = entry.value
@@ -234,9 +234,9 @@ class BackupMetadataAPI(base.Base):
else:
# Volume type id's do not match, and destination volume
# has a volume type. Throw exception.
LOG.warning(_LW("Destination volume type is different from "
LOG.warning("Destination volume type is different from "
"source volume type for an encrypted volume. "
"Encrypted backup restore has failed."))
"Encrypted backup restore has failed.")
msg = (_("The source volume type '%(src)s' is different "
"than the destination volume type '%(dest)s'.") %
{'src': src_volume_type_id,

View File

@@ -58,7 +58,7 @@ from six.moves import range
from cinder.backup import driver
from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
+from cinder.i18n import _
from cinder import interface
from cinder import utils
import cinder.volume.drivers.rbd as rbd_driver
@@ -181,8 +181,8 @@ class CephBackupDriver(driver.BackupDriver):
self.rbd_stripe_unit = CONF.backup_ceph_stripe_unit
self.rbd_stripe_count = CONF.backup_ceph_stripe_count
else:
LOG.info(_LI("RBD striping not supported - ignoring configuration "
"settings for rbd striping"))
LOG.info("RBD striping not supported - ignoring configuration "
"settings for rbd striping.")
self.rbd_stripe_count = 0
self.rbd_stripe_unit = 0
@@ -258,8 +258,8 @@ class CephBackupDriver(driver.BackupDriver):
# moved to the driver's initialization so that it can stop
# the service from starting when the underyling RBD does not
# support the requested features.
LOG.error(_LE("RBD journaling not supported - unable to "
"support per image mirroring in backup pool"))
LOG.error("RBD journaling not supported - unable to "
"support per image mirroring in backup pool")
raise exception.BackupInvalidCephArgs(
_("Image Journaling set but RBD backend does "
"not support journaling")
@@ -468,14 +468,14 @@ class CephBackupDriver(driver.BackupDriver):
backup.id)
if rem:
LOG.info(
_LI("Backup base image of volume %(volume)s still "
"Backup base image of volume %(volume)s still "
"has %(snapshots)s snapshots so skipping base "
"image delete."),
"image delete.",
{'snapshots': rem, 'volume': volume_id})
return
LOG.info(_LI("Deleting backup base image='%(basename)s' of "
"volume %(volume)s."),
LOG.info("Deleting backup base image='%(basename)s' of "
"volume %(volume)s.",
{'basename': base_name, 'volume': volume_id})
# Delete base if no more snapshots
try:
@@ -483,17 +483,16 @@ class CephBackupDriver(driver.BackupDriver):
except self.rbd.ImageBusy:
# Allow a retry if the image is busy
if retries > 0:
LOG.info(_LI("Backup image of volume %(volume)s is "
LOG.info("Backup image of volume %(volume)s is "
"busy, retrying %(retries)s more time(s) "
"in %(delay)ss."),
"in %(delay)ss.",
{'retries': retries,
'delay': delay,
'volume': volume_id})
eventlet.sleep(delay)
else:
LOG.error(_LE("Max retries reached deleting backup "
"%(basename)s image of volume "
"%(volume)s."),
LOG.error("Max retries reached deleting backup "
"%(basename)s image of volume %(volume)s.",
{'volume': volume_id,
'basename': base_name})
raise
@@ -527,7 +526,7 @@ class CephBackupDriver(driver.BackupDriver):
p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as e:
LOG.error(_LE("Pipe1 failed - %s "), e)
LOG.error("Pipe1 failed - %s ", e)
raise
# NOTE(dosaboy): ensure that the pipe is blocking. This is to work
@@ -541,7 +540,7 @@ class CephBackupDriver(driver.BackupDriver):
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as e:
LOG.error(_LE("Pipe2 failed - %s "), e)
LOG.error("Pipe2 failed - %s ", e)
raise
p1.stdout.close()
@@ -1005,8 +1004,7 @@ class CephBackupDriver(driver.BackupDriver):
dest_user=rbd_user, dest_conf=rbd_conf,
src_snap=restore_point)
except exception.BackupRBDOperationFailed:
LOG.exception(_LE("Differential restore failed, trying full "
"restore"))
LOG.exception("Differential restore failed, trying full restore")
raise
# If the volume we are restoring to is larger than the backup volume,
@@ -1108,10 +1106,9 @@ class CephBackupDriver(driver.BackupDriver):
else:
LOG.debug("Volume file is NOT RBD.")
else:
LOG.info(_LI("No restore point found for backup="
"'%(backup)s' of volume %(volume)s "
"although base image is found - "
"forcing full copy."),
LOG.info("No restore point found for backup='%(backup)s' of "
"volume %(volume)s although base image is found - "
"forcing full copy.",
{'backup': backup.id,
'volume': backup.volume_id})
return False, restore_point
@@ -1196,8 +1193,8 @@ class CephBackupDriver(driver.BackupDriver):
LOG.debug('Restore to volume %s finished successfully.',
volume_id)
except exception.BackupOperationError as e:
-LOG.error(_LE('Restore to volume %(volume)s finished with error - '
-'%(error)s.'), {'error': e, 'volume': volume_id})
+LOG.error('Restore to volume %(volume)s finished with error - '
+'%(error)s.', {'error': e, 'volume': volume_id})
raise
def delete(self, backup):
@@ -1209,8 +1206,8 @@ class CephBackupDriver(driver.BackupDriver):
self._try_delete_base_image(backup)
except self.rbd.ImageNotFound:
LOG.warning(
_LW("RBD image for backup %(backup)s of volume %(volume)s "
"not found. Deleting backup metadata."),
"RBD image for backup %(backup)s of volume %(volume)s "
"not found. Deleting backup metadata.",
{'backup': backup.id, 'volume': backup.volume_id})
delete_failed = True
@@ -1218,9 +1215,8 @@ class CephBackupDriver(driver.BackupDriver):
VolumeMetadataBackup(client, backup.id).remove_if_exists()
if delete_failed:
LOG.info(_LI("Delete of backup '%(backup)s' "
"for volume '%(volume)s' "
"finished with warning."),
LOG.info("Delete of backup '%(backup)s' for volume '%(volume)s' "
"finished with warning.",
{'backup': backup.id, 'volume': backup.volume_id})
else:
LOG.debug("Delete of backup '%(backup)s' for volume "

View File

@@ -55,7 +55,6 @@ from swiftclient import client as swift
from cinder.backup import chunkeddriver
from cinder import exception
from cinder.i18n import _
-from cinder.i18n import _LE
from cinder import interface
LOG = logging.getLogger(__name__)
@@ -215,8 +214,8 @@ class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver):
self.backup_swift_auth_insecure = CONF.backup_swift_auth_insecure
if CONF.backup_swift_auth == 'single_user':
if CONF.backup_swift_user is None:
LOG.error(_LE("single_user auth mode enabled, "
"but %(param)s not set"),
LOG.error("single_user auth mode enabled, "
"but %(param)s not set",
{'param': 'backup_swift_user'})
raise exception.ParameterNotFound(param='backup_swift_user')
os_options = {}

View File

@@ -35,7 +35,7 @@ from oslo_log import log as logging
from cinder.backup import driver
from cinder import exception
-from cinder.i18n import _LE, _
+from cinder.i18n import _
from cinder import interface
from cinder import utils
@@ -250,9 +250,9 @@ def _cleanup_device_hardlink(hardlink_path, volume_path, volume_id):
hardlink_path,
run_as_root=True)
except processutils.ProcessExecutionError as exc:
-LOG.error(_LE('backup: %(vol_id)s failed to remove backup hardlink '
+LOG.error('backup: %(vol_id)s failed to remove backup hardlink '
'from %(vpath)s to %(bpath)s.\n'
-'stdout: %(out)s\n stderr: %(err)s.'),
+'stdout: %(out)s\n stderr: %(err)s.',
{'vol_id': volume_id,
'vpath': volume_path,
'bpath': hardlink_path,
@@ -523,8 +523,8 @@ class TSMBackupDriver(driver.BackupDriver):
# log error if tsm cannot delete the backup object
# but do not raise exception so that cinder backup
# object can be removed.
-LOG.error(_LE('delete: %(vol_id)s failed with '
-'stdout: %(out)s\n stderr: %(err)s'),
+LOG.error('delete: %(vol_id)s failed with '
+'stdout: %(out)s\n stderr: %(err)s',
{'vol_id': backup.volume_id,
'out': out,
'err': err})

View File

@@ -42,7 +42,7 @@ from cinder.backup import driver
from cinder.backup import rpcapi as backup_rpcapi
from cinder import context
from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
+from cinder.i18n import _
from cinder import manager
from cinder import objects
from cinder.objects import fields
@@ -117,7 +117,7 @@ class BackupManager(manager.ThreadPoolManager):
LOG.debug("Got backend '%s'.", backend)
return backend
LOG.info(_LI("Backend not found in hostname (%s) so using default."),
LOG.info("Backend not found in hostname (%s) so using default.",
host)
if 'default' not in self.volume_managers:
@@ -168,15 +168,15 @@ class BackupManager(manager.ThreadPoolManager):
self.volume_managers['default'] = default
def _init_volume_driver(self, ctxt, driver):
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)."),
LOG.info("Starting volume driver %(driver_name)s (%(version)s).",
{'driver_name': driver.__class__.__name__,
'version': driver.get_version()})
try:
driver.do_setup(ctxt)
driver.check_for_setup_error()
except Exception:
LOG.exception(_LE("Error encountered during initialization of "
"driver: %(name)s."),
LOG.exception("Error encountered during initialization of "
"driver: %(name)s.",
{'name': driver.__class__.__name__})
# we don't want to continue since we failed
# to initialize the driver correctly.
@@ -213,8 +213,7 @@ class BackupManager(manager.ThreadPoolManager):
self._cleanup_incomplete_backup_operations(ctxt)
except Exception:
# Don't block startup of the backup service.
LOG.exception(_LE("Problem cleaning incomplete backup "
"operations."))
LOG.exception("Problem cleaning incomplete backup operations.")
def reset(self):
super(BackupManager, self).reset()
@@ -222,7 +221,7 @@ class BackupManager(manager.ThreadPoolManager):
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
def _cleanup_incomplete_backup_operations(self, ctxt):
LOG.info(_LI("Cleaning up incomplete backup operations."))
LOG.info("Cleaning up incomplete backup operations.")
# TODO(smulcahy) implement full resume of backup and restore
# operations on restart (rather than simply resetting)
@@ -231,35 +230,35 @@ class BackupManager(manager.ThreadPoolManager):
try:
self._cleanup_one_backup(ctxt, backup)
except Exception:
LOG.exception(_LE("Problem cleaning up backup %(bkup)s."),
LOG.exception("Problem cleaning up backup %(bkup)s.",
{'bkup': backup['id']})
try:
self._cleanup_temp_volumes_snapshots_for_one_backup(ctxt,
backup)
except Exception:
LOG.exception(_LE("Problem cleaning temp volumes and "
"snapshots for backup %(bkup)s."),
LOG.exception("Problem cleaning temp volumes and "
"snapshots for backup %(bkup)s.",
{'bkup': backup['id']})
def _cleanup_one_volume(self, ctxt, volume):
if volume['status'] == 'backing-up':
self._detach_all_attachments(ctxt, volume)
-LOG.info(_LI('Resetting volume %(vol_id)s to previous '
-'status %(status)s (was backing-up).'),
+LOG.info('Resetting volume %(vol_id)s to previous '
+'status %(status)s (was backing-up).',
{'vol_id': volume['id'],
'status': volume['previous_status']})
self.db.volume_update(ctxt, volume['id'],
{'status': volume['previous_status']})
elif volume['status'] == 'restoring-backup':
self._detach_all_attachments(ctxt, volume)
-LOG.info(_LI('setting volume %s to error_restoring '
-'(was restoring-backup).'), volume['id'])
+LOG.info('Setting volume %s to error_restoring '
+'(was restoring-backup).', volume['id'])
self.db.volume_update(ctxt, volume['id'],
{'status': 'error_restoring'})
def _cleanup_one_backup(self, ctxt, backup):
if backup['status'] == fields.BackupStatus.CREATING:
-LOG.info(_LI('Resetting backup %s to error (was creating).'),
+LOG.info('Resetting backup %s to error (was creating).',
backup['id'])
volume = objects.Volume.get_by_id(ctxt, backup.volume_id)
@@ -268,8 +267,8 @@ class BackupManager(manager.ThreadPoolManager):
err = 'incomplete backup reset on manager restart'
self._update_backup_error(backup, err)
elif backup['status'] == fields.BackupStatus.RESTORING:
-LOG.info(_LI('Resetting backup %s to '
-'available (was restoring).'),
+LOG.info('Resetting backup %s to '
+'available (was restoring).',
backup['id'])
volume = objects.Volume.get_by_id(ctxt, backup.restore_volume_id)
self._cleanup_one_volume(ctxt, volume)
@@ -277,7 +276,7 @@ class BackupManager(manager.ThreadPoolManager):
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
elif backup['status'] == fields.BackupStatus.DELETING:
-LOG.info(_LI('Resuming delete on backup: %s.'), backup['id'])
+LOG.info('Resuming delete on backup: %s.', backup['id'])
if CONF.backup_service_inithost_offload:
# Offload all the pending backup delete operations to the
# threadpool to prevent the main backup service thread
@@ -296,8 +295,7 @@ class BackupManager(manager.ThreadPoolManager):
rpcapi = self.volume_rpcapi
rpcapi.detach_volume(ctxt, volume, attachment['id'])
except Exception:
LOG.exception(_LE("Detach attachment %(attach_id)s"
" failed."),
LOG.exception("Detach attachment %(attach_id)s failed.",
{'attach_id': attachment['id']},
resource=volume)
@@ -359,8 +357,8 @@ class BackupManager(manager.ThreadPoolManager):
volume_id = backup.volume_id
volume = objects.Volume.get_by_id(context, volume_id)
previous_status = volume.get('previous_status', None)
-LOG.info(_LI('Create backup started, backup: %(backup_id)s '
-'volume: %(volume_id)s.'),
+LOG.info('Create backup started, backup: %(backup_id)s '
+'volume: %(volume_id)s.',
{'backup_id': backup.id, 'volume_id': volume_id})
self._notify_about_backup_usage(context, backup, "create.start")
@@ -417,7 +415,7 @@ class BackupManager(manager.ThreadPoolManager):
backup.parent_id)
parent_backup.num_dependent_backups += 1
parent_backup.save()
-LOG.info(_LI('Create backup finished. backup: %s.'), backup.id)
+LOG.info('Create backup finished. backup: %s.', backup.id)
self._notify_about_backup_usage(context, backup, "create.end")
def _run_backup(self, context, backup, volume):
@@ -457,8 +455,8 @@ class BackupManager(manager.ThreadPoolManager):
def restore_backup(self, context, backup, volume_id):
"""Restore volume backups from configured backup service."""
-LOG.info(_LI('Restore backup started, backup: %(backup_id)s '
-'volume: %(volume_id)s.'),
+LOG.info('Restore backup started, backup: %(backup_id)s '
+'volume: %(volume_id)s.',
{'backup_id': backup.id, 'volume_id': volume_id})
volume = objects.Volume.get_by_id(context, volume_id)
@@ -490,9 +488,9 @@ class BackupManager(manager.ThreadPoolManager):
raise exception.InvalidBackup(reason=err)
if volume['size'] > backup['size']:
-LOG.info(_LI('Volume: %(vol_id)s, size: %(vol_size)d is '
+LOG.info('Volume: %(vol_id)s, size: %(vol_size)d is '
'larger than backup: %(backup_id)s, '
-'size: %(backup_size)d, continuing with restore.'),
+'size: %(backup_size)d, continuing with restore.',
{'vol_id': volume['id'],
'vol_size': volume['size'],
'backup_id': backup['id'],
@@ -525,8 +523,8 @@ class BackupManager(manager.ThreadPoolManager):
self.db.volume_update(context, volume_id, {'status': 'available'})
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
-LOG.info(_LI('Restore backup finished, backup %(backup_id)s restored'
-' to volume %(volume_id)s.'),
+LOG.info('Restore backup finished, backup %(backup_id)s restored'
+' to volume %(volume_id)s.',
{'backup_id': backup.id, 'volume_id': volume_id})
self._notify_about_backup_usage(context, backup, "restore.end")
@@ -557,7 +555,7 @@ class BackupManager(manager.ThreadPoolManager):
def delete_backup(self, context, backup):
"""Delete volume backup from configured backup service."""
-LOG.info(_LI('Delete backup started, backup: %s.'), backup.id)
+LOG.info('Delete backup started, backup: %s.', backup.id)
self._notify_about_backup_usage(context, backup, "delete.start")
backup.host = self.host
@@ -604,7 +602,7 @@ class BackupManager(manager.ThreadPoolManager):
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting backup"))
LOG.exception("Failed to update usages deleting backup")
backup.destroy()
# If this backup is incremental backup, handle the
@@ -620,7 +618,7 @@ class BackupManager(manager.ThreadPoolManager):
QUOTAS.commit(context, reservations,
project_id=backup.project_id)
-LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup.id)
+LOG.info('Delete backup finished, backup %s deleted.', backup.id)
self._notify_about_backup_usage(context, backup, "delete.end")
def _notify_about_backup_usage(self,
@@ -646,7 +644,7 @@ class BackupManager(manager.ThreadPoolManager):
:returns: 'backup_service' describing the needed driver.
:raises: InvalidBackup
"""
-LOG.info(_LI('Export record started, backup: %s.'), backup.id)
+LOG.info('Export record started, backup: %s.', backup.id)
expected_status = fields.BackupStatus.AVAILABLE
actual_status = backup.status
@@ -680,7 +678,7 @@ class BackupManager(manager.ThreadPoolManager):
msg = six.text_type(err)
raise exception.InvalidBackup(reason=msg)
-LOG.info(_LI('Export record finished, backup %s exported.'), backup.id)
+LOG.info('Export record finished, backup %s exported.', backup.id)
return backup_record
def import_record(self,
@@ -699,7 +697,7 @@ class BackupManager(manager.ThreadPoolManager):
:raises: InvalidBackup
:raises: ServiceNotFound
"""
-LOG.info(_LI('Import record started, backup_url: %s.'), backup_url)
+LOG.info('Import record started, backup_url: %s.', backup_url)
# Can we import this backup?
if (backup_service != self.driver_name):
@@ -783,9 +781,9 @@ class BackupManager(manager.ThreadPoolManager):
if isinstance(backup_service, driver.BackupDriverWithVerify):
backup_service.verify(backup.id)
else:
-LOG.warning(_LW('Backup service %(service)s does not '
+LOG.warning('Backup service %(service)s does not '
'support verify. Backup id %(id)s is '
-'not verified. Skipping verify.'),
+'not verified. Skipping verify.',
{'service': self.driver_name,
'id': backup.id})
except exception.InvalidBackup as err:
@@ -796,8 +794,8 @@ class BackupManager(manager.ThreadPoolManager):
backup.update({"status": fields.BackupStatus.AVAILABLE})
backup.save()
-LOG.info(_LI('Import record id %s metadata from driver '
-'finished.'), backup.id)
+LOG.info('Import record id %s metadata from driver '
+'finished.', backup.id)
def reset_status(self, context, backup, status):
"""Reset volume backup status.
@@ -809,13 +807,13 @@ class BackupManager(manager.ThreadPoolManager):
:raises: BackupVerifyUnsupportedDriver
:raises: AttributeError
"""
-LOG.info(_LI('Reset backup status started, backup_id: '
-'%(backup_id)s, status: %(status)s.'),
+LOG.info('Reset backup status started, backup_id: '
+'%(backup_id)s, status: %(status)s.',
{'backup_id': backup.id,
'status': status})
backup_service_name = self._map_service_to_driver(backup.service)
-LOG.info(_LI('Backup service: %s.'), backup_service_name)
+LOG.info('Backup service: %s.', backup_service_name)
if backup_service_name is not None:
configured_service = self.driver_name
if backup_service_name != configured_service:
@@ -857,14 +855,14 @@ class BackupManager(manager.ThreadPoolManager):
backup.save()
except exception.InvalidBackup:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Backup id %s is not invalid. "
"Skipping reset."), backup.id)
LOG.error("Backup id %s is not invalid. Skipping reset.",
backup.id)
except exception.BackupVerifyUnsupportedDriver:
with excutils.save_and_reraise_exception():
-LOG.error(_LE('Backup service %(configured_service)s '
+LOG.error('Backup service %(configured_service)s '
'does not support verify. Backup id '
'%(id)s is not verified. '
-'Skipping verify.'),
+'Skipping verify.',
{'configured_service': self.driver_name,
'id': backup.id})
except AttributeError:
@@ -882,8 +880,8 @@ class BackupManager(manager.ThreadPoolManager):
self._cleanup_temp_volumes_snapshots_for_one_backup(
context, backup)
except Exception:
LOG.exception(_LE("Problem cleaning temp volumes and "
"snapshots for backup %(bkup)s."),
LOG.exception("Problem cleaning temp volumes and "
"snapshots for backup %(bkup)s.",
{'bkup': backup.id})
# send notification to ceilometer
@@ -928,9 +926,9 @@ class BackupManager(manager.ThreadPoolManager):
properties,
force=True)
except Exception:
LOG.warning(_LW("Failed to terminate the connection "
LOG.warning("Failed to terminate the connection "
"of volume %(volume_id)s, but it is "
"acceptable."),
"acceptable.",
{'volume_id', volume.id})
def _connect_device(self, conn):

View File

@@ -28,7 +28,6 @@ from oslo_utils import excutils
from six import moves
from cinder import exception
-from cinder.i18n import _LE, _LI
from cinder import utils
@@ -97,14 +96,14 @@ class LVM(executor.Executor):
try:
self._create_vg(physical_volumes)
except putils.ProcessExecutionError as err:
-LOG.exception(_LE('Error creating Volume Group'))
-LOG.error(_LE('Cmd :%s'), err.cmd)
-LOG.error(_LE('StdOut :%s'), err.stdout)
-LOG.error(_LE('StdErr :%s'), err.stderr)
+LOG.exception('Error creating Volume Group')
+LOG.error('Cmd :%s', err.cmd)
+LOG.error('StdOut :%s', err.stdout)
+LOG.error('StdErr :%s', err.stderr)
raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name)
if self._vg_exists() is False:
-LOG.error(_LE('Unable to locate Volume Group %s'), vg_name)
+LOG.error('Unable to locate Volume Group %s', vg_name)
raise exception.VolumeGroupNotFound(vg_name=vg_name)
# NOTE: we assume that the VG has been activated outside of Cinder
@@ -180,10 +179,10 @@ class LVM(executor.Executor):
free_space = pool_size - consumed_space
free_space = round(free_space, 2)
except putils.ProcessExecutionError as err:
-LOG.exception(_LE('Error querying thin pool about data_percent'))
-LOG.error(_LE('Cmd :%s'), err.cmd)
-LOG.error(_LE('StdOut :%s'), err.stdout)
-LOG.error(_LE('StdErr :%s'), err.stderr)
+LOG.exception('Error querying thin pool about data_percent')
+LOG.error('Cmd :%s', err.cmd)
+LOG.error('StdOut :%s', err.stdout)
+LOG.error('StdErr :%s', err.stderr)
return free_space
@@ -300,8 +299,8 @@ class LVM(executor.Executor):
with excutils.save_and_reraise_exception(reraise=True) as ctx:
if "not found" in err.stderr or "Failed to find" in err.stderr:
ctx.reraise = False
LOG.info(_LI("Logical Volume not found when querying "
"LVM info. (vg_name=%(vg)s, lv_name=%(lv)s"),
LOG.info("Logical Volume not found when querying "
"LVM info. (vg_name=%(vg)s, lv_name=%(lv)s",
{'vg': vg_name, 'lv': lv_name})
out = None
@@ -416,7 +415,7 @@ class LVM(executor.Executor):
vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name)
if len(vg_list) != 1:
-LOG.error(_LE('Unable to find VG: %s'), self.vg_name)
+LOG.error('Unable to find VG: %s', self.vg_name)
raise exception.VolumeGroupNotFound(vg_name=self.vg_name)
self.vg_size = float(vg_list[0]['size'])
@@ -503,9 +502,9 @@ class LVM(executor.Executor):
"""
if not self.supports_thin_provisioning(self._root_helper):
-LOG.error(_LE('Requested to setup thin provisioning, '
+LOG.error('Requested to setup thin provisioning, '
'however current LVM version does not '
-'support it.'))
+'support it.')
return None
if name is None:
@@ -563,11 +562,11 @@ class LVM(executor.Executor):
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
-LOG.exception(_LE('Error creating Volume'))
-LOG.error(_LE('Cmd :%s'), err.cmd)
-LOG.error(_LE('StdOut :%s'), err.stdout)
-LOG.error(_LE('StdErr :%s'), err.stderr)
-LOG.error(_LE('Current state: %s'), self.get_all_volume_groups())
+LOG.exception('Error creating Volume')
+LOG.error('Cmd :%s', err.cmd)
+LOG.error('StdOut :%s', err.stdout)
+LOG.error('StdErr :%s', err.stderr)
+LOG.error('Current state: %s', self.get_all_volume_groups())
raise
@utils.retry(putils.ProcessExecutionError)
@@ -581,7 +580,7 @@ class LVM(executor.Executor):
"""
source_lvref = self.get_volume(source_lv_name)
if source_lvref is None:
LOG.error(_LE("Trying to create snapshot by non-existent LV: %s"),
LOG.error("Trying to create snapshot by non-existent LV: %s",
source_lv_name)
raise exception.VolumeDeviceNotFound(device=source_lv_name)
cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '--name', name, '--snapshot',
@@ -595,10 +594,10 @@ class LVM(executor.Executor):
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
-LOG.exception(_LE('Error creating snapshot'))
-LOG.error(_LE('Cmd :%s'), err.cmd)
-LOG.error(_LE('StdOut :%s'), err.stdout)
-LOG.error(_LE('StdErr :%s'), err.stderr)
+LOG.exception('Error creating snapshot')
+LOG.error('Cmd :%s', err.cmd)
+LOG.error('StdOut :%s', err.stdout)
+LOG.error('StdErr :%s', err.stderr)
raise
def _mangle_lv_name(self, name):
@@ -629,10 +628,10 @@ class LVM(executor.Executor):
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
-LOG.exception(_LE('Error deactivating LV'))
-LOG.error(_LE('Cmd :%s'), err.cmd)
-LOG.error(_LE('StdOut :%s'), err.stdout)
-LOG.error(_LE('StdErr :%s'), err.stderr)
+LOG.exception('Error deactivating LV')
+LOG.error('Cmd :%s', err.cmd)
+LOG.error('StdOut :%s', err.stdout)
+LOG.error('StdErr :%s', err.stderr)
raise
# Wait until lv is deactivated to return in
@@ -686,10 +685,10 @@ class LVM(executor.Executor):
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
-LOG.exception(_LE('Error activating LV'))
-LOG.error(_LE('Cmd :%s'), err.cmd)
-LOG.error(_LE('StdOut :%s'), err.stdout)
-LOG.error(_LE('StdErr :%s'), err.stderr)
+LOG.exception('Error activating LV')
+LOG.error('Cmd :%s', err.cmd)
+LOG.error('StdOut :%s', err.stdout)
+LOG.error('StdErr :%s', err.stderr)
raise
@utils.retry(putils.ProcessExecutionError)
@@ -813,10 +812,10 @@ class LVM(executor.Executor):
self._execute(*cmd, root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
-LOG.exception(_LE('Error extending Volume'))
-LOG.error(_LE('Cmd :%s'), err.cmd)
-LOG.error(_LE('StdOut :%s'), err.stdout)
-LOG.error(_LE('StdErr :%s'), err.stderr)
+LOG.exception('Error extending Volume')
+LOG.error('Cmd :%s', err.cmd)
+LOG.error('StdOut :%s', err.stdout)
+LOG.error('StdErr :%s', err.stderr)
raise
def vg_mirror_free_space(self, mirror_count):
@@ -851,8 +850,8 @@ class LVM(executor.Executor):
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
-LOG.exception(_LE('Error renaming logical volume'))
-LOG.error(_LE('Cmd :%s'), err.cmd)
-LOG.error(_LE('StdOut :%s'), err.stdout)
-LOG.error(_LE('StdErr :%s'), err.stderr)
+LOG.exception('Error renaming logical volume')
+LOG.error('Cmd :%s', err.cmd)
+LOG.error('StdOut :%s', err.stdout)
+LOG.error('StdErr :%s', err.stderr)
raise

View File

@@ -46,7 +46,7 @@ i18n.enable_lazy()
# Need to register global_opts
from cinder.common import config # noqa
from cinder.db import api as session
-from cinder.i18n import _, _LE
+from cinder.i18n import _
from cinder import service
from cinder import utils
from cinder import version
@@ -109,9 +109,9 @@ def main():
launcher.launch_service(server)
service_started = True
else:
-LOG.error(_LE('Configuration for cinder-volume does not specify '
+LOG.error('Configuration for cinder-volume does not specify '
'"enabled_backends". Using DEFAULT section to configure '
-'drivers is not supported since Ocata.'))
+'drivers is not supported since Ocata.')
if not service_started:
msg = _('No volume service(s) started successfully, terminating.')

View File

@@ -48,7 +48,7 @@ from oslo_log import log as logging
from cinder import i18n
i18n.enable_lazy()
from cinder import context
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _
from cinder import objects
from cinder import rpc
from cinder import utils
@@ -104,7 +104,7 @@ def _vol_notify_usage(LOG, volume_ref, extra_info, admin_context):
cinder.volume.utils.notify_about_volume_usage(
admin_context, volume_ref, 'exists', extra_usage_info=extra_info)
except Exception as exc_msg:
LOG.error(_LE("Exists volume notification failed: %s"),
LOG.error("Exists volume notification failed: %s",
exc_msg, resource=volume_ref)
@@ -119,7 +119,7 @@ def _snap_notify_usage(LOG, snapshot_ref, extra_info, admin_context):
cinder.volume.utils.notify_about_snapshot_usage(
admin_context, snapshot_ref, 'exists', extra_info)
except Exception as exc_msg:
LOG.error(_LE("Exists snapshot notification failed: %s"),
LOG.error("Exists snapshot notification failed: %s",
exc_msg, resource=snapshot_ref)
@@ -134,7 +134,7 @@ def _backup_notify_usage(LOG, backup_ref, extra_info, admin_context):
'project_id': backup_ref.project_id,
'extra_info': extra_info})
except Exception as exc_msg:
LOG.error(_LE("Exists backups notification failed: %s"), exc_msg)
LOG.error("Exists backups notification failed: %s", exc_msg)
def _create_action(obj_ref, admin_context, LOG, notify_about_usage,
@@ -155,7 +155,7 @@ def _create_action(obj_ref, admin_context, LOG, notify_about_usage,
notify_about_usage(admin_context, obj_ref,
'create.end', extra_usage_info=local_extra_info)
except Exception as exc_msg:
LOG.error(_LE("Create %(type)s notification failed: %(exc_msg)s"),
LOG.error("Create %(type)s notification failed: %(exc_msg)s",
{'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref)
@@ -177,7 +177,7 @@ def _delete_action(obj_ref, admin_context, LOG, notify_about_usage,
notify_about_usage(admin_context, obj_ref,
'delete.end', extra_usage_info=local_extra_info)
except Exception as exc_msg:
LOG.error(_LE("Delete %(type)s notification failed: %(exc_msg)s"),
LOG.error("Delete %(type)s notification failed: %(exc_msg)s",
{'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref)
@@ -206,9 +206,9 @@ def main():
begin, end = utils.last_completed_audit_period()
begin, end = _time_error(LOG, begin, end)
LOG.info(_LI("Starting volume usage audit"))
msg = _LI("Creating usages for %(begin_period)s until %(end_period)s")
LOG.info(msg, {"begin_period": str(begin), "end_period": str(end)})
LOG.info("Starting volume usage audit")
LOG.info("Creating usages for %(begin_period)s until %(end_period)s",
{"begin_period": begin, "end_period": end})
extra_info = {
'audit_period_beginning': str(begin),
@@ -219,7 +219,7 @@ def main():
begin,
end)
LOG.info(_LI("Found %d volumes"), len(volumes))
LOG.info("Found %d volumes", len(volumes))
for volume_ref in volumes:
_obj_ref_action(_vol_notify_usage, LOG, volume_ref, extra_info,
admin_context, begin, end,
@@ -228,7 +228,7 @@ def main():
snapshots = objects.SnapshotList.get_all_active_by_window(admin_context,
begin, end)
LOG.info(_LI("Found %d snapshots"), len(snapshots))
LOG.info("Found %d snapshots", len(snapshots))
for snapshot_ref in snapshots:
_obj_ref_action(_snap_notify_usage, LOG, snapshot_ref, extra_info,
admin_context, begin,
@@ -238,10 +238,10 @@ def main():
backups = objects.BackupList.get_all_active_by_window(admin_context,
begin, end)
LOG.info(_LI("Found %d backups"), len(backups))
LOG.info("Found %d backups", len(backups))
for backup_ref in backups:
_obj_ref_action(_backup_notify_usage, LOG, backup_ref, extra_info,
admin_context, begin,
end, cinder.volume.utils.notify_about_backup_usage,
"backup_id", "backup")
LOG.info(_LI("Volume usage audit completed"))
LOG.info("Volume usage audit completed")

View File

@@ -27,7 +27,7 @@ from sqlalchemy.sql import type_api
from cinder.db import api
from cinder import exception
-from cinder.i18n import _, _LW
+from cinder.i18n import _
LOG = logging.getLogger(__name__)
@@ -96,7 +96,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check
# the actual primary key, rather than assuming its id
-LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
+LOG.warning('Id not in sort_keys; is sort_keys unique?')
assert(not (sort_dir and sort_dirs))

View File

@@ -28,7 +28,7 @@ from oslo_utils import timeutils
from cinder import db
from cinder.db import base
from cinder import exception
-from cinder.i18n import _, _LE, _LW
+from cinder.i18n import _
from cinder import objects
from cinder.objects import fields as c_fields
import cinder.policy
@ -110,8 +110,7 @@ class API(base.Base):
valid = self._valid_availability_zone(availability_zone)
if not valid:
msg = _LW(
"Availability zone '%s' is invalid") % (availability_zone)
msg = _("Availability zone '%s' is invalid.") % availability_zone
LOG.warning(msg)
raise exception.InvalidInput(reason=msg)
@ -148,8 +147,8 @@ class API(base.Base):
group.create()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when creating consistency group"
" %s."), name)
LOG.error("Error occurred when creating consistency group "
"%s.", name)
request_spec_list = []
filter_properties_list = []
@ -189,19 +188,19 @@ class API(base.Base):
group.create(cg_snap_id=cgsnapshot_id, cg_id=source_cgid)
except exception.ConsistencyGroupNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Source CG %(source_cg)s not found when "
LOG.error("Source CG %(source_cg)s not found when "
"creating consistency group %(cg)s from "
"source."),
"source.",
{'cg': name, 'source_cg': source_cgid})
except exception.CgSnapshotNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE("CG snapshot %(cgsnap)s not found when creating "
"consistency group %(cg)s from source."),
LOG.error("CG snapshot %(cgsnap)s not found when creating "
"consistency group %(cg)s from source.",
{'cg': name, 'cgsnap': cgsnapshot_id})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when creating consistency group"
" %(cg)s from cgsnapshot %(cgsnap)s."),
LOG.error("Error occurred when creating consistency group"
" %(cg)s from cgsnapshot %(cgsnap)s.",
{'cg': name, 'cgsnap': cgsnapshot_id})
# Update quota for consistencygroups
@ -257,10 +256,10 @@ class API(base.Base):
**kwargs)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when creating volume "
LOG.error("Error occurred when creating volume "
"entry from snapshot in the process of "
"creating consistency group %(group)s "
"from cgsnapshot %(cgsnap)s."),
"from cgsnapshot %(cgsnap)s.",
{'group': group.id,
'cgsnap': cgsnapshot.id})
except Exception:
@ -268,9 +267,9 @@ class API(base.Base):
try:
group.destroy()
finally:
LOG.error(_LE("Error occurred when creating consistency "
LOG.error("Error occurred when creating consistency "
"group %(group)s from cgsnapshot "
"%(cgsnap)s."),
"%(cgsnap)s.",
{'group': group.id,
'cgsnap': cgsnapshot.id})
@ -321,10 +320,10 @@ class API(base.Base):
**kwargs)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when creating cloned "
LOG.error("Error occurred when creating cloned "
"volume in the process of creating "
"consistency group %(group)s from "
"source CG %(source_cg)s."),
"source CG %(source_cg)s.",
{'group': group.id,
'source_cg': source_cg.id})
except Exception:
@ -332,9 +331,9 @@ class API(base.Base):
try:
group.destroy()
finally:
LOG.error(_LE("Error occurred when creating consistency "
LOG.error("Error occurred when creating consistency "
"group %(group)s from source CG "
"%(source_cg)s."),
"%(source_cg)s.",
{'group': group.id,
'source_cg': source_cg.id})
@ -390,9 +389,9 @@ class API(base.Base):
try:
group.destroy()
finally:
LOG.error(_LE("Error occurred when building "
LOG.error("Error occurred when building "
"request spec list for consistency group "
"%s."), group.id)
"%s.", group.id)
# Cast to the scheduler and let it handle whatever is needed
# to select the target host for this group.
@ -418,8 +417,8 @@ class API(base.Base):
quota_utils.process_reserve_over_quota(
context, e, resource='groups')
finally:
LOG.error(_LE("Failed to update quota for "
"consistency group %s."), group.id)
LOG.error("Failed to update quota for "
"consistency group %s.", group.id)
@wrap_check_policy
def delete(self, context, group, force=False):
@ -749,8 +748,8 @@ class API(base.Base):
if cgsnapshot.obj_attr_is_set('id'):
cgsnapshot.destroy()
finally:
LOG.error(_LE("Error occurred when creating cgsnapshot"
" %s."), cgsnapshot_id)
LOG.error("Error occurred when creating cgsnapshot"
" %s.", cgsnapshot_id)
self.volume_rpcapi.create_cgsnapshot(context, cgsnapshot)
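
The create paths above repeatedly pair excutils.save_and_reraise_exception() with a plain-string LOG.error. A minimal sketch of that pattern, assuming oslo.utils is available; the failure and the group name are illustrative:

import logging

from oslo_utils import excutils

logging.basicConfig(level=logging.ERROR)
LOG = logging.getLogger(__name__)

def create_group(name):
    try:
        raise RuntimeError("backend unavailable")  # stand-in failure
    except Exception:
        with excutils.save_and_reraise_exception():
            # Log with deferred interpolation; the original exception
            # and its traceback are re-raised when the block exits.
            LOG.error("Error occurred when creating consistency group "
                      "%s.", name)

try:
    create_group('cg-1')
except RuntimeError:
    pass  # the original exception was preserved and re-raised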

View File

@ -25,7 +25,7 @@ from oslo_log import log as logging
from oslo_utils import timeutils
import six
from cinder.i18n import _, _LW
from cinder.i18n import _
from cinder import policy
context_opts = [
@ -214,6 +214,6 @@ def get_internal_tenant_context():
project_id=project_id,
is_admin=True)
else:
LOG.warning(_LW('Unable to get internal tenant context: Missing '
'required config parameters.'))
LOG.warning('Unable to get internal tenant context: Missing '
'required config parameters.')
return None

View File

@ -32,7 +32,7 @@ from tooz import coordination
from tooz import locking
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.i18n import _
LOG = log.getLogger(__name__)
@ -94,9 +94,9 @@ class Coordinator(object):
self._ev = eventlet.spawn(
lambda: tpool.execute(self.heartbeat))
except coordination.ToozError:
LOG.exception(_LE('Error starting coordination backend.'))
LOG.exception('Error starting coordination backend.')
raise
LOG.info(_LI('Coordination backend started successfully.'))
LOG.info('Coordination backend started successfully.')
def stop(self):
"""Disconnect from coordination backend and stop heartbeat."""
@ -154,17 +154,17 @@ class Coordinator(object):
self.coordinator.heartbeat()
return True
except coordination.ToozConnectionError:
LOG.exception(_LE('Connection error while sending a heartbeat '
'to coordination backend.'))
LOG.exception('Connection error while sending a heartbeat '
'to coordination backend.')
raise
except coordination.ToozError:
LOG.exception(_LE('Error sending a heartbeat to coordination '
'backend.'))
LOG.exception('Error sending a heartbeat to coordination '
'backend.')
return False
def _reconnect(self):
"""Reconnect with jittered exponential backoff increase."""
LOG.info(_LI('Reconnecting to coordination backend.'))
LOG.info('Reconnecting to coordination backend.')
cap = cfg.CONF.coordination.max_reconnect_backoff
backoff = base = cfg.CONF.coordination.initial_reconnect_backoff
for attempt in itertools.count(1):
@ -173,11 +173,11 @@ class Coordinator(object):
break
except coordination.ToozError:
backoff = min(cap, random.uniform(base, backoff * 3))
msg = _LW('Reconnect attempt %(attempt)s failed. '
msg = ('Reconnect attempt %(attempt)s failed. '
'Next try in %(backoff).2fs.')
LOG.warning(msg, {'attempt': attempt, 'backoff': backoff})
self._dead.wait(backoff)
LOG.info(_LI('Reconnected to coordination backend.'))
LOG.info('Reconnected to coordination backend.')
COORDINATOR = Coordinator(prefix='cinder-')
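
The reconnect loop above uses decorrelated jitter: each wait is drawn from uniform(base, previous * 3) and capped. A standalone sketch of just that arithmetic; the defaults here are illustrative, not cinder's config values:

import random

def backoff_intervals(base=0.1, cap=60.0, attempts=5, seed=None):
    """Yield waits following backoff = min(cap, uniform(base, backoff * 3))."""
    rng = random.Random(seed)
    backoff = base
    for _ in range(attempts):
        backoff = min(cap, rng.uniform(base, backoff * 3))
        yield backoff

for attempt, wait in enumerate(backoff_intervals(seed=1), start=1):
    print('Reconnect attempt %d failed. Next try in %.2fs.' % (attempt, wait))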

View File

@ -57,7 +57,7 @@ from cinder.common import sqlalchemyutils
from cinder import db
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder.i18n import _, _LW, _LE, _LI
from cinder.i18n import _
from cinder.objects import fields
from cinder import utils
@ -120,7 +120,7 @@ def get_backend():
def is_admin_context(context):
"""Indicates if the request context is an administrator."""
if not context:
LOG.warning(_LW('Use of empty request context is deprecated'),
LOG.warning('Use of empty request context is deprecated',
DeprecationWarning)
raise Exception('die')
return context.is_admin
@ -234,8 +234,8 @@ def _retry_on_deadlock(f):
try:
return f(*args, **kwargs)
except db_exc.DBDeadlock:
LOG.warning(_LW("Deadlock detected when running "
"'%(func_name)s': Retrying..."),
LOG.warning("Deadlock detected when running "
"'%(func_name)s': Retrying...",
dict(func_name=f.__name__))
# Retry!
time.sleep(0.5)
@ -1277,8 +1277,8 @@ def quota_reserve(context, resources, quotas, deltas, expire,
usages[resource].reserved += delta
if unders:
LOG.warning(_LW("Change will make usage less than 0 for the following "
"resources: %s"), unders)
LOG.warning("Change will make usage less than 0 for the following "
"resources: %s", unders)
if overs:
usages = {k: dict(in_use=v.in_use, reserved=v.reserved,
allocated=allocated.get(k, 0))
@ -3898,8 +3898,7 @@ def volume_type_destroy(context, id):
session=session).filter(
models.ConsistencyGroup.volume_type_id.contains(id)).count()
if results or group_count or cg_count:
LOG.error(_LE('VolumeType %s deletion failed, '
'VolumeType in use.'), id)
LOG.error('VolumeType %s deletion failed, VolumeType in use.', id)
raise exception.VolumeTypeInUse(volume_type_id=id)
updated_values = {'deleted': True,
'deleted_at': utcnow,
@ -3929,8 +3928,8 @@ def group_type_destroy(context, id):
# results = model_query(context, models.Group, session=session). \
# filter_by(group_type_id=id).all()
# if results:
# LOG.error(_LE('GroupType %s deletion failed, '
# 'GroupType in use.'), id)
# LOG.error('GroupType %s deletion failed, '
# 'GroupType in use.', id)
# raise exception.GroupTypeInUse(group_type_id=id)
model_query(context, models.GroupTypes, session=session).\
filter_by(id=id).\
@ -6086,8 +6085,8 @@ def purge_deleted_rows(context, age_in_days):
for table in reversed(metadata.sorted_tables):
if 'deleted' not in table.columns.keys():
continue
LOG.info(_LI('Purging deleted rows older than age=%(age)d days '
'from table=%(table)s'), {'age': age_in_days,
LOG.info('Purging deleted rows older than age=%(age)d days '
'from table=%(table)s', {'age': age_in_days,
'table': table})
deleted_age = timeutils.utcnow() - dt.timedelta(days=age_in_days)
try:
@ -6104,14 +6103,14 @@ def purge_deleted_rows(context, age_in_days):
table.delete()
.where(table.c.deleted_at < deleted_age))
except db_exc.DBReferenceError as ex:
LOG.error(_LE('DBError detected when purging from '
'%(tablename)s: %(error)s.'),
{'tablename': table, 'error': six.text_type(ex)})
LOG.error('DBError detected when purging from '
'%(tablename)s: %(error)s.',
{'tablename': table, 'error': ex})
raise
rows_purged = result.rowcount
if rows_purged != 0:
LOG.info(_LI("Deleted %(row)d rows from table=%(table)s"),
LOG.info("Deleted %(row)d rows from table=%(table)s",
{'row': rows_purged, 'table': table})

View File

@ -32,7 +32,7 @@ import webob.exc
from webob.util import status_generic_reasons
from webob.util import status_reasons
from cinder.i18n import _, _LE
from cinder.i18n import _
LOG = logging.getLogger(__name__)
@ -108,9 +108,9 @@ class CinderException(Exception):
exc_info = sys.exc_info()
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
LOG.exception('Exception in string format operation')
for name, value in kwargs.items():
LOG.error(_LE("%(name)s: %(value)s"),
LOG.error("%(name)s: %(value)s",
{'name': name, 'value': value})
if CONF.fatal_exception_format_errors:
six.reraise(*exc_info)
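
The fallback above keeps exception construction from crashing when kwargs do not match the format string. A small sketch of the same defensive logging, with a deliberately wrong key:

import logging

logging.basicConfig(level=logging.ERROR)
LOG = logging.getLogger(__name__)

message = "Volume %(volume_id)s could not be found."
kwargs = {'volume': 'vol-1234'}  # wrong key on purpose

try:
    message % kwargs
except KeyError:
    # kwargs don't match a variable in the message:
    # log the issue and the kwargs rather than raising.
    LOG.exception('Exception in string format operation')
    for name, value in kwargs.items():
        LOG.error("%(name)s: %(value)s", {'name': name, 'value': value})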

View File

@ -29,7 +29,7 @@ from oslo_utils import uuidutils
from cinder import db
from cinder.db import base
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.i18n import _
from cinder import objects
from cinder.objects import base as objects_base
from cinder.objects import fields as c_fields
@ -117,9 +117,8 @@ class API(base.Base):
availability_zone = (
CONF.default_availability_zone or
CONF.storage_availability_zone)
LOG.warning(_LW("Availability zone '%(s_az)s' "
"not found, falling back to "
"'%(s_fallback_az)s'."),
LOG.warning("Availability zone '%(s_az)s' not found, falling "
"back to '%(s_fallback_az)s'.",
{'s_az': original_az,
's_fallback_az': availability_zone})
else:
@ -159,8 +158,8 @@ class API(base.Base):
group.create()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when creating group"
" %s."), name)
LOG.error("Error occurred when creating group"
" %s.", name)
request_spec_list = []
filter_properties_list = []
@ -222,19 +221,18 @@ class API(base.Base):
source_group_id=source_group_id)
except exception.GroupNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Source Group %(source_group)s not found when "
"creating group %(group)s from "
"source."),
LOG.error("Source Group %(source_group)s not found when "
"creating group %(group)s from source.",
{'group': name, 'source_group': source_group_id})
except exception.GroupSnapshotNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Group snapshot %(group_snap)s not found when "
"creating group %(group)s from source."),
LOG.error("Group snapshot %(group_snap)s not found when "
"creating group %(group)s from source.",
{'group': name, 'group_snap': group_snapshot_id})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when creating group"
" %(group)s from group_snapshot %(grp_snap)s."),
LOG.error("Error occurred when creating group"
" %(group)s from group_snapshot %(grp_snap)s.",
{'group': name, 'grp_snap': group_snapshot_id})
# Update quota for groups
@ -286,9 +284,9 @@ class API(base.Base):
except exception.GroupVolumeTypeMappingExists:
# Only need to create one group volume_type mapping
# entry for the same combination, skipping.
LOG.info(_LI("A mapping entry already exists for group"
LOG.info("A mapping entry already exists for group"
" %(grp)s and volume type %(vol_type)s. "
"Do not need to create again."),
"Do not need to create again.",
{'grp': group.id,
'vol_type': volume_type_id})
pass
@ -306,10 +304,10 @@ class API(base.Base):
**kwargs)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when creating volume "
LOG.error("Error occurred when creating volume "
"entry from snapshot in the process of "
"creating group %(group)s "
"from group snapshot %(group_snap)s."),
"from group snapshot %(group_snap)s.",
{'group': group.id,
'group_snap': group_snapshot.id})
except Exception:
@ -317,9 +315,8 @@ class API(base.Base):
try:
group.destroy()
finally:
LOG.error(_LE("Error occurred when creating group "
"%(group)s from group snapshot "
"%(group_snap)s."),
LOG.error("Error occurred when creating group "
"%(group)s from group snapshot %(group_snap)s.",
{'group': group.id,
'group_snap': group_snapshot.id})
@ -364,9 +361,9 @@ class API(base.Base):
except exception.GroupVolumeTypeMappingExists:
# Only need to create one group volume_type mapping
# entry for the same combination, skipping.
LOG.info(_LI("A mapping entry already exists for group"
LOG.info("A mapping entry already exists for group"
" %(grp)s and volume type %(vol_type)s. "
"Do not need to create again."),
"Do not need to create again.",
{'grp': group.id,
'vol_type': volume_type_id})
pass
@ -384,10 +381,10 @@ class API(base.Base):
**kwargs)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when creating cloned "
LOG.error("Error occurred when creating cloned "
"volume in the process of creating "
"group %(group)s from "
"source group %(source_group)s."),
"source group %(source_group)s.",
{'group': group.id,
'source_group': source_group.id})
except Exception:
@ -395,9 +392,9 @@ class API(base.Base):
try:
group.destroy()
finally:
LOG.error(_LE("Error occurred when creating "
LOG.error("Error occurred when creating "
"group %(group)s from source group "
"%(source_group)s."),
"%(source_group)s.",
{'group': group.id,
'source_group': source_group.id})
@ -467,9 +464,8 @@ class API(base.Base):
try:
group.destroy()
finally:
LOG.error(_LE("Error occurred when building "
"request spec list for group "
"%s."), group.id)
LOG.error("Error occurred when building request spec "
"list for group %s.", group.id)
# Cast to the scheduler and let it handle whatever is needed
# to select the target host for this group.
@ -497,8 +493,7 @@ class API(base.Base):
quota_utils.process_reserve_over_quota(
context, e, resource='groups')
finally:
LOG.error(_LE("Failed to update quota for "
"group %s."), group.id)
LOG.error("Failed to update quota for group %s.", group.id)
@wrap_check_policy
def delete(self, context, group, delete_volumes=False):
@ -823,8 +818,8 @@ class API(base.Base):
if group_snapshot.obj_attr_is_set('id'):
group_snapshot.destroy()
finally:
LOG.error(_LE("Error occurred when creating group_snapshot"
" %s."), group_snapshot_id)
LOG.error("Error occurred when creating group_snapshot"
" %s.", group_snapshot_id)
self.volume_rpcapi.create_group_snapshot(context, group_snapshot)

View File

@ -27,16 +27,6 @@ _translators = i18n.TranslatorFactory(domain=DOMAIN)
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
def enable_lazy(enable=True):
return i18n.enable_lazy(enable)
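
With the log translators gone, cinder.i18n keeps only the primary translator, and the working convention becomes: wrap user-facing exception messages in _(), log plain strings. A hedged sketch of that split (the module layout mirrors the file above; delete_key is an illustrative helper, not cinder code):

import logging

import oslo_i18n as i18n

_translators = i18n.TranslatorFactory(domain='cinder')
_ = _translators.primary  # still translated: messages shown to users

LOG = logging.getLogger(__name__)

def delete_key(managed_object_id):
    # Log message: plain string, no _LI/_LW/_LE marker.
    LOG.warning("Not deleting key %s", managed_object_id)
    # API-visible message: still marked for translation.
    raise ValueError(_("cannot delete non-existent key %s") % managed_object_id)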

View File

@ -19,7 +19,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from cinder.i18n import _LW
from cinder import objects
from cinder import rpc
from cinder import utils
@ -176,8 +175,8 @@ class ImageVolumeCache(object):
# to 0.
if self.max_cache_size_gb > 0:
if current_size > self.max_cache_size_gb > 0:
LOG.warning(_LW('Image-volume cache for %(service)s does '
'not have enough space (GB).'),
LOG.warning('Image-volume cache for %(service)s does '
'not have enough space (GB).',
{'service': volume.service_topic_queue})
return False

View File

@ -36,7 +36,7 @@ from six.moves import range
from six.moves import urllib
from cinder import exception
from cinder.i18n import _, _LE
from cinder.i18n import _
glance_opts = [
@ -198,7 +198,7 @@ class GlanceClientWrapper(object):
except retry_excs as e:
netloc = self.netloc
extra = "retrying"
error_msg = _LE("Error contacting glance server "
error_msg = _("Error contacting glance server "
"'%(netloc)s' for '%(method)s', "
"%(extra)s.")
if attempt == num_attempts:

View File

@ -42,7 +42,7 @@ from oslo_utils import units
import psutil
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.i18n import _
from cinder import utils
from cinder.volume import throttling
from cinder.volume import utils as volume_utils
@ -88,7 +88,7 @@ def get_qemu_img_version():
pattern = r"qemu-img version ([0-9\.]*)"
version = re.match(pattern, info)
if not version:
LOG.warning(_LW("qemu-img is not installed."))
LOG.warning("qemu-img is not installed.")
return None
return _get_version_from_string(version.groups()[0])
@ -149,7 +149,7 @@ def _convert_image(prefix, source, dest, out_format, run_as_root=True):
image_size = qemu_img_info(source,
run_as_root=run_as_root).virtual_size
except ValueError as e:
msg = _LI("The image was successfully converted, but image size "
msg = ("The image was successfully converted, but image size "
"is unavailable. src %(src)s, dest %(dest)s. %(error)s")
LOG.info(msg, {"src": source,
"dest": dest,
@ -165,7 +165,7 @@ def _convert_image(prefix, source, dest, out_format, run_as_root=True):
"duration": duration,
"dest": dest})
msg = _LI("Converted %(sz).2f MB image at %(mbps).2f MB/s")
msg = "Converted %(sz).2f MB image at %(mbps).2f MB/s"
LOG.info(msg, {"sz": fsz_mb, "mbps": mbps})
@ -198,9 +198,9 @@ def fetch(context, image_service, image_id, path, _user_id, _project_id):
with excutils.save_and_reraise_exception():
if e.errno == errno.ENOSPC:
# TODO(eharney): Fire an async error message for this
LOG.error(_LE("No space left in image_conversion_dir "
LOG.error("No space left in image_conversion_dir "
"path (%(path)s) while fetching "
"image %(image)s."),
"image %(image)s.",
{'path': os.path.dirname(path),
'image': image_id})
@ -217,7 +217,7 @@ def fetch(context, image_service, image_id, path, _user_id, _project_id):
LOG.debug(msg, {"dest": image_file.name,
"sz": fsz_mb,
"duration": duration})
msg = _LI("Image download %(sz).2f MB at %(mbps).2f MB/s")
msg = "Image download %(sz).2f MB at %(mbps).2f MB/s"
LOG.info(msg, {"sz": fsz_mb, "mbps": mbps})
@ -530,8 +530,8 @@ def cleanup_temporary_file(backend_name):
path = os.path.join(temp_dir, tmp_file)
os.remove(path)
except OSError as e:
LOG.warning(_LW("Exception caught while clearing temporary image "
"files: %s"), e)
LOG.warning("Exception caught while clearing temporary image "
"files: %s", e)
@contextlib.contextmanager

View File

@ -19,8 +19,6 @@ from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import importutils
from cinder.i18n import _LW
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -46,13 +44,13 @@ def set_overrides(conf):
try:
api_class = conf.key_manager.api_class
except cfg.NoSuchOptError:
LOG.warning(_LW("key_manager.api_class is not set, will use deprecated"
" option keymgr.api_class if set"))
LOG.warning("key_manager.api_class is not set, will use deprecated"
" option keymgr.api_class if set")
try:
api_class = CONF.keymgr.api_class
should_override = True
except cfg.NoSuchOptError:
LOG.warning(_LW("keymgr.api_class is not set"))
LOG.warning("keymgr.api_class is not set")
deprecated_barbican = 'cinder.keymgr.barbican.BarbicanKeyManager'
barbican = 'castellan.key_manager.barbican_key_manager.BarbicanKeyManager'
@ -72,7 +70,7 @@ def set_overrides(conf):
should_override = True
# TODO(kfarr): key_manager.api_class should be set in DevStack, and
# this block can be removed
LOG.warning(_LW("key manager not set, using insecure default %s"),
LOG.warning("key manager not set, using insecure default %s",
castellan_mock)
api_class = castellan_mock

View File

@ -39,7 +39,7 @@ from oslo_config import cfg
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _, _LW
from cinder.i18n import _
key_mgr_opts = [
@ -67,8 +67,8 @@ class ConfKeyManager(key_manager.KeyManager):
def __init__(self, configuration):
if not ConfKeyManager.warning_logged:
LOG.warning(_LW('This key manager is insecure and is not '
'recommended for production deployments'))
LOG.warning('This key manager is insecure and is not '
'recommended for production deployments')
ConfKeyManager.warning_logged = True
super(ConfKeyManager, self).__init__(configuration)
@ -143,4 +143,4 @@ class ConfKeyManager(key_manager.KeyManager):
raise exception.KeyManagerError(
reason="cannot delete non-existent key")
LOG.warning(_LW("Not deleting key %s"), managed_object_id)
LOG.warning("Not deleting key %s", managed_object_id)

View File

@ -62,7 +62,6 @@ from cinder import context
from cinder import db
from cinder.db import base
from cinder import exception
from cinder.i18n import _LE, _LI, _LW
from cinder import objects
from cinder import rpc
from cinder.scheduler import rpcapi as scheduler_rpcapi
@ -141,7 +140,7 @@ class Manager(base.Base, PeriodicTasks):
We're utilizing it to reset RPC API version pins to avoid restart of
the service when rolling upgrade is completed.
"""
LOG.info(_LI('Resetting cached RPC version pins.'))
LOG.info('Resetting cached RPC version pins.')
rpc.LAST_OBJ_VERSIONS = {}
rpc.LAST_RPC_VERSIONS = {}
@ -198,7 +197,7 @@ class SchedulerDependentManager(ThreadPoolManager):
# This means we have Newton's c-sch in the deployment, so
# rpcapi cannot send the message. We can safely ignore the
# error. Log it because it shouldn't happen after upgrade.
msg = _LW("Failed to notify about cinder-volume service "
msg = ("Failed to notify about cinder-volume service "
"capabilities for host %(host)s. This is normal "
"during a live upgrade. Error: %(e)s")
LOG.warning(msg, {'host': self.host, 'e': e})
@ -210,7 +209,7 @@ class SchedulerDependentManager(ThreadPoolManager):
class CleanableManager(object):
def do_cleanup(self, context, cleanup_request):
LOG.info(_LI('Initiating service %s cleanup'),
LOG.info('Initiating service %s cleanup',
cleanup_request.service_id)
# If the 'until' field in the cleanup request is not set, we default to
@ -264,8 +263,8 @@ class CleanableManager(object):
'exp_sts': clean.status,
'found_sts': vo.status})
else:
LOG.info(_LI('Cleaning %(type)s with id %(id)s and status '
'%(status)s'),
LOG.info('Cleaning %(type)s with id %(id)s and status '
'%(status)s',
{'type': clean.resource_type,
'id': clean.resource_id,
'status': clean.status},
@ -276,7 +275,7 @@ class CleanableManager(object):
# of it
keep_entry = self._do_cleanup(context, vo)
except Exception:
LOG.exception(_LE('Could not perform cleanup.'))
LOG.exception('Could not perform cleanup.')
# Return the worker DB entry to the original service
db.worker_update(context, clean.id,
service_id=original_service_id,
@ -288,10 +287,9 @@ class CleanableManager(object):
# method doesn't want to keep the entry (for example for delayed
# deletion).
if not keep_entry and not db.worker_destroy(context, id=clean.id):
LOG.warning(_LW('Could not remove worker entry %s.'), clean.id)
LOG.warning('Could not remove worker entry %s.', clean.id)
LOG.info(_LI('Service %s cleanup completed.'),
cleanup_request.service_id)
LOG.info('Service %s cleanup completed.', cleanup_request.service_id)
def _do_cleanup(self, ctxt, vo_resource):
return False

View File

@ -19,7 +19,6 @@ from oslo_log import log as logging
from oslo_utils import timeutils
from cinder.db import base
from cinder.i18n import _LE, _LI
from cinder.message import defined_messages
@ -39,7 +38,7 @@ class API(base.Base):
def create(self, context, event_id, project_id, resource_type=None,
resource_uuid=None, level="ERROR"):
"""Create a message with the specified information."""
LOG.info(_LI("Creating message record for request_id = %s"),
LOG.info("Creating message record for request_id = %s",
context.request_id)
# Ensure valid event_id
defined_messages.get_message_text(event_id)
@ -57,8 +56,8 @@ class API(base.Base):
try:
self.db.message_create(context, message_record)
except Exception:
LOG.exception(_LE("Failed to create message record "
"for request_id %s"), context.request_id)
LOG.exception("Failed to create message record "
"for request_id %s", context.request_id)
def get(self, context, id):
"""Return message with the specified id."""

View File

@ -15,7 +15,7 @@ from oslo_log import log as logging
from cinder import db
from cinder import exception
from cinder.i18n import _, _LE
from cinder.i18n import _
from cinder import objects
from cinder.objects import base
from cinder.objects import fields as c_fields
@ -149,7 +149,7 @@ class QualityOfServiceSpecs(base.CinderPersistentObject,
LOG.exception(msg)
raise exception.Invalid(msg)
except db_exc.DBError:
LOG.exception(_LE('DB error occurred when creating QoS specs.'))
LOG.exception('DB error occurred when creating QoS specs.')
raise exception.QoSSpecsCreateFailed(name=self.name,
qos_specs=self.specs)
# Save ID with the object

View File

@ -29,7 +29,7 @@ import six
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _, _LE
from cinder.i18n import _
from cinder import quota_utils
@ -1044,8 +1044,7 @@ class QuotaEngine(object):
# usage resynchronization and the reservation expiration
# mechanisms will resolve the issue. The exception is
# logged, however, because this is less than optimal.
LOG.exception(_LE("Failed to commit "
"reservations %s"), reservations)
LOG.exception("Failed to commit reservations %s", reservations)
def rollback(self, context, reservations, project_id=None):
"""Roll back reservations.
@ -1065,8 +1064,7 @@ class QuotaEngine(object):
# usage resynchronization and the reservation expiration
# mechanisms will resolve the issue. The exception is
# logged, however, because this is less than optimal.
LOG.exception(_LE("Failed to roll back reservations "
"%s"), reservations)
LOG.exception("Failed to roll back reservations %s", reservations)
def destroy_by_project(self, context, project_id):
"""Destroy all quota limits associated with a project.

View File

@ -22,7 +22,7 @@ from keystoneclient import exceptions
from cinder import db
from cinder import exception
from cinder.i18n import _, _LW
from cinder.i18n import _
CONF = cfg.CONF
CONF.import_opt('auth_uri', 'keystonemiddleware.auth_token.__init__',
@ -265,7 +265,7 @@ def process_reserve_over_quota(context, over_quota_exception,
for over in overs:
if 'gigabytes' in over:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
msg = ("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)dG %(s_resource)s (%(d_consumed)dG of "
"%(d_quota)dG already consumed).")
LOG.warning(msg, {'s_pid': context.project_id,
@ -284,7 +284,7 @@ def process_reserve_over_quota(context, over_quota_exception,
quota=quotas[over])
if (resource in OVER_QUOTA_RESOURCE_EXCEPTIONS.keys() and
resource in over):
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
msg = ("Quota exceeded for %(s_pid)s, tried to create "
"%(s_resource)s (%(d_consumed)d %(s_resource)ss "
"already consumed).")
LOG.warning(msg, {'s_pid': context.project_id,
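
Worked example of the second template above: the extra 's' written directly after the %(s_resource)s placeholder pluralizes the resource name in the rendered warning. The values are made up:

msg = ("Quota exceeded for %(s_pid)s, tried to create "
       "%(s_resource)s (%(d_consumed)d %(s_resource)ss "
       "already consumed).")
print(msg % {'s_pid': 'demo-project',
             's_resource': 'snapshot',
             'd_consumed': 10})
# Quota exceeded for demo-project, tried to create snapshot
# (10 snapshots already consumed).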

View File

@ -35,7 +35,7 @@ import six
import cinder.context
import cinder.exception
from cinder.i18n import _, _LE, _LI
from cinder.i18n import _
from cinder import objects
from cinder.objects import base
from cinder import utils
@ -93,7 +93,7 @@ def initialized():
def cleanup():
global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER
if NOTIFIER is None:
LOG.exception(_LE("RPC cleanup: NOTIFIER is None"))
LOG.exception("RPC cleanup: NOTIFIER is None")
TRANSPORT.cleanup()
NOTIFICATION_TRANSPORT.cleanup()
TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None
@ -256,8 +256,8 @@ class RPCAPI(object):
# If there is no service we assume they will come up later and will
# have the same version as we do.
version_cap = cls.RPC_API_VERSION
LOG.info(_LI('Automatically selected %(binary)s RPC version '
'%(version)s as minimum service version.'),
LOG.info('Automatically selected %(binary)s RPC version '
'%(version)s as minimum service version.',
{'binary': cls.BINARY, 'version': version_cap})
LAST_RPC_VERSIONS[cls.BINARY] = version_cap
return version_cap
@ -274,8 +274,8 @@ class RPCAPI(object):
# have the same version as we do.
if not version_cap:
version_cap = base.OBJ_VERSIONS.get_current()
LOG.info(_LI('Automatically selected %(binary)s objects version '
'%(version)s as minimum service version.'),
LOG.info('Automatically selected %(binary)s objects version '
'%(version)s as minimum service version.',
{'binary': cls.BINARY, 'version': version_cap})
LAST_OBJ_VERSIONS[cls.BINARY] = version_cap
return version_cap

View File

@ -17,9 +17,7 @@
Filter support
"""
from oslo_log import log as logging
import six
from cinder.i18n import _LI
from cinder.scheduler import base_handler
LOG = logging.getLogger(__name__)
@ -69,22 +67,17 @@ class BaseFilterHandler(base_handler.BaseHandler):
# Log the filtration history
rspec = filter_properties.get("request_spec", {})
msg_dict = {"vol_id": rspec.get("volume_id", ""),
"str_results": six.text_type(full_filter_results),
}
full_msg = ("Filtering removed all hosts for the request with "
"volume ID "
"'%(vol_id)s'. Filter results: %(str_results)s"
) % msg_dict
"str_results": full_filter_results}
LOG.debug("Filtering removed all hosts for the request with "
"volume ID '%(vol_id)s'. Filter results: %(str_results)s",
msg_dict)
msg_dict["str_results"] = ', '.join(
_LI("%(cls_name)s: (start: %(start)s, end: %(end)s)") % {
"%(cls_name)s: (start: %(start)s, end: %(end)s)" % {
"cls_name": value[0], "start": value[1], "end": value[2]}
for value in part_filter_results)
part_msg = _LI("Filtering removed all hosts for the request with "
"volume ID "
"'%(vol_id)s'. Filter results: %(str_results)s"
) % msg_dict
LOG.debug(full_msg)
LOG.info(part_msg)
LOG.info("Filtering removed all hosts for the request with "
"volume ID '%(vol_id)s'. Filter results: %(str_results)s",
msg_dict)
def get_filtered_objects(self, filter_classes, objs,
filter_properties, index=0):
@ -115,7 +108,7 @@ class BaseFilterHandler(base_handler.BaseHandler):
if filter_class.run_filter_for_index(index):
objs = filter_class.filter_all(list_objs, filter_properties)
if objs is None:
LOG.info(_LI("Filter %s returned 0 hosts"), cls_name)
LOG.info("Filter %s returned 0 hosts", cls_name)
full_filter_results.append((cls_name, None))
list_objs = None
break
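
The rewritten logging above renders each entry of part_filter_results as '<Filter>: (start: N, end: M)'. A tiny reconstruction of the join expression, using the counts from the unit test removed further down:

part_filter_results = [('FilterA', 3, 2), ('FilterA', 2, 1)]
summary = ', '.join(
    "%(cls_name)s: (start: %(start)s, end: %(end)s)" % {
        "cls_name": value[0], "start": value[1], "end": value[2]}
    for value in part_filter_results)
print(summary)
# FilterA: (start: 3, end: 2), FilterA: (start: 2, end: 1)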

View File

@ -25,7 +25,7 @@ from oslo_log import log as logging
from oslo_serialization import jsonutils
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.i18n import _
from cinder.scheduler import driver
from cinder.scheduler import scheduler_options
from cinder.volume import utils
@ -246,8 +246,8 @@ class FilterScheduler(driver.Scheduler):
return # no previously attempted hosts, skip
last_backend = backends[-1]
LOG.error(_LE("Error scheduling %(volume_id)s from last vol-service: "
"%(last_backend)s : %(exc)s"),
LOG.error("Error scheduling %(volume_id)s from last vol-service: "
"%(last_backend)s : %(exc)s",
{'volume_id': volume_id,
'last_backend': last_backend,
'exc': exc})
@ -631,8 +631,8 @@ class FilterScheduler(driver.Scheduler):
if backend_id != group_backend:
weighed_backends.remove(backend)
if not weighed_backends:
LOG.warning(_LW('No weighed backend found for volume '
'with properties: %s'),
LOG.warning('No weighed backend found for volume '
'with properties: %s',
filter_properties['request_spec'].get('volume_type'))
return None
return self._choose_top_backend(weighed_backends, request_spec)

View File

@ -21,7 +21,6 @@ import math
from oslo_log import log as logging
from cinder.i18n import _LE, _LW
from cinder.scheduler import filters
@ -63,8 +62,8 @@ class CapacityFilter(filters.BaseBackendFilter):
if backend_state.free_capacity_gb is None:
# Fail Safe
LOG.error(_LE("Free capacity not set: "
"volume node info collection broken."))
LOG.error("Free capacity not set: "
"volume node info collection broken.")
return False
free_space = backend_state.free_capacity_gb
@ -88,9 +87,9 @@ class CapacityFilter(filters.BaseBackendFilter):
return False
total = float(total_space)
if total <= 0:
LOG.warning(_LW("Insufficient free space for volume creation. "
LOG.warning("Insufficient free space for volume creation. "
"Total capacity is %(total).2f on %(grouping)s "
"%(grouping_name)s."),
"%(grouping_name)s.",
{"total": total,
"grouping": grouping,
"grouping_name": backend_state.backend_id})
@ -125,12 +124,12 @@ class CapacityFilter(filters.BaseBackendFilter):
"grouping": grouping,
"grouping_name": backend_state.backend_id,
}
LOG.warning(_LW(
LOG.warning(
"Insufficient free space for thin provisioning. "
"The ratio of provisioned capacity over total capacity "
"%(provisioned_ratio).2f has exceeded the maximum over "
"subscription ratio %(oversub_ratio).2f on %(grouping)s "
"%(grouping_name)s."), msg_args)
"%(grouping_name)s.", msg_args)
return False
else:
# Thin provisioning is enabled and projected over-subscription
@ -143,10 +142,10 @@ class CapacityFilter(filters.BaseBackendFilter):
free * backend_state.max_over_subscription_ratio)
return adjusted_free_virtual >= requested_size
elif thin and backend_state.thin_provisioning_support:
LOG.warning(_LW("Filtering out %(grouping)s %(grouping_name)s "
LOG.warning("Filtering out %(grouping)s %(grouping_name)s "
"with an invalid maximum over subscription ratio "
"of %(oversub_ratio).2f. The ratio should be a "
"minimum of 1.0."),
"minimum of 1.0.",
{"oversub_ratio":
backend_state.max_over_subscription_ratio,
"grouping": grouping,
@ -159,9 +158,9 @@ class CapacityFilter(filters.BaseBackendFilter):
"available": free}
if free < requested_size:
LOG.warning(_LW("Insufficient free space for volume creation "
LOG.warning("Insufficient free space for volume creation "
"on %(grouping)s %(grouping_name)s (requested / "
"avail): %(requested)s/%(available)s"),
"avail): %(requested)s/%(available)s",
msg_args)
return False
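
A worked sketch of the thin-provisioning branch above, with illustrative numbers; the variable names mirror the backend fields the filter reads:

total = 100.0                  # total_capacity_gb
free = 30.0                    # free_capacity_gb
provisioned = 180.0            # provisioned_capacity_gb
requested_size = 10
max_over_subscription_ratio = 2.0

provisioned_ratio = (provisioned + requested_size) / total
if provisioned_ratio > max_over_subscription_ratio:
    print("Insufficient free space for thin provisioning: "
          "%.2f exceeds %.2f" % (provisioned_ratio,
                                 max_over_subscription_ratio))
else:
    # Projected over-subscription is acceptable; compare the request
    # against virtual free space scaled by the ratio.
    adjusted_free_virtual = free * max_over_subscription_ratio
    print("Fits:", adjusted_free_virtual >= requested_size)  # Fits: True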

View File

@ -16,7 +16,6 @@
from oslo_log import log as logging
import six
from cinder.i18n import _LW
from cinder.scheduler.evaluator import evaluator
from cinder.scheduler import filters
@ -60,8 +59,8 @@ class DriverFilter(filters.BaseBackendFilter):
except Exception as ex:
# Warn the admin for now that there is an error in the
# filter function.
LOG.warning(_LW("Error in filtering function "
"'%(function)s' : '%(error)s' :: failing backend"),
LOG.warning("Error in filtering function "
"'%(function)s' : '%(error)s' :: failing backend",
{'function': stats['filter_function'],
'error': ex, })
return False

View File

@ -18,7 +18,7 @@ from oslo_utils import uuidutils
from cinder.compute import nova
from cinder import exception
from cinder.i18n import _, _LW
from cinder.i18n import _
from cinder.scheduler import filters
from cinder.volume import utils as volume_utils
@ -96,8 +96,8 @@ class InstanceLocalityFilter(filters.BaseBackendFilter):
return self._cache[instance_uuid] == backend
if not self._nova_has_extended_server_attributes(context):
LOG.warning(_LW('Hint "%s" dropped because '
'ExtendedServerAttributes not active in Nova.'),
LOG.warning('Hint "%s" dropped because '
'ExtendedServerAttributes not active in Nova.',
HINT_KEYWORD)
raise exception.CinderException(_('Hint "%s" not supported.') %
HINT_KEYWORD)
@ -107,10 +107,10 @@ class InstanceLocalityFilter(filters.BaseBackendFilter):
timeout=REQUESTS_TIMEOUT)
if not hasattr(server, INSTANCE_HOST_PROP):
LOG.warning(_LW('Hint "%s" dropped because Nova did not return '
LOG.warning('Hint "%s" dropped because Nova did not return '
'enough information. Either Nova policy needs to '
'be changed or a privileged account for Nova '
'should be specified in conf.'), HINT_KEYWORD)
'should be specified in conf.', HINT_KEYWORD)
raise exception.CinderException(_('Hint "%s" not supported.') %
HINT_KEYWORD)

View File

@ -17,7 +17,6 @@ from taskflow.patterns import linear_flow
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _LE
from cinder.message import api as message_api
from cinder.message import defined_messages
from cinder.message import resource_types
@ -96,7 +95,7 @@ class ScheduleCreateVolumeTask(flow_utils.CinderTask):
try:
self._notify_failure(context, request_spec, cause)
finally:
LOG.error(_LE("Failed to run task %(name)s: %(cause)s"),
LOG.error("Failed to run task %(name)s: %(cause)s",
{'cause': cause, 'name': self.name})
@utils.if_notifications_enabled
@ -114,8 +113,8 @@ class ScheduleCreateVolumeTask(flow_utils.CinderTask):
rpc.get_notifier('scheduler').error(context, self.FAILURE_TOPIC,
payload)
except exception.CinderException:
LOG.exception(_LE("Failed notifying on %(topic)s "
"payload %(payload)s"),
LOG.exception("Failed notifying on %(topic)s "
"payload %(payload)s",
{'topic': self.FAILURE_TOPIC, 'payload': payload})
def execute(self, context, request_spec, filter_properties, volume):

View File

@ -29,9 +29,8 @@ from cinder.common import constants
from cinder import context as cinder_context
from cinder import exception
from cinder import objects
from cinder import utils
from cinder.i18n import _LI, _LW
from cinder.scheduler import filters
from cinder import utils
from cinder.volume import utils as vol_utils
@ -484,8 +483,7 @@ class HostManager(object):
# Ignore older updates
if capab_old['timestamp'] and timestamp < capab_old['timestamp']:
LOG.info(_LI('Ignoring old capability report from %s.'),
backend)
LOG.info('Ignoring old capability report from %s.', backend)
return
# If the capabilities are not changed and the timestamp is older,
@ -559,7 +557,7 @@ class HostManager(object):
for service in volume_services.objects:
host = service.host
if not service.is_up:
LOG.warning(_LW("volume service is down. (host: %s)"), host)
LOG.warning("volume service is down. (host: %s)", host)
continue
backend_key = service.service_topic_queue
@ -601,8 +599,8 @@ class HostManager(object):
# the map when we are removing it because it has been added to a
# cluster.
if backend_key not in active_hosts:
LOG.info(_LI("Removing non-active backend: %(backend)s from "
"scheduler cache."), {'backend': backend_key})
LOG.info("Removing non-active backend: %(backend)s from "
"scheduler cache.", {'backend': backend_key})
del self.backend_state_map[backend_key]
def get_all_backend_states(self, context):

View File

@ -36,7 +36,7 @@ from cinder import context
from cinder import db
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI
from cinder.i18n import _
from cinder import manager
from cinder import objects
from cinder import quota
@ -141,15 +141,15 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
request_spec_list,
filter_properties_list)
except exception.NoValidBackend:
LOG.error(_LE("Could not find a backend for consistency group "
"%(group_id)s."),
LOG.error("Could not find a backend for consistency group "
"%(group_id)s.",
{'group_id': group.id})
group.status = 'error'
group.save()
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to create consistency group "
"%(group_id)s."),
LOG.exception("Failed to create consistency group "
"%(group_id)s.",
{'group_id': group.id})
group.status = 'error'
group.save()
@ -166,15 +166,15 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
group_filter_properties,
filter_properties_list)
except exception.NoValidBackend:
LOG.error(_LE("Could not find a backend for group "
"%(group_id)s."),
LOG.error("Could not find a backend for group "
"%(group_id)s.",
{'group_id': group.id})
group.status = 'error'
group.save()
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to create generic group "
"%(group_id)s."),
LOG.exception("Failed to create generic group "
"%(group_id)s.",
{'group_id': group.id})
group.status = 'error'
group.save()
@ -370,7 +370,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
request_spec, msg=None):
# TODO(harlowja): move into a task that just does this later.
if not msg:
msg = (_LE("Failed to schedule_%(method)s: %(ex)s") %
msg = ("Failed to schedule_%(method)s: %(ex)s" %
{'method': method, 'ex': six.text_type(ex)})
LOG.error(msg)
@ -445,7 +445,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
if self.upgrading_cloud:
raise exception.UnavailableDuringUpgrade(action='workers cleanup')
LOG.info(_LI('Workers cleanup request started.'))
LOG.info('Workers cleanup request started.')
filters = dict(service_id=cleanup_request.service_id,
cluster_name=cleanup_request.cluster_name,
@ -475,7 +475,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
# If it's a scheduler or the service is up, send the request.
if not dest or dest.is_up:
LOG.info(_LI('Sending cleanup for %(binary)s %(dest_name)s.'),
LOG.info('Sending cleanup for %(binary)s %(dest_name)s.',
{'binary': service.binary,
'dest_name': dest_name})
cleanup_rpc(context, cleanup_request)
@ -483,11 +483,11 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
# We don't send cleanup requests when there are no services alive
# to do the cleanup.
else:
LOG.info(_LI('No service available to cleanup %(binary)s '
'%(dest_name)s.'),
LOG.info('No service available to cleanup %(binary)s '
'%(dest_name)s.',
{'binary': service.binary,
'dest_name': dest_name})
not_requested.append(service)
LOG.info(_LI('Cleanup requests completed.'))
LOG.info('Cleanup requests completed.')
return requested, not_requested

View File

@ -28,8 +28,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from cinder.i18n import _LE
scheduler_json_config_location_opt = cfg.StrOpt(
'scheduler_json_config_location',
@ -66,8 +64,8 @@ class SchedulerOptions(object):
try:
return os.path.getmtime(filename)
except os.error:
LOG.exception(_LE("Could not stat scheduler options file "
"%(filename)s."),
LOG.exception("Could not stat scheduler options file "
"%(filename)s.",
{'filename': filename})
raise
@ -76,7 +74,7 @@ class SchedulerOptions(object):
try:
return json.load(handle)
except ValueError:
LOG.exception(_LE("Could not decode scheduler options."))
LOG.exception("Could not decode scheduler options.")
return {}
def _get_time_now(self):

View File

@ -15,7 +15,6 @@
from oslo_log import log as logging
import six
from cinder.i18n import _LW
from cinder.scheduler.evaluator import evaluator
from cinder.scheduler import weights
@ -56,17 +55,17 @@ class GoodnessWeigher(weights.BaseHostWeigher):
goodness_rating = 0
if stats['goodness_function'] is None:
LOG.warning(_LW("Goodness function not set :: defaulting to "
"minimal goodness rating of 0"))
LOG.warning("Goodness function not set :: defaulting to "
"minimal goodness rating of 0")
else:
try:
goodness_result = self._run_evaluator(
stats['goodness_function'],
stats)
except Exception as ex:
LOG.warning(_LW("Error in goodness_function function "
LOG.warning("Error in goodness_function function "
"'%(function)s' : '%(error)s' :: Defaulting "
"to a goodness of 0"),
"to a goodness of 0",
{'function': stats['goodness_function'],
'error': ex, })
return goodness_rating
@ -75,9 +74,9 @@ class GoodnessWeigher(weights.BaseHostWeigher):
if goodness_result:
goodness_rating = 100
elif goodness_result < 0 or goodness_result > 100:
LOG.warning(_LW("Invalid goodness result. Result must be "
LOG.warning("Invalid goodness result. Result must be "
"between 0 and 100. Result generated: '%s' "
":: Defaulting to a goodness of 0"),
":: Defaulting to a goodness of 0",
goodness_result)
else:
goodness_rating = goodness_result
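
A compact sketch of the clamping rules in the weigher above: boolean results map to the extremes, out-of-range numbers fall back to 0, and anything else passes through. The evaluator itself is omitted, and the result is supplied directly:

def clamp_goodness(goodness_result):
    if isinstance(goodness_result, bool):
        return 100 if goodness_result else 0
    if goodness_result < 0 or goodness_result > 100:
        return 0  # invalid result: default to a goodness of 0
    return goodness_result

for result in (True, False, 150, -3, 42):
    print(result, '->', clamp_goodness(result))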

View File

@ -41,7 +41,7 @@ from cinder.common import constants
from cinder import context
from cinder import coordination
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.i18n import _
from cinder import objects
from cinder.objects import base as objects_base
from cinder.objects import fields
@ -104,7 +104,7 @@ def setup_profiler(binary, host):
host=host
)
LOG.warning(
_LW("OSProfiler is enabled.\nIt means that person who knows "
"OSProfiler is enabled.\nIt means that person who knows "
"any of hmac_keys that are specified in "
"/etc/cinder/cinder.conf can trace his requests. \n"
"In real life only operator can read this file so there "
@ -112,7 +112,7 @@ def setup_profiler(binary, host):
"trigger profiler, only admin user can retrieve trace "
"information.\n"
"To disable OSProfiler set in cinder.conf:\n"
"[profiler]\nenabled=false"))
"[profiler]\nenabled=false")
class Service(service.Service):
@ -183,9 +183,9 @@ class Service(service.Service):
# TODO(geguileo): In O - Remove self.is_upgrading_to_n part
if (service_ref.cluster_name != cluster and
not self.is_upgrading_to_n):
LOG.info(_LI('This service has been moved from cluster '
LOG.info('This service has been moved from cluster '
'%(cluster_svc)s to %(cluster_cfg)s. Resources '
'will %(opt_no)sbe moved to the new cluster'),
'will %(opt_no)sbe moved to the new cluster',
{'cluster_svc': service_ref.cluster_name,
'cluster_cfg': cluster,
'opt_no': '' if self.added_to_cluster else 'NO '})
@ -231,7 +231,7 @@ class Service(service.Service):
def start(self):
version_string = version.version_string()
LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'),
LOG.info('Starting %(topic)s node (version %(version_string)s)',
{'topic': self.topic, 'version_string': version_string})
self.model_disconnected = False
@ -270,8 +270,8 @@ class Service(service.Service):
# TODO(geguileo): In O - Remove the is_svc_upgrading_to_n part
if self.cluster and not self.is_svc_upgrading_to_n(self.binary):
LOG.info(_LI('Starting %(topic)s cluster %(cluster)s (version '
'%(version)s)'),
LOG.info('Starting %(topic)s cluster %(cluster)s (version '
'%(version)s)',
{'topic': self.topic, 'version': version_string,
'cluster': self.cluster})
target = messaging.Target(
@ -310,11 +310,11 @@ class Service(service.Service):
if CONF.service_down_time <= self.report_interval:
new_down_time = int(self.report_interval * 2.5)
LOG.warning(
_LW("Report interval must be less than service down "
"Report interval must be less than service down "
"time. Current config service_down_time: "
"%(service_down_time)s, report_interval for this: "
"service is: %(report_interval)s. Setting global "
"service_down_time to: %(new_down_time)s"),
"service_down_time to: %(new_down_time)s",
{'service_down_time': CONF.service_down_time,
'report_interval': self.report_interval,
'new_down_time': new_down_time})
@ -478,9 +478,9 @@ class Service(service.Service):
if not self.manager.is_working():
# NOTE(dulek): If manager reports a problem we're not sending
# heartbeats - to indicate that service is actually down.
LOG.error(_LE('Manager for service %(binary)s %(host)s is '
LOG.error('Manager for service %(binary)s %(host)s is '
'reporting problems, not sending heartbeat. '
'Service will appear "down".'),
'Service will appear "down".',
{'binary': self.binary,
'host': self.host})
return
@ -506,24 +506,24 @@ class Service(service.Service):
# TODO(termie): make this pattern be more elegant.
if getattr(self, 'model_disconnected', False):
self.model_disconnected = False
LOG.error(_LE('Recovered model server connection!'))
LOG.error('Recovered model server connection!')
except db_exc.DBConnectionError:
if not getattr(self, 'model_disconnected', False):
self.model_disconnected = True
LOG.exception(_LE('model server went away'))
LOG.exception('model server went away')
# NOTE(jsbryant) Other DB errors can happen in HA configurations.
# such errors shouldn't kill this thread, so we handle them here.
except db_exc.DBError:
if not getattr(self, 'model_disconnected', False):
self.model_disconnected = True
LOG.exception(_LE('DBError encountered: '))
LOG.exception('DBError encountered: ')
except Exception:
if not getattr(self, 'model_disconnected', False):
self.model_disconnected = True
LOG.exception(_LE('Exception encountered: '))
LOG.exception('Exception encountered: ')
def reset(self):
self.manager.reset()
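
The periodic-report handlers above call LOG.exception with a bare message. Called from an except block, LOG.exception logs at ERROR and appends the active traceback automatically, so the exception never needs interpolating into the string:

import logging

logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)

try:
    raise ConnectionError("model server went away")
except ConnectionError:
    LOG.exception('model server went away')  # traceback is included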

View File

@ -27,7 +27,7 @@ import paramiko
import six
from cinder import exception
from cinder.i18n import _, _LI
from cinder.i18n import _
LOG = logging.getLogger(__name__)
@ -79,8 +79,8 @@ class SSHPool(pools.Pool):
if 'hosts_key_file' in kwargs.keys():
self.hosts_key_file = kwargs.pop('hosts_key_file')
LOG.info(_LI("Secondary ssh hosts key file %(kwargs)s will be "
"loaded along with %(conf)s from /etc/cinder.conf."),
LOG.info("Secondary ssh hosts key file %(kwargs)s will be "
"loaded along with %(conf)s from /etc/cinder.conf.",
{'kwargs': self.hosts_key_file,
'conf': CONF.ssh_hosts_key_file})

View File

@ -16,9 +16,7 @@
import mock
from cinder.scheduler import base_filter
from cinder.scheduler import host_manager
from cinder import test
from cinder.tests.unit import fake_constants as fake
class TestBaseFilter(test.TestCase):
@ -174,32 +172,3 @@ class TestBaseFilterHandler(test.TestCase):
result = self._get_filtered_objects(filter_classes, index=2)
self.assertEqual(filter_objs_expected, result)
self.assertEqual(1, fake5_filter_all.call_count)
def test_get_filtered_objects_info_and_debug_log_none_returned(self):
all_filters = [FilterA, FilterA, FilterB]
fake_backends = [host_manager.BackendState('fake_be%s' % x, None)
for x in range(1, 4)]
filt_props = {"request_spec": {'volume_id': fake.VOLUME_ID,
'volume_properties': {'project_id': fake.PROJECT_ID,
'size': 2048,
'host': 'host4'}}}
with mock.patch.object(base_filter, 'LOG') as mock_log:
result = self.handler.get_filtered_objects(
all_filters, fake_backends, filt_props)
self.assertFalse(result)
msg = "with volume ID '%s'" % fake.VOLUME_ID
# FilterA should leave Host1 and Host2; FilterB should leave None.
exp_output = ("FilterA: (start: 3, end: 2), "
"FilterA: (start: 2, end: 1)")
cargs = mock_log.info.call_args[0][0]
self.assertIn(msg, cargs)
self.assertIn(exp_output, cargs)
exp_output = ("[('FilterA', ['fake_be2', 'fake_be3']), "
"('FilterA', ['fake_be3']), "
+ "('FilterB', None)]")
cargs = mock_log.debug.call_args[0][0]
self.assertIn(msg, cargs)
self.assertIn(exp_output, cargs)

View File

@ -33,7 +33,6 @@ import six
from cinder import context
from cinder import exception
from cinder.i18n import _LW
from cinder.objects import fields
from cinder import test
from cinder.tests.unit import fake_volume
@ -91,9 +90,9 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
reserved_percentage = 100 * int(reserved_ratio)
self.assertEqual(reserved_percentage, result)
msg = _LW('The "netapp_size_multiplier" configuration option is '
msg = ('The "netapp_size_multiplier" configuration option is '
'deprecated and will be removed in the Mitaka release. '
'Please set "reserved_percentage = %d" instead.') % (
'Please set "reserved_percentage = %d" instead.' %
result)
mock_report.assert_called_once_with(block_base.LOG, msg)

View File

@ -29,7 +29,7 @@ import six
from cinder.db import base
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.i18n import _
from cinder import objects
from cinder import quota
from cinder import quota_utils
@ -72,7 +72,7 @@ class API(base.Base):
volume_utils.notify_about_volume_usage(context, volume_ref,
"transfer.delete.start")
if volume_ref['status'] != 'awaiting-transfer':
LOG.error(_LE("Volume in unexpected state"))
LOG.error("Volume in unexpected state")
self.db.transfer_destroy(context, transfer_id)
volume_utils.notify_about_volume_usage(context, volume_ref,
"transfer.delete.end")
@ -115,7 +115,7 @@ class API(base.Base):
def create(self, context, volume_id, display_name):
"""Creates an entry in the transfers table."""
volume_api.check_policy(context, 'create_transfer')
LOG.info(_LI("Generating transfer record for volume %s"), volume_id)
LOG.info("Generating transfer record for volume %s", volume_id)
volume_ref = self.db.volume_get(context, volume_id)
if volume_ref['status'] != "available":
raise exception.InvalidVolume(reason=_("status must be available"))
@ -137,8 +137,7 @@ class API(base.Base):
try:
transfer = self.db.transfer_create(context, transfer_rec)
except Exception:
LOG.error(_LE("Failed to create transfer record "
"for %s"), volume_id)
LOG.error("Failed to create transfer record for %s", volume_id)
raise
volume_utils.notify_about_volume_usage(context, volume_ref,
"transfer.create.end")
@ -200,8 +199,8 @@ class API(base.Base):
**reserve_opts)
except Exception:
donor_reservations = None
LOG.exception(_LE("Failed to update quota donating volume"
" transfer id %s"), transfer_id)
LOG.exception("Failed to update quota donating volume"
" transfer id %s", transfer_id)
volume_utils.notify_about_volume_usage(context, vol_ref,
"transfer.accept.start")
@ -219,7 +218,7 @@ class API(base.Base):
QUOTAS.commit(context, reservations)
if donor_reservations:
QUOTAS.commit(context, donor_reservations, project_id=donor_id)
LOG.info(_LI("Volume %s has been transferred."), volume_id)
LOG.info("Volume %s has been transferred.", volume_id)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)

View File

@ -53,7 +53,7 @@ import six
import webob.exc
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.i18n import _
from cinder import keymgr
@ -398,7 +398,7 @@ def robust_file_write(directory, filename, data):
os.fsync(dirfd)
except OSError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to write persistence file: %(path)s."),
LOG.error("Failed to write persistence file: %(path)s.",
{'path': os.path.join(directory, filename)})
if os.path.isfile(tempname):
os.unlink(tempname)
@ -535,7 +535,7 @@ def require_driver_initialized(driver):
# we can't do anything if the driver didn't init
if not driver.initialized:
driver_name = driver.__class__.__name__
LOG.error(_LE("Volume driver %s not initialized"), driver_name)
LOG.error("Volume driver %s not initialized", driver_name)
raise exception.DriverNotInitialized()
else:
log_unsupported_driver_warning(driver)
@ -545,9 +545,9 @@ def log_unsupported_driver_warning(driver):
"""Annoy the log about unsupported drivers."""
if not driver.supported:
# Check to see if the driver is flagged as supported.
LOG.warning(_LW("Volume driver (%(driver_name)s %(version)s) is "
LOG.warning("Volume driver (%(driver_name)s %(version)s) is "
"currently unsupported and may be removed in the "
"next release of OpenStack. Use at your own risk."),
"next release of OpenStack. Use at your own risk.",
{'driver_name': driver.__class__.__name__,
'version': driver.get_version()},
resource={'type': 'driver',
@ -944,7 +944,7 @@ def setup_tracing(trace_flags):
except TypeError: # Handle when trace_flags is None or a test mock
trace_flags = []
for invalid_flag in (set(trace_flags) - VALID_TRACE_FLAGS):
LOG.warning(_LW('Invalid trace flag: %s'), invalid_flag)
LOG.warning('Invalid trace flag: %s', invalid_flag)
TRACE_METHOD = 'method' in trace_flags
TRACE_API = 'api' in trace_flags

View File

@ -36,7 +36,7 @@ from cinder import db
from cinder.db import base
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.i18n import _
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder import keymgr as key_manager
@ -170,7 +170,7 @@ class API(base.Base):
seconds=CONF.az_cache_duration))
else:
azs = self.availability_zones
LOG.info(_LI("Availability Zones retrieved successfully."))
LOG.info("Availability Zones retrieved successfully.")
return tuple(azs)
def _retype_is_possible(self, context,
@ -349,7 +349,7 @@ class API(base.Base):
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
vref = flow_engine.storage.fetch('volume')
LOG.info(_LI("Volume created successfully."), resource=vref)
LOG.info("Volume created successfully.", resource=vref)
return vref
@wrap_check_policy
@ -380,8 +380,8 @@ class API(base.Base):
project_id=project_id,
**reserve_opts)
except Exception:
LOG.exception(_LE("Failed to update quota while "
"deleting volume."))
LOG.exception("Failed to update quota while "
"deleting volume.")
volume.destroy()
if reservations:
@ -389,7 +389,7 @@ class API(base.Base):
volume_utils.notify_about_volume_usage(context,
volume, "delete.end")
LOG.info(_LI("Delete volume request issued successfully."),
LOG.info("Delete volume request issued successfully.",
resource={'type': 'volume',
'id': volume.id})
return
@ -468,14 +468,14 @@ class API(base.Base):
try:
self.key_manager.delete(context, encryption_key_id)
except Exception as e:
LOG.warning(_LW("Unable to delete encryption key for "
"volume: %s."), e.msg, resource=volume)
LOG.warning("Unable to delete encryption key for "
"volume: %s.", e.msg, resource=volume)
self.volume_rpcapi.delete_volume(context,
volume,
unmanage_only,
cascade)
LOG.info(_LI("Delete volume request issued successfully."),
LOG.info("Delete volume request issued successfully.",
resource=volume)
@wrap_check_policy
@ -488,8 +488,8 @@ class API(base.Base):
volume = objects.Volume._from_db_object(context, vol_obj, volume)
if volume.status == 'maintenance':
LOG.info(_LI("Unable to update volume, "
"because it is in maintenance."), resource=volume)
LOG.info("Unable to update volume, "
"because it is in maintenance.", resource=volume)
msg = _("The volume cannot be updated during maintenance.")
raise exception.InvalidVolume(reason=msg)
@ -497,7 +497,7 @@ class API(base.Base):
volume.update(fields)
volume.save()
LOG.info(_LI("Volume updated successfully."), resource=volume)
LOG.info("Volume updated successfully.", resource=volume)
def get(self, context, volume_id, viewable_admin_meta=False):
volume = objects.Volume.get_by_id(context, volume_id)
@ -516,7 +516,7 @@ class API(base.Base):
volume.admin_metadata = admin_metadata
volume.obj_reset_changes()
LOG.info(_LI("Volume info retrieved successfully."), resource=volume)
LOG.info("Volume info retrieved successfully.", resource=volume)
return volume
def get_all(self, context, marker=None, limit=None, sort_keys=None,
@ -565,7 +565,7 @@ class API(base.Base):
sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters,
offset=offset)
LOG.info(_LI("Get all volumes completed successfully."))
LOG.info("Get all volumes completed successfully.")
return volumes
def get_volume_summary(self, context, filters=None):
@ -583,7 +583,7 @@ class API(base.Base):
volumes = objects.VolumeList.get_volume_summary_by_project(
context, context.project_id)
LOG.info(_LI("Get summary completed successfully."))
LOG.info("Get summary completed successfully.")
return volumes
def get_snapshot(self, context, snapshot_id):
@ -592,7 +592,7 @@ class API(base.Base):
# FIXME(jdg): The objects don't have the db name entries
# so build the resource tag manually for now.
LOG.info(_LI("Snapshot retrieved successfully."),
LOG.info("Snapshot retrieved successfully.",
resource={'type': 'snapshot',
'id': snapshot.id})
return snapshot
@ -600,7 +600,7 @@ class API(base.Base):
def get_volume(self, context, volume_id):
check_policy(context, 'get_volume')
volume = objects.Volume.get_by_id(context, volume_id)
LOG.info(_LI("Volume retrieved successfully."), resource=volume)
LOG.info("Volume retrieved successfully.", resource=volume)
return volume
def get_all_snapshots(self, context, search_opts=None, marker=None,
@ -621,7 +621,7 @@ class API(base.Base):
context, context.project_id, search_opts, marker, limit,
sort_keys, sort_dirs, offset)
LOG.info(_LI("Get all snapshots completed successfully."))
LOG.info("Get all snapshots completed successfully.")
return snapshots
@wrap_check_policy
@ -640,7 +640,7 @@ class API(base.Base):
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
LOG.info(_LI("Reserve volume completed successfully."),
LOG.info("Reserve volume completed successfully.",
resource=volume)
@wrap_check_policy
@ -658,7 +658,7 @@ class API(base.Base):
resource=volume)
return
LOG.info(_LI("Unreserve volume completed successfully."),
LOG.info("Unreserve volume completed successfully.",
resource=volume)
@wrap_check_policy
@ -678,22 +678,22 @@ class API(base.Base):
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
LOG.info(_LI("Begin detaching volume completed successfully."),
LOG.info("Begin detaching volume completed successfully.",
resource=volume)
@wrap_check_policy
def roll_detaching(self, context, volume):
volume.conditional_update({'status': 'in-use'},
{'status': 'detaching'})
LOG.info(_LI("Roll detaching of volume completed successfully."),
LOG.info("Roll detaching of volume completed successfully.",
resource=volume)
@wrap_check_policy
def attach(self, context, volume, instance_uuid, host_name,
mountpoint, mode):
if volume.status == 'maintenance':
LOG.info(_LI('Unable to attach volume, '
'because it is in maintenance.'), resource=volume)
LOG.info('Unable to attach volume, '
'because it is in maintenance.', resource=volume)
msg = _("The volume cannot be attached in maintenance mode.")
raise exception.InvalidVolume(reason=msg)
@ -712,36 +712,36 @@ class API(base.Base):
host_name,
mountpoint,
mode)
LOG.info(_LI("Attach volume completed successfully."),
LOG.info("Attach volume completed successfully.",
resource=volume)
return attach_results
@wrap_check_policy
def detach(self, context, volume, attachment_id):
if volume['status'] == 'maintenance':
LOG.info(_LI('Unable to detach volume, '
'because it is in maintenance.'), resource=volume)
LOG.info('Unable to detach volume, '
'because it is in maintenance.', resource=volume)
msg = _("The volume cannot be detached in maintenance mode.")
raise exception.InvalidVolume(reason=msg)
detach_results = self.volume_rpcapi.detach_volume(context, volume,
attachment_id)
LOG.info(_LI("Detach volume completed successfully."),
LOG.info("Detach volume completed successfully.",
resource=volume)
return detach_results
@wrap_check_policy
def initialize_connection(self, context, volume, connector):
if volume.status == 'maintenance':
LOG.info(_LI('Unable to initialize the connection for '
LOG.info('Unable to initialize the connection for '
'volume, because it is in '
'maintenance.'), resource=volume)
'maintenance.', resource=volume)
msg = _("The volume connection cannot be initialized in "
"maintenance mode.")
raise exception.InvalidVolume(reason=msg)
init_results = self.volume_rpcapi.initialize_connection(context,
volume,
connector)
LOG.info(_LI("Initialize volume connection completed successfully."),
LOG.info("Initialize volume connection completed successfully.",
resource=volume)
return init_results
@ -751,22 +751,22 @@ class API(base.Base):
volume,
connector,
force)
LOG.info(_LI("Terminate volume connection completed successfully."),
LOG.info("Terminate volume connection completed successfully.",
resource=volume)
self.unreserve_volume(context, volume)
@wrap_check_policy
def accept_transfer(self, context, volume, new_user, new_project):
if volume['status'] == 'maintenance':
LOG.info(_LI('Unable to accept transfer for volume, '
'because it is in maintenance.'), resource=volume)
LOG.info('Unable to accept transfer for volume, '
'because it is in maintenance.', resource=volume)
msg = _("The volume cannot accept transfer in maintenance mode.")
raise exception.InvalidVolume(reason=msg)
results = self.volume_rpcapi.accept_transfer(context,
volume,
new_user,
new_project)
LOG.info(_LI("Transfer volume completed successfully."),
LOG.info("Transfer volume completed successfully.",
resource=volume)
return results
@ -798,8 +798,8 @@ class API(base.Base):
raise exception.InvalidVolume(reason=msg)
if volume['status'] == 'maintenance':
LOG.info(_LI('Unable to create the snapshot for volume, '
'because it is in maintenance.'), resource=volume)
LOG.info('Unable to create the snapshot for volume, '
'because it is in maintenance.', resource=volume)
msg = _("The snapshot cannot be created when the volume is in "
"maintenance mode.")
raise exception.InvalidVolume(reason=msg)
@ -911,8 +911,8 @@ class API(base.Base):
check_policy(context, 'create_snapshot', volume)
if volume['status'] == 'maintenance':
LOG.info(_LI('Unable to create the snapshot for volume, '
'because it is in maintenance.'), resource=volume)
LOG.info('Unable to create the snapshot for volume, '
'because it is in maintenance.', resource=volume)
msg = _("The snapshot cannot be created when the volume is in "
"maintenance mode.")
raise exception.InvalidVolume(reason=msg)
@ -981,7 +981,7 @@ class API(base.Base):
result = self._create_snapshot(context, volume, name, description,
False, metadata, cgsnapshot_id,
group_snapshot_id)
LOG.info(_LI("Snapshot create request issued successfully."),
LOG.info("Snapshot create request issued successfully.",
resource=result)
return result
@ -990,7 +990,7 @@ class API(base.Base):
description, metadata=None):
result = self._create_snapshot(context, volume, name, description,
True, metadata)
LOG.info(_LI("Snapshot force create request issued successfully."),
LOG.info("Snapshot force create request issued successfully.",
resource=result)
return result
@ -1021,7 +1021,7 @@ class API(base.Base):
raise exception.InvalidSnapshot(reason=msg)
self.volume_rpcapi.delete_snapshot(context, snapshot, unmanage_only)
LOG.info(_LI("Snapshot delete request issued successfully."),
LOG.info("Snapshot delete request issued successfully.",
resource=snapshot)
@wrap_check_policy
@ -1033,7 +1033,7 @@ class API(base.Base):
def get_volume_metadata(self, context, volume):
"""Get all metadata associated with a volume."""
rv = self.db.volume_metadata_get(context, volume['id'])
LOG.info(_LI("Get volume metadata completed successfully."),
LOG.info("Get volume metadata completed successfully.",
resource=volume)
return dict(rv)
@ -1042,7 +1042,7 @@ class API(base.Base):
"""Creates volume metadata."""
db_meta = self._update_volume_metadata(context, volume, metadata)
LOG.info(_LI("Create volume metadata completed successfully."),
LOG.info("Create volume metadata completed successfully.",
resource=volume)
return db_meta
@ -1056,7 +1056,7 @@ class API(base.Base):
LOG.info(msg, resource=volume)
raise exception.InvalidVolume(reason=msg)
self.db.volume_metadata_delete(context, volume.id, key, meta_type)
LOG.info(_LI("Delete volume metadata completed successfully."),
LOG.info("Delete volume metadata completed successfully.",
resource=volume)
def _update_volume_metadata(self, context, volume, metadata, delete=False,
@ -1084,7 +1084,7 @@ class API(base.Base):
# TODO(jdg): Implement an RPC call for drivers that may use this info
LOG.info(_LI("Update volume metadata completed successfully."),
LOG.info("Update volume metadata completed successfully.",
resource=volume)
return db_meta
@ -1092,7 +1092,7 @@ class API(base.Base):
def get_volume_admin_metadata(self, context, volume):
"""Get all administration metadata associated with a volume."""
rv = self.db.volume_admin_metadata_get(context, volume['id'])
LOG.info(_LI("Get volume admin metadata completed successfully."),
LOG.info("Get volume admin metadata completed successfully.",
resource=volume)
return dict(rv)
@ -1112,7 +1112,7 @@ class API(base.Base):
# TODO(jdg): Implement an RPC call for drivers that may use this info
LOG.info(_LI("Update volume admin metadata completed successfully."),
LOG.info("Update volume admin metadata completed successfully.",
resource=volume)
return db_meta
@ -1120,7 +1120,7 @@ class API(base.Base):
def get_snapshot_metadata(self, context, snapshot):
"""Get all metadata associated with a snapshot."""
snapshot_obj = self.get_snapshot(context, snapshot.id)
LOG.info(_LI("Get snapshot metadata completed successfully."),
LOG.info("Get snapshot metadata completed successfully.",
resource=snapshot)
return snapshot_obj.metadata
@ -1129,7 +1129,7 @@ class API(base.Base):
"""Delete the given metadata item from a snapshot."""
snapshot_obj = self.get_snapshot(context, snapshot.id)
snapshot_obj.delete_metadata_key(context, key)
LOG.info(_LI("Delete snapshot metadata completed successfully."),
LOG.info("Delete snapshot metadata completed successfully.",
resource=snapshot)
@wrap_check_policy
@ -1156,12 +1156,12 @@ class API(base.Base):
# TODO(jdg): Implement an RPC call for drivers that may use this info
LOG.info(_LI("Update snapshot metadata completed successfully."),
LOG.info("Update snapshot metadata completed successfully.",
resource=snapshot)
return snapshot.metadata
def get_snapshot_metadata_value(self, snapshot, key):
LOG.info(_LI("Get snapshot metadata value not implemented."),
LOG.info("Get snapshot metadata value not implemented.",
resource=snapshot)
# FIXME(jdg): Huh? Pass?
pass
@ -1178,7 +1178,7 @@ class API(base.Base):
@wrap_check_policy
def get_volume_image_metadata(self, context, volume):
db_data = self.db.volume_glance_metadata_get(context, volume['id'])
LOG.info(_LI("Get volume image-metadata completed successfully."),
LOG.info("Get volume image-metadata completed successfully.",
resource=volume)
return {meta_entry.key: meta_entry.value for meta_entry in db_data}
@ -1195,8 +1195,8 @@ class API(base.Base):
def copy_volume_to_image(self, context, volume, metadata, force):
"""Create a new image from the specified volume."""
if not CONF.enable_force_upload and force:
LOG.info(_LI("Force upload to image is disabled, "
"Force option will be ignored."),
LOG.info("Force upload to image is disabled, "
"Force option will be ignored.",
resource={'type': 'volume', 'id': volume['id']})
force = False
@ -1262,7 +1262,7 @@ class API(base.Base):
response['is_public'] = recv_metadata.get('is_public')
elif 'visibility' in recv_metadata:
response['visibility'] = recv_metadata.get('visibility')
LOG.info(_LI("Copy volume to image completed successfully."),
LOG.info("Copy volume to image completed successfully.",
resource=volume)
return response
@ -1272,7 +1272,7 @@ class API(base.Base):
expected = {'status': 'available'}
def _roll_back_status():
msg = _LE('Could not return volume %s to available.')
msg = _('Could not return volume %s to available.')
try:
if not volume.conditional_update(expected, value):
LOG.error(msg, volume.id)
@ -1323,10 +1323,10 @@ class API(base.Base):
gb_quotas = exc.kwargs['quotas']['gigabytes']
consumed = gigabytes['reserved'] + gigabytes['in_use']
msg = _LE("Quota exceeded for %(s_pid)s, tried to extend volume "
LOG.error("Quota exceeded for %(s_pid)s, tried to extend volume "
"by %(s_size)sG, (%(d_consumed)dG of %(d_quota)dG "
"already consumed).")
LOG.error(msg, {'s_pid': context.project_id,
"already consumed).",
{'s_pid': context.project_id,
's_size': size_increase,
'd_consumed': consumed,
'd_quota': gb_quotas})
@ -1357,7 +1357,7 @@ class API(base.Base):
# NOTE(erlon): During rolling upgrades scheduler and volume can
# have different versions. This check makes sure that a new
# version of the volume service won't break.
msg = _LW("Failed to send extend volume request to scheduler. "
msg = ("Failed to send extend volume request to scheduler. "
"Falling back to old behaviour. This is normal during a "
"live-upgrade. Error: %(e)s")
LOG.warning(msg, {'e': e})
@ -1365,7 +1365,7 @@ class API(base.Base):
self.volume_rpcapi.extend_volume(context, volume, new_size,
reservations)
LOG.info(_LI("Extend volume request issued successfully."),
LOG.info("Extend volume request issued successfully.",
resource=volume)
@wrap_check_policy
@ -1451,7 +1451,7 @@ class API(base.Base):
cluster_name or host,
force_copy,
request_spec)
LOG.info(_LI("Migrate volume request issued successfully."),
LOG.info("Migrate volume request issued successfully.",
resource=volume)
@wrap_check_policy
@ -1490,7 +1490,7 @@ class API(base.Base):
'exp': expected_status})
raise exception.InvalidVolume(reason=msg)
LOG.info(_LI("Migrate volume completion issued successfully."),
LOG.info("Migrate volume completion issued successfully.",
resource=volume)
return self.volume_rpcapi.migrate_volume_completion(context, volume,
new_volume, error)
@ -1505,8 +1505,8 @@ class API(base.Base):
raise exception.InvalidVolume(reason=msg)
self.update_volume_admin_metadata(context.elevated(), volume,
{'readonly': six.text_type(flag)})
LOG.info(_LI("Update readonly setting on volume "
"completed successfully."),
LOG.info("Update readonly setting on volume "
"completed successfully.",
resource=volume)
@wrap_check_policy
@ -1592,7 +1592,7 @@ class API(base.Base):
self.scheduler_rpcapi.retype(context, volume,
request_spec=request_spec,
filter_properties={})
LOG.info(_LI("Retype volume request issued successfully."),
LOG.info("Retype volume request issued successfully.",
resource=volume)
def _get_service_by_host_cluster(self, context, host, cluster_name,
@ -1613,20 +1613,20 @@ class API(base.Base):
cluster_name=svc_cluster)
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to find service: %(service)s for '
'given host: %(host)s and cluster %(cluster)s.'),
LOG.error('Unable to find service: %(service)s for '
'given host: %(host)s and cluster %(cluster)s.',
{'service': constants.VOLUME_BINARY, 'host': host,
'cluster': cluster_name})
if service.disabled and (not service.cluster_name or
service.cluster.disabled):
LOG.error(_LE('Unable to manage existing %s on a disabled '
'service.'), resource)
LOG.error('Unable to manage existing %s on a disabled '
'service.', resource)
raise exception.ServiceUnavailable()
if not service.is_up:
LOG.error(_LE('Unable to manage existing %s on a service that is '
'down.'), resource)
LOG.error('Unable to manage existing %s on a service that is '
'down.', resource)
raise exception.ServiceUnavailable()
return service
@ -1673,7 +1673,7 @@ class API(base.Base):
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
vol_ref = flow_engine.storage.fetch('volume')
LOG.info(_LI("Manage volume request issued successfully."),
LOG.info("Manage volume request issued successfully.",
resource=vol_ref)
return vol_ref
@ -1791,7 +1791,7 @@ class API(base.Base):
cluster.save()
raise exception.InvalidInput(
reason=_('No service could be changed: %s') % msg)
LOG.warning(_LW('Some services could not be changed: %s'), msg)
LOG.warning('Some services could not be changed: %s', msg)
return cluster, services
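Note that only log strings lose their markers in this file: messages that reach users, such as exception reasons, still go through _() so they stay translatable, which is why the i18n import is trimmed rather than removed. A sketch combining the two cases from the maintenance-mode hunks above:

    from cinder.i18n import _  # _LE/_LI/_LW are no longer imported

    if volume.status == 'maintenance':
        # Log-only text: plain string.
        LOG.info("Unable to update volume, "
                 "because it is in maintenance.", resource=volume)
        # User-facing text: still wrapped for translation.
        msg = _("The volume cannot be updated during maintenance.")
        raise exception.InvalidVolume(reason=msg)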

View File

@ -26,7 +26,7 @@ from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.i18n import _
from cinder.image import image_utils
from cinder import objects
from cinder.objects import fields
@ -424,8 +424,8 @@ class BaseVD(object):
self._is_non_recoverable(ex.stderr, non_recoverable):
raise
LOG.exception(_LE("Recovering from a failed execute. "
"Try number %s"), tries)
LOG.exception("Recovering from a failed execute. "
"Try number %s", tries)
time.sleep(tries ** 2)
def _detach_volume(self, context, attach_info, volume, properties,
@ -458,8 +458,8 @@ class BaseVD(object):
LOG.debug("volume %s: removing export", volume['id'])
self.remove_export(context, volume)
except Exception as ex:
LOG.exception(_LE("Error detaching volume %(volume)s, "
"due to remove export failure."),
LOG.exception("Error detaching volume %(volume)s, "
"due to remove export failure.",
{"volume": volume['id']})
raise exception.RemoveExportException(volume=volume['id'],
reason=ex)
@ -480,8 +480,8 @@ class BaseVD(object):
# flag in the interface is for anticipation that it will be enabled
# in the future.
if remote:
LOG.error(_LE("Detaching snapshot from a remote node "
"is not supported."))
LOG.error("Detaching snapshot from a remote node "
"is not supported.")
raise exception.NotSupportedOperation(
operation=_("detach snapshot from remote node"))
else:
@ -501,8 +501,8 @@ class BaseVD(object):
LOG.debug("Snapshot %s: removing export.", snapshot.id)
self.remove_export_snapshot(context, snapshot)
except Exception as ex:
LOG.exception(_LE("Error detaching snapshot %(snapshot)s, "
"due to remove export failure."),
LOG.exception("Error detaching snapshot %(snapshot)s, "
"due to remove export failure.",
{"snapshot": snapshot.id})
raise exception.RemoveExportException(volume=snapshot.id,
reason=ex)
@ -532,8 +532,8 @@ class BaseVD(object):
self._throttle = throttling.BlkioCgroup(int(bps_limit),
cgroup_name)
except processutils.ProcessExecutionError as err:
LOG.warning(_LW('Failed to activate volume copy throttling: '
'%(err)s'), {'err': err})
LOG.warning('Failed to activate volume copy throttling: '
'%(err)s', {'err': err})
throttling.Throttle.set_default(self._throttle)
def get_version(self):
@ -737,9 +737,9 @@ class BaseVD(object):
if ':' in vendor_name:
old_name = vendor_name
vendor_name = vendor_name.replace(':', '_')
LOG.warning(_LW('The colon in vendor name was replaced '
LOG.warning('The colon in vendor name was replaced '
'by underscore. Updated vendor name is '
'%(name)s".'), {'name': vendor_name})
'%(name)s".', {'name': vendor_name})
for key in vendor_prop:
# If key has colon in vendor name field, we replace it to
@ -751,10 +751,10 @@ class BaseVD(object):
updated_vendor_prop[new_key] = vendor_prop[key]
continue
if not key.startswith(vendor_name + ':'):
LOG.warning(_LW('Vendor unique property "%(property)s" '
LOG.warning('Vendor unique property "%(property)s" '
'must start with vendor prefix with colon '
'"%(prefix)s". The property was '
'not registered on capabilities list.'),
'not registered on capabilities list.',
{'prefix': vendor_name + ':',
'property': key})
continue
@ -952,9 +952,9 @@ class BaseVD(object):
rpcapi.terminate_connection(context, volume,
properties, force=True)
except Exception:
LOG.warning(_LW("Failed terminating the connection "
LOG.warning("Failed terminating the connection "
"of volume %(volume_id)s, but it is "
"acceptable."),
"acceptable.",
{'volume_id': volume['id']})
else:
# Call local driver's create_export and initialize_connection.
@ -969,9 +969,9 @@ class BaseVD(object):
volume.save()
except exception.CinderException as ex:
if model_update:
LOG.exception(_LE("Failed updating model of volume "
LOG.exception("Failed updating model of volume "
"%(volume_id)s with driver provided "
"model %(model)s"),
"model %(model)s",
{'volume_id': volume['id'],
'model': model_update})
raise exception.ExportFailure(reason=ex)
@ -1008,7 +1008,7 @@ class BaseVD(object):
properties, force=True,
remote=remote)
except Exception:
LOG.exception(_LE('Error detaching volume %s'),
LOG.exception('Error detaching volume %s',
volume['id'])
raise
@ -1024,8 +1024,8 @@ class BaseVD(object):
# flag in the interface is for anticipation that it will be enabled
# in the future.
if remote:
LOG.error(_LE("Attaching snapshot from a remote node "
"is not supported."))
LOG.error("Attaching snapshot from a remote node "
"is not supported.")
raise exception.NotSupportedOperation(
operation=_("attach snapshot from remote node"))
else:
@ -1045,9 +1045,9 @@ class BaseVD(object):
snapshot.save()
except exception.CinderException as ex:
if model_update:
LOG.exception(_LE("Failed updating model of snapshot "
LOG.exception("Failed updating model of snapshot "
"%(snapshot_id)s with driver provided "
"model %(model)s."),
"model %(model)s.",
{'snapshot_id': snapshot.id,
'model': model_update})
raise exception.ExportFailure(reason=ex)
@ -1094,7 +1094,7 @@ class BaseVD(object):
unavailable = not connector.check_valid_device(host_device,
root_access)
except Exception:
LOG.exception(_LE('Could not validate device %s'), host_device)
LOG.exception('Could not validate device %s', host_device)
if unavailable:
raise exception.DeviceUnavailable(path=host_device,
@ -2612,8 +2612,7 @@ class ISCSIDriver(VolumeDriver):
def _do_iscsi_discovery(self, volume):
# TODO(justinsb): Deprecate discovery and use stored info
# NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
LOG.warning(_LW("ISCSI provider_location not "
"stored, using discovery"))
LOG.warning("ISCSI provider_location not stored, using discovery")
volume_name = volume['name']
@ -2626,7 +2625,7 @@ class ISCSIDriver(VolumeDriver):
volume['host'].split('@')[0],
run_as_root=True)
except processutils.ProcessExecutionError as ex:
LOG.error(_LE("ISCSI discovery attempt failed for:%s"),
LOG.error("ISCSI discovery attempt failed for:%s",
volume['host'].split('@')[0])
LOG.debug("Error from iscsiadm -m discovery: %s", ex.stderr)
return None
@ -2815,8 +2814,8 @@ class ISCSIDriver(VolumeDriver):
# iSCSI drivers require the initiator information
required = 'initiator'
if required not in connector:
LOG.error(_LE('The volume driver requires %(data)s '
'in the connector.'), {'data': required})
LOG.error('The volume driver requires %(data)s '
'in the connector.', {'data': required})
raise exception.InvalidConnectorException(missing=required)
def terminate_connection(self, volume, connector, **kwargs):
@ -2969,9 +2968,9 @@ class FibreChannelDriver(VolumeDriver):
def validate_connector_has_setting(connector, setting):
"""Test for non-empty setting in connector."""
if setting not in connector or not connector[setting]:
LOG.error(_LE(
LOG.error(
"FibreChannelDriver validate_connector failed. "
"No '%(setting)s'. Make sure HBA state is Online."),
"No '%(setting)s'. Make sure HBA state is Online.",
{'setting': setting})
raise exception.InvalidConnectorException(missing=setting)
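LOG.exception behaves like LOG.error plus the active traceback, so those calls change shape the same way: only the message string is touched. A compact paraphrase of the retry hunk above (the hypothetical _try_execute stands in for the driver's command execution; processutils and tries are as in the surrounding code):

    try:
        self._try_execute(*command, **kwargs)
    except processutils.ProcessExecutionError:
        LOG.exception("Recovering from a failed execute. "
                      "Try number %s", tries)
        time.sleep(tries ** 2)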

View File

@ -17,7 +17,6 @@ from oslo_log import log as logging
from cinder import context
from cinder import exception
from cinder.i18n import _LE
LOG = logging.getLogger(__name__)
@ -41,9 +40,9 @@ class VolumeDriverUtils(object):
self._data_namespace
)
except exception.CinderException:
LOG.exception(_LE("Failed to get driver initiator data for"
LOG.exception("Failed to get driver initiator data for"
" initiator %(initiator)s and namespace"
" %(namespace)s"),
" %(namespace)s",
{'initiator': initiator,
'namespace': self._data_namespace})
raise
@ -63,9 +62,9 @@ class VolumeDriverUtils(object):
value
)
except exception.CinderException:
LOG.exception(_LE("Failed to insert initiator data for"
LOG.exception("Failed to insert initiator data for"
" initiator %(initiator)s and backend"
" %(backend)s for key %(key)s."),
" %(backend)s for key %(key)s.",
{'initiator': initiator,
'backend': self._data_namespace,
'key': key})
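When a module's only use of cinder.i18n was a marker, the import disappears outright instead of being trimmed, as in the header hunk of this file. Before and after, in sketch form:

    # Before
    from cinder import context
    from cinder import exception
    from cinder.i18n import _LE

    # After: no i18n import remains, because the module no longer has
    # any user-facing strings to translate.
    from cinder import context
    from cinder import exception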

View File

@ -23,7 +23,7 @@ from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.i18n import _, _LI, _LW
from cinder.i18n import _
from cinder.image import image_utils
from cinder import interface
from cinder import objects
@ -87,7 +87,7 @@ class BlockDeviceDriver(driver.BaseVD,
@utils.synchronized('block_device', external=True)
def create_volume(self, volume):
device = self.find_appropriate_size_device(volume.size)
LOG.info(_LI("Creating %(volume)s on %(device)s"),
LOG.info("Creating %(volume)s on %(device)s",
{"volume": volume.name, "device": device})
self._update_provider_location(volume, device)
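The call above also shows why the argument dict stays outside the string: the logging framework interpolates lazily, so the message is only built when an INFO record is actually emitted. An illustrative contrast (the eager variant is not used by the driver):

    # Lazy: interpolation is deferred to the logging framework.
    LOG.info("Creating %(volume)s on %(device)s",
             {"volume": volume.name, "device": device})

    # Eager: the string is formatted even when INFO is filtered out.
    LOG.info("Creating %(volume)s on %(device)s"
             % {"volume": volume.name, "device": device})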
@ -109,7 +109,7 @@ class BlockDeviceDriver(driver.BaseVD,
volume_clear=self.configuration.volume_clear,
volume_clear_size=self.configuration.volume_clear_size)
else:
LOG.warning(_LW("The device %s won't be cleared."), device)
LOG.warning("The device %s won't be cleared.", device)
if device.status == "error_deleting":
msg = _("Failed to delete device.")
@ -141,7 +141,7 @@ class BlockDeviceDriver(driver.BaseVD,
@utils.synchronized('block_device', external=True)
def create_cloned_volume(self, volume, src_vref):
LOG.info(_LI('Creating clone of volume: %s.'), src_vref.id)
LOG.info('Creating clone of volume: %s.', src_vref.id)
device = self.find_appropriate_size_device(src_vref.size)
dev_size = self._get_devices_sizes([device])
volutils.copy_volume(
@ -260,7 +260,7 @@ class BlockDeviceDriver(driver.BaseVD,
LOG.error(msg, resource=volume)
raise exception.CinderException(msg)
LOG.info(_LI('Creating volume snapshot: %s.'), snapshot.id)
LOG.info('Creating volume snapshot: %s.', snapshot.id)
device = self.find_appropriate_size_device(snapshot.volume_size)
dev_size = self._get_devices_sizes([device])
volutils.copy_volume(
@ -275,7 +275,7 @@ class BlockDeviceDriver(driver.BaseVD,
@utils.synchronized('block_device', external=True)
def create_volume_from_snapshot(self, volume, snapshot):
LOG.info(_LI('Creating volume %s from snapshot.'), volume.id)
LOG.info('Creating volume %s from snapshot.', volume.id)
device = self.find_appropriate_size_device(snapshot.volume_size)
dev_size = self._get_devices_sizes([device])
volutils.copy_volume(

View File

@ -29,8 +29,6 @@ import six
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder.i18n import _LE
from cinder.i18n import _LI
from cinder.objects import fields
from cinder.volume.drivers.coprhd.helpers import (
authentication as coprhd_auth)
@ -254,7 +252,7 @@ class EMCCoprHDDriverCommon(object):
coprhd_err_msg = (_("Volume %(name)s: create failed\n%(err)s") %
{'name': name, 'err': six.text_type(e.msg)})
log_err_msg = (_LE("Volume : %s creation failed") % name)
log_err_msg = ("Volume : %s creation failed" % name)
self._raise_or_log_exception(
e.err_code, coprhd_err_msg, log_err_msg)
@ -283,7 +281,7 @@ class EMCCoprHDDriverCommon(object):
" create failed\n%(err)s") %
{'name': name, 'err': six.text_type(e.msg)})
log_err_msg = (_LE("Consistency Group : %s creation failed") %
log_err_msg = ("Consistency Group : %s creation failed" %
name)
self._raise_or_log_exception(e.err_code, coprhd_err_msg,
log_err_msg)
@ -321,7 +319,7 @@ class EMCCoprHDDriverCommon(object):
" update failed\n%(err)s") %
{'cg_uri': cg_uri, 'err': six.text_type(e.msg)})
log_err_msg = (_LE("Consistency Group : %s update failed") %
log_err_msg = ("Consistency Group : %s update failed" %
cg_uri)
self._raise_or_log_exception(e.err_code, coprhd_err_msg,
log_err_msg)
@ -357,7 +355,7 @@ class EMCCoprHDDriverCommon(object):
volumes_model_update.append(update_item)
LOG.exception(_LE("Failed to delete the volume %s of CG."),
LOG.exception("Failed to delete the volume %s of CG.",
vol['name'])
self.consistencygroup_obj.delete(
@ -375,7 +373,7 @@ class EMCCoprHDDriverCommon(object):
" delete failed\n%(err)s") %
{'name': name, 'err': six.text_type(e.msg)})
log_err_msg = (_LE("Consistency Group : %s deletion failed") %
log_err_msg = ("Consistency Group : %s deletion failed" %
name)
self._raise_or_log_exception(e.err_code, coprhd_err_msg,
log_err_msg)
@ -395,8 +393,8 @@ class EMCCoprHDDriverCommon(object):
coprhd_cgid = self._get_coprhd_cgid(cg_id)
cg_name = self._get_consistencygroup_name(cg_group)
LOG.info(_LI('Start to create cgsnapshot for consistency group'
': %(group_name)s'),
LOG.info('Start to create cgsnapshot for consistency group'
': %(group_name)s',
{'group_name': cg_name})
try:
@ -484,8 +482,8 @@ class EMCCoprHDDriverCommon(object):
{'cg_name': cg_name,
'err': six.text_type(e.msg)})
log_err_msg = (_LE("Snapshot %(name)s for Consistency"
" Group: %(cg_name)s creation failed") %
log_err_msg = ("Snapshot %(name)s for Consistency"
" Group: %(cg_name)s creation failed" %
{'cg_name': cg_name,
'name': cgsnapshot_name})
self._raise_or_log_exception(e.err_code, coprhd_err_msg,
@ -505,8 +503,8 @@ class EMCCoprHDDriverCommon(object):
cg_name = self._get_consistencygroup_name(cg_group)
model_update = {}
LOG.info(_LI('Delete cgsnapshot %(snap_name)s for consistency group: '
'%(group_name)s'), {'snap_name': cgsnapshot['name'],
LOG.info('Delete cgsnapshot %(snap_name)s for consistency group: '
'%(group_name)s', {'snap_name': cgsnapshot['name'],
'group_name': cg_name})
try:
@ -545,8 +543,8 @@ class EMCCoprHDDriverCommon(object):
'cg_name': cg_name,
'err': six.text_type(e.msg)})
log_err_msg = (_LE("Snapshot %(name)s for Consistency"
" Group: %(cg_name)s deletion failed") %
log_err_msg = ("Snapshot %(name)s for Consistency"
" Group: %(cg_name)s deletion failed" %
{'cg_name': cg_name,
'name': cgsnapshot_name})
self._raise_or_log_exception(e.err_code, coprhd_err_msg,
@ -618,10 +616,9 @@ class EMCCoprHDDriverCommon(object):
"%s:%s:%s" % (self.OPENSTACK_TAG, prop,
six.text_type(value)))
except TypeError:
LOG.error(
_LE("Error tagging the resource property %s"), prop)
LOG.error("Error tagging the resource property %s", prop)
except TypeError:
LOG.error(_LE("Error tagging the resource properties"))
LOG.error("Error tagging the resource properties")
try:
self.tag_obj.tag_resource(
@ -683,13 +680,13 @@ class EMCCoprHDDriverCommon(object):
"", full_project_name, name, True)
except IndexError:
LOG.exception(_LE("Volume clone detach returned empty task list"))
LOG.exception("Volume clone detach returned empty task list")
except coprhd_utils.CoprHdError as e:
coprhd_err_msg = (_("Volume %(name)s: clone failed\n%(err)s") %
{'name': name, 'err': six.text_type(e.msg)})
log_err_msg = (_LE("Volume : {%s} clone failed") % name)
log_err_msg = ("Volume : {%s} clone failed" % name)
self._raise_or_log_exception(e.err_code, coprhd_err_msg,
log_err_msg)
@ -712,7 +709,7 @@ class EMCCoprHDDriverCommon(object):
{'volume_name': name,
'err': six.text_type(e.msg)})
log_err_msg = (_LE("Volume : %s expand failed") % name)
log_err_msg = ("Volume : %s expand failed" % name)
self._raise_or_log_exception(e.err_code, coprhd_err_msg,
log_err_msg)
@ -735,8 +732,7 @@ class EMCCoprHDDriverCommon(object):
{'volume_name': volume_name,
'err': six.text_type(e.msg)})
log_err_msg = (_LE("Volume : %s expand failed") %
volume_name)
log_err_msg = "Volume : %s expand failed" % volume_name
self._raise_or_log_exception(e.err_code, coprhd_err_msg,
log_err_msg)
@ -789,8 +785,7 @@ class EMCCoprHDDriverCommon(object):
{'src_snapshot_name': src_snapshot_name,
'err': six.text_type(e.msg)})
log_err_msg = (_LE("Snapshot : %s clone failed") %
src_snapshot_name)
log_err_msg = "Snapshot : %s clone failed" % src_snapshot_name
self._raise_or_log_exception(e.err_code, coprhd_err_msg,
log_err_msg)
@ -809,8 +804,7 @@ class EMCCoprHDDriverCommon(object):
{'volume_name': new_volume_name,
'err': six.text_type(e.msg)})
log_err_msg = (_LE("Volume : %s expand failed") %
new_volume_name)
log_err_msg = "Volume : %s expand failed" % new_volume_name
self._raise_or_log_exception(e.err_code, coprhd_err_msg,
log_err_msg)
@ -825,16 +819,16 @@ class EMCCoprHDDriverCommon(object):
self.volume_obj.delete(full_project_name, name, sync=True)
except coprhd_utils.CoprHdError as e:
if e.err_code == coprhd_utils.CoprHdError.NOT_FOUND_ERR:
LOG.info(_LI(
LOG.info(
"Volume %s"
" no longer exists; volume deletion is"
" considered successful."), name)
" considered successful.", name)
else:
coprhd_err_msg = (_("Volume %(name)s: delete failed"
"\n%(err)s") %
{'name': name, 'err': six.text_type(e.msg)})
log_err_msg = (_LE("Volume : %s delete failed") % name)
log_err_msg = "Volume : %s delete failed" % name
self._raise_or_log_exception(e.err_code, coprhd_err_msg,
log_err_msg)
@ -851,7 +845,7 @@ class EMCCoprHDDriverCommon(object):
_("Snapshot can't be taken individually on a volume"
" that is part of a Consistency Group"))
except KeyError:
LOG.info(_LI("No Consistency Group associated with the volume"))
LOG.info("No Consistency Group associated with the volume")
if self.configuration.coprhd_emulate_snapshot:
self.create_cloned_volume(snapshot, volume, truncate_name)
@ -899,7 +893,7 @@ class EMCCoprHDDriverCommon(object):
"\n%(err)s") % {'snapshotname': snapshotname,
'err': six.text_type(e.msg)})
log_err_msg = (_LE("Snapshot : %s create failed") % snapshotname)
log_err_msg = "Snapshot : %s create failed" % snapshotname
self._raise_or_log_exception(e.err_code, coprhd_err_msg,
log_err_msg)
@ -916,7 +910,7 @@ class EMCCoprHDDriverCommon(object):
_("Snapshot delete can't be done individually on a volume"
" that is part of a Consistency Group"))
except KeyError:
LOG.info(_LI("No Consistency Group associated with the volume"))
LOG.info("No Consistency Group associated with the volume")
if self.configuration.coprhd_emulate_snapshot:
self.delete_volume(snapshot)
@ -936,10 +930,10 @@ class EMCCoprHDDriverCommon(object):
project=projectname,
tenant=tenantname)
if resource_uri is None:
LOG.info(_LI(
LOG.info(
"Snapshot %s"
" is not found; snapshot deletion"
" is considered successful."), snapshotname)
" is considered successful.", snapshotname)
else:
snapshotname = self._get_coprhd_snapshot_name(
snapshot, resource_uri)
@ -954,7 +948,7 @@ class EMCCoprHDDriverCommon(object):
coprhd_err_msg = (_("Snapshot %s : Delete Failed\n") %
snapshotname)
log_err_msg = (_LE("Snapshot : %s delete failed") % snapshotname)
log_err_msg = "Snapshot : %s delete failed" % snapshotname
self._raise_or_log_exception(e.err_code, coprhd_err_msg,
log_err_msg)
@ -976,11 +970,11 @@ class EMCCoprHDDriverCommon(object):
foundhostname = self._find_host(initiator_ports[i])
if foundhostname:
LOG.info(_LI("Found host %s"), foundhostname)
LOG.info("Found host %s", foundhostname)
break
if not foundhostname:
LOG.error(_LE("Auto host creation not supported"))
LOG.error("Auto host creation not supported")
# create an export group for this host
foundgroupname = foundhostname + 'SG'
# create a unique name
@ -1056,9 +1050,9 @@ class EMCCoprHDDriverCommon(object):
None,
None)
else:
LOG.info(_LI(
LOG.info(
"No export group found for the host: %s"
"; this is considered already detached."), hostname)
"; this is considered already detached.", hostname)
return itls
@ -1133,11 +1127,11 @@ class EMCCoprHDDriverCommon(object):
if itls is None:
# No device number found after 10 tries; return an empty itl
LOG.info(_LI(
LOG.info(
"No device number has been found after 10 tries; "
"this likely indicates an unsuccessful attach of "
"volume volumename=%(volumename)s to"
" initiator initiator_ports=%(initiator_ports)s"),
" initiator initiator_ports=%(initiator_ports)s",
{'volumename': volumename,
'initiator_ports': initiator_ports})
@ -1408,7 +1402,7 @@ class EMCCoprHDDriverCommon(object):
except coprhd_utils.CoprHdError:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Update volume stats failed"))
LOG.exception("Update volume stats failed")
@retry_wrapper
def retype(self, ctxt, volume, new_type, diff, host):
@ -1434,7 +1428,7 @@ class EMCCoprHDDriverCommon(object):
"\n%(err)s") % {'volume_name': volume_name,
'err': six.text_type(e.msg)})
log_err_msg = (_LE("Volume : %s type update failed") %
log_err_msg = ("Volume : %s type update failed" %
volume_name)
self._raise_or_log_exception(e.err_code, coprhd_err_msg,
log_err_msg)
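The CoprHD helper is the one place where eager %-formatting is kept: log_err_msg is handed to _raise_or_log_exception rather than to LOG directly, so it has to be a finished string, and only the _LE wrapper goes away. Condensed from the delete hunk above:

    coprhd_err_msg = (_("Volume %(name)s: delete failed\n%(err)s") %
                      {'name': name, 'err': six.text_type(e.msg)})
    log_err_msg = "Volume : %s delete failed" % name
    self._raise_or_log_exception(e.err_code, coprhd_err_msg, log_err_msg)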

View File

@ -24,7 +24,6 @@ from six.moves import urllib
from cinder import exception
from cinder.i18n import _
from cinder.i18n import _LI
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.coprhd import common as coprhd_common
@ -266,7 +265,7 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver):
request = ("https://%s:%s/api/types/Sdc/instances/getByIp::%s/" %
(server_ip, six.text_type(server_port), ip_double_encoded))
LOG.info(_LI("ScaleIO get client id by ip request: %s"), request)
LOG.info("ScaleIO get client id by ip request: %s", request)
if self.configuration.scaleio_verify_server_certificate:
verify_cert = self.configuration.scaleio_server_certificate_path
@ -292,7 +291,7 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver):
'message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("ScaleIO sdc id is %s"), sdc_id)
LOG.info("ScaleIO sdc id is %s", sdc_id)
return sdc_id
def _check_response(self, response, request,
@ -300,7 +299,7 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver):
server_username, server_password):
if response.status_code == 401 or response.status_code == 403:
LOG.info(
_LI("Token is invalid, going to re-login and get a new one"))
"Token is invalid, going to re-login and get a new one")
login_request = ("https://%s:%s/api/login" %
(server_ip, six.text_type(server_port)))
@ -317,7 +316,7 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver):
token = r.json()
self.server_token = token
# repeat request with valid token
LOG.info(_LI("Going to perform request again %s with valid token"),
LOG.info("Going to perform request again %s with valid token",
request)
res = requests.get(
request, auth=(server_username, self.server_token),

View File

@ -24,8 +24,8 @@ from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from cinder.i18n import _, _LI, _LW, _LE
from cinder import exception
from cinder.i18n import _
from cinder.volume import utils as volutils
import cinder.volume.drivers.datera.datera_common as datc
@ -98,8 +98,8 @@ class DateraApi(object):
policies = self._get_policies_for_resource(volume)
template = policies['template']
if template:
LOG.warning(_LW("Volume size not extended due to template binding:"
" volume: %(volume)s, template: %(template)s"),
LOG.warning("Volume size not extended due to template binding:"
" volume: %(volume)s, template: %(template)s",
volume=volume, template=template)
return
@ -164,9 +164,9 @@ class DateraApi(object):
method='delete',
api_version='2')
except exception.NotFound:
msg = _LI("Tried to delete volume %s, but it was not found in the "
"Datera cluster. Continuing with delete.")
LOG.info(msg, datc._get_name(volume['id']))
LOG.info("Tried to delete volume %s, but it was not found in the "
"Datera cluster. Continuing with delete.",
datc._get_name(volume['id']))
# =================
# = Ensure Export =
@ -341,7 +341,7 @@ class DateraApi(object):
self._issue_api_request(url, method='put', body=data,
api_version='2')
except exception.NotFound:
msg = _LI("Tried to detach volume %s, but it was not found in the "
msg = ("Tried to detach volume %s, but it was not found in the "
"Datera cluster. Continuing with detach.")
LOG.info(msg, volume['id'])
# TODO(_alastor_): Make acl cleaning multi-attach aware
@ -436,7 +436,7 @@ class DateraApi(object):
else:
raise exception.NotFound
except exception.NotFound:
msg = _LI("Tried to delete snapshot %s, but was not found in "
msg = ("Tried to delete snapshot %s, but was not found in "
"Datera cluster. Continuing with delete.")
LOG.info(msg, datc._get_name(snapshot['id']))
@ -610,8 +610,8 @@ class DateraApi(object):
results = self._issue_api_request('system', api_version='2')
if 'uuid' not in results:
LOG.error(_LE(
'Failed to get updated stats from Datera Cluster.'))
LOG.error(
'Failed to get updated stats from Datera Cluster.')
backend_name = self.configuration.safe_get(
'volume_backend_name')
@ -629,8 +629,7 @@ class DateraApi(object):
self.cluster_stats = stats
except exception.DateraAPIException:
LOG.error(_LE('Failed to get updated stats from Datera '
'cluster.'))
LOG.error('Failed to get updated stats from Datera cluster.')
return self.cluster_stats
def _is_manageable(self, app_inst):
@ -662,10 +661,10 @@ class DateraApi(object):
self.datera_api_token = results['key']
except exception.NotAuthorized:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Logging into the Datera cluster failed. Please '
LOG.error('Logging into the Datera cluster failed. Please '
'check your username and password set in the '
'cinder.conf and start the cinder-volume '
'service again.'))
'service again.')
# ===========
# = Polling =
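Two variants of the same cleanup appear in this driver, depending on whether the local message variable was kept. Reduced from the delete and detach hunks above:

    # Variant A: the variable stays, now a plain parenthesized string.
    msg = ("Tried to detach volume %s, but it was not found in the "
           "Datera cluster. Continuing with detach.")
    LOG.info(msg, volume['id'])

    # Variant B: the variable is dropped and the string is inlined.
    LOG.info("Tried to delete volume %s, but it was not found in the "
             "Datera cluster. Continuing with delete.",
             datc._get_name(volume['id']))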

View File

@ -23,8 +23,8 @@ from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from cinder.i18n import _, _LI, _LW, _LE
from cinder import exception
from cinder.i18n import _
from cinder.volume import utils as volutils
import cinder.volume.drivers.datera.datera_common as datc
@ -104,8 +104,8 @@ class DateraApi(object):
policies = self._get_policies_for_resource(volume)
template = policies['template']
if template:
LOG.warning(_LW("Volume size not extended due to template binding:"
" volume: %(volume)s, template: %(template)s"),
LOG.warning("Volume size not extended due to template binding:"
" volume: %(volume)s, template: %(template)s",
volume=volume, template=template)
return
@ -184,7 +184,7 @@ class DateraApi(object):
api_version='2.1',
tenant=tenant)
except exception.NotFound:
msg = _LI("Tried to delete volume %s, but it was not found in the "
msg = ("Tried to delete volume %s, but it was not found in the "
"Datera cluster. Continuing with delete.")
LOG.info(msg, datc._get_name(volume['id']))
@ -378,7 +378,7 @@ class DateraApi(object):
self._issue_api_request(url, method='put', body=data,
api_version='2.1', tenant=tenant)
except exception.NotFound:
msg = _LI("Tried to detach volume %s, but it was not found in the "
msg = ("Tried to detach volume %s, but it was not found in the "
"Datera cluster. Continuing with detach.")
LOG.info(msg, volume['id'])
# TODO(_alastor_): Make acl cleaning multi-attach aware
@ -481,7 +481,7 @@ class DateraApi(object):
else:
raise exception.NotFound
except exception.NotFound:
msg = _LI("Tried to delete snapshot %s, but was not found in "
msg = ("Tried to delete snapshot %s, but was not found in "
"Datera cluster. Continuing with delete.")
LOG.info(msg, datc._get_name(snapshot['id']))
@ -772,10 +772,10 @@ class DateraApi(object):
self.datera_api_token = results['key']
except exception.NotAuthorized:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Logging into the Datera cluster failed. Please '
LOG.error('Logging into the Datera cluster failed. Please '
'check your username and password set in the '
'cinder.conf and start the cinder-volume '
'service again.'))
'service again.')
# ===========
# = Polling =
@ -834,8 +834,8 @@ class DateraApi(object):
'system', api_version='2.1')['data']
if 'uuid' not in results:
LOG.error(_LE(
'Failed to get updated stats from Datera Cluster.'))
LOG.error(
'Failed to get updated stats from Datera Cluster.')
backend_name = self.configuration.safe_get(
'volume_backend_name')
@ -854,8 +854,7 @@ class DateraApi(object):
self.cluster_stats = stats
except exception.DateraAPIException:
LOG.error(_LE('Failed to get updated stats from Datera '
'cluster.'))
LOG.error('Failed to get updated stats from Datera cluster.')
return self.cluster_stats
# =======
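The login-failure hunk above pairs the now-plain LOG.error with excutils.save_and_reraise_exception, which re-raises the original exception when the with block exits; dropping the marker leaves that control flow untouched. The final shape of the code, as a sketch:

    except exception.NotAuthorized:
        with excutils.save_and_reraise_exception():
            LOG.error('Logging into the Datera cluster failed. Please '
                      'check your username and password set in the '
                      'cinder.conf and start the cinder-volume '
                      'service again.')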

View File

@ -21,7 +21,7 @@ import time
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _, _LI, _LE
from cinder.i18n import _
LOG = logging.getLogger(__name__)
@ -156,7 +156,7 @@ def _api_lookup(func):
name = "_" + "_".join(
(func.__name__, api_version.replace(".", "_")))
try:
LOG.info(_LI("Trying method: %s"), name)
LOG.info("Trying method: %s", name)
return getattr(obj, name)(*args[1:], **kwargs)
except AttributeError as e:
# If we find the attribute name in the error message
@ -206,6 +206,6 @@ def _get_supported_api_versions(driver):
str(resp.json().get("code")) == "99"):
results.append(version)
else:
LOG.error(_LE("No supported API versions available, "
"Please upgrade your Datera EDF software"))
LOG.error("No supported API versions available, "
"Please upgrade your Datera EDF software")
return results
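A change of this breadth is easy to verify mechanically. A small, illustrative scan for leftover markers (not part of the commit; paths_to_check is a hypothetical iterable of .py paths):

    import re

    marker = re.compile(r'\b_L[EIW]\(')
    for path in paths_to_check:
        with open(path) as f:
            for lineno, line in enumerate(f, 1):
                if marker.search(line):
                    print('%s:%d: %s' % (path, lineno, line.strip()))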

View File

@ -25,7 +25,7 @@ import six
import uuid
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.i18n import _
from cinder import utils
LOG = logging.getLogger(__name__)
@ -152,15 +152,15 @@ class HttpClient(object):
url = url + id
else:
# No hope.
LOG.error(_LE('_get_async_url: Bogus return async task %r'),
LOG.error('_get_async_url: Bogus return async task %r',
asyncTask)
raise exception.VolumeBackendAPIException(
message=_('_get_async_url: Invalid URL.'))
# Check for an odd error case
if url.startswith('<') and url.endswith('>'):
LOG.error(_LE('_get_async_url: Malformed URL '
'(XML returned). (%r)'), asyncTask)
LOG.error('_get_async_url: Malformed URL (XML returned). (%r)',
asyncTask)
raise exception.VolumeBackendAPIException(
message=_('_get_async_url: Malformed URL.'))
@ -308,8 +308,8 @@ class StorageCenterApiHelper(object):
self.san_login = self.config.secondary_san_login
self.san_password = self.config.secondary_san_password
else:
LOG.info(_LI('Swapping DSM credentials: Secondary DSM '
'credentials are not set or are incomplete.'))
LOG.info('Swapping DSM credentials: Secondary DSM '
'credentials are not set or are incomplete.')
# Cannot swap.
return False
# Odds on this hasn't changed so no need to make setting this a
@ -322,7 +322,7 @@ class StorageCenterApiHelper(object):
self.san_login = self.config.san_login
self.san_password = self.config.san_password
self.san_port = self.config.dell_sc_api_port
LOG.info(_LI('Swapping DSM credentials: New DSM IP is %r.'),
LOG.info('Swapping DSM credentials: New DSM IP is %r.',
self.san_ip)
return True
@ -363,7 +363,7 @@ class StorageCenterApiHelper(object):
:raises: VolumeBackendAPIException
"""
connection = None
LOG.info(_LI('open_connection to %(ssn)s at %(ip)s'),
LOG.info('open_connection to %(ssn)s at %(ip)s',
{'ssn': self.primaryssn,
'ip': self.config.san_ip})
if self.primaryssn:
@ -376,11 +376,11 @@ class StorageCenterApiHelper(object):
connection = self._setup_connection()
else:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to connect to the API. '
'No backup DSM provided.'))
LOG.error('Failed to connect to the API. '
'No backup DSM provided.')
# Save our api version for next time.
if self.apiversion != connection.apiversion:
LOG.info(_LI('open_connection: Updating API version to %s'),
LOG.info('open_connection: Updating API version to %s',
connection.apiversion)
self.apiversion = connection.apiversion
@ -488,7 +488,7 @@ class StorageCenterApi(object):
'reason': rest_response.reason,
'text': response_text})
else:
LOG.warning(_LW('Failed to get REST call result.'))
LOG.warning('Failed to get REST call result.')
return False
@staticmethod
@ -563,12 +563,11 @@ class StorageCenterApi(object):
try:
return blob.json()
except AttributeError:
LOG.error(_LE('Error invalid json: %s'),
blob)
LOG.error('Error invalid json: %s', blob)
except TypeError as ex:
LOG.error(_LE('Error TypeError. %s'), ex)
LOG.error('Error TypeError. %s', ex)
except scanner.JSONDecodeError as ex:
LOG.error(_LE('Error JSONDecodeError. %s'), ex)
LOG.error('Error JSONDecodeError. %s', ex)
# We are here so this went poorly. Log our blob.
LOG.debug('_get_json blob %s', blob)
return None
@ -583,12 +582,11 @@ class StorageCenterApi(object):
if isinstance(blob, dict):
return blob.get('instanceId')
except AttributeError:
LOG.error(_LE('Invalid API object: %s'),
blob)
LOG.error('Invalid API object: %s', blob)
except TypeError as ex:
LOG.error(_LE('Error TypeError. %s'), ex)
LOG.error('Error TypeError. %s', ex)
except scanner.JSONDecodeError as ex:
LOG.error(_LE('Error JSONDecodeError. %s'), ex)
LOG.error('Error JSONDecodeError. %s', ex)
LOG.debug('_get_id failed: blob %s', blob)
return None
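Dropping the marker often frees enough width to re-join a wrapped call on a single line within the 79-column limit, which is why several two-line calls in this file collapse to one. From the hunks above:

    # Before
    LOG.error(_LE('Error invalid json: %s'),
              blob)

    # After
    LOG.error('Error invalid json: %s', blob)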
@ -617,7 +615,7 @@ class StorageCenterApi(object):
except Exception:
# We don't care what failed. The clues are already in the logs.
# Just log a parsing error and move on.
LOG.error(_LE('_check_version_fail: Parsing error.'))
LOG.error('_check_version_fail: Parsing error.')
# Just eat this if it isn't a version error.
return response
@ -662,7 +660,7 @@ class StorageCenterApi(object):
except Exception:
# Good return but not the login response we were expecting.
# Log it and error out.
LOG.error(_LE('Unrecognized Login Response: %s'), r)
LOG.error('Unrecognized Login Response: %s', r)
def close_connection(self):
"""Logout of Dell REST API."""
@ -691,7 +689,7 @@ class StorageCenterApi(object):
'%(pid)r not valid on %(ssn)r',
{'pid': provider_id, 'ssn': self.ssn})
except Exception:
LOG.error(_LE('_use_provider_id: provider_id %s is invalid!'),
LOG.error('_use_provider_id: provider_id %s is invalid!',
provider_id)
return ret
@ -708,7 +706,7 @@ class StorageCenterApi(object):
r = self.client.get('StorageCenter/StorageCenter')
result = self._get_result(r, 'scSerialNumber', ssn)
if result is None:
LOG.error(_LE('Failed to find %(s)s. Result %(r)s'),
LOG.error('Failed to find %(s)s. Result %(r)s',
{'s': ssn,
'r': r})
raise exception.VolumeBackendAPIException(
@ -779,7 +777,7 @@ class StorageCenterApi(object):
scfolder = self._create_folder(url, instanceId, folder, ssn)
# If we haven't found a folder or created it then leave
if scfolder is None:
LOG.error(_LE('Unable to create folder path %s'), folderpath)
LOG.error('Unable to create folder path %s', folderpath)
break
# Next part of the path will need this
instanceId = self._get_id(scfolder)
@ -878,9 +876,9 @@ class StorageCenterApi(object):
# has likely been attempted before the volume has been instantiated
# on the Storage Center. In the real world no one will snapshot
# a volume without first putting some data in that volume.
LOG.warning(_LW('Volume %(name)s initialization failure. '
LOG.warning('Volume %(name)s initialization failure. '
'Operations such as snapshot and clone may fail due '
'to inactive volume.)'), {'name': scvolume['name']})
'to inactive volume.)', {'name': scvolume['name']})
def _find_storage_profile(self, storage_profile):
"""Looks for a Storage Profile on the array.
@ -1066,7 +1064,7 @@ class StorageCenterApi(object):
# If we actually have a place to put our volume create it
if folder is None:
LOG.warning(_LW('Unable to create folder %s'), self.vfname)
LOG.warning('Unable to create folder %s', self.vfname)
# Find our replay_profiles.
addids, removeids = self._find_replay_profiles(replay_profile_string)
@ -1108,17 +1106,17 @@ class StorageCenterApi(object):
# Our volume should be in the return.
scvolume = self._get_json(r)
if scvolume:
LOG.info(_LI('Created volume %(instanceId)s: %(name)s'),
LOG.info('Created volume %(instanceId)s: %(name)s',
{'instanceId': scvolume['instanceId'],
'name': scvolume['name']})
else:
LOG.error(_LE('ScVolume returned success with empty payload.'
' Attempting to locate volume'))
LOG.error('ScVolume returned success with empty payload.'
' Attempting to locate volume')
# In theory it is there since success was returned.
# Try one last time to find it before returning.
scvolume = self._search_for_volume(name)
else:
LOG.error(_LE('Unable to create volume on SC: %s'), name)
LOG.error('Unable to create volume on SC: %s', name)
return scvolume
@ -1170,8 +1168,7 @@ class StorageCenterApi(object):
# if there is no live volume then we return our provider_id.
primary_id = provider_id
lv = self.get_live_volume(provider_id, name)
LOG.info(_LI('Volume %(name)r, '
'id %(provider)s at primary %(primary)s.'),
LOG.info('Volume %(name)r, id %(provider)s at primary %(primary)s.',
{'name': name,
'provider': provider_id,
'primary': primary_id})
@ -1180,7 +1177,7 @@ class StorageCenterApi(object):
if lv and (self.is_swapped(provider_id, lv) and not self.failed_over
and self._autofailback(lv)):
lv = self.get_live_volume(provider_id)
LOG.info(_LI('After failback %s'), lv)
LOG.info('After failback %s', lv)
# Make sure we still have a LV.
if lv:
# At this point if the secondaryRole is Active we have
@ -1226,7 +1223,7 @@ class StorageCenterApi(object):
msg = (_('Unable to complete failover of %s.')
% name)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI('Imported %(fail)s to %(guid)s.'),
LOG.info('Imported %(fail)s to %(guid)s.',
{'fail': self._repl_name(name),
'guid': name})
else:
@ -1313,8 +1310,8 @@ class StorageCenterApi(object):
return self._get_json(r)
# If we can't find the volume then it is effectively gone.
LOG.warning(_LW('delete_volume: unable to find volume '
'provider_id: %s'), provider_id)
LOG.warning('delete_volume: unable to find volume '
'provider_id: %s', provider_id)
return True
def _find_server_folder(self, create=False, ssn=-1):
@ -1354,7 +1351,7 @@ class StorageCenterApi(object):
r = self.client.post('StorageCenter/ScPhysicalServer/%s/AddHba'
% self._get_id(scserver), payload, True)
if not self._check_result(r):
LOG.error(_LE('_add_hba error: %(wwn)s to %(srvname)s'),
LOG.error('_add_hba error: %(wwn)s to %(srvname)s',
{'wwn': wwnoriscsiname,
'srvname': scserver['name']})
return False
@ -1385,7 +1382,7 @@ class StorageCenterApi(object):
# Found it return the id
return self._get_id(srvos)
LOG.warning(_LW('Unable to find appropriate OS %s'), osname)
LOG.warning('Unable to find appropriate OS %s', osname)
return None
@ -1412,7 +1409,7 @@ class StorageCenterApi(object):
for wwn in wwnlist:
if not self._add_hba(scserver, wwn):
# We failed so log it. Delete our server and return None.
LOG.error(_LE('Error adding HBA %s to server'), wwn)
LOG.error('Error adding HBA %s to server', wwn)
self._delete_server(scserver)
return None
return scserver
@ -1420,7 +1417,7 @@ class StorageCenterApi(object):
def _create_server(self, servername, folder, serveros, ssn):
ssn = self._vet_ssn(ssn)
LOG.info(_LI('Creating server %s'), servername)
LOG.info('Creating server %s', servername)
payload = {}
payload['Name'] = servername
payload['StorageCenter'] = ssn
@ -1445,9 +1442,9 @@ class StorageCenterApi(object):
if self._check_result(r):
# Server was created
scserver = self._first_result(r)
LOG.info(_LI('SC server created %s'), scserver)
LOG.info('SC server created %s', scserver)
return scserver
LOG.error(_LE('Unable to create SC server %s'), servername)
LOG.error('Unable to create SC server %s', servername)
return None
def _vet_ssn(self, ssn):
@ -1529,7 +1526,7 @@ class StorageCenterApi(object):
domains = self._get_json(r)
return domains
LOG.error(_LE('Error getting FaultDomainList for %s'), cportid)
LOG.error('Error getting FaultDomainList for %s', cportid)
return None
def _find_initiators(self, scserver):
@ -1549,7 +1546,7 @@ class StorageCenterApi(object):
wwn is not None):
initiators.append(wwn)
else:
LOG.error(_LE('Unable to find initiators'))
LOG.error('Unable to find initiators')
LOG.debug('_find_initiators: %s', initiators)
return initiators
@ -1580,8 +1577,8 @@ class StorageCenterApi(object):
if self._check_result(r):
mappings = self._get_json(r)
else:
LOG.error(_LE('_find_mappings: volume is not active'))
LOG.info(_LI('Volume mappings for %(name)s: %(mappings)s'),
LOG.error('_find_mappings: volume is not active')
LOG.info('Volume mappings for %(name)s: %(mappings)s',
{'name': scvolume.get('name'),
'mappings': mappings})
return mappings
@ -1598,7 +1595,7 @@ class StorageCenterApi(object):
if self._check_result(r):
mapping_profiles = self._get_json(r)
else:
LOG.error(_LE('Unable to find mapping profiles: %s'),
LOG.error('Unable to find mapping profiles: %s',
scvolume.get('name'))
LOG.debug(mapping_profiles)
return mapping_profiles
@ -1655,17 +1652,17 @@ class StorageCenterApi(object):
if lun is None:
lun = mappinglun
elif lun != mappinglun:
LOG.warning(_LW('Inconsistent Luns.'))
LOG.warning('Inconsistent Luns.')
else:
LOG.debug('%s not found in initiator list',
hbaname)
else:
LOG.warning(_LW('_find_wwn: serverhba is None.'))
LOG.warning('_find_wwn: serverhba is None.')
else:
LOG.warning(_LW('_find_wwn: Unable to find port wwn.'))
LOG.warning('_find_wwn: Unable to find port wwn.')
else:
LOG.warning(_LW('_find_wwn: controllerport is None.'))
LOG.info(_LI('_find_wwns-lun: %(lun)s wwns: %(wwn)s itmap: %(map)s'),
LOG.warning('_find_wwn: controllerport is None.')
LOG.info('_find_wwns-lun: %(lun)s wwns: %(wwn)s itmap: %(map)s',
{'lun': lun,
'wwn': wwns,
'map': itmap})
@ -1686,7 +1683,7 @@ class StorageCenterApi(object):
controller = volconfig.get('controller')
actvctrl = self._get_id(controller)
else:
LOG.error(_LE('Unable to retrieve VolumeConfiguration: %s'),
LOG.error('Unable to retrieve VolumeConfiguration: %s',
self._get_id(scvolume))
LOG.debug('_find_active_controller: %s', actvctrl)
return actvctrl
@ -1731,8 +1728,8 @@ class StorageCenterApi(object):
if self._check_result(r):
controllerport = self._first_result(r)
else:
LOG.error(_LE('_find_controller_port_iscsi_config: '
'Error finding configuration: %s'), cportid)
LOG.error('_find_controller_port_iscsi_config: '
'Error finding configuration: %s', cportid)
return controllerport
def find_iscsi_properties(self, scvolume):
@ -1904,7 +1901,7 @@ class StorageCenterApi(object):
mprofiles = self._find_mapping_profiles(scvolume)
for mprofile in mprofiles:
if self._get_id(mprofile.get('server')) == serverid:
LOG.info(_LI('Volume %(vol)s already mapped to %(srv)s'),
LOG.info('Volume %(vol)s already mapped to %(srv)s',
{'vol': scvolume['name'],
'srv': scserver['name']})
return mprofile
@ -1916,13 +1913,13 @@ class StorageCenterApi(object):
% volumeid, payload, True)
if self._check_result(r):
# We just return our mapping
LOG.info(_LI('Volume %(vol)s mapped to %(srv)s'),
LOG.info('Volume %(vol)s mapped to %(srv)s',
{'vol': scvolume['name'],
'srv': scserver['name']})
return self._first_result(r)
# Error out
LOG.error(_LE('Unable to map %(vol)s to %(srv)s'),
LOG.error('Unable to map %(vol)s to %(srv)s',
{'vol': scvolume['name'],
'srv': scserver['name']})
return None
@ -1956,12 +1953,12 @@ class StorageCenterApi(object):
if result is True or (type(result) is dict and
result.get('result')):
LOG.info(
_LI('Volume %(vol)s unmapped from %(srv)s'),
'Volume %(vol)s unmapped from %(srv)s',
{'vol': scvolume['name'],
'srv': scserver['name']})
continue
LOG.error(_LE('Unable to unmap %(vol)s from %(srv)s'),
LOG.error('Unable to unmap %(vol)s from %(srv)s',
{'vol': scvolume['name'],
'srv': scserver['name']})
# 1 failed unmap is as good as 100.
@ -2018,7 +2015,7 @@ class StorageCenterApi(object):
# Quick double check.
if replay is None:
LOG.warning(_LW('Unable to create snapshot %s'), replayid)
LOG.warning('Unable to create snapshot %s', replayid)
# Return replay or None.
return replay
@ -2052,10 +2049,10 @@ class StorageCenterApi(object):
# We found our replay so return it.
return replay
except Exception:
LOG.error(_LE('Invalid ReplayList return: %s'),
LOG.error('Invalid ReplayList return: %s',
r)
# If we are here then we didn't find the replay so warn and leave.
LOG.warning(_LW('Unable to find snapshot %s'),
LOG.warning('Unable to find snapshot %s',
replayid)
return None
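Mechanically, most hunks in this change only drop a wrapper and move its closing parenthesis; Python concatenates adjacent string literals at compile time, so the message text itself is unchanged. A tiny illustration (the message here is made up):

msg = ('Unable to find snapshot %s'
       ' on this backend')  # one literal after compile-time concatenation
assert msg == 'Unable to find snapshot %s on this backend'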
@ -2075,7 +2072,7 @@ class StorageCenterApi(object):
self._get_id(screplay), payload, True)
if self._check_result(r):
return True
LOG.error(_LE('Error managing replay %s'),
LOG.error('Error managing replay %s',
screplay.get('description'))
return False
@ -2092,7 +2089,7 @@ class StorageCenterApi(object):
self._get_id(screplay), payload, True)
if self._check_result(r):
return True
LOG.error(_LE('Error unmanaging replay %s'),
LOG.error('Error unmanaging replay %s',
screplay.get('description'))
return False
@ -2162,12 +2159,11 @@ class StorageCenterApi(object):
# If we have a dr_profile to apply we should do so now.
if dr_profile and not self.update_datareduction_profile(volume,
dr_profile):
LOG.error(_LE('Unable to apply %s to volume.'), dr_profile)
LOG.error('Unable to apply %s to volume.', dr_profile)
volume = None
if volume is None:
LOG.error(_LE('Unable to create volume %s from replay'),
volname)
LOG.error('Unable to create volume %s from replay', volname)
return volume
@ -2230,7 +2226,7 @@ class StorageCenterApi(object):
:returns: The new volume's Dell volume object.
:raises: VolumeBackendAPIException if error doing copy.
"""
LOG.info(_LI('create_cloned_volume: Creating %(dst)s from %(src)s'),
LOG.info('create_cloned_volume: Creating %(dst)s from %(src)s',
{'dst': volumename,
'src': scvolume['name']})
@ -2273,7 +2269,7 @@ class StorageCenterApi(object):
self.delete_volume(volumename, self._get_id(newvol))
raise
# Tell the user.
LOG.error(_LE('create_cloned_volume: Unable to clone volume'))
LOG.error('create_cloned_volume: Unable to clone volume')
return None
def expand_volume(self, scvolume, newsize):
@ -2296,7 +2292,7 @@ class StorageCenterApi(object):
{'name': vol['name'],
'size': vol['configuredSize']})
else:
LOG.error(_LE('Error expanding volume %s.'), scvolume['name'])
LOG.error('Error expanding volume %s.', scvolume['name'])
return vol
def rename_volume(self, scvolume, name):
@ -2316,7 +2312,7 @@ class StorageCenterApi(object):
if self._check_result(r):
return True
LOG.error(_LE('Error renaming volume %(original)s to %(name)s'),
LOG.error('Error renaming volume %(original)s to %(name)s',
{'original': scvolume['name'],
'name': name})
return False
@ -2329,13 +2325,13 @@ class StorageCenterApi(object):
return False
if not prefs.get(allowprefname):
LOG.error(_LE('User does not have permission to change '
'%s selection.'), profiletype)
LOG.error('User does not have permission to change '
'%s selection.', profiletype)
return False
if profilename:
if not profile:
LOG.error(_LE('%(ptype)s %(pname)s was not found.'),
LOG.error('%(ptype)s %(pname)s was not found.',
{'ptype': profiletype,
'pname': profilename})
return False
@ -2343,10 +2339,10 @@ class StorageCenterApi(object):
# Going from specific profile to the user default
profile = prefs.get(restname)
if not profile and not continuewithoutdefault:
LOG.error(_LE('Default %s was not found.'), profiletype)
LOG.error('Default %s was not found.', profiletype)
return False
LOG.info(_LI('Switching volume %(vol)s to profile %(prof)s.'),
LOG.info('Switching volume %(vol)s to profile %(prof)s.',
{'vol': scvolume['name'],
'prof': profile.get('name')})
payload = {}
@ -2356,8 +2352,8 @@ class StorageCenterApi(object):
if self._check_result(r):
return True
LOG.error(_LE('Error changing %(ptype)s for volume '
'%(original)s to %(name)s'),
LOG.error('Error changing %(ptype)s for volume '
'%(original)s to %(name)s',
{'ptype': profiletype,
'original': scvolume['name'],
'name': profilename})
@ -2467,7 +2463,7 @@ class StorageCenterApi(object):
profilelist = self._get_json(r)
if profilelist:
if len(profilelist) > 1:
LOG.error(_LE('Multiple replay profiles under name %s'),
LOG.error('Multiple replay profiles under name %s',
name)
raise exception.VolumeBackendAPIException(
data=_('Multiple profiles found.'))
@ -2507,12 +2503,12 @@ class StorageCenterApi(object):
r = self.client.delete('StorageCenter/ScReplayProfile/%s' %
self._get_id(profile), async=True)
if self._check_result(r):
LOG.info(_LI('Profile %s has been deleted.'),
LOG.info('Profile %s has been deleted.',
profile.get('name'))
else:
# We failed due to a failure to delete an existing profile.
# This is reason to raise an exception.
LOG.error(_LE('Unable to delete profile %s.'), profile.get('name'))
LOG.error('Unable to delete profile %s.', profile.get('name'))
raise exception.VolumeBackendAPIException(
data=_('Error deleting replay profile.'))
@ -2580,9 +2576,9 @@ class StorageCenterApi(object):
if (self._update_volume_profiles(scvolume,
addid=profileid,
removeid=None)):
LOG.info(_LI('Added %s to cg.'), vol['id'])
LOG.info('Added %s to cg.', vol['id'])
else:
LOG.error(_LE('Failed to add %s to cg.'), vol['id'])
LOG.error('Failed to add %s to cg.', vol['id'])
return False
return True
@ -2599,9 +2595,9 @@ class StorageCenterApi(object):
if (self._update_volume_profiles(scvolume,
addid=None,
removeid=profileid)):
LOG.info(_LI('Removed %s from cg.'), vol['id'])
LOG.info('Removed %s from cg.', vol['id'])
else:
LOG.error(_LE('Failed to remove %s from cg.'), vol['id'])
LOG.error('Failed to remove %s from cg.', vol['id'])
return False
return True
@ -2622,10 +2618,10 @@ class StorageCenterApi(object):
ret = True
profileid = self._get_id(profile)
if add_volumes:
LOG.info(_LI('Adding volumes to cg %s.'), profile['name'])
LOG.info('Adding volumes to cg %s.', profile['name'])
ret = self._add_cg_volumes(profileid, add_volumes)
if ret and remove_volumes:
LOG.info(_LI('Removing volumes from cg %s.'), profile['name'])
LOG.info('Removing volumes from cg %s.', profile['name'])
ret = self._remove_cg_volumes(profileid, remove_volumes)
return ret
@ -2666,7 +2662,7 @@ class StorageCenterApi(object):
'CreateReplay'
% self._get_id(profile), payload, True)
if self._check_result(r):
LOG.info(_LI('CreateReplay success %s'), replayid)
LOG.info('CreateReplay success %s', replayid)
return True
return False
@ -2716,7 +2712,7 @@ class StorageCenterApi(object):
replays = self._get_json(r)
else:
LOG.error(_LE('Unable to locate snapshot %s'), replayid)
LOG.error('Unable to locate snapshot %s', replayid)
return replays
@ -2780,7 +2776,7 @@ class StorageCenterApi(object):
# If we actually have a place to put our volume create it
if folder is None:
LOG.warning(_LW('Unable to create folder %s'), self.vfname)
LOG.warning('Unable to create folder %s', self.vfname)
# Rename and move our volume.
payload = {}
@ -2882,7 +2878,7 @@ class StorageCenterApi(object):
r = self.client.put('StorageCenter/ScVolume/%s' %
self._get_id(scvolume), payload, True)
if self._check_result(r):
LOG.info(_LI('Volume %s unmanaged.'), scvolume['name'])
LOG.info('Volume %s unmanaged.', scvolume['name'])
else:
msg = _('Unable to rename volume %(existing)s to %(newname)s') % {
'existing': scvolume['name'],
@ -2917,7 +2913,7 @@ class StorageCenterApi(object):
if self._check_result(r):
return self._get_json(r)
LOG.error(_LE('Unable to find or create QoS Node named %s'), qosnode)
LOG.error('Unable to find or create QoS Node named %s', qosnode)
raise exception.VolumeBackendAPIException(
data=_('Failed to find QoSnode'))
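The hunk above shows the split this change preserves: the log call loses its translation marker, while the exception payload keeps _() because API-facing strings are still translated. A self-contained sketch with stand-ins for the cinder names:

import logging

LOG = logging.getLogger(__name__)

def _(msg):  # stand-in for cinder.i18n._
    return msg

class VolumeBackendAPIException(Exception):
    pass

def require_qos_node(qosnode, found):
    if not found:
        LOG.error('Unable to find or create QoS Node named %s', qosnode)  # no marker
        raise VolumeBackendAPIException(_('Failed to find QoSnode'))  # still marked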
@ -2961,7 +2957,7 @@ class StorageCenterApi(object):
if replication.get('destinationScSerialNumber') == destssn:
return replication
# Unable to locate replication.
LOG.warning(_LW('Unable to locate replication %(vol)s to %(ssn)s'),
LOG.warning('Unable to locate replication %(vol)s to %(ssn)s',
{'vol': scvolume.get('name'),
'ssn': destssn})
return None
@ -2985,13 +2981,13 @@ class StorageCenterApi(object):
async=True)
if self._check_result(r):
# check that we whacked the dest volume
LOG.info(_LI('Replication %(vol)s to %(dest)s.'),
LOG.info('Replication %(vol)s to %(dest)s.',
{'vol': scvolume.get('name'),
'dest': destssn})
return True
LOG.error(_LE('Unable to delete replication for '
'%(vol)s to %(dest)s.'),
LOG.error('Unable to delete replication for '
'%(vol)s to %(dest)s.',
{'vol': scvolume.get('name'),
'dest': destssn})
return False
@ -3014,8 +3010,8 @@ class StorageCenterApi(object):
diskfolder = self._get_json(r)[0]
except Exception:
# We just log this as an error and return nothing.
LOG.error(_LE('Unable to find '
'disk folder %(name)s on %(ssn)s'),
LOG.error('Unable to find '
'disk folder %(name)s on %(ssn)s',
{'name': foldername,
'ssn': ssn})
return diskfolder
@ -3061,7 +3057,7 @@ class StorageCenterApi(object):
r = self.client.post('StorageCenter/ScReplication', payload, True)
# 201 expected.
if self._check_result(r):
LOG.info(_LI('Replication created for %(volname)s to %(destsc)s'),
LOG.info('Replication created for %(volname)s to %(destsc)s',
{'volname': scvolume.get('name'),
'destsc': destssn})
screpl = self._get_json(r)
@ -3069,7 +3065,7 @@ class StorageCenterApi(object):
# Check we did something.
if not screpl:
# Failed to launch. Inform user. Throw.
LOG.error(_LE('Unable to replicate %(volname)s to %(destsc)s'),
LOG.error('Unable to replicate %(volname)s to %(destsc)s',
{'volname': scvolume.get('name'),
'destsc': destssn})
return screpl
@ -3206,8 +3202,8 @@ class StorageCenterApi(object):
True)
# 201 expected.
if self._check_result(r):
LOG.info(_LI('Replication created for '
'%(src)s to %(dest)s'),
LOG.info('Replication created for '
'%(src)s to %(dest)s',
{'src': svolume.get('name'),
'dest': dvolume.get('name')})
screpl = self._get_json(r)
@ -3267,8 +3263,8 @@ class StorageCenterApi(object):
if (self.rename_volume(svolume, self._repl_name(name)) and
self.rename_volume(dvolume, name)):
return True
LOG.warning(_LW('flip_replication: Unable to replicate '
'%(name)s from %(src)s to %(dst)s'),
LOG.warning('flip_replication: Unable to replicate '
'%(name)s from %(src)s to %(dst)s',
{'name': name,
'src': dvolume['scSerialNumber'],
'dst': svolume['scSerialNumber']})
@ -3290,8 +3286,8 @@ class StorageCenterApi(object):
progress['amountRemaining'].split(' ', 1)[0])
return progress['synced'], remaining
except Exception:
LOG.warning(_LW('replication_progress: Invalid replication'
' progress information returned: %s'),
LOG.warning('replication_progress: Invalid replication'
' progress information returned: %s',
progress)
return None, None
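For reference, the progress payload parsed above has the shape {'synced': ..., 'amountRemaining': '<number> <unit>'}; that shape is inferred from the code, not from API documentation:

progress = {'synced': False, 'amountRemaining': '12.5 GB'}  # assumed shape
remaining = float(progress['amountRemaining'].split(' ', 1)[0])
print(progress['synced'], remaining)  # False 12.5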
@ -3416,14 +3412,14 @@ class StorageCenterApi(object):
pscqos = self._find_qos(primaryqos)
sscqos = self._find_qos(secondaryqos, destssn)
if not destssn:
LOG.error(_LE('create_live_volume: Unable to find remote %s'),
LOG.error('create_live_volume: Unable to find remote %s',
remotessn)
elif not pscqos:
LOG.error(_LE('create_live_volume: Unable to find or create '
'qos node %s'), primaryqos)
LOG.error('create_live_volume: Unable to find or create '
'qos node %s', primaryqos)
elif not sscqos:
LOG.error(_LE('create_live_volume: Unable to find or create remote'
' qos node %(qos)s on %(ssn)s'),
LOG.error('create_live_volume: Unable to find or create remote'
' qos node %(qos)s on %(ssn)s',
{'qos': secondaryqos, 'ssn': destssn})
else:
payload = {}
@ -3451,12 +3447,12 @@ class StorageCenterApi(object):
r = self.client.post('StorageCenter/ScLiveVolume', payload, True)
if self._check_result(r):
LOG.info(_LI('create_live_volume: Live Volume created from '
'%(svol)s to %(ssn)s'),
LOG.info('create_live_volume: Live Volume created from '
'%(svol)s to %(ssn)s',
{'svol': self._get_id(scvolume), 'ssn': remotessn})
return self._get_json(r)
LOG.error(_LE('create_live_volume: Failed to create Live Volume from '
'%(svol)s to %(ssn)s'),
LOG.error('create_live_volume: Failed to create Live Volume from '
'%(svol)s to %(ssn)s',
{'svol': self._get_id(scvolume), 'ssn': remotessn})
return None
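Nearly every call in this file follows the same check/log idiom the hunks above edit; distilled, with every name a stand-in rather than the driver's API:

import logging

LOG = logging.getLogger(__name__)

def post_and_log(client, url, payload, what):
    # POST, return parsed JSON on success, else log an error and return None.
    r = client.post(url, payload)  # stand-in REST client
    if 200 <= r.status_code < 300:
        LOG.info('%s succeeded', what)
        return r.json()
    LOG.error('%s failed with status %s', what, r.status_code)
    return None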

@ -20,7 +20,7 @@ from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.i18n import _
from cinder.objects import fields
from cinder.volume import driver
from cinder.volume.drivers.dell import dell_storagecenter_api
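For context, the _LE/_LI/_LW names dropped from this import were thin aliases over oslo.i18n's log translators; cinder/i18n.py exposed roughly the following (a sketch, not a quote of that file):

import oslo_i18n

_translators = oslo_i18n.TranslatorFactory(domain='cinder')
_ = _translators.primary        # kept: user-facing strings stay translated
_LI = _translators.log_info     # removed by this change
_LW = _translators.log_warning  # removed
_LE = _translators.log_error    # removed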
@ -88,7 +88,7 @@ class DellCommonDriver(driver.ManageableVD,
self.is_direct_connect = False
self.active_backend_id = kwargs.get('active_backend_id', None)
self.failed_over = True if self.active_backend_id else False
LOG.info(_LI('Loading %(name)s: Failover state is %(state)r'),
LOG.info('Loading %(name)s: Failover state is %(state)r',
{'name': self.backend_name,
'state': self.failed_over})
self.storage_protocol = 'iSCSI'
@ -279,7 +279,7 @@ class DellCommonDriver(driver.ManageableVD,
try:
api.delete_volume(volumename)
except exception.VolumeBackendAPIException as ex:
LOG.info(_LI('Non fatal cleanup error: %s.'), ex.msg)
LOG.info('Non fatal cleanup error: %s.', ex.msg)
def create_volume(self, volume):
"""Create a volume."""
@ -324,7 +324,7 @@ class DellCommonDriver(driver.ManageableVD,
# clean up the volume now.
self._cleanup_failed_create_volume(api, volume_name)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
LOG.error('Failed to create volume %s',
volume_name)
if scvolume is None:
raise exception.VolumeBackendAPIException(
@ -374,16 +374,15 @@ class DellCommonDriver(driver.ManageableVD,
if (sclivevolume and
sclivevolume.get('secondaryScSerialNumber') == ssn and
api.delete_live_volume(sclivevolume, True)):
LOG.info(_LI('%(vname)s\'s replication live volume has '
'been deleted from Storage Center %(sc)s.'),
LOG.info('%(vname)s\'s replication live volume has '
'been deleted from Storage Center %(sc)s.',
{'vname': volume.get('id'),
'sc': ssn})
return True
# If we are here either we do not have a live volume, we do not have
# one on our configured SC or we were not able to delete it.
# Either way, warn and leave.
LOG.warning(_LW('Unable to delete %s live volume.'),
volume.get('id'))
LOG.warning('Unable to delete %s live volume.', volume.get('id'))
return False
def _delete_replications(self, api, volume):
@ -409,8 +408,8 @@ class DellCommonDriver(driver.ManageableVD,
ssn = int(ssnstring)
# Are we a replication or a live volume?
if not api.delete_replication(scvol, ssn):
LOG.warning(_LW('Unable to delete replication of Volume '
'%(vname)s to Storage Center %(sc)s.'),
LOG.warning('Unable to delete replication of Volume '
'%(vname)s to Storage Center %(sc)s.',
{'vname': volume_name,
'sc': ssnstring})
# If none of that worked or there was nothing to do doesn't matter.
@ -439,7 +438,7 @@ class DellCommonDriver(driver.ManageableVD,
deleted = api.delete_volume(volume_name, provider_id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to delete volume %s'),
LOG.error('Failed to delete volume %s',
volume_name)
# if there was an error we will have raised an
@ -466,8 +465,7 @@ class DellCommonDriver(driver.ManageableVD,
return {'status': fields.SnapshotStatus.AVAILABLE,
'provider_id': scvolume['instanceId']}
else:
LOG.warning(_LW('Unable to locate volume:%s'),
volume_name)
LOG.warning('Unable to locate volume:%s', volume_name)
snapshot['status'] = fields.SnapshotStatus.ERROR
msg = _('Failed to create snapshot %s') % snapshot_id
@ -540,8 +538,7 @@ class DellCommonDriver(driver.ManageableVD,
# Clean up after ourselves.
self._cleanup_failed_create_volume(api, volume_name)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
LOG.error('Failed to create volume %s', volume_name)
if scvolume is not None:
LOG.debug('Volume %(vol)s created from %(snap)s',
{'vol': volume_name,
@ -604,8 +601,7 @@ class DellCommonDriver(driver.ManageableVD,
# Clean up after ourselves.
self._cleanup_failed_create_volume(api, volume_name)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
LOG.error('Failed to create volume %s', volume_name)
if scvolume is not None:
LOG.debug('Volume %(vol)s cloned from %(src)s',
{'vol': volume_name,
@ -656,7 +652,7 @@ class DellCommonDriver(driver.ManageableVD,
self._is_live_vol(volume))
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to ensure export of volume %s'),
LOG.error('Failed to ensure export of volume %s',
volume_name)
if scvolume is None:
msg = _('Unable to find volume %s') % volume_name
@ -738,7 +734,7 @@ class DellCommonDriver(driver.ManageableVD,
data['free_capacity_gb'] = freespacegb
else:
# Soldier on. Just return 0 for this iteration.
LOG.error(_LE('Unable to retrieve volume stats.'))
LOG.error('Unable to retrieve volume stats.')
data['total_capacity_gb'] = 0
data['free_capacity_gb'] = 0
@ -782,7 +778,7 @@ class DellCommonDriver(driver.ManageableVD,
return model_update
# The world was horrible to us so we should error and leave.
LOG.error(_LE('Unable to rename the logical volume for volume: %s'),
LOG.error('Unable to rename the logical volume for volume: %s',
original_volume_name)
return {'_name_id': new_volume['_name_id'] or new_volume['id']}
@ -799,7 +795,7 @@ class DellCommonDriver(driver.ManageableVD,
with self._client.open_connection() as api:
cgroup = api.create_replay_profile(gid)
if cgroup:
LOG.info(_LI('Created Consistency Group %s'), gid)
LOG.info('Created Consistency Group %s', gid)
return
msg = _('Unable to create consistency group %s') % gid
raise exception.VolumeBackendAPIException(data=msg)
@ -860,11 +856,11 @@ class DellCommonDriver(driver.ManageableVD,
with self._client.open_connection() as api:
profile = api.find_replay_profile(gid)
if not profile:
LOG.error(_LE('Cannot find Consistency Group %s'), gid)
LOG.error('Cannot find Consistency Group %s', gid)
elif api.update_cg_volumes(profile,
add_volumes,
remove_volumes):
LOG.info(_LI('Updated Consistency Group %s'), gid)
LOG.info('Updated Consistency Group %s', gid)
# we need nothing updated above us so just return None.
return None, None, None
# Things did not go well so throw.
@ -900,9 +896,9 @@ class DellCommonDriver(driver.ManageableVD,
return model_update, snapshot_updates
# That didn't go well. Tell them why. Then bomb out.
LOG.error(_LE('Failed to snap Consistency Group %s'), cgid)
LOG.error('Failed to snap Consistency Group %s', cgid)
else:
LOG.error(_LE('Cannot find Consistency Group %s'), cgid)
LOG.error('Cannot find Consistency Group %s', cgid)
msg = _('Unable to snap Consistency Group %s') % cgid
raise exception.VolumeBackendAPIException(data=msg)
@ -924,7 +920,7 @@ class DellCommonDriver(driver.ManageableVD,
with self._client.open_connection() as api:
profile = api.find_replay_profile(cgid)
if profile:
LOG.info(_LI('Deleting snapshot %(ss)s from %(pro)s'),
LOG.info('Deleting snapshot %(ss)s from %(pro)s',
{'ss': snapshotid,
'pro': profile})
if not api.delete_cg_replay(profile, snapshotid):
@ -1058,7 +1054,7 @@ class DellCommonDriver(driver.ManageableVD,
'spec': requested})
return current, requested
else:
LOG.info(_LI('Retype was to same Storage Profile.'))
LOG.info('Retype was to same Storage Profile.')
return None, None
def _retype_replication(self, api, volume, scvolume, new_type, diff):
@ -1104,8 +1100,8 @@ class DellCommonDriver(driver.ManageableVD,
dictionary of its reported capabilities (Not Used).
:returns: Boolean or Boolean, model_update tuple.
"""
LOG.info(_LI('retype: volume_name: %(name)s new_type: %(newtype)s '
'diff: %(diff)s host: %(host)s'),
LOG.info('retype: volume_name: %(name)s new_type: %(newtype)s '
'diff: %(diff)s host: %(host)s',
{'name': volume.get('id'), 'newtype': new_type,
'diff': diff, 'host': host})
model_update = None
@ -1118,7 +1114,7 @@ class DellCommonDriver(driver.ManageableVD,
# Get our volume
scvolume = api.find_volume(volume_name, provider_id)
if scvolume is None:
LOG.error(_LE('Retype unable to find volume %s.'),
LOG.error('Retype unable to find volume %s.',
volume_name)
return False
# Check our specs.
@ -1130,7 +1126,7 @@ class DellCommonDriver(driver.ManageableVD,
# if there is a change and it didn't work fast fail.
if (current != requested and not
api.update_storage_profile(scvolume, requested)):
LOG.error(_LE('Failed to update storage profile'))
LOG.error('Failed to update storage profile')
return False
# Replay profiles.
@ -1141,7 +1137,7 @@ class DellCommonDriver(driver.ManageableVD,
# if there is a change and it didn't work fast fail.
if requested and not api.update_replay_profiles(scvolume,
requested):
LOG.error(_LE('Failed to update replay profiles'))
LOG.error('Failed to update replay profiles')
return False
# Volume QOS profiles.
@ -1151,8 +1147,7 @@ class DellCommonDriver(driver.ManageableVD,
'storagetype:volumeqos'))
if current != requested:
if not api.update_qos_profile(scvolume, requested):
LOG.error(_LE('Failed to update volume '
'qos profile'))
LOG.error('Failed to update volume qos profile')
# Group QOS profiles.
current, requested = (
@ -1162,8 +1157,7 @@ class DellCommonDriver(driver.ManageableVD,
if current != requested:
if not api.update_qos_profile(scvolume, requested,
True):
LOG.error(_LE('Failed to update group '
'qos profile'))
LOG.error('Failed to update group qos profile')
return False
# Data reduction profiles.
@ -1174,8 +1168,8 @@ class DellCommonDriver(driver.ManageableVD,
if current != requested:
if not api.update_datareduction_profile(scvolume,
requested):
LOG.error(_LE('Failed to update data reduction '
'profile'))
LOG.error('Failed to update data reduction '
'profile')
return False
# Active Replay
@ -1186,8 +1180,8 @@ class DellCommonDriver(driver.ManageableVD,
if current != requested and not (
api.update_replicate_active_replay(
scvolume, requested == '<is> True')):
LOG.error(_LE('Failed to apply '
'replication:activereplay setting'))
LOG.error('Failed to apply '
'replication:activereplay setting')
return False
# Deal with replication.
@ -1231,8 +1225,8 @@ class DellCommonDriver(driver.ManageableVD,
destssn = ssn
break
except exception.VolumeBackendAPIException:
LOG.warning(_LW('SSN %s appears to be down.'), ssn)
LOG.info(_LI('replication failover secondary is %(ssn)s'),
LOG.warning('SSN %s appears to be down.', ssn)
LOG.info('replication failover secondary is %(ssn)s',
{'ssn': destssn})
return destssn
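The loop above reduces to a probe-and-pick-first pattern; a generic sketch with stand-in names:

import logging

LOG = logging.getLogger(__name__)

def pick_secondary(candidates, is_up):
    for ssn in candidates:
        try:
            if is_up(ssn):
                LOG.info('replication failover secondary is %(ssn)s',
                         {'ssn': ssn})
                return ssn
        except Exception:
            LOG.warning('SSN %s appears to be down.', ssn)
    return None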
@ -1309,8 +1303,8 @@ class DellCommonDriver(driver.ManageableVD,
ovol, 'org:' + ovol['name']):
# Not a reason to fail but will possibly
# cause confusion so warn.
LOG.warning(_LW('Unable to locate and rename '
'original volume: %s'),
LOG.warning('Unable to locate and rename '
'original volume: %s',
item['ovol'])
item['status'] = 'synced'
else:
@ -1329,9 +1323,9 @@ class DellCommonDriver(driver.ManageableVD,
if lastremain == currentremain:
# One chance down. Warn user.
deadcount -= 1
LOG.warning(_LW('Waiting for replications to complete. '
LOG.warning('Waiting for replications to complete. '
'No progress for %(timeout)d seconds. '
'deadcount = %(cnt)d'),
'deadcount = %(cnt)d',
{'timeout': self.failback_timeout,
'cnt': deadcount})
else:
@ -1341,13 +1335,13 @@ class DellCommonDriver(driver.ManageableVD,
# If we've used up our 5 chances we error and log..
if deadcount == 0:
LOG.error(_LE('Replication progress has stopped: '
'%f remaining.'), currentremain)
LOG.error('Replication progress has stopped: %f remaining.',
currentremain)
for item in items:
if item['status'] == 'inprogress':
LOG.error(_LE('Failback failed for volume: %s. '
LOG.error('Failback failed for volume: %s. '
'Timeout waiting for replication to '
'sync with original volume.'),
'sync with original volume.',
item['volume']['id'])
item['status'] = 'error'
break
@ -1426,7 +1420,7 @@ class DellCommonDriver(driver.ManageableVD,
:param qosnode: Dell QOS node object.
:return: replitem dict.
"""
LOG.info(_LI('failback_volumes: replicated volume'))
LOG.info('failback_volumes: replicated volume')
# Get our current volume.
cvol = api.find_volume(volume['id'], volume['provider_id'])
# Original volume on the primary.
@ -1446,7 +1440,7 @@ class DellCommonDriver(driver.ManageableVD,
nvolid = screpl['destinationVolume']['instanceId']
status = 'inprogress'
else:
LOG.error(_LE('Unable to restore %s'), volume['id'])
LOG.error('Unable to restore %s', volume['id'])
screplid = None
nvolid = None
status = 'error'
@ -1481,14 +1475,14 @@ class DellCommonDriver(driver.ManageableVD,
sclivevolume = api.get_live_volume(provider_id)
# TODO(tswanson): Check swapped state first.
if sclivevolume and api.swap_roles_live_volume(sclivevolume):
LOG.info(_LI('Success swapping sclivevolume roles %s'), id)
LOG.info('Success swapping sclivevolume roles %s', id)
model_update = {
'status': 'available',
'replication_status': fields.ReplicationStatus.ENABLED,
'provider_id':
sclivevolume['secondaryVolume']['instanceId']}
else:
LOG.info(_LI('Failure swapping roles %s'), id)
LOG.info('Failure swapping roles %s', id)
model_update = {'status': 'error'}
return model_update
@ -1509,7 +1503,7 @@ class DellCommonDriver(driver.ManageableVD,
:param volumes: List of volumes that need to be failed back.
:return: volume_updates for the list of volumes.
"""
LOG.info(_LI('failback_volumes'))
LOG.info('failback_volumes')
with self._client.open_connection() as api:
# Get our qosnode. This is a good way to make sure the backend
# is still setup so that we can do this.
@ -1524,7 +1518,7 @@ class DellCommonDriver(driver.ManageableVD,
# Trundle through the volumes. Update non replicated to alive again
# and reverse the replications for the remaining volumes.
for volume in volumes:
LOG.info(_LI('failback_volumes: starting volume: %s'), volume)
LOG.info('failback_volumes: starting volume: %s', volume)
model_update = {}
if volume.get('replication_driver_data'):
rspecs = self._get_replication_specs(
@ -1567,12 +1561,12 @@ class DellCommonDriver(driver.ManageableVD,
rvol = api.break_replication(id, provider_id, destssn)
model_update = {}
if rvol:
LOG.info(_LI('Success failing over volume %s'), id)
LOG.info('Success failing over volume %s', id)
model_update = {'replication_status':
fields.ReplicationStatus.FAILED_OVER,
'provider_id': rvol['instanceId']}
else:
LOG.info(_LI('Failed failing over volume %s'), id)
LOG.info('Failed failing over volume %s', id)
model_update = {'status': 'error'}
return model_update
@ -1585,11 +1579,11 @@ class DellCommonDriver(driver.ManageableVD,
swapped = api.is_swapped(provider_id, sclivevolume)
# If we aren't swapped try it. If fail error out.
if not swapped and not api.swap_roles_live_volume(sclivevolume):
LOG.info(_LI('Failure swapping roles %s'), id)
LOG.info('Failure swapping roles %s', id)
model_update = {'status': 'error'}
return model_update
LOG.info(_LI('Success swapping sclivevolume roles %s'), id)
LOG.info('Success swapping sclivevolume roles %s', id)
sclivevolume = api.get_live_volume(provider_id)
model_update = {
'replication_status':
@ -1628,7 +1622,7 @@ class DellCommonDriver(driver.ManageableVD,
raise exception.InvalidReplicationTarget(
reason=_('Already failed over'))
LOG.info(_LI('Failing backend to %s'), secondary_id)
LOG.info('Failing backend to %s', secondary_id)
# basic check
if self.replication_enabled:
with self._client.open_connection() as api:
@ -1747,9 +1741,9 @@ class DellCommonDriver(driver.ManageableVD,
raise exception.VolumeBackendAPIException(data=msg)
# Life is good. Let the world know what we've done.
LOG.info(_LI('manage_existing_snapshot: snapshot %(exist)s on '
LOG.info('manage_existing_snapshot: snapshot %(exist)s on '
'volume %(volume)s has been renamed to %(id)s and is '
'now managed by Cinder.'),
'now managed by Cinder.',
{'exist': screplay.get('description'),
'volume': volume_name,
'id': snapshot_id})

@ -18,7 +18,7 @@ from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.i18n import _
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.dell import dell_storagecenter_common
@ -147,11 +147,11 @@ class DellStorageCenterFCDriver(dell_storagecenter_common.DellCommonDriver,
'discard': True}}
LOG.debug('Return FC data: %s', data)
return data
LOG.error(_LE('Lun mapping returned null!'))
LOG.error('Lun mapping returned null!')
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to initialize connection.'))
LOG.error('Failed to initialize connection.')
# We get here because our mapping is none so blow up.
raise exception.VolumeBackendAPIException(_('Unable to map volume.'))
@ -187,8 +187,8 @@ class DellStorageCenterFCDriver(dell_storagecenter_common.DellCommonDriver,
sclivevolume['secondaryVolume']['instanceId'])
if secondaryvol:
return api.find_wwns(secondaryvol, secondary)
LOG.warning(_LW('Unable to map live volume secondary volume'
' %(vol)s to secondary server wwns: %(wwns)r'),
LOG.warning('Unable to map live volume secondary volume'
' %(vol)s to secondary server wwns: %(wwns)r',
{'vol': sclivevolume['secondaryVolume']['instanceName'],
'wwns': wwns})
return None, [], {}
@ -253,7 +253,7 @@ class DellStorageCenterFCDriver(dell_storagecenter_common.DellCommonDriver,
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to terminate connection'))
LOG.error('Failed to terminate connection')
raise exception.VolumeBackendAPIException(
_('Terminate connection unable to connect to backend.'))
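The excutils.save_and_reraise_exception() context manager used above logs before re-raising without losing the original traceback; a minimal sketch in which only the backend call is a stand-in:

import logging

from oslo_utils import excutils

LOG = logging.getLogger(__name__)

def terminate(api):
    try:
        api.unmap_volume()  # stand-in for the real backend call
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error('Failed to terminate connection')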

@ -18,7 +18,7 @@ from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.i18n import _
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.dell import dell_storagecenter_common
@ -92,8 +92,8 @@ class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver,
islivevol = self._is_live_vol(volume)
initiator_name = connector.get('initiator')
multipath = connector.get('multipath', False)
LOG.info(_LI('initialize_connection: %(vol)s:%(pid)s:'
'%(intr)s. Multipath is %(mp)r'),
LOG.info('initialize_connection: %(vol)s:%(pid)s:'
'%(intr)s. Multipath is %(mp)r',
{'vol': volume_name,
'pid': provider_id,
'intr': initiator_name,
@ -166,7 +166,7 @@ class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver,
# Re-raise any backend exception.
except exception.VolumeBackendAPIException:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to initialize connection'))
LOG.error('Failed to initialize connection')
# If there is a data structure issue then detail the exception
# and bail with a Backend Exception.
except Exception as error:
@ -211,8 +211,8 @@ class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver,
'target_lun': None,
'target_luns': [],
}
LOG.warning(_LW('Unable to map live volume secondary volume'
' %(vol)s to secondary server initiator: %(init)r'),
LOG.warning('Unable to map live volume secondary volume'
' %(vol)s to secondary server initiator: %(init)r',
{'vol': sclivevolume['secondaryVolume']['instanceName'],
'init': initiatorname})
return data
@ -255,8 +255,8 @@ class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver,
return
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to terminate connection '
'%(initiator)s %(vol)s'),
LOG.error('Failed to terminate connection '
'%(initiator)s %(vol)s',
{'initiator': initiator_name,
'vol': volume_name})
raise exception.VolumeBackendAPIException(

@ -29,7 +29,7 @@ from oslo_utils import excutils
from six.moves import range
from cinder import exception
from cinder.i18n import _, _LE, _LW, _LI
from cinder.i18n import _
from cinder import interface
from cinder import ssh_utils
from cinder import utils
@ -199,7 +199,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
if any(ln.startswith(('% Error', 'Error:')) for ln in out):
desc = _("Error executing PS command")
cmdout = '\n'.join(out)
LOG.error(_LE("%s"), cmdout)
LOG.error(cmdout)
raise processutils.ProcessExecutionError(
stdout=cmdout, cmd=command, description=desc)
return out
@ -232,12 +232,12 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
while attempts > 0:
attempts -= 1
try:
LOG.info(_LI('PS-driver: executing "%s".'), command)
LOG.info('PS-driver: executing "%s".', command)
return self._ssh_execute(
ssh, command,
timeout=self.configuration.ssh_conn_timeout)
except Exception:
LOG.exception(_LE('Error running command.'))
LOG.exception('Error running command.')
greenthread.sleep(random.randint(20, 500) / 100.0)
msg = (_("SSH Command failed after '%(total_attempts)r' "
"attempts : '%(command)s'") %
@ -247,7 +247,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error running SSH command: "%s".'), command)
LOG.error('Error running SSH command: "%s".', command)
def check_for_setup_error(self):
super(PSSeriesISCSIDriver, self).check_for_setup_error()
@ -398,11 +398,11 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
out_tup = line.rstrip().partition(' ')
self._group_ip = out_tup[-1]
LOG.info(_LI('PS-driver: Setup is complete, group IP is "%s".'),
LOG.info('PS-driver: Setup is complete, group IP is "%s".',
self._group_ip)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to setup the Dell EMC PS driver.'))
LOG.error('Failed to setup the Dell EMC PS driver.')
def create_volume(self, volume):
"""Create a volume."""
@ -419,7 +419,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
return self._get_volume_data(out)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume "%s".'), volume['name'])
LOG.error('Failed to create volume "%s".', volume['name'])
def add_multihost_access(self, volume):
"""Add multihost-access to a volume. Needed for live migration."""
@ -429,8 +429,8 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
self._eql_execute(*cmd)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to add multihost-access '
'for volume "%s".'),
LOG.error('Failed to add multihost-access '
'for volume "%s".',
volume['name'])
def _set_volume_description(self, volume, description):
@ -441,8 +441,8 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
self._eql_execute(*cmd)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to set description '
'for volume "%s".'),
LOG.error('Failed to set description '
'for volume "%s".',
volume['name'])
def delete_volume(self, volume):
@ -452,12 +452,11 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
self._eql_execute('volume', 'select', volume['name'], 'offline')
self._eql_execute('volume', 'delete', volume['name'])
except exception.VolumeNotFound:
LOG.warning(_LW('Volume %s was not found while trying to delete '
'it.'), volume['name'])
LOG.warning('Volume %s was not found while trying to delete it.',
volume['name'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to delete '
'volume "%s".'), volume['name'])
LOG.error('Failed to delete volume "%s".', volume['name'])
def create_snapshot(self, snapshot):
"""Create snapshot of existing volume on appliance."""
@ -472,7 +471,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
snapshot['name'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create snapshot of volume "%s".'),
LOG.error('Failed to create snapshot of volume "%s".',
snapshot['volume_name'])
def create_volume_from_snapshot(self, volume, snapshot):
@ -495,7 +494,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
return self._get_volume_data(out)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume from snapshot "%s".'),
LOG.error('Failed to create volume from snapshot "%s".',
snapshot['name'])
def create_cloned_volume(self, volume, src_vref):
@ -513,7 +512,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
return self._get_volume_data(out)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create clone of volume "%s".'),
LOG.error('Failed to create clone of volume "%s".',
volume['name'])
def delete_snapshot(self, snapshot):
@ -526,8 +525,8 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
LOG.debug('Snapshot %s could not be found.', snapshot['name'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to delete snapshot %(snap)s of '
'volume %(vol)s.'),
LOG.error('Failed to delete snapshot %(snap)s of '
'volume %(vol)s.',
{'snap': snapshot['name'],
'vol': snapshot['volume_name']})
@ -548,8 +547,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to initialize connection '
'to volume "%s".'),
LOG.error('Failed to initialize connection to volume "%s".',
volume['name'])
def terminate_connection(self, volume, connector, force=False, **kwargs):
@ -563,8 +561,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
'access', 'delete', connection_id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to terminate connection '
'to volume "%s".'),
LOG.error('Failed to terminate connection to volume "%s".',
volume['name'])
def create_export(self, context, volume, connector):
@ -585,11 +582,11 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
try:
self._check_volume(volume)
except exception.VolumeNotFound:
LOG.warning(_LW('Volume %s is not found! It may have been '
'deleted.'), volume['name'])
LOG.warning('Volume %s is not found! It may have been deleted.',
volume['name'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to ensure export of volume "%s".'),
LOG.error('Failed to ensure export of volume "%s".',
volume['name'])
def remove_export(self, context, volume):
@ -606,15 +603,15 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
try:
self._eql_execute('volume', 'select', volume['name'],
'size', "%sG" % new_size)
LOG.info(_LI('Volume %(name)s resized from '
'%(current_size)sGB to %(new_size)sGB.'),
LOG.info('Volume %(name)s resized from '
'%(current_size)sGB to %(new_size)sGB.',
{'name': volume['name'],
'current_size': volume['size'],
'new_size': new_size})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to extend_volume %(name)s from '
'%(current_size)sGB to %(new_size)sGB.'),
LOG.error('Failed to extend_volume %(name)s from '
'%(current_size)sGB to %(new_size)sGB.',
{'name': volume['name'],
'current_size': volume['size'],
'new_size': new_size})
@ -643,14 +640,14 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
self.add_multihost_access(volume)
data = self._get_volume_info(volume['name'])
updates = self._get_model_update(data['iSCSI_Name'])
LOG.info(_LI("Backend volume %(back_vol)s renamed to "
"%(vol)s and is now managed by cinder."),
LOG.info("Backend volume %(back_vol)s renamed to "
"%(vol)s and is now managed by cinder.",
{'back_vol': existing_volume_name,
'vol': volume['name']})
return updates
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to manage volume "%s".'), volume['name'])
LOG.error('Failed to manage volume "%s".', volume['name'])
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
@ -674,13 +671,13 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver):
"""
try:
self._set_volume_description(volume, '"OpenStack UnManaged"')
LOG.info(_LI("Virtual volume %(disp)s '%(vol)s' is no "
"longer managed."),
LOG.info("Virtual volume %(disp)s '%(vol)s' is no "
"longer managed.",
{'disp': volume['display_name'],
'vol': volume['name']})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to unmanage volume "%s".'),
LOG.error('Failed to unmanage volume "%s".',
volume['name'])
def local_path(self, volume):

@ -31,7 +31,7 @@ from six.moves import urllib
from cinder import context
from cinder import exception
from cinder.i18n import _, _LI, _LW, _LE
from cinder.i18n import _
from cinder.image import image_utils
from cinder import interface
from cinder import utils
@ -135,10 +135,8 @@ class ScaleIODriver(driver.VolumeDriver):
if self.verify_server_certificate:
self.server_certificate_path = (
self.configuration.sio_server_certificate_path)
LOG.info(_LI(
"REST server IP: %(ip)s, port: %(port)s, username: %("
"user)s. "
"Verify server's certificate: %(verify_cert)s."),
LOG.info("REST server IP: %(ip)s, port: %(port)s, username: %("
"user)s. Verify server's certificate: %(verify_cert)s.",
{'ip': self.server_ip,
'port': self.server_port,
'user': self.server_username,
@ -153,29 +151,25 @@ class ScaleIODriver(driver.VolumeDriver):
self.storage_pool_name = self.configuration.sio_storage_pool_name
self.storage_pool_id = self.configuration.sio_storage_pool_id
if self.storage_pool_name is None and self.storage_pool_id is None:
LOG.warning(_LW("No storage pool name or id was found."))
LOG.warning("No storage pool name or id was found.")
else:
LOG.info(_LI(
"Storage pools names: %(pools)s, "
"storage pool name: %(pool)s, pool id: %(pool_id)s."),
LOG.info("Storage pools names: %(pools)s, "
"storage pool name: %(pool)s, pool id: %(pool_id)s.",
{'pools': self.storage_pools,
'pool': self.storage_pool_name,
'pool_id': self.storage_pool_id})
self.protection_domain_name = (
self.configuration.sio_protection_domain_name)
LOG.info(_LI(
"Protection domain name: %(domain_name)s."),
LOG.info("Protection domain name: %(domain_name)s.",
{'domain_name': self.protection_domain_name})
self.protection_domain_id = self.configuration.sio_protection_domain_id
LOG.info(_LI(
"Protection domain id: %(domain_id)s."),
LOG.info("Protection domain id: %(domain_id)s.",
{'domain_id': self.protection_domain_id})
self.provisioning_type = (
'thin' if self.configuration.san_thin_provision else 'thick')
LOG.info(_LI(
"Default provisioning type: %(provisioning_type)s."),
LOG.info("Default provisioning type: %(provisioning_type)s.",
{'provisioning_type': self.provisioning_type})
self.configuration.max_over_subscription_ratio = (
self.configuration.sio_max_over_subscription_ratio)
@ -199,8 +193,8 @@ class ScaleIODriver(driver.VolumeDriver):
def check_for_setup_error(self):
if (not self.protection_domain_name and
not self.protection_domain_id):
LOG.warning(_LW("No protection domain name or id "
"was specified in configuration."))
LOG.warning("No protection domain name or id "
"was specified in configuration.")
if self.protection_domain_name and self.protection_domain_id:
msg = _("Cannot specify both protection domain name "
@ -220,8 +214,8 @@ class ScaleIODriver(driver.VolumeDriver):
raise exception.InvalidInput(reason=msg)
if not self.verify_server_certificate:
LOG.warning(_LW("Verify certificate is not set, using default of "
"False."))
LOG.warning("Verify certificate is not set, using default of "
"False.")
if self.verify_server_certificate and not self.server_certificate_path:
msg = _("Path to REST server's certificate must be specified.")
@ -273,10 +267,10 @@ class ScaleIODriver(driver.VolumeDriver):
new_provisioning_type = storage_type.get(PROVISIONING_KEY)
old_provisioning_type = storage_type.get(OLD_PROVISIONING_KEY)
if new_provisioning_type is None and old_provisioning_type is not None:
LOG.info(_LI("Using sio:provisioning_type for defining "
LOG.info("Using sio:provisioning_type for defining "
"thin or thick volume will be deprecated in the "
"Ocata release of OpenStack. Please use "
"provisioning:type configuration option."))
"provisioning:type configuration option.")
provisioning_type = old_provisioning_type
else:
provisioning_type = new_provisioning_type
@ -298,11 +292,11 @@ class ScaleIODriver(driver.VolumeDriver):
if extraspecs_key is not None else None)
if extraspecs_limit is not None:
if qos_limit is not None:
LOG.warning(_LW("QoS specs are overriding extra_specs."))
LOG.warning("QoS specs are overriding extra_specs.")
else:
LOG.info(_LI("Using extra_specs for defining QoS specs "
LOG.info("Using extra_specs for defining QoS specs "
"will be deprecated in the N release "
"of OpenStack. Please use QoS specs."))
"of OpenStack. Please use QoS specs.")
return qos_limit if qos_limit is not None else extraspecs_limit
@staticmethod
@ -341,11 +335,10 @@ class ScaleIODriver(driver.VolumeDriver):
self._find_protection_domain_name_from_storage_type(storage_type))
provisioning_type = self._find_provisioning_type(storage_type)
LOG.info(_LI(
"Volume type: %(volume_type)s, "
LOG.info("Volume type: %(volume_type)s, "
"storage pool name: %(pool_name)s, "
"storage pool id: %(pool_id)s, protection domain id: "
"%(domain_id)s, protection domain name: %(domain_name)s."),
"%(domain_id)s, protection domain name: %(domain_name)s.",
{'volume_type': storage_type,
'pool_name': storage_pool_name,
'pool_id': storage_pool_id,
@ -382,7 +375,7 @@ class ScaleIODriver(driver.VolumeDriver):
request = ("https://%(server_ip)s:%(server_port)s"
"/api/types/Domain/instances/getByName::"
"%(encoded_domain_name)s") % req_vars
LOG.info(_LI("ScaleIO get domain id by name request: %s."),
LOG.info("ScaleIO get domain id by name request: %s.",
request)
r = requests.get(
request,
@ -405,7 +398,7 @@ class ScaleIODriver(driver.VolumeDriver):
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Domain id is %s."), domain_id)
LOG.info("Domain id is %s.", domain_id)
pool_name = self.storage_pool_name
pool_id = self.storage_pool_id
if pool_name:
@ -417,7 +410,7 @@ class ScaleIODriver(driver.VolumeDriver):
request = ("https://%(server_ip)s:%(server_port)s"
"/api/types/Pool/instances/getByName::"
"%(domain_id)s,%(encoded_domain_name)s") % req_vars
LOG.info(_LI("ScaleIO get pool id by name request: %s."), request)
LOG.info("ScaleIO get pool id by name request: %s.", request)
r = requests.get(
request,
auth=(
@ -440,7 +433,7 @@ class ScaleIODriver(driver.VolumeDriver):
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Pool id is %s."), pool_id)
LOG.info("Pool id is %s.", pool_id)
if provisioning_type == 'thin':
provisioning = "ThinProvisioned"
# Default volume type is thick.
@ -455,7 +448,7 @@ class ScaleIODriver(driver.VolumeDriver):
'volumeType': provisioning,
'storagePoolId': pool_id}
LOG.info(_LI("Params for add volume request: %s."), params)
LOG.info("Params for add volume request: %s.", params)
r = requests.post(
"https://" +
self.server_ip +
@ -469,14 +462,14 @@ class ScaleIODriver(driver.VolumeDriver):
self.server_token),
verify=verify_cert)
response = r.json()
LOG.info(_LI("Add volume response: %s"), response)
LOG.info("Add volume response: %s", response)
if r.status_code != OK_STATUS_CODE and "errorCode" in response:
msg = (_("Error creating volume: %s.") % response['message'])
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Created volume %(volname)s, volume id %(volid)s."),
LOG.info("Created volume %(volname)s, volume id %(volid)s.",
{'volname': volname, 'volid': volume.id})
real_size = int(self._round_to_num_gran(volume.size))
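_round_to_num_gran, used above and again in the extend path below, is not shown in this diff; a plausible implementation, consistent with the 8 GB allocation granularity the comments mention, is:

def round_to_num_gran(size, num=8):
    # Round size (GB) up to the nearest multiple of num. A sketch only.
    if size % num == 0:
        return size
    return size + num - (size % num)

print(round_to_num_gran(1), round_to_num_gran(8), round_to_num_gran(9))  # 8 8 16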
@ -501,7 +494,7 @@ class ScaleIODriver(driver.VolumeDriver):
return self._snapshot_volume(volume_id, snapname)
def _snapshot_volume(self, vol_id, snapname):
LOG.info(_LI("Snapshot volume %(vol)s into snapshot %(id)s.") %
LOG.info("Snapshot volume %(vol)s into snapshot %(id)s.",
{'vol': vol_id, 'id': snapname})
params = {
'snapshotDefs': [{"volumeId": vol_id, "snapshotName": snapname}]}
@ -510,7 +503,7 @@ class ScaleIODriver(driver.VolumeDriver):
request = ("https://%(server_ip)s:%(server_port)s"
"/api/instances/System/action/snapshotVolumes") % req_vars
r, response = self._execute_scaleio_post_request(params, request)
LOG.info(_LI("Snapshot volume response: %s."), response)
LOG.info("Snapshot volume response: %s.", response)
if r.status_code != OK_STATUS_CODE and "errorCode" in response:
msg = (_("Failed creating snapshot for volume %(volname)s: "
"%(response)s.") %
@ -537,8 +530,8 @@ class ScaleIODriver(driver.VolumeDriver):
def _check_response(self, response, request, is_get_request=True,
params=None):
if response.status_code == 401 or response.status_code == 403:
LOG.info(_LI("Token is invalid, going to re-login and get "
"a new one."))
LOG.info("Token is invalid, going to re-login and get "
"a new one.")
login_request = (
"https://" + self.server_ip +
":" + self.server_port + "/api/login")
@ -552,8 +545,7 @@ class ScaleIODriver(driver.VolumeDriver):
token = r.json()
self.server_token = token
# Repeat request with valid token.
LOG.info(_LI(
"Going to perform request again %s with valid token."),
LOG.info("Going to perform request again %s with valid token.",
request)
if is_get_request:
res = requests.get(request,
@ -579,9 +571,8 @@ class ScaleIODriver(driver.VolumeDriver):
# exposed by the system
volume_id = snapshot.provider_id
snapname = self._id_to_base64(volume.id)
LOG.info(_LI(
"ScaleIO create volume from snapshot: snapshot %(snapname)s "
"to volume %(volname)s."),
LOG.info("ScaleIO create volume from snapshot: snapshot %(snapname)s "
"to volume %(volname)s.",
{'volname': volume_id,
'snapname': snapname})
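The snapshot hunks here derive snapname via _id_to_base64, which this diff does not show; a hypothetical sketch of such a helper packs the UUID's 16 bytes and base64-encodes them to fit backend name-length limits:

import base64
import uuid

def id_to_base64(vol_id):
    # 16 raw UUID bytes -> 24-character base64 string (illustrative only).
    return base64.b64encode(uuid.UUID(vol_id).bytes).decode('ascii')

print(id_to_base64('7f3dfae2-6f23-4d0a-9d4e-3f7e56a0e1a8'))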
@ -608,8 +599,8 @@ class ScaleIODriver(driver.VolumeDriver):
def _extend_volume(self, volume_id, old_size, new_size):
vol_id = volume_id
LOG.info(_LI(
"ScaleIO extend volume: volume %(volname)s to size %(new_size)s."),
LOG.info(
"ScaleIO extend volume: volume %(volname)s to size %(new_size)s.",
{'volname': vol_id,
'new_size': new_size})
@ -619,7 +610,7 @@ class ScaleIODriver(driver.VolumeDriver):
request = ("https://%(server_ip)s:%(server_port)s"
"/api/instances/Volume::%(vol_id)s"
"/action/setVolumeSize") % req_vars
LOG.info(_LI("Change volume capacity request: %s."), request)
LOG.info("Change volume capacity request: %s.", request)
# Round up the volume size so that it is a granularity of 8 GBs
# because ScaleIO only supports volumes with a granularity of 8 GBs.
@ -630,8 +621,8 @@ class ScaleIODriver(driver.VolumeDriver):
round_volume_capacity = self.configuration.sio_round_volume_capacity
if not round_volume_capacity and not new_size % 8 == 0:
LOG.warning(_LW("ScaleIO only supports volumes with a granularity "
"of 8 GBs. The new volume size is: %d."),
LOG.warning("ScaleIO only supports volumes with a granularity "
"of 8 GBs. The new volume size is: %d.",
volume_new_size)
params = {'sizeInGB': six.text_type(volume_new_size)}
@ -658,9 +649,8 @@ class ScaleIODriver(driver.VolumeDriver):
"""Creates a cloned volume."""
volume_id = src_vref['provider_id']
snapname = self._id_to_base64(volume.id)
LOG.info(_LI(
"ScaleIO create cloned volume: source volume %(src)s to "
"target volume %(tgt)s."),
LOG.info("ScaleIO create cloned volume: source volume %(src)s to "
"target volume %(tgt)s.",
{'src': volume_id,
'tgt': snapname})
@ -691,9 +681,8 @@ class ScaleIODriver(driver.VolumeDriver):
request = ("https://%(server_ip)s:%(server_port)s"
"/api/instances/Volume::%(vol_id)s"
"/action/removeMappedSdc") % req_vars
LOG.info(_LI(
"Trying to unmap volume from all sdcs"
" before deletion: %s."),
LOG.info("Trying to unmap volume from all sdcs"
" before deletion: %s.",
request)
r = requests.post(
request,
@ -725,14 +714,12 @@ class ScaleIODriver(driver.VolumeDriver):
response = r.json()
error_code = response['errorCode']
if error_code == VOLUME_NOT_FOUND_ERROR:
LOG.warning(_LW(
"Ignoring error in delete volume %s:"
" Volume not found."), vol_id)
LOG.warning("Ignoring error in delete volume %s:"
" Volume not found.", vol_id)
elif vol_id is None:
LOG.warning(_LW(
"Volume does not have provider_id thus does not "
LOG.warning("Volume does not have provider_id thus does not "
"map to a ScaleIO volume. "
"Allowing deletion to proceed."))
"Allowing deletion to proceed.")
else:
msg = (_("Error deleting volume %(vol)s: %(err)s.") %
{'vol': vol_id,
@ -743,7 +730,7 @@ class ScaleIODriver(driver.VolumeDriver):
def delete_snapshot(self, snapshot):
"""Deletes a ScaleIO snapshot."""
snap_id = snapshot.provider_id
LOG.info(_LI("ScaleIO delete snapshot."))
LOG.info("ScaleIO delete snapshot.")
return self._delete_volume(snap_id)
def initialize_connection(self, volume, connector, **kwargs):
@ -762,13 +749,13 @@ class ScaleIODriver(driver.VolumeDriver):
qos_specs = self._get_volumetype_qos(volume)
storage_type = extra_specs.copy()
storage_type.update(qos_specs)
LOG.info(_LI("Volume type is %s."), storage_type)
LOG.info("Volume type is %s.", storage_type)
round_volume_size = self._round_to_num_gran(volume.size)
iops_limit = self._get_iops_limit(round_volume_size, storage_type)
bandwidth_limit = self._get_bandwidth_limit(round_volume_size,
storage_type)
LOG.info(_LI("iops limit is %s"), iops_limit)
LOG.info(_LI("bandwidth limit is %s"), bandwidth_limit)
LOG.info("iops limit is %s", iops_limit)
LOG.info("bandwidth limit is %s", bandwidth_limit)
connection_properties['iopsLimit'] = iops_limit
connection_properties['bandwidthLimit'] = bandwidth_limit
return {'driver_volume_type': 'scaleio',
@ -782,10 +769,10 @@ class ScaleIODriver(driver.VolumeDriver):
max_bandwidth = (self._round_to_num_gran(int(max_bandwidth),
units.Ki))
max_bandwidth = six.text_type(max_bandwidth)
LOG.info(_LI("max bandwidth is: %s"), max_bandwidth)
LOG.info("max bandwidth is: %s", max_bandwidth)
bw_per_gb = self._find_limit(storage_type, QOS_BANDWIDTH_PER_GB,
None)
LOG.info(_LI("bandwidth per gb is: %s"), bw_per_gb)
LOG.info("bandwidth per gb is: %s", bw_per_gb)
if bw_per_gb is None:
return max_bandwidth
# Since ScaleIO volumes size is in 8GB granularity
@ -805,9 +792,9 @@ class ScaleIODriver(driver.VolumeDriver):
def _get_iops_limit(self, size, storage_type):
max_iops = self._find_limit(storage_type, QOS_IOPS_LIMIT_KEY,
IOPS_LIMIT_KEY)
LOG.info(_LI("max iops is: %s"), max_iops)
LOG.info("max iops is: %s", max_iops)
iops_per_gb = self._find_limit(storage_type, QOS_IOPS_PER_GB, None)
LOG.info(_LI("iops per gb is: %s"), iops_per_gb)
LOG.info("iops per gb is: %s", iops_per_gb)
try:
if iops_per_gb is None:
if max_iops is not None:
@ -862,9 +849,9 @@ class ScaleIODriver(driver.VolumeDriver):
request = ("https://%(server_ip)s:%(server_port)s"
"/api/types/Domain/instances/getByName::"
"%(encoded_domain_name)s") % req_vars
LOG.info(_LI("ScaleIO get domain id by name request: %s."),
LOG.info("ScaleIO get domain id by name request: %s.",
request)
LOG.info(_LI("username: %(username)s, verify_cert: %(verify)s."),
LOG.info("username: %(username)s, verify_cert: %(verify)s.",
{'username': self.server_username,
'verify': verify_cert})
r = requests.get(
@ -874,7 +861,7 @@ class ScaleIODriver(driver.VolumeDriver):
self.server_token),
verify=verify_cert)
r = self._check_response(r, request)
LOG.info(_LI("Get domain by name response: %s"), r.text)
LOG.info("Get domain by name response: %s", r.text)
domain_id = r.json()
if not domain_id:
msg = (_("Domain with name %s wasn't found.")
@ -888,7 +875,7 @@ class ScaleIODriver(driver.VolumeDriver):
'err': domain_id['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Domain id is %s."), domain_id)
LOG.info("Domain id is %s.", domain_id)
# Get pool id from name.
encoded_pool_name = urllib.parse.quote(pool_name, '')
@ -899,7 +886,7 @@ class ScaleIODriver(driver.VolumeDriver):
request = ("https://%(server_ip)s:%(server_port)s"
"/api/types/Pool/instances/getByName::"
"%(domain_id)s,%(encoded_pool_name)s") % req_vars
LOG.info(_LI("ScaleIO get pool id by name request: %s."), request)
LOG.info("ScaleIO get pool id by name request: %s.", request)
r = requests.get(
request,
auth=(
@ -921,7 +908,7 @@ class ScaleIODriver(driver.VolumeDriver):
'err': pool_id['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Pool id is %s."), pool_id)
LOG.info("Pool id is %s.", pool_id)
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port}
request = ("https://%(server_ip)s:%(server_port)s"
@ -941,7 +928,7 @@ class ScaleIODriver(driver.VolumeDriver):
self.server_token),
verify=verify_cert)
response = r.json()
LOG.info(_LI("Query capacity stats response: %s."), response)
LOG.info("Query capacity stats response: %s.", response)
for res in response.values():
# Divide by two because ScaleIO creates a copy for each volume
total_capacity_kb = (
@ -956,10 +943,9 @@ class ScaleIODriver(driver.VolumeDriver):
provisioned_capacity = (
((res['thickCapacityInUseInKb'] +
res['thinCapacityAllocatedInKm']) / 2) / units.Mi)
LOG.info(_LI(
"free capacity of pool %(pool)s is: %(free)s, "
LOG.info("Free capacity of pool %(pool)s is: %(free)s, "
"total capacity: %(total)s, "
"provisioned capacity: %(prov)s"),
"provisioned capacity: %(prov)s",
{'pool': pool_name,
'free': free_capacity_gb,
'total': total_capacity_gb,
@ -983,15 +969,14 @@ class ScaleIODriver(driver.VolumeDriver):
stats['total_capacity_gb'] = total_capacity
stats['free_capacity_gb'] = free_capacity
LOG.info(_LI(
"Free capacity for backend is: %(free)s, total capacity: "
"%(total)s."),
LOG.info("Free capacity for backend is: %(free)s, total capacity: "
"%(total)s.",
{'free': free_capacity,
'total': total_capacity})
stats['pools'] = pools
LOG.info(_LI("Backend name is %s."), stats["volume_backend_name"])
LOG.info("Backend name is %s.", stats["volume_backend_name"])
self._stats = stats
@ -1046,7 +1031,7 @@ class ScaleIODriver(driver.VolumeDriver):
def _sio_detach_volume(self, volume):
"""Call the connector.disconnect() """
LOG.info(_LI("Calling os-brick to detach ScaleIO volume."))
LOG.info("Calling os-brick to detach ScaleIO volume.")
connection_properties = dict(self.connection_properties)
connection_properties['scaleIO_volname'] = self._id_to_base64(
volume.id)
@ -1055,9 +1040,8 @@ class ScaleIODriver(driver.VolumeDriver):
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
LOG.info(_LI(
"ScaleIO copy_image_to_volume volume: %(vol)s image service: "
"%(service)s image id: %(id)s."),
LOG.info("ScaleIO copy_image_to_volume volume: %(vol)s image service: "
"%(service)s image id: %(id)s.",
{'vol': volume,
'service': six.text_type(image_service),
'id': six.text_type(image_id)})
@ -1075,9 +1059,8 @@ class ScaleIODriver(driver.VolumeDriver):
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
LOG.info(_LI(
"ScaleIO copy_volume_to_image volume: %(vol)s image service: "
"%(service)s image meta: %(meta)s."),
LOG.info("ScaleIO copy_volume_to_image volume: %(vol)s image service: "
"%(service)s image meta: %(meta)s.",
{'vol': volume,
'service': six.text_type(image_service),
'meta': six.text_type(image_meta)})
@ -1109,8 +1092,8 @@ class ScaleIODriver(driver.VolumeDriver):
current_name = new_volume['id']
new_name = volume['id']
vol_id = new_volume['provider_id']
LOG.info(_LI("Renaming %(id)s from %(current_name)s to "
"%(new_name)s."),
LOG.info("Renaming %(id)s from %(current_name)s to "
"%(new_name)s.",
{'id': vol_id, 'current_name': current_name,
'new_name': new_name})
@ -1134,7 +1117,7 @@ class ScaleIODriver(driver.VolumeDriver):
request = ("https://%(server_ip)s:%(server_port)s"
"/api/instances/Volume::%(id)s/action/setVolumeName" %
req_vars)
LOG.info(_LI("ScaleIO rename volume request: %s."), request)
LOG.info("ScaleIO rename volume request: %s.", request)
params = {'newName': new_name}
r = requests.post(
@ -1153,8 +1136,8 @@ class ScaleIODriver(driver.VolumeDriver):
if ((error_code == VOLUME_NOT_FOUND_ERROR or
error_code == OLD_VOLUME_NOT_FOUND_ERROR or
error_code == ILLEGAL_SYNTAX)):
LOG.info(_LI("Ignoring renaming action because the volume "
"%(vol)s is not a ScaleIO volume."),
LOG.info("Ignoring renaming action because the volume "
"%(vol)s is not a ScaleIO volume.",
{'vol': vol_id})
else:
msg = (_("Error renaming volume %(vol)s: %(err)s.") %
@ -1162,14 +1145,14 @@ class ScaleIODriver(driver.VolumeDriver):
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
LOG.info(_LI("ScaleIO volume %(vol)s was renamed to "
"%(new_name)s."),
LOG.info("ScaleIO volume %(vol)s was renamed to "
"%(new_name)s.",
{'vol': vol_id, 'new_name': new_name})
def _query_scaleio_volume(self, volume, existing_ref):
request = self._create_scaleio_get_volume_request(volume, existing_ref)
r, response = self._execute_scaleio_get_request(request)
LOG.info(_LI("Get Volume response: %(res)s"),
LOG.info("Get Volume response: %(res)s",
{'res': response})
self._manage_existing_check_legal_response(r, existing_ref)
return response
@ -1258,7 +1241,7 @@ class ScaleIODriver(driver.VolumeDriver):
'id': vol_id}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/instances/Volume::%(id)s" % req_vars)
LOG.info(_LI("ScaleIO get volume by id request: %s."), request)
LOG.info("ScaleIO get volume by id request: %s.", request)
return request
@staticmethod
@ -1286,7 +1269,7 @@ class ScaleIODriver(driver.VolumeDriver):
ScaleIO won't create a CG until cg-snapshot creation;
the db maintains the volume-to-CG relationship.
"""
LOG.info(_LI("Creating Consistency Group"))
LOG.info("Creating Consistency Group")
model_update = {'status': 'available'}
return model_update
@ -1295,7 +1278,7 @@ class ScaleIODriver(driver.VolumeDriver):
ScaleIO will delete the volumes of the CG.
"""
LOG.info(_LI("Deleting Consistency Group"))
LOG.info("Deleting Consistency Group")
model_update = {'status': 'deleted'}
error_statuses = ['error', 'error_deleting']
volumes_model_update = []
@ -1311,8 +1294,8 @@ class ScaleIODriver(driver.VolumeDriver):
volumes_model_update.append(update_item)
if model_update['status'] not in error_statuses:
model_update['status'] = 'error_deleting'
LOG.error(_LE("Failed to delete the volume %(vol)s of CG. "
"Exception: %(exception)s."),
LOG.error("Failed to delete the volume %(vol)s of CG. "
"Exception: %(exception)s.",
{'vol': volume['name'], 'exception': err})
return model_update, volumes_model_update
@ -1323,7 +1306,7 @@ class ScaleIODriver(driver.VolumeDriver):
'snapshotName': self._id_to_base64(snapshot['id'])}
snapshot_defs = list(map(get_scaleio_snapshot_params, snapshots))
r, response = self._snapshot_volume_group(snapshot_defs)
LOG.info(_LI("Snapshot volume response: %s."), response)
LOG.info("Snapshot volume response: %s.", response)
if r.status_code != OK_STATUS_CODE and "errorCode" in response:
msg = (_("Failed creating snapshot for group: "
"%(response)s.") %
@ -1356,9 +1339,9 @@ class ScaleIODriver(driver.VolumeDriver):
snapshot_model_update.append(update_item)
if model_update['status'] not in error_statuses:
model_update['status'] = 'error_deleting'
LOG.error(_LE("Failed to delete the snapshot %(snap)s "
LOG.error("Failed to delete the snapshot %(snap)s "
"of cgsnapshot: %(cgsnapshot_id)s. "
"Exception: %(exception)s."),
"Exception: %(exception)s.",
{'snap': snapshot['name'],
'exception': err,
'cgsnapshot_id': cgsnapshot.id})
@ -1381,7 +1364,7 @@ class ScaleIODriver(driver.VolumeDriver):
source_vols,
volumes)
r, response = self._snapshot_volume_group(list(snapshot_defs))
LOG.info(_LI("Snapshot volume response: %s."), response)
LOG.info("Snapshot volume response: %s.", response)
if r.status_code != OK_STATUS_CODE and "errorCode" in response:
msg = (_("Failed creating snapshot for group: "
"%(response)s.") %
@ -1407,7 +1390,7 @@ class ScaleIODriver(driver.VolumeDriver):
return None, None, None
def _snapshot_volume_group(self, snapshot_defs):
LOG.info(_LI("ScaleIO snapshot group of volumes"))
LOG.info("ScaleIO snapshot group of volumes")
params = {'snapshotDefs': snapshot_defs}
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port}
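
The ScaleIO hunks above all apply the same mechanical rewrite: drop the _LI/_LW/_LE wrapper and hand the bare format string to the logger, keeping the parameters as a separate argument so the %-interpolation stays lazy. A minimal sketch of the pattern (the volume id is a made-up value, for illustration only):

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    vol_id = 'vol-001'  # hypothetical id, illustration only

    # Before: LOG.info(_LI("ScaleIO delete volume: %s."), vol_id)
    # After: the bare string goes straight to the logger; interpolation
    # is still deferred until the record is actually emitted.
    LOG.info("ScaleIO delete volume: %s.", vol_id)

Passing the arguments separately (rather than pre-formatting with %) means the string work is skipped entirely when the log level filters the record out.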

@ -21,8 +21,8 @@ from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder.i18n import _
from cinder import utils as cinder_utils
from cinder.i18n import _, _LE, _LI
from cinder.volume.drivers.dell_emc.unity import client
from cinder.volume.drivers.dell_emc.unity import utils
from cinder.volume import utils as vol_utils
@ -111,21 +111,21 @@ class CommonAdapter(object):
matched, _ignored, unmatched_whitelist = utils.match_any(all_ports.id,
whitelist)
if not matched:
LOG.error(_LE('No matched ports filtered by all patterns: %s'),
LOG.error('No matched ports filtered by all patterns: %s',
whitelist)
raise exception.InvalidConfigurationValue(
option='%s.unity_io_ports' % self.config.config_group,
value=self.config.unity_io_ports)
if unmatched_whitelist:
LOG.error(_LE('No matched ports filtered by below patterns: %s'),
LOG.error('No matched ports filtered by below patterns: %s',
unmatched_whitelist)
raise exception.InvalidConfigurationValue(
option='%s.unity_io_ports' % self.config.config_group,
value=self.config.unity_io_ports)
LOG.info(_LI('These ports %(matched)s will be used based on '
'the option unity_io_ports: %(config)s'),
LOG.info('These ports %(matched)s will be used based on '
'the option unity_io_ports: %(config)s',
{'matched': matched,
'config': self.config.unity_io_ports})
return matched
@ -174,8 +174,8 @@ class CommonAdapter(object):
qos_specs = utils.get_backend_qos_specs(volume)
limit_policy = self.client.get_io_limit_policy(qos_specs)
LOG.info(_LI('Create Volume: %(volume)s Size: %(size)s '
'Pool: %(pool)s Qos: %(qos)s.'),
LOG.info('Create Volume: %(volume)s Size: %(size)s '
'Pool: %(pool)s Qos: %(qos)s.',
{'volume': volume_name,
'size': volume_size,
'pool': pool.name,
@ -193,8 +193,8 @@ class CommonAdapter(object):
def delete_volume(self, volume):
lun_id = self.get_lun_id(volume)
if lun_id is None:
LOG.info(_LI('Backend LUN not found, skipping the deletion. '
'Volume: %(volume_name)s.'),
LOG.info('Backend LUN not found, skipping the deletion. '
'Volume: %(volume_name)s.',
{'volume_name': volume.name})
else:
self.client.delete_lun(lun_id)
@ -457,8 +457,8 @@ class CommonAdapter(object):
except Exception:
with excutils.save_and_reraise_exception():
utils.ignore_exception(self.delete_volume, volume)
LOG.error(_LE('Failed to create cloned volume: %(vol_id)s, '
'from source unity snapshot: %(snap_name)s. '),
LOG.error('Failed to create cloned volume: %(vol_id)s, '
'from source unity snapshot: %(snap_name)s.',
{'vol_id': volume.id, 'snap_name': snap.name})
return model_update
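
The adapter hunk above keeps the save_and_reraise_exception cleanup idiom intact while unwrapping the message. A self-contained sketch of that idiom, with hypothetical _do_clone/_delete_volume helpers standing in for the real backend calls:

    from oslo_log import log as logging
    from oslo_utils import excutils

    LOG = logging.getLogger(__name__)

    def _do_clone(volume):
        raise RuntimeError('backend unavailable')  # hypothetical failure

    def _delete_volume(volume):
        LOG.info('Cleaned up volume %(vol_id)s.', {'vol_id': volume['id']})

    def create_cloned_volume(volume):
        try:
            _do_clone(volume)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Logging and cleanup run first; the original exception
                # is then re-raised with its traceback intact.
                LOG.error('Failed to create cloned volume: %(vol_id)s.',
                          {'vol_id': volume['id']})
                _delete_volume(volume)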

@ -25,7 +25,7 @@ else:
storops_ex = None
from cinder import exception
from cinder.i18n import _, _LW
from cinder.i18n import _
from cinder.volume.drivers.dell_emc.unity import utils
@ -98,13 +98,13 @@ class UnityClient(object):
lun = None
if lun_id is None and name is None:
LOG.warning(
_LW("Both lun_id and name are None to get LUN. Return None."))
"Both lun_id and name are None to get LUN. Return None.")
else:
try:
lun = self.system.get_lun(_id=lun_id, name=name)
except storops_ex.UnityResourceNotFoundError:
LOG.warning(
_LW("LUN id=%(id)s, name=%(name)s doesn't exist."),
"LUN id=%(id)s, name=%(name)s doesn't exist.",
{'id': lun_id, 'name': name})
return lun
@ -159,16 +159,16 @@ class UnityClient(object):
'err': err})
except storops_ex.UnityDeleteAttachedSnapError as err:
with excutils.save_and_reraise_exception():
LOG.warning(_LW("Failed to delete snapshot %(snap_name)s "
"which is in use. Message: %(err)s"),
LOG.warning("Failed to delete snapshot %(snap_name)s "
"which is in use. Message: %(err)s",
{'snap_name': snap.name, 'err': err})
def get_snap(self, name=None):
try:
return self.system.get_snap(name=name)
except storops_ex.UnityResourceNotFoundError as err:
msg = _LW("Snapshot %(name)s doesn't exist. Message: %(err)s")
LOG.warning(msg, {'name': name, 'err': err})
LOG.warning("Snapshot %(name)s doesn't exist. Message: %(err)s",
{'name': name, 'err': err})
return None
def create_host(self, name, uids):
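
The get_snap hunk is the one spot in this file where the rewrite also collapses a two-step message build (msg = _LW(...) followed by LOG.warning(msg, ...)) into a single call. A sketch of the resulting shape, with a stand-in exception class for storops' UnityResourceNotFoundError:

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    class ResourceNotFound(Exception):
        """Stand-in for storops_ex.UnityResourceNotFoundError."""

    def get_snap(system, name):
        try:
            return system.get_snap(name=name)
        except ResourceNotFound as err:
            # One call: the format string and its parameters go straight
            # to the logger instead of via a pre-built msg variable.
            LOG.warning("Snapshot %(name)s doesn't exist. Message: %(err)s",
                        {'name': name, 'err': err})
            return None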

@ -24,7 +24,7 @@ from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LW
from cinder.i18n import _
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
from cinder.zonemanager import utils as zm_utils
@ -70,11 +70,11 @@ def extract_provider_location(provider_location, key):
if len(fields) == 2 and fields[0] == key:
return fields[1]
else:
msg = _LW('"%(key)s" is not found in provider '
'location "%(location)s."')
LOG.warning(msg, {'key': key, 'location': provider_location})
LOG.warning('"%(key)s" is not found in provider '
'location "%(location)s."',
{'key': key, 'location': provider_location})
else:
LOG.warning(_LW('Empty provider location received.'))
LOG.warning('Empty provider location received.')
def byte_to_gib(byte):
@ -186,9 +186,9 @@ def ignore_exception(func, *args, **kwargs):
try:
func(*args, **kwargs)
except Exception as ex:
LOG.warning(_LW('Error occurred but ignored. Function: %(func_name)s, '
LOG.warning('Error occurred but ignored. Function: %(func_name)s, '
'args: %(args)s, kwargs: %(kwargs)s, '
'exception: %(ex)s.'),
'exception: %(ex)s.',
{'func_name': func, 'args': args,
'kwargs': kwargs, 'ex': ex})
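
The ignore_exception hunk above is a small best-effort wrapper; the same idea as a self-contained sketch:

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    def ignore_exception(func, *args, **kwargs):
        """Run func; log and swallow anything it raises."""
        try:
            func(*args, **kwargs)
        except Exception as ex:
            LOG.warning('Error occurred but ignored. '
                        'Function: %(func_name)s, args: %(args)s, '
                        'kwargs: %(kwargs)s, exception: %(ex)s.',
                        {'func_name': func, 'args': args,
                         'kwargs': kwargs, 'ex': ex})

    # Best-effort cleanup: a failure here must not mask the real error.
    ignore_exception(lambda: 1 / 0)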

@ -25,11 +25,11 @@ import six
import uuid
from cinder import exception
from cinder import utils as cinder_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.objects.consistencygroup import ConsistencyGroup
from cinder.i18n import _
import cinder.objects.consistencygroup as cg_obj
from cinder.objects import fields
from cinder.objects.group import Group
import cinder.objects.group as group_obj
from cinder import utils as cinder_utils
from cinder.volume.drivers.dell_emc.vmax import fast
from cinder.volume.drivers.dell_emc.vmax import https
from cinder.volume.drivers.dell_emc.vmax import masking
@ -138,9 +138,8 @@ class VMAXCommon(object):
active_backend_id=None):
if not pywbemAvailable:
LOG.info(_LI(
"Module PyWBEM not installed. "
"Install PyWBEM using the python-pywbem package."))
LOG.info("Module PyWBEM not installed. Install PyWBEM using the "
"python-pywbem package.")
self.protocol = prtcl
self.configuration = configuration
@ -221,9 +220,9 @@ class VMAXCommon(object):
LOG.debug("The replication configuration is %(rep_config)s.",
{'rep_config': self.rep_config})
elif self.rep_devices and len(self.rep_devices) > 1:
LOG.error(_LE("More than one replication target is configured. "
LOG.error("More than one replication target is configured. "
"EMC VMAX only suppports a single replication "
"target. Replication will not be enabled."))
"target. Replication will not be enabled.")
def _get_slo_workload_combinations(self, arrayInfoList):
"""Method to query the array for SLO and Workloads.
@ -356,9 +355,9 @@ class VMAXCommon(object):
volumeName,
extraSpecs)
LOG.info(_LI("Leaving create_volume: %(volumeName)s "
LOG.info("Leaving create_volume: %(volumeName)s "
"Return code: %(rc)lu "
"volume dict: %(name)s."),
"volume dict: %(name)s.",
{'volumeName': volumeName,
'rc': rc,
'name': volumeDict})
@ -449,12 +448,12 @@ class VMAXCommon(object):
:param volume: volume Object
"""
LOG.info(_LI("Deleting Volume: %(volume)s"),
LOG.info("Deleting Volume: %(volume)s",
{'volume': volume['name']})
rc, volumeName = self._delete_volume(volume)
LOG.info(_LI("Leaving delete_volume: %(volumename)s Return code: "
"%(rc)lu."),
LOG.info("Leaving delete_volume: %(volumename)s Return code: "
"%(rc)lu.",
{'volumename': volumeName,
'rc': rc})
@ -476,7 +475,7 @@ class VMAXCommon(object):
:param snapshot: snapshot object
:param volume: volume Object to create snapshot from
"""
LOG.info(_LI("Delete Snapshot: %(snapshotName)s."),
LOG.info("Delete Snapshot: %(snapshotName)s.",
{'snapshotName': snapshot['name']})
self._delete_snapshot(snapshot, volume['host'])
@ -516,12 +515,12 @@ class VMAXCommon(object):
extraSpecs = self._get_replication_extraSpecs(
extraSpecs, self.rep_config)
volumename = volume['name']
LOG.info(_LI("Unmap volume: %(volume)s."),
LOG.info("Unmap volume: %(volume)s.",
{'volume': volumename})
device_info = self.find_device_number(volume, connector['host'])
if 'hostlunid' not in device_info:
LOG.info(_LI("Volume %s is not mapped. No volume to unmap."),
LOG.info("Volume %s is not mapped. No volume to unmap.",
volumename)
return
@ -584,7 +583,7 @@ class VMAXCommon(object):
is_multipath = connector.get('multipath', False)
volumeName = volume['name']
LOG.info(_LI("Initialize connection: %(volume)s."),
LOG.info("Initialize connection: %(volume)s.",
{'volume': volumeName})
self.conn = self._get_ecom_connection()
deviceInfoDict = self._wrap_find_device_number(
@ -603,8 +602,8 @@ class VMAXCommon(object):
# the state as is.
deviceNumber = deviceInfoDict['hostlunid']
LOG.info(_LI("Volume %(volume)s is already mapped. "
"The device number is %(deviceNumber)s."),
LOG.info("Volume %(volume)s is already mapped. "
"The device number is %(deviceNumber)s.",
{'volume': volumeName,
'deviceNumber': deviceNumber})
# Special case, we still need to get the iscsi ip address.
@ -663,7 +662,7 @@ class VMAXCommon(object):
if 'hostlunid' not in deviceInfoDict:
# Did not successfully attach to host,
# so a rollback for FAST is required.
LOG.error(_LE("Error Attaching volume %(vol)s."),
LOG.error("Error Attaching volume %(vol)s.",
{'vol': volumeName})
if ((rollbackDict['fastPolicyName'] is not None) or
(rollbackDict['isV3'] is not None)):
@ -754,7 +753,7 @@ class VMAXCommon(object):
:params connector: the connector Object
"""
volumename = volume['name']
LOG.info(_LI("Terminate connection: %(volume)s."),
LOG.info("Terminate connection: %(volume)s.",
{'volume': volumename})
self._unmap_lun(volume, connector)
@ -1020,11 +1019,11 @@ class VMAXCommon(object):
provisionedManagedSpaceGbs, array_reserve_percent, wlpEnabled) = (
self.provisionv3.get_srp_pool_stats(self.conn, arrayInfo))
LOG.info(_LI(
LOG.info(
"Capacity stats for SRP pool %(poolName)s on array "
"%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, "
"free_capacity_gb=%(free_capacity_gb)lu, "
"provisioned_capacity_gb=%(provisioned_capacity_gb)lu"),
"provisioned_capacity_gb=%(provisioned_capacity_gb)lu",
{'poolName': arrayInfo['PoolName'],
'arrayName': arrayInfo['SerialNumber'],
'total_capacity_gb': totalManagedSpaceGbs,
@ -1055,7 +1054,7 @@ class VMAXCommon(object):
volumeName = volume['name']
volumeStatus = volume['status']
LOG.info(_LI("Migrating using retype Volume: %(volume)s."),
LOG.info("Migrating using retype Volume: %(volume)s.",
{'volume': volumeName})
extraSpecs = self._initial_setup(volume)
@ -1063,17 +1062,17 @@ class VMAXCommon(object):
volumeInstance = self._find_lun(volume)
if volumeInstance is None:
LOG.error(_LE("Volume %(name)s not found on the array. "
"No volume to migrate using retype."),
LOG.error("Volume %(name)s not found on the array. "
"No volume to migrate using retype.",
{'name': volumeName})
return False
if extraSpecs[ISV3]:
if self.utils.is_replication_enabled(extraSpecs):
LOG.error(_LE("Volume %(name)s is replicated - "
LOG.error("Volume %(name)s is replicated - "
"Replicated volumes are not eligible for "
"storage assisted retype. Host assisted "
"retype is supported."),
"retype is supported.",
{'name': volumeName})
return False
@ -1097,12 +1096,12 @@ class VMAXCommon(object):
:returns: boolean -- Always returns True
:returns: dict -- Empty dict {}
"""
LOG.warning(_LW("The VMAX plugin only supports Retype. "
LOG.warning("The VMAX plugin only supports Retype. "
"If a pool based migration is necessary "
"this will happen on a Retype "
"From the command line: "
"cinder --os-volume-api-version 2 retype <volumeId> "
"<volumeType> --migration-policy on-demand"))
"<volumeType> --migration-policy on-demand")
return True, {}
def _migrate_volume(
@ -1134,11 +1133,11 @@ class VMAXCommon(object):
if moved is False and sourceFastPolicyName is not None:
# Return the volume to the default source fast policy storage
# group because the migrate was unsuccessful.
LOG.warning(_LW(
LOG.warning(
"Failed to migrate: %(volumeName)s from "
"default source storage group "
"for FAST policy: %(sourceFastPolicyName)s. "
"Attempting cleanup... "),
"Attempting cleanup... ",
{'volumeName': volumeName,
'sourceFastPolicyName': sourceFastPolicyName})
if sourcePoolInstanceName == self.utils.get_assoc_pool_from_volume(
@ -1162,9 +1161,9 @@ class VMAXCommon(object):
if not self._migrate_volume_fast_target(
volumeInstance, storageSystemName,
targetFastPolicyName, volumeName, extraSpecs):
LOG.warning(_LW(
LOG.warning(
"Attempting a rollback of: %(volumeName)s to "
"original pool %(sourcePoolInstanceName)s."),
"original pool %(sourcePoolInstanceName)s.",
{'volumeName': volumeName,
'sourcePoolInstanceName': sourcePoolInstanceName})
self._migrate_rollback(
@ -1194,7 +1193,7 @@ class VMAXCommon(object):
:param extraSpecs: extra specifications
"""
LOG.warning(_LW("_migrate_rollback on : %(volumeName)s."),
LOG.warning("_migrate_rollback on : %(volumeName)s.",
{'volumeName': volumeName})
storageRelocationService = self.utils.find_storage_relocation_service(
@ -1205,10 +1204,10 @@ class VMAXCommon(object):
conn, storageRelocationService, volumeInstance.path,
sourcePoolInstanceName, extraSpecs)
except Exception:
LOG.error(_LE(
LOG.error(
"Failed to return volume %(volumeName)s to "
"original storage pool. Please contact your system "
"administrator to return it to the correct location."),
"administrator to return it to the correct location.",
{'volumeName': volumeName})
if sourceFastPolicyName is not None:
@ -1230,7 +1229,7 @@ class VMAXCommon(object):
:returns: boolean -- True/False
"""
LOG.warning(_LW("_migrate_cleanup on : %(volumeName)s."),
LOG.warning("_migrate_cleanup on : %(volumeName)s.",
{'volumeName': volumeName})
return_to_default = True
controllerConfigurationService = (
@ -1279,9 +1278,9 @@ class VMAXCommon(object):
:returns: boolean -- True/False
"""
falseRet = False
LOG.info(_LI(
LOG.info(
"Adding volume: %(volumeName)s to default storage group "
"for FAST policy: %(fastPolicyName)s."),
"for FAST policy: %(fastPolicyName)s.",
{'volumeName': volumeName,
'fastPolicyName': targetFastPolicyName})
@ -1294,9 +1293,9 @@ class VMAXCommon(object):
self.conn, controllerConfigurationService,
targetFastPolicyName, volumeInstance, extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.error(_LE(
LOG.error(
"Unable to create or get default storage group for FAST policy"
": %(fastPolicyName)s."),
": %(fastPolicyName)s.",
{'fastPolicyName': targetFastPolicyName})
return falseRet
@ -1306,9 +1305,9 @@ class VMAXCommon(object):
self.conn, controllerConfigurationService, volumeInstance,
volumeName, targetFastPolicyName, extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.error(_LE(
LOG.error(
"Failed to verify that volume was added to storage group for "
"FAST policy: %(fastPolicyName)s."),
"FAST policy: %(fastPolicyName)s.",
{'fastPolicyName': targetFastPolicyName})
return falseRet
@ -1348,9 +1347,9 @@ class VMAXCommon(object):
targetPoolInstanceName = self.utils.get_pool_by_name(
self.conn, targetPoolName, storageSystemName)
if targetPoolInstanceName is None:
LOG.error(_LE(
LOG.error(
"Error finding target pool instance name for pool: "
"%(targetPoolName)s."),
"%(targetPoolName)s.",
{'targetPoolName': targetPoolName})
return falseRet
try:
@ -1360,9 +1359,9 @@ class VMAXCommon(object):
except Exception:
# Rollback by deleting the volume if adding the volume to the
# default storage group were to fail.
LOG.exception(_LE(
LOG.exception(
"Error migrating volume: %(volumename)s. "
"to target pool %(targetPoolName)s."),
"to target pool %(targetPoolName)s.",
{'volumename': volumeName,
'targetPoolName': targetPoolName})
return falseRet
@ -1375,9 +1374,9 @@ class VMAXCommon(object):
if (foundPoolInstanceName is None or
(foundPoolInstanceName['InstanceID'] !=
targetPoolInstanceName['InstanceID'])):
LOG.error(_LE(
LOG.error(
"Volume : %(volumeName)s. was not successfully migrated to "
"target pool %(targetPoolName)s."),
"target pool %(targetPoolName)s.",
{'volumeName': volumeName,
'targetPoolName': targetPoolName})
return falseRet
@ -1427,10 +1426,10 @@ class VMAXCommon(object):
raise exception.VolumeBackendAPIException(data=exceptionMessage)
if defaultStorageGroupInstanceName is None:
LOG.warning(_LW(
LOG.warning(
"The volume: %(volumename)s "
"was not first part of the default storage "
"group for FAST policy %(fastPolicyName)s."),
"group for FAST policy %(fastPolicyName)s.",
{'volumename': volumeName,
'fastPolicyName': sourceFastPolicyName})
@ -1455,10 +1454,10 @@ class VMAXCommon(object):
conn, controllerConfigurationService, volumeInstance,
volumeName, targetFastPolicyName, extraSpecs))
if assocDefaultStorageGroupName is None:
LOG.error(_LE(
LOG.error(
"Failed to add %(volumeName)s "
"to default storage group for fast policy "
"%(fastPolicyName)s."),
"%(fastPolicyName)s.",
{'volumeName': volumeName,
'fastPolicyName': targetFastPolicyName})
@ -1483,7 +1482,7 @@ class VMAXCommon(object):
"""
falseRet = (False, None, None)
if 'location_info' not in host['capabilities']:
LOG.error(_LE('Error getting array, pool, SLO and workload.'))
LOG.error('Error getting array, pool, SLO and workload.')
return falseRet
info = host['capabilities']['location_info']
@ -1496,24 +1495,24 @@ class VMAXCommon(object):
targetSlo = infoDetail[2]
targetWorkload = infoDetail[3]
except KeyError:
LOG.error(_LE("Error parsing array, pool, SLO and workload."))
LOG.error("Error parsing array, pool, SLO and workload.")
if targetArraySerialNumber not in sourceArraySerialNumber:
LOG.error(_LE(
LOG.error(
"The source array : %(sourceArraySerialNumber)s does not "
"match the target array: %(targetArraySerialNumber)s "
"skipping storage-assisted migration."),
"skipping storage-assisted migration.",
{'sourceArraySerialNumber': sourceArraySerialNumber,
'targetArraySerialNumber': targetArraySerialNumber})
return falseRet
if targetPoolName not in sourcePoolName:
LOG.error(_LE(
LOG.error(
"Only SLO/workload migration within the same SRP Pool "
"is supported in this version "
"The source pool : %(sourcePoolName)s does not "
"match the target array: %(targetPoolName)s. "
"Skipping storage-assisted migration."),
"Skipping storage-assisted migration.",
{'sourcePoolName': sourcePoolName,
'targetPoolName': targetPoolName})
return falseRet
@ -1522,9 +1521,9 @@ class VMAXCommon(object):
self.utils.get_storage_group_from_volume(
self.conn, volumeInstanceName, sgName))
if foundStorageGroupInstanceName is None:
LOG.warning(_LW(
LOG.warning(
"Volume: %(volumeName)s is not currently "
"belonging to any storage group."),
"belonging to any storage group.",
{'volumeName': volumeName})
else:
@ -1539,10 +1538,10 @@ class VMAXCommon(object):
# Check if migration is from compression to non compression
# of vice versa
if not doChangeCompression:
LOG.error(_LE(
LOG.error(
"No action required. Volume: %(volumeName)s is "
"already part of slo/workload combination: "
"%(targetCombination)s."),
"%(targetCombination)s.",
{'volumeName': volumeName,
'targetCombination': targetCombination})
return falseRet
@ -1566,7 +1565,7 @@ class VMAXCommon(object):
"""
falseRet = (False, None, None)
if 'location_info' not in host['capabilities']:
LOG.error(_LE("Error getting target pool name and array."))
LOG.error("Error getting target pool name and array.")
return falseRet
info = host['capabilities']['location_info']
@ -1578,14 +1577,14 @@ class VMAXCommon(object):
targetPoolName = infoDetail[1]
targetFastPolicy = infoDetail[2]
except KeyError:
LOG.error(_LE(
"Error parsing target pool name, array, and fast policy."))
LOG.error(
"Error parsing target pool name, array, and fast policy.")
if targetArraySerialNumber not in sourceArraySerialNumber:
LOG.error(_LE(
LOG.error(
"The source array : %(sourceArraySerialNumber)s does not "
"match the target array: %(targetArraySerialNumber)s, "
"skipping storage-assisted migration."),
"skipping storage-assisted migration.",
{'sourceArraySerialNumber': sourceArraySerialNumber,
'targetArraySerialNumber': targetArraySerialNumber})
return falseRet
@ -1597,19 +1596,19 @@ class VMAXCommon(object):
assocPoolInstance = self.conn.GetInstance(
assocPoolInstanceName)
if assocPoolInstance['ElementName'] == targetPoolName:
LOG.error(_LE(
LOG.error(
"No action required. Volume: %(volumeName)s is "
"already part of pool: %(pool)s."),
"already part of pool: %(pool)s.",
{'volumeName': volumeName,
'pool': targetPoolName})
return falseRet
LOG.info(_LI("Volume status is: %s."), volumeStatus)
LOG.info("Volume status is: %s.", volumeStatus)
if (host['capabilities']['storage_protocol'] != self.protocol and
(volumeStatus != 'available' and volumeStatus != 'retyping')):
LOG.error(_LE(
LOG.error(
"Only available volumes can be migrated between "
"different protocols."))
"different protocols.")
return falseRet
return (True, targetPoolName, targetFastPolicy)
@ -1799,7 +1798,7 @@ class VMAXCommon(object):
foundVolumeinstance['ElementName']):
foundVolumeinstance = None
except Exception as e:
LOG.info(_LI("Exception in retrieving volume: %(e)s."),
LOG.info("Exception in retrieving volume: %(e)s.",
{'e': e})
foundVolumeinstance = None
@ -1944,9 +1943,9 @@ class VMAXCommon(object):
if not data:
if len(maskedvols) > 0:
data = maskedvols[0]
LOG.warning(_LW(
LOG.warning(
"Volume is masked but not to host %(host)s as is "
"expected. Assuming live migration."),
"expected. Assuming live migration.",
{'host': hoststr})
LOG.debug("Device info: %(data)s.", {'data': data})
@ -1982,15 +1981,15 @@ class VMAXCommon(object):
self.utils.get_target_endpoints(
self.conn, hardwareIdInstance))
if not targetEndpoints:
LOG.warning(_LW(
LOG.warning(
"Unable to get target endpoints for hardwareId "
"%(instance)s."),
"%(instance)s.",
{'instance': hardwareIdInstance})
continue
except Exception:
LOG.warning(_LW(
LOG.warning(
"Unable to get target endpoints for hardwareId "
"%(instance)s."),
"%(instance)s.",
{'instance': hardwareIdInstance}, exc_info=True)
continue
@ -2447,9 +2446,9 @@ class VMAXCommon(object):
volumeInstance.path, appendVolumeInstanceName, compositeType,
extraSpecs)
else:
LOG.error(_LE(
LOG.error(
"Unable to determine whether %(volumeName)s is "
"composite or not."),
"composite or not.",
{'volumeName': volumeName})
raise
@ -2497,9 +2496,9 @@ class VMAXCommon(object):
sourceName = sourceVolume['name']
cloneName = cloneVolume['name']
LOG.info(_LI(
LOG.info(
"Create a replica from Volume: Clone Volume: %(cloneName)s "
"Source Volume: %(sourceName)s."),
"Source Volume: %(sourceName)s.",
{'cloneName': cloneName,
'sourceName': sourceName})
@ -2555,8 +2554,8 @@ class VMAXCommon(object):
self.conn, sourceInstance))
if cloneVolume['size'] != old_size_gbs:
LOG.info(_LI("Extending clone %(cloneName)s to "
"%(newSize)d GBs"),
LOG.info("Extending clone %(cloneName)s to "
"%(newSize)d GBs",
{'cloneName': cloneName,
'newSize': cloneVolume['size']})
cloneInstance = self.utils.find_volume_instance(
@ -2638,9 +2637,9 @@ class VMAXCommon(object):
volumeInstance = self._find_lun(volume)
if volumeInstance is None:
LOG.error(_LE(
LOG.error(
"Volume %(name)s not found on the array. "
"No volume to delete."),
"No volume to delete.",
{'name': volumeName})
return errorRet
@ -2683,10 +2682,10 @@ class VMAXCommon(object):
self.masking.get_associated_masking_groups_from_device(
self.conn, volumeInstanceName))
if storageGroupInstanceNames:
LOG.warning(_LW(
LOG.warning(
"Pre check for deletion. "
"Volume: %(volumeName)s is part of a storage group. "
"Attempting removal from %(storageGroupInstanceNames)s."),
"Attempting removal from %(storageGroupInstanceNames)s.",
{'volumeName': volumeName,
'storageGroupInstanceNames': storageGroupInstanceNames})
for storageGroupInstanceName in storageGroupInstanceNames:
@ -2829,8 +2828,8 @@ class VMAXCommon(object):
# Delete the target device.
rc, snapshotname = self._delete_volume(snapshot, True, host)
LOG.info(_LI("Leaving delete_snapshot: %(ssname)s Return code: "
"%(rc)lu."),
LOG.info("Leaving delete_snapshot: %(ssname)s Return code: "
"%(rc)lu.",
{'ssname': snapshotname,
'rc': rc})
@ -2842,7 +2841,7 @@ class VMAXCommon(object):
:returns: dict -- modelUpdate = {'status': 'available'}
:raises: VolumeBackendAPIException
"""
LOG.info(_LI("Create Consistency Group: %(group)s."),
LOG.info("Create Consistency Group: %(group)s.",
{'group': group['id']})
modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
@ -2876,7 +2875,7 @@ class VMAXCommon(object):
:returns: list -- list of volume objects
:raises: VolumeBackendAPIException
"""
LOG.info(_LI("Delete Consistency Group: %(group)s."),
LOG.info("Delete Consistency Group: %(group)s.",
{'group': group['id']})
modelUpdate = {}
@ -2894,7 +2893,7 @@ class VMAXCommon(object):
cgInstanceName, cgName = self._find_consistency_group(
replicationService, six.text_type(group['id']))
if cgInstanceName is None:
LOG.error(_LE("Cannot find CG group %(cgName)s."),
LOG.error("Cannot find CG group %(cgName)s.",
{'cgName': six.text_type(group['id'])})
modelUpdate = {'status': fields.ConsistencyGroupStatus.DELETED}
volumes_model_update = self.utils.get_volume_model_updates(
@ -2980,9 +2979,9 @@ class VMAXCommon(object):
snapshots_model_update = []
LOG.info(_LI(
LOG.info(
"Create snapshot for Consistency Group %(cgId)s "
"cgsnapshotID: %(cgsnapshot)s."),
"cgsnapshotID: %(cgsnapshot)s.",
{'cgsnapshot': cgsnapshot['id'],
'cgId': cgsnapshot['consistencygroup_id']})
@ -3011,7 +3010,7 @@ class VMAXCommon(object):
interval_retries_dict)
targetCgInstanceName, targetCgName = self._find_consistency_group(
replicationService, cgsnapshot['id'])
LOG.info(_LI("Create target consistency group %(targetCg)s."),
LOG.info("Create target consistency group %(targetCg)s.",
{'targetCg': targetCgInstanceName})
for snapshot in snapshots:
@ -3135,9 +3134,9 @@ class VMAXCommon(object):
consistencyGroup = cgsnapshot.get('consistencygroup')
model_update = {}
snapshots_model_update = []
LOG.info(_LI(
LOG.info(
"Delete snapshot for source CG %(cgId)s "
"cgsnapshotID: %(cgsnapshot)s."),
"cgsnapshotID: %(cgsnapshot)s.",
{'cgsnapshot': cgsnapshot['id'],
'cgId': cgsnapshot['consistencygroup_id']})
@ -3278,9 +3277,9 @@ class VMAXCommon(object):
# add the volume to the default storage group created for
# volumes in pools associated with this fast policy.
if extraSpecs[FASTPOLICY]:
LOG.info(_LI(
LOG.info(
"Adding volume: %(volumeName)s to default storage group"
" for FAST policy: %(fastPolicyName)s."),
" for FAST policy: %(fastPolicyName)s.",
{'volumeName': volumeName,
'fastPolicyName': extraSpecs[FASTPOLICY]})
defaultStorageGroupInstanceName = (
@ -3551,9 +3550,9 @@ class VMAXCommon(object):
storageSystemName = volumeInstance['SystemName']
if not isValid:
LOG.error(_LE(
LOG.error(
"Volume %(name)s is not suitable for storage "
"assisted migration using retype."),
"assisted migration using retype.",
{'name': volumeName})
return False
if volume['host'] != host['host'] or doChangeCompression:
@ -3601,9 +3600,9 @@ class VMAXCommon(object):
self.utils.get_storage_group_from_volume(
self.conn, volumeInstance.path, defaultSgName))
if foundStorageGroupInstanceName is None:
LOG.warning(_LW(
LOG.warning(
"Volume : %(volumeName)s is not currently "
"belonging to any storage group."),
"belonging to any storage group.",
{'volumeName': volumeName})
else:
self.masking.remove_and_reset_members(
@ -3621,8 +3620,8 @@ class VMAXCommon(object):
poolName, targetSlo, targetWorkload, isCompressionDisabled,
storageSystemName, extraSpecs)
if targetSgInstanceName is None:
LOG.error(_LE(
"Failed to get or create storage group %(storageGroupName)s."),
LOG.error(
"Failed to get or create storage group %(storageGroupName)s.",
{'storageGroupName': storageGroupName})
return False
@ -3634,9 +3633,9 @@ class VMAXCommon(object):
self.utils.get_storage_group_from_volume(
self.conn, volumeInstance.path, storageGroupName))
if sgFromVolAddedInstanceName is None:
LOG.error(_LE(
LOG.error(
"Volume : %(volumeName)s has not been "
"added to target storage group %(storageGroup)s."),
"added to target storage group %(storageGroup)s.",
{'volumeName': volumeName,
'storageGroup': targetSgInstanceName})
return False
@ -3665,9 +3664,9 @@ class VMAXCommon(object):
volumeName, volumeStatus))
if not isValid:
LOG.error(_LE(
LOG.error(
"Volume %(name)s is not suitable for storage "
"assisted migration using retype."),
"assisted migration using retype.",
{'name': volumeName})
return False
if volume['host'] != host['host']:
@ -3718,10 +3717,10 @@ class VMAXCommon(object):
self.fast.get_capacities_associated_to_policy(
self.conn, arrayInfo['SerialNumber'],
arrayInfo['FastPolicy']))
LOG.info(_LI(
LOG.info(
"FAST: capacity stats for policy %(fastPolicyName)s on array "
"%(arrayName)s. total_capacity_gb=%(total_capacity_gb)lu, "
"free_capacity_gb=%(free_capacity_gb)lu."),
"free_capacity_gb=%(free_capacity_gb)lu.",
{'fastPolicyName': arrayInfo['FastPolicy'],
'arrayName': arrayInfo['SerialNumber'],
'total_capacity_gb': total_capacity_gb,
@ -3732,10 +3731,10 @@ class VMAXCommon(object):
self.utils.get_pool_capacities(self.conn,
arrayInfo['PoolName'],
arrayInfo['SerialNumber']))
LOG.info(_LI(
LOG.info(
"NON-FAST: capacity stats for pool %(poolName)s on array "
"%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, "
"free_capacity_gb=%(free_capacity_gb)lu."),
"free_capacity_gb=%(free_capacity_gb)lu.",
{'poolName': arrayInfo['PoolName'],
'arrayName': arrayInfo['SerialNumber'],
'total_capacity_gb': total_capacity_gb,
@ -3813,8 +3812,8 @@ class VMAXCommon(object):
sloFromExtraSpec = poolDetails[0]
workloadFromExtraSpec = poolDetails[1]
except KeyError:
LOG.error(_LE("Error parsing SLO, workload from "
"the provided extra_specs."))
LOG.error("Error parsing SLO, workload from "
"the provided extra_specs.")
else:
# Throw an exception as it is compulsory to have
# pool_name in the extra specs
@ -3904,10 +3903,10 @@ class VMAXCommon(object):
volumeInstance.path, volumeName, fastPolicyName,
extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.warning(_LW(
LOG.warning(
"The volume: %(volumename)s. was not first part of the "
"default storage group for FAST policy %(fastPolicyName)s"
"."),
".",
{'volumename': volumeName,
'fastPolicyName': fastPolicyName})
# Check if it is part of another storage group.
@ -3946,12 +3945,12 @@ class VMAXCommon(object):
volumeInstance, volumeName, fastPolicyName,
extraSpecs))
if assocDefaultStorageGroupName is None:
LOG.error(_LE(
LOG.error(
"Failed to Roll back to re-add volume %(volumeName)s "
"to default storage group for fast policy "
"%(fastPolicyName)s. Please contact your sysadmin to "
"get the volume returned to the default "
"storage group."),
"storage group.",
{'volumeName': volumeName,
'fastPolicyName': fastPolicyName})
@ -4208,8 +4207,8 @@ class VMAXCommon(object):
self._add_clone_to_default_storage_group(
fastPolicyName, storageSystemName, cloneDict, cloneName,
extraSpecs)
LOG.info(_LI("Snapshot creation %(cloneName)s completed. "
"Source Volume: %(sourceName)s."),
LOG.info("Snapshot creation %(cloneName)s completed. "
"Source Volume: %(sourceName)s.",
{'cloneName': cloneName,
'sourceName': sourceName})
@ -4246,8 +4245,8 @@ class VMAXCommon(object):
if mvInstanceName is not None:
targetWwns = self.masking.get_target_wwns(
self.conn, mvInstanceName)
LOG.info(_LI("Target wwns in masking view %(maskingView)s: "
"%(targetWwns)s."),
LOG.info("Target wwns in masking view %(maskingView)s: "
"%(targetWwns)s.",
{'maskingView': mvInstanceName,
'targetWwns': six.text_type(targetWwns)})
return targetWwns
@ -4347,9 +4346,9 @@ class VMAXCommon(object):
sourceInstance, extraSpecs, targetInstance, rsdInstance,
copyState))
except Exception:
LOG.warning(_LW(
LOG.warning(
"Clone failed on V3. Cleaning up the target volume. "
"Clone name: %(cloneName)s "),
"Clone name: %(cloneName)s ",
{'cloneName': cloneName})
if targetInstance:
self._cleanup_target(
@ -4361,7 +4360,7 @@ class VMAXCommon(object):
self.conn, job['Job'])
targetVolumeInstance = (
self.provisionv3.get_volume_from_job(self.conn, job['Job']))
LOG.info(_LI("The target instance device id is: %(deviceid)s."),
LOG.info("The target instance device id is: %(deviceid)s.",
{'deviceid': targetVolumeInstance['DeviceID']})
if not isSnapshot:
@ -4426,7 +4425,7 @@ class VMAXCommon(object):
replicationService, six.text_type(cgsnapshot['id']))
if cgInstanceName is None:
LOG.error(_LE("Cannot find CG group %(cgName)s."),
LOG.error("Cannot find CG group %(cgName)s.",
{'cgName': cgsnapshot['id']})
modelUpdate = {'status': fields.ConsistencyGroupStatus.DELETED}
return modelUpdate, []
@ -4579,8 +4578,8 @@ class VMAXCommon(object):
# Manage existing volume is not supported if fast enabled.
if extraSpecs[FASTPOLICY]:
LOG.warning(_LW(
"FAST is enabled. Policy: %(fastPolicyName)s."),
LOG.warning(
"FAST is enabled. Policy: %(fastPolicyName)s.",
{'fastPolicyName': extraSpecs[FASTPOLICY]})
exceptionMessage = (_(
"Manage volume is not supported if FAST is enable. "
@ -4743,8 +4742,8 @@ class VMAXCommon(object):
:param remove_volumes: the volumes uuids you want to remove from
the CG
"""
LOG.info(_LI("Update Consistency Group: %(group)s. "
"This adds and/or removes volumes from a CG."),
LOG.info("Update Consistency Group: %(group)s. "
"This adds and/or removes volumes from a CG.",
{'group': group['id']})
modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
@ -4780,7 +4779,7 @@ class VMAXCommon(object):
except exception.ConsistencyGroupNotFound:
raise
except Exception as ex:
LOG.error(_LE("Exception: %(ex)s"), {'ex': ex})
LOG.error("Exception: %(ex)s", {'ex': ex})
exceptionMessage = (_("Failed to update consistency group:"
" %(cgName)s.")
% {'cgName': group['id']})
@ -4799,7 +4798,7 @@ class VMAXCommon(object):
for volume in volumes:
volumeInstance = self._find_lun(volume)
if volumeInstance is None:
LOG.error(_LE("Volume %(name)s not found on the array."),
LOG.error("Volume %(name)s not found on the array.",
{'name': volume['name']})
else:
volumeInstanceNames.append(volumeInstance.path)
@ -5136,14 +5135,14 @@ class VMAXCommon(object):
extraSpecsDictList = []
isV3 = False
if isinstance(group, Group):
if isinstance(group, group_obj.Group):
for volume_type in group.volume_types:
extraSpecsDict, storageSystems, isV3 = (
self._update_extra_specs_list(
volume_type.extra_specs, len(group.volume_types),
volume_type.id))
extraSpecsDictList.append(extraSpecsDict)
elif isinstance(group, ConsistencyGroup):
elif isinstance(group, cg_obj.ConsistencyGroup):
volumeTypeIds = group.volume_type_id.split(",")
volumeTypeIds = list(filter(None, volumeTypeIds))
for volumeTypeId in volumeTypeIds:
@ -5321,7 +5320,7 @@ class VMAXCommon(object):
sourceVolume, sourceInstance, targetInstance, extraSpecs,
self.rep_config)
LOG.info(_LI('Successfully setup replication for %s.'),
LOG.info('Successfully setup replication for %s.',
sourceVolume['name'])
replication_status = REPLICATION_ENABLED
replication_driver_data = rdfDict['keybindings']
@ -5378,19 +5377,19 @@ class VMAXCommon(object):
self._cleanup_remote_target(
conn, repServiceInstanceName, sourceInstance,
targetInstance, extraSpecs, repExtraSpecs)
LOG.info(_LI('Successfully destroyed replication for '
'volume: %(volume)s'),
LOG.info('Successfully destroyed replication for '
'volume: %(volume)s',
{'volume': volumeName})
else:
LOG.warning(_LW('Replication target not found for '
'replication-enabled volume: %(volume)s'),
LOG.warning('Replication target not found for '
'replication-enabled volume: %(volume)s',
{'volume': volumeName})
except Exception as e:
LOG.error(_LE('Cannot get necessary information to cleanup '
LOG.error('Cannot get necessary information to cleanup '
'replication target for volume: %(volume)s. '
'The exception received was: %(e)s. Manual '
'clean-up may be required. Please contact '
'your administrator.'),
'your administrator.',
{'volume': volumeName, 'e': e})
def _cleanup_remote_target(
@ -5438,9 +5437,9 @@ class VMAXCommon(object):
:param volumeDict: the source volume dictionary
:param extraSpecs: the extra specifications
"""
LOG.warning(_LW(
LOG.warning(
"Replication failed. Cleaning up the source volume. "
"Volume name: %(sourceName)s "),
"Volume name: %(sourceName)s.",
{'sourceName': volumeName})
sourceInstance = self.utils.find_volume_instance(
conn, volumeDict, volumeName)
@ -5484,11 +5483,11 @@ class VMAXCommon(object):
repServiceInstanceName = self.utils.find_replication_service(
conn, storageSystem)
RDFGroupName = self.rep_config['rdf_group_label']
LOG.info(_LI("Replication group: %(RDFGroup)s."),
LOG.info("Replication group: %(RDFGroup)s.",
{'RDFGroup': RDFGroupName})
rdfGroupInstance = self.provisionv3.get_rdf_group_instance(
conn, repServiceInstanceName, RDFGroupName)
LOG.info(_LI("Found RDF group instance: %(RDFGroup)s."),
LOG.info("Found RDF group instance: %(RDFGroup)s.",
{'RDFGroup': rdfGroupInstance})
if rdfGroupInstance is None:
exception_message = (_("Cannot find replication group: "
@ -5597,11 +5596,10 @@ class VMAXCommon(object):
rep_data = six.text_type(replication_driver_data)
except Exception as ex:
msg = _LE(
LOG.error(
'Failed to failover volume %(volume_id)s. '
'Error: %(error)s.')
LOG.error(msg, {'volume_id': vol['id'],
'error': ex}, )
'Error: %(error)s.',
{'volume_id': vol['id'], 'error': ex})
new_status = FAILOVER_ERROR
model_update = {'volume_id': vol['id'],
@ -5628,7 +5626,7 @@ class VMAXCommon(object):
recovery = self.recover_volumes_on_failback(volume)
volume_update_list.append(recovery)
LOG.info(_LI("Failover host complete"))
LOG.info("Failover host complete")
return secondary_id, volume_update_list
@ -5733,24 +5731,24 @@ class VMAXCommon(object):
targetVolumeInstance, volumeName, repExtraSpecs,
None, False)
LOG.info(_LI("Breaking replication relationship..."))
LOG.info("Breaking replication relationship...")
self.break_rdf_relationship(
self.conn, repServiceInstanceName,
storageSynchronizationSv, extraSpecs)
# extend the source volume
LOG.info(_LI("Extending source volume..."))
LOG.info("Extending source volume...")
rc, volumeDict = self._extend_v3_volume(
volumeInstance, volumeName, newSize, extraSpecs)
# extend the target volume
LOG.info(_LI("Extending target volume..."))
LOG.info("Extending target volume...")
self._extend_v3_volume(targetVolumeInstance, volumeName,
newSize, repExtraSpecs)
# re-create replication relationship
LOG.info(_LI("Recreating replication relationship..."))
LOG.info("Recreating replication relationship...")
self.setup_volume_replication(
self.conn, volume, volumeDict,
extraSpecs, targetVolumeInstance)
@ -5826,9 +5824,9 @@ class VMAXCommon(object):
except Exception as e:
LOG.warning(
_LW("Remote replication failed. Cleaning up the target "
"Remote replication failed. Cleaning up the target "
"volume and returning source volume to default storage "
"group. Volume name: %(cloneName)s "),
"group. Volume name: %(cloneName)s ",
{'cloneName': volumeName})
self._cleanup_remote_target(
@ -5958,10 +5956,10 @@ class VMAXCommon(object):
extraSpecs[WORKLOAD])
except Exception:
LOG.warning(
_LW("The target array does not support the storage "
"The target array does not support the storage "
"pool setting for SLO %(slo)s or workload "
"%(workload)s. Not assigning any SLO or "
"workload."),
"workload.",
{'slo': extraSpecs[SLO],
'workload': extraSpecs[WORKLOAD]})
repExtraSpecs[SLO] = None
@ -5969,9 +5967,9 @@ class VMAXCommon(object):
repExtraSpecs[WORKLOAD] = None
else:
LOG.warning(_LW("Cannot determine storage pool settings of "
LOG.warning("Cannot determine storage pool settings of "
"target array. Not assigning any SLO or "
"workload"))
"workload")
repExtraSpecs[SLO] = None
if extraSpecs[WORKLOAD]:
repExtraSpecs[WORKLOAD] = None
@ -6004,9 +6002,9 @@ class VMAXCommon(object):
arrayInfo['Workload'])
except Exception:
LOG.info(
_LI("The target array does not support the storage "
"The target array does not support the storage "
"pool setting for SLO %(slo)s or workload "
"%(workload)s. SLO stats will not be reported."),
"%(workload)s. SLO stats will not be reported.",
{'slo': arrayInfo['SLO'],
'workload': arrayInfo['Workload']})
secondaryInfo['SLO'] = None
@ -6016,8 +6014,8 @@ class VMAXCommon(object):
self.multiPoolSupportEnabled = False
else:
LOG.info(_LI("Cannot determine storage pool settings of "
"target array. SLO stats will not be reported."))
LOG.info("Cannot determine storage pool settings of "
"target array. SLO stats will not be reported.")
secondaryInfo['SLO'] = None
if arrayInfo['Workload']:
secondaryInfo['Workload'] = None
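
One hunk in this file keeps exc_info=True on the warning call; that flag is what attaches the active traceback to the log record, so the format string itself can stay short. A sketch under that assumption, with a hypothetical endpoint lookup:

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    def _get_target_endpoints(instance):
        raise ConnectionError('array unreachable')  # hypothetical failure

    for instance in ('hw-1', 'hw-2'):
        try:
            _get_target_endpoints(instance)
        except Exception:
            # exc_info=True appends the current exception and traceback
            # to the emitted record.
            LOG.warning('Unable to get target endpoints for hardwareId '
                        '%(instance)s.', {'instance': instance},
                        exc_info=True)
            continue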

@ -16,7 +16,7 @@
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.i18n import _
from cinder.volume.drivers.dell_emc.vmax import provision
from cinder.volume.drivers.dell_emc.vmax import utils
@ -50,12 +50,11 @@ class VMAXFast(object):
isTieringPolicySupported = self.is_tiering_policy_enabled(
conn, tierPolicyServiceInstanceName)
if isTieringPolicySupported is None:
LOG.error(_LE("Cannot determine whether "
"Tiering Policy is supported on this array."))
LOG.error("Cannot determine whether "
"Tiering Policy is supported on this array.")
if isTieringPolicySupported is False:
LOG.error(_LE("Tiering Policy is not "
"supported on this array."))
LOG.error("Tiering Policy is not supported on this array.")
return isTieringPolicySupported
def is_tiering_policy_enabled(self, conn, tierPolicyServiceInstanceName):
@ -87,8 +86,8 @@ class VMAXFast(object):
break
if foundIsSupportsTieringPolicies is None:
LOG.error(_LE("Cannot determine if Tiering Policies "
"are supported."))
LOG.error("Cannot determine if Tiering Policies "
"are supported.")
return foundIsSupportsTieringPolicies
@ -113,8 +112,7 @@ class VMAXFast(object):
conn, controllerConfigService)
if not self._check_if_fast_supported(conn, storageSystemInstanceName):
LOG.error(_LE(
"FAST is not supported on this array."))
LOG.error("FAST is not supported on this array.")
raise
defaultSgName = self.format_default_sg_string(fastPolicyName)
@ -127,9 +125,9 @@ class VMAXFast(object):
controllerConfigService,
defaultSgName))
if defaultStorageGroupInstanceName is None:
LOG.error(_LE(
LOG.error(
"Unable to find default storage group "
"for FAST policy : %(fastPolicyName)s."),
"for FAST policy : %(fastPolicyName)s.",
{'fastPolicyName': fastPolicyName})
raise
@ -137,9 +135,9 @@ class VMAXFast(object):
foundDefaultStorageGroupInstanceName = (
assocStorageGroupInstanceName)
else:
LOG.warning(_LW(
LOG.warning(
"Volume: %(volumeName)s Does not belong "
"to storage group %(defaultSgName)s."),
"to storage group %(defaultSgName)s.",
{'volumeName': volumeName,
'defaultSgName': defaultSgName})
return foundDefaultStorageGroupInstanceName, defaultSgName
@ -177,8 +175,8 @@ class VMAXFast(object):
storageGroupInstanceName = self.utils.find_storage_masking_group(
conn, controllerConfigService, defaultSgName)
if storageGroupInstanceName is None:
LOG.error(_LE(
"Unable to get default storage group %(defaultSgName)s."),
LOG.error(
"Unable to get default storage group %(defaultSgName)s.",
{'defaultSgName': defaultSgName})
return failedRet
@ -214,9 +212,9 @@ class VMAXFast(object):
firstVolumeInstance = self._create_volume_for_default_volume_group(
conn, controllerConfigService, volumeInstance.path, extraSpecs)
if firstVolumeInstance is None:
LOG.error(_LE(
LOG.error(
"Failed to create a first volume for storage "
"group : %(storageGroupName)s."),
"group : %(storageGroupName)s.",
{'storageGroupName': storageGroupName})
return failedRet
@ -225,9 +223,9 @@ class VMAXFast(object):
conn, controllerConfigService, storageGroupName,
firstVolumeInstance.path, extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.error(_LE(
LOG.error(
"Failed to create default storage group for "
"FAST policy : %(fastPolicyName)s."),
"FAST policy : %(fastPolicyName)s.",
{'fastPolicyName': fastPolicyName})
return failedRet
@ -240,9 +238,9 @@ class VMAXFast(object):
tierPolicyRuleInstanceName = self._get_service_level_tier_policy(
conn, tierPolicyServiceInstanceName, fastPolicyName)
if tierPolicyRuleInstanceName is None:
LOG.error(_LE(
LOG.error(
"Unable to get policy rule for fast policy: "
"%(fastPolicyName)s."),
"%(fastPolicyName)s.",
{'fastPolicyName': fastPolicyName})
return failedRet
@ -280,7 +278,7 @@ class VMAXFast(object):
poolInstanceName = self.utils.get_assoc_pool_from_volume(
conn, volumeInstanceName)
if poolInstanceName is None:
LOG.error(_LE("Unable to get associated pool of volume."))
LOG.error("Unable to get associated pool of volume.")
return failedRet
volumeName = 'vol1'
@ -408,8 +406,8 @@ class VMAXFast(object):
if len(storageTierInstanceNames) == 0:
storageTierInstanceNames = None
LOG.warning(_LW(
"Unable to get storage tiers from tier policy rule."))
LOG.warning(
"Unable to get storage tiers from tier policy rule.")
return storageTierInstanceNames
@ -503,8 +501,8 @@ class VMAXFast(object):
tierPolicyRuleInstanceName = self._get_service_level_tier_policy(
conn, tierPolicyServiceInstanceName, fastPolicyName)
if tierPolicyRuleInstanceName is None:
LOG.error(_LE(
"Cannot find the fast policy %(fastPolicyName)s."),
LOG.error(
"Cannot find the fast policy %(fastPolicyName)s.",
{'fastPolicyName': fastPolicyName})
return failedRet
else:
@ -521,9 +519,9 @@ class VMAXFast(object):
storageGroupInstanceName, tierPolicyRuleInstanceName,
storageGroupName, fastPolicyName, extraSpecs)
except Exception:
LOG.exception(_LE(
LOG.exception(
"Failed to add storage group %(storageGroupInstanceName)s "
"to tier policy rule %(tierPolicyRuleInstanceName)s."),
"to tier policy rule %(tierPolicyRuleInstanceName)s.",
{'storageGroupInstanceName': storageGroupInstanceName,
'tierPolicyRuleInstanceName': tierPolicyRuleInstanceName})
return failedRet
@ -588,15 +586,15 @@ class VMAXFast(object):
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
LOG.error(_LE("Error disassociating storage group from "
"policy: %s."), errordesc)
LOG.error("Error disassociating storage group from "
"policy: %s.", errordesc)
else:
LOG.debug("Disassociated storage group from policy.")
else:
LOG.debug("ModifyStorageTierPolicyRule completed.")
except Exception as e:
LOG.info(_LI("Storage group not associated with the "
"policy. Exception is %s."), e)
LOG.info("Storage group not associated with the "
"policy. Exception is %s.", e)
def get_pool_associated_to_policy(
self, conn, fastPolicyName, arraySN,
@ -664,7 +662,7 @@ class VMAXFast(object):
isTieringPolicySupported = self.is_tiering_policy_enabled(
conn, tierPolicyServiceInstanceName)
except Exception as e:
LOG.error(_LE("Exception: %s."), e)
LOG.error("Exception: %s.", e)
return False
return isTieringPolicySupported
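
An aside on the logging calls in the hunks above: the message template and its substitution dict are still passed to the logger as separate arguments rather than %-formatted at the call site. A minimal sketch of why that matters, using only stdlib logging and illustrative names:

    import logging

    logging.basicConfig(level=logging.WARNING)
    LOG = logging.getLogger(__name__)

    fastPolicyName = 'GOLD1'

    # Eager: the string is always built, even though DEBUG is disabled.
    LOG.debug('Unable to get policy rule for fast policy: %(fastPolicyName)s.'
              % {'fastPolicyName': fastPolicyName})

    # Deferred: interpolation happens only if a handler emits the record,
    # and log aggregators still see the template plus its arguments.
    LOG.warning('Unable to get policy rule for fast policy: %(fastPolicyName)s.',
                {'fastPolicyName': fastPolicyName})
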

View File

@ -18,7 +18,6 @@ import ast
from oslo_log import log as logging
import six
from cinder.i18n import _LW
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.dell_emc.vmax import common
@ -274,7 +273,7 @@ class VMAXFCDriver(driver.FibreChannelDriver):
'target_wwns': target_wwns,
'init_targ_map': init_targ_map}
else:
LOG.warning(_LW("Volume %(volume)s is not in any masking view."),
LOG.warning("Volume %(volume)s is not in any masking view.",
{'volume': volume['name']})
return zoning_mappings

View File

@ -30,7 +30,7 @@ import six
from six.moves import http_client
from six.moves import urllib
from cinder.i18n import _, _LI
from cinder.i18n import _
# Handle case where we are running in a monkey patched environment
if OpenSSL and patcher.is_monkey_patched('socket'):
@ -94,9 +94,9 @@ class HTTPSConnection(http_client.HTTPSConnection):
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, ca_certs=None, no_verification=False):
if not pywbemAvailable:
LOG.info(_LI(
LOG.info(
'Module PyWBEM not installed. '
'Install PyWBEM using the python-pywbem package.'))
'Install PyWBEM using the python-pywbem package.')
if six.PY3:
excp_lst = (TypeError, ssl.SSLError)
else:

View File

@ -20,7 +20,7 @@ from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.i18n import _
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.dell_emc.vmax import common
@ -209,7 +209,7 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
iscsi_properties = self.smis_get_iscsi_properties(
volume, connector, ip_and_iqn, is_multipath)
LOG.info(_LI("iSCSI properties are: %s"), iscsi_properties)
LOG.info("iSCSI properties are: %s", iscsi_properties)
return {
'driver_volume_type': 'iscsi',
'data': iscsi_properties
@ -246,7 +246,7 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
isError = True
if isError:
LOG.error(_LE("Unable to get the lun id"))
LOG.error("Unable to get the lun id")
exception_message = (_("Cannot find device number for volume "
"%(volumeName)s.")
% {'volumeName': volume['name']})
@ -265,15 +265,14 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
properties['target_lun'] = lun_id
properties['volume_id'] = volume['id']
LOG.info(_LI(
"ISCSI properties: %(properties)s."), {'properties': properties})
LOG.info(_LI(
"ISCSI volume is: %(volume)s."), {'volume': volume})
LOG.info(
"ISCSI properties: %(properties)s.", {'properties': properties})
LOG.info("ISCSI volume is: %(volume)s.", {'volume': volume})
if 'provider_auth' in volume:
auth = volume['provider_auth']
LOG.info(_LI(
"AUTH properties: %(authProps)s."), {'authProps': auth})
LOG.info(
"AUTH properties: %(authProps)s.", {'authProps': auth})
if auth is not None:
(auth_method, auth_username, auth_secret) = auth.split()
@ -282,7 +281,7 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
properties['auth_username'] = auth_username
properties['auth_password'] = auth_secret
LOG.info(_LI("AUTH properties: %s."), properties)
LOG.info("AUTH properties: %s.", properties)
return properties

View File

@ -18,7 +18,7 @@ import six
from cinder import coordination
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.i18n import _
from cinder.volume.drivers.dell_emc.vmax import fast
from cinder.volume.drivers.dell_emc.vmax import provision
from cinder.volume.drivers.dell_emc.vmax import provision_v3
@ -125,10 +125,10 @@ class VMAXMasking(object):
{'maskingViewInstanceName': maskingViewInstanceName,
'storageGroupInstanceName': storageGroupInstanceName})
except Exception as e:
LOG.exception(_LE(
LOG.exception(
"Masking View creation or retrieval was not successful "
"for masking view %(maskingViewName)s. "
"Attempting rollback."),
"Attempting rollback.",
{'maskingViewName': maskingViewDict['maskingViewName']})
errorMessage = e
@ -225,9 +225,9 @@ class VMAXMasking(object):
volumeName, maskingviewdict,
defaultStorageGroupInstanceName)
else:
LOG.warning(_LW(
LOG.warning(
"Volume: %(volumeName)s does not belong "
"to storage group %(defaultSgGroupName)s."),
"to storage group %(defaultSgGroupName)s.",
{'volumeName': volumeName,
'defaultSgGroupName': defaultSgGroupName})
return defaultStorageGroupInstanceName
@ -283,8 +283,7 @@ class VMAXMasking(object):
storageSystemName = maskingViewDict['storageSystemName']
maskingViewName = maskingViewDict['maskingViewName']
pgGroupName = maskingViewDict['pgGroupName']
LOG.info(_LI("Returning random Port Group: "
"%(portGroupName)s."),
LOG.info("Returning random Port Group: %(portGroupName)s.",
{'portGroupName': pgGroupName})
storageGroupInstanceName, errorMessage = (
@ -376,7 +375,7 @@ class VMAXMasking(object):
self._get_storage_group_instance_name(
conn, maskingViewDict, storageGroupInstanceName))
if storageGroupInstanceName is None:
# This may be used in exception hence _ instead of _LE.
# This may be used in exception hence the use of _.
msg = (_(
"Cannot get or create a storage group: %(sgGroupName)s"
" for volume %(volumeName)s ") %
@ -404,7 +403,7 @@ class VMAXMasking(object):
conn, maskingViewInstanceName))
if sgFromMvInstanceName is None:
# This may be used in exception hence _ instead of _LE.
# This may be used in exception hence the use of _.
msg = (_(
"Cannot get storage group: %(sgGroupName)s "
"from masking view %(maskingViewInstanceName)s. ") %
@ -427,7 +426,7 @@ class VMAXMasking(object):
portGroupInstanceName = self._get_port_group_instance_name(
conn, controllerConfigService, pgGroupName)
if portGroupInstanceName is None:
# This may be used in exception hence _ instead of _LE.
# This may be used in exception hence the use of _.
msg = (_(
"Cannot get port group: %(pgGroupName)s. ") %
{'pgGroupName': pgGroupName})
@ -455,7 +454,7 @@ class VMAXMasking(object):
conn, controllerConfigService, igGroupName, connector,
storageSystemName, extraSpecs))
if initiatorGroupInstanceName is None:
# This may be used in exception hence _ instead of _LE.
# This may be used in exception hence the use of _.
msg = (_(
"Cannot get or create initiator group: "
"%(igGroupName)s. ") %
@ -486,7 +485,7 @@ class VMAXMasking(object):
conn, controllerConfigService, maskingViewName,
connector, storageSystemName, igGroupName,
extraSpecs):
# This may be used in exception hence _ instead of _LE.
# This may be used in exception hence the use of _.
msg = (_(
"Unable to verify initiator group: %(igGroupName)s "
"in masking view %(maskingViewName)s. ") %
@ -518,7 +517,7 @@ class VMAXMasking(object):
storageGroupInstanceName, portGroupInstanceName,
initiatorGroupInstanceName, extraSpecs))
if maskingViewInstanceName is None:
# This may be used in exception hence _ instead of _LE.
# This may be used in exception hence the use of _.
msg = (_(
"Cannot create masking view: %(maskingViewName)s. ") %
{'maskingViewName': maskingViewName})
@ -543,9 +542,9 @@ class VMAXMasking(object):
if self._is_volume_in_storage_group(
conn, storageGroupInstanceName,
volumeInstance, sgGroupName):
LOG.warning(_LW(
LOG.warning(
"Volume: %(volumeName)s is already part "
"of storage group %(sgGroupName)s."),
"of storage group %(sgGroupName)s.",
{'volumeName': volumeName,
'sgGroupName': sgGroupName})
else:
@ -576,7 +575,7 @@ class VMAXMasking(object):
volumeInstance, volumeName, sgGroupName, extraSpecs)
if not self._is_volume_in_storage_group(
conn, storageGroupInstanceName, volumeInstance, sgGroupName):
# This may be used in exception hence _ instead of _LE.
# This may be used in exception hence the use of _.
msg = (_(
"Volume: %(volumeName)s was not added "
"to storage group %(sgGroupName)s.") %
@ -584,8 +583,7 @@ class VMAXMasking(object):
'sgGroupName': sgGroupName})
LOG.error(msg)
else:
LOG.info(_LI("Successfully added %(volumeName)s to "
"%(sgGroupName)s."),
LOG.info("Successfully added %(volumeName)s to %(sgGroupName)s.",
{'volumeName': volumeName,
'sgGroupName': sgGroupName})
return msg
@ -742,9 +740,9 @@ class VMAXMasking(object):
conn, foundMaskingViewInstanceName)
if instance is None:
foundMaskingViewInstanceName = None
LOG.error(_LE(
LOG.error(
"Looks like masking view: %(maskingViewName)s "
"has recently been deleted."),
"has recently been deleted.",
{'maskingViewName': maskingViewName})
else:
LOG.debug(
@ -800,21 +798,21 @@ class VMAXMasking(object):
storageGroupName, fastPolicyName,
maskingViewDict['extraSpecs']))
if assocTierPolicyInstanceName is None:
LOG.error(_LE(
LOG.error(
"Cannot add and verify tier policy association for "
"storage group : %(storageGroupName)s to "
"FAST policy : %(fastPolicyName)s."),
"FAST policy : %(fastPolicyName)s.",
{'storageGroupName': storageGroupName,
'fastPolicyName': fastPolicyName})
return failedRet
if foundStorageGroupInstanceName is None:
LOG.error(_LE(
"Cannot get storage Group from job : %(storageGroupName)s."),
LOG.error(
"Cannot get storage Group from job : %(storageGroupName)s.",
{'storageGroupName': storageGroupName})
return failedRet
else:
LOG.info(_LI(
"Created new storage group: %(storageGroupName)s."),
LOG.info(
"Created new storage group: %(storageGroupName)s.",
{'storageGroupName': storageGroupName})
return foundStorageGroupInstanceName
@ -843,9 +841,9 @@ class VMAXMasking(object):
break
if foundPortGroupInstanceName is None:
LOG.error(_LE(
LOG.error(
"Could not find port group : %(portGroupName)s. Check that "
"the EMC configuration file has the correct port group name."),
"the EMC configuration file has the correct port group name.",
{'portGroupName': portGroupName})
return foundPortGroupInstanceName
@ -886,9 +884,9 @@ class VMAXMasking(object):
self._get_storage_hardware_id_instance_names(
conn, initiatorNames, storageSystemName))
if not storageHardwareIDInstanceNames:
LOG.info(_LI(
LOG.info(
"Initiator Name(s) %(initiatorNames)s are not on array "
"%(storageSystemName)s."),
"%(storageSystemName)s.",
{'initiatorNames': initiatorNames,
'storageSystemName': storageSystemName})
storageHardwareIDInstanceNames = (
@ -905,14 +903,12 @@ class VMAXMasking(object):
conn, controllerConfigService, igGroupName,
storageHardwareIDInstanceNames, extraSpecs)
LOG.info(_LI(
"Created new initiator group name: %(igGroupName)s."),
LOG.info("Created new initiator group name: %(igGroupName)s.",
{'igGroupName': igGroupName})
else:
initiatorGroupInstance = conn.GetInstance(
foundInitiatorGroupInstanceName, LocalOnly=False)
LOG.info(_LI(
"Using existing initiator group name: %(igGroupName)s."),
LOG.info("Using existing initiator group name: %(igGroupName)s.",
{'igGroupName': initiatorGroupInstance['ElementName']})
return foundInitiatorGroupInstanceName
@ -1100,8 +1096,7 @@ class VMAXMasking(object):
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.info(_LI(
"Created new masking view : %(maskingViewName)s."),
LOG.info("Created new masking view : %(maskingViewName)s.",
{'maskingViewName': maskingViewName})
return rc, job
@ -1148,7 +1143,7 @@ class VMAXMasking(object):
{'view': maskingViewName,
'masking': foundStorageGroupInstanceName})
else:
LOG.warning(_LW("Unable to find Masking view: %(view)s."),
LOG.warning("Unable to find Masking view: %(view)s.",
{'view': maskingViewName})
return foundStorageGroupInstanceName
@ -1221,14 +1216,14 @@ class VMAXMasking(object):
foundPortGroupInstanceName = self.find_port_group(
conn, controllerConfigService, pgGroupName)
if foundPortGroupInstanceName is None:
LOG.error(_LE(
LOG.error(
"Cannot find a portGroup with name %(pgGroupName)s. "
"The port group for a masking view must be pre-defined."),
"The port group for a masking view must be pre-defined.",
{'pgGroupName': pgGroupName})
return foundPortGroupInstanceName
LOG.info(_LI(
"Port group instance name is %(foundPortGroupInstanceName)s."),
LOG.info(
"Port group instance name is %(foundPortGroupInstanceName)s.",
{'foundPortGroupInstanceName': foundPortGroupInstanceName})
return foundPortGroupInstanceName
@ -1250,9 +1245,8 @@ class VMAXMasking(object):
conn, controllerConfigService, igGroupName, connector,
storageSystemName, extraSpecs))
if foundInitiatorGroupInstanceName is None:
LOG.error(_LE(
"Cannot create or find an initiator group with "
"name %(igGroupName)s."),
LOG.error("Cannot create or find an initiator group with "
"name %(igGroupName)s.",
{'igGroupName': igGroupName})
return foundInitiatorGroupInstanceName
@ -1278,9 +1272,9 @@ class VMAXMasking(object):
initiatorGroupInstanceName, extraSpecs))
foundMaskingViewInstanceName = self.find_new_masking_view(conn, job)
if foundMaskingViewInstanceName is None:
LOG.error(_LE(
LOG.error(
"Cannot find the new masking view just created with name "
"%(maskingViewName)s."),
"%(maskingViewName)s.",
{'maskingViewName': maskingViewName})
return foundMaskingViewInstanceName
@ -1324,11 +1318,11 @@ class VMAXMasking(object):
LOG.error(errorMessage)
message = (_("V3 rollback"))
else:
LOG.warning(_LW(
LOG.warning(
"No storage group found. "
"Performing rollback on Volume: %(volumeName)s "
"To return it to the default storage group for FAST "
"policy %(fastPolicyName)s."),
"policy %(fastPolicyName)s.",
{'volumeName': rollbackDict['volumeName'],
'fastPolicyName': rollbackDict['fastPolicyName']})
assocDefaultStorageGroupName = (
@ -1341,20 +1335,19 @@ class VMAXMasking(object):
rollbackDict['fastPolicyName'],
rollbackDict['extraSpecs']))
if assocDefaultStorageGroupName is None:
LOG.error(_LE(
LOG.error(
"Failed to Roll back to re-add volume "
"%(volumeName)s "
"to default storage group for fast policy "
"%(fastPolicyName)s: Please contact your sys "
"admin to get the volume re-added manually."),
"admin to get the volume re-added manually.",
{'volumeName': rollbackDict['volumeName'],
'fastPolicyName': rollbackDict['fastPolicyName']})
message = (_("V2 rollback, volume is not in any storage "
"group."))
else:
LOG.info(_LI(
"The storage group found is "
"%(foundStorageGroupInstanceName)s."),
LOG.info("The storage group found is "
"%(foundStorageGroupInstanceName)s.",
{'foundStorageGroupInstanceName':
foundStorageGroupInstanceName})
@ -1422,7 +1415,7 @@ class VMAXMasking(object):
{'view': maskingViewName,
'masking': foundInitiatorMaskingGroupInstanceName})
else:
LOG.warning(_LW("Unable to find Masking view: %(view)s."),
LOG.warning("Unable to find Masking view: %(view)s.",
{'view': maskingViewName})
return foundInitiatorMaskingGroupInstanceName
@ -1471,18 +1464,18 @@ class VMAXMasking(object):
self._get_storage_hardware_id_instance_names(
conn, initiatorNames, storageSystemName))
if not storageHardwareIDInstanceNames:
LOG.info(_LI(
LOG.info(
"Initiator Name(s) %(initiatorNames)s are not on "
"array %(storageSystemName)s. "),
"array %(storageSystemName)s.",
{'initiatorNames': initiatorNames,
'storageSystemName': storageSystemName})
storageHardwareIDInstanceNames = (
self._create_hardware_ids(conn, initiatorNames,
storageSystemName))
if not storageHardwareIDInstanceNames:
LOG.error(_LE(
LOG.error(
"Failed to create hardware id(s) on "
"%(storageSystemName)s."),
"%(storageSystemName)s.",
{'storageSystemName': storageSystemName})
return False
@ -1532,11 +1525,11 @@ class VMAXMasking(object):
"%(maskingViewName)s.",
{'maskingViewName': maskingViewName})
else:
LOG.error(_LE(
LOG.error(
"One of the components of the original masking view "
"%(maskingViewName)s cannot be retrieved so "
"please contact your system administrator to check "
"that the correct initiator(s) are part of masking."),
"that the correct initiator(s) are part of masking.",
{'maskingViewName': maskingViewName})
return False
return True
@ -1708,9 +1701,9 @@ class VMAXMasking(object):
conn, controllerConfigService, storageGroupInstanceName,
volumeInstance.path, volumeName, extraSpecs)
LOG.info(_LI(
LOG.info(
"Added volume: %(volumeName)s to existing storage group "
"%(sgGroupName)s."),
"%(sgGroupName)s.",
{'volumeName': volumeName,
'sgGroupName': sgGroupName})
@ -1737,9 +1730,9 @@ class VMAXMasking(object):
volumeName, fastPolicyName))
if defaultStorageGroupInstanceName is None:
LOG.warning(_LW(
LOG.warning(
"Volume %(volumeName)s was not first part of the default "
"storage group for the FAST Policy."),
"storage group for the FAST Policy.",
{'volumeName': volumeName})
return failedRet
@ -1775,9 +1768,9 @@ class VMAXMasking(object):
defaultSgName))
if emptyStorageGroupInstanceName is not None:
LOG.error(_LE(
LOG.error(
"Failed to remove %(volumeName)s from the default storage "
"group for the FAST Policy."),
"group for the FAST Policy.",
{'volumeName': volumeName})
return failedRet
@ -1833,7 +1826,7 @@ class VMAXMasking(object):
if len(maskingGroupInstanceNames) > 0:
return maskingGroupInstanceNames
else:
LOG.info(_LI("Volume %(volumeName)s not in any storage group."),
LOG.info("Volume %(volumeName)s not in any storage group.",
{'volumeName': volumeInstanceName})
return None
@ -1870,7 +1863,7 @@ class VMAXMasking(object):
storageGroupInstanceName,
volumeInstance, extraSpecs)
else:
LOG.warning(_LW("Cannot get storage from connector."))
LOG.warning("Cannot get storage from connector.")
if reset:
self._return_back_to_default_sg(
@ -1895,8 +1888,8 @@ class VMAXMasking(object):
if storageGroupInstanceNames:
sgNum = len(storageGroupInstanceNames)
if len(storageGroupInstanceNames) > 1:
LOG.warning(_LW("Volume %(volumeName)s is belong to "
"%(sgNum)s storage groups."),
LOG.warning("Volume %(volumeName)s is belong to %(sgNum)s "
"storage groups.",
{'volumeName': volumeInstance['ElementName'],
'sgNum': sgNum})
for storageGroupInstanceName in storageGroupInstanceNames:
@ -2237,8 +2230,8 @@ class VMAXMasking(object):
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
else:
LOG.info(_LI(
"Masking view %(maskingViewName)s successfully deleted."),
LOG.info(
"Masking view %(maskingViewName)s successfully deleted.",
{'maskingViewName': maskingViewName})
def _get_and_remove_rule_association(
@ -2355,8 +2348,8 @@ class VMAXMasking(object):
ResultClass='Symm_FCSCSIProtocolEndpoint')
numberOfPorts = len(targetPortInstanceNames)
if numberOfPorts <= 0:
LOG.warning(_LW("No target ports found in "
"masking view %(maskingView)s."),
LOG.warning("No target ports found in "
"masking view %(maskingView)s.",
{'numPorts': len(targetPortInstanceNames),
'maskingView': mvInstanceName})
for targetPortInstanceName in targetPortInstanceNames:
@ -2425,7 +2418,7 @@ class VMAXMasking(object):
'mv': maskingViewInstanceName})
return portGroupInstanceNames[0]
else:
LOG.warning(_LW("No port group found in masking view %(mv)s."),
LOG.warning("No port group found in masking view %(mv)s.",
{'mv': maskingViewInstanceName})
def get_initiator_group_from_masking_view(
@ -2444,8 +2437,8 @@ class VMAXMasking(object):
'mv': maskingViewInstanceName})
return initiatorGroupInstanceNames[0]
else:
LOG.warning(_LW("No Initiator group found in masking view "
"%(mv)s."), {'mv': maskingViewInstanceName})
LOG.warning("No Initiator group found in masking view "
"%(mv)s.", {'mv': maskingViewInstanceName})
def _get_sg_or_mv_associated_with_initiator(
self, conn, controllerConfigService, volumeInstanceName,
@ -2656,8 +2649,8 @@ class VMAXMasking(object):
LOG.debug("Deletion of initiator path %(hardwareIdPath)s "
"is successful.", {'hardwareIdPath': hardwareIdPath})
else:
LOG.warning(_LW("Deletion of initiator path %(hardwareIdPath)s "
"is failed."), {'hardwareIdPath': hardwareIdPath})
LOG.warning("Deletion of initiator path %(hardwareIdPath)s "
"is failed.", {'hardwareIdPath': hardwareIdPath})
def _delete_initiators_from_initiator_group(self, conn,
controllerConfigService,
@ -2740,16 +2733,16 @@ class VMAXMasking(object):
initiatorGroupInstanceName,
initiatorGroupName, extraSpecs)
else:
LOG.warning(_LW("Initiator group %(initiatorGroupName)s is "
LOG.warning("Initiator group %(initiatorGroupName)s is "
"associated with masking views and can't be "
"deleted. Number of associated masking view "
"is: %(nmv)d."),
"is: %(nmv)d.",
{'initiatorGroupName': initiatorGroupName,
'nmv': len(maskingViewInstanceNames)})
else:
LOG.warning(_LW("Initiator group %(initiatorGroupName)s was "
LOG.warning("Initiator group %(initiatorGroupName)s was "
"not created by the VMAX driver so will "
"not be deleted by the VMAX driver."),
"not be deleted by the VMAX driver.",
{'initiatorGroupName': initiatorGroupName})
def _create_hardware_ids(
@ -2793,9 +2786,9 @@ class VMAXMasking(object):
self._get_port_group_from_masking_view(
conn, maskingViewName, storageSystemName))
if portGroupInstanceName is None:
LOG.error(_LE(
LOG.error(
"Cannot get port group from masking view: "
"%(maskingViewName)s. "),
"%(maskingViewName)s.",
{'maskingViewName': maskingViewName})
else:
try:
@ -2804,8 +2797,8 @@ class VMAXMasking(object):
portGroupName = (
portGroupInstance['ElementName'])
except Exception:
LOG.error(_LE(
"Cannot get port group name."))
LOG.error(
"Cannot get port group name.")
return portGroupName, errorMessage
@coordination.synchronized('emc-sg-'

View File

@ -20,7 +20,7 @@ import six
from cinder import coordination
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.i18n import _
from cinder.volume.drivers.dell_emc.vmax import utils
LOG = logging.getLogger(__name__)
@ -515,9 +515,9 @@ class VMAXProvisionV3(object):
rc, errordesc = self.utils.wait_for_job_complete(
conn, job, extraSpecs)
if rc != 0:
LOG.error(_LE(
LOG.error(
"Error Create Group: %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s."),
"Return code: %(rc)lu. Error: %(error)s.",
{'groupName': groupName,
'rc': rc,
'error': errordesc})
@ -863,11 +863,11 @@ class VMAXProvisionV3(object):
remainingCapacityGb = remainingSLOCapacityGb
wlpEnabled = True
else:
LOG.warning(_LW(
LOG.warning(
"Remaining capacity %(remainingCapacityGb)s "
"GBs is determined from SRP pool capacity "
"and not the SLO capacity. Performance may "
"not be what you expect."),
"not be what you expect.",
{'remainingCapacityGb': remainingCapacityGb})
return (totalCapacityGb, remainingCapacityGb, subscribedCapacityGb,

View File

@ -30,7 +30,7 @@ import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.i18n import _
from cinder.objects import fields
from cinder.volume import volume_types
@ -85,9 +85,9 @@ class VMAXUtils(object):
def __init__(self, prtcl):
if not pywbemAvailable:
LOG.info(_LI(
LOG.info(
"Module PyWBEM not installed. "
"Install PyWBEM using the python-pywbem package."))
"Install PyWBEM using the python-pywbem package.")
self.protocol = prtcl
def find_storage_configuration_service(self, conn, storageSystemName):
@ -319,9 +319,8 @@ class VMAXUtils(object):
if retries > maxJobRetries:
kwargs['rc'], kwargs['errordesc'] = (
self._verify_job_state(conn, job))
LOG.error(_LE("_wait_for_job_complete "
"failed after %(retries)d "
"tries."),
LOG.error("_wait_for_job_complete failed after %(retries)d "
"tries.",
{'retries': retries})
raise loopingcall.LoopingCallDone()
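
For context on the `LoopingCallDone` raises above: `_wait_for_job_complete` drives a fixed-interval polling loop from oslo.service and stops it by raising that exception. A stripped-down sketch of the pattern, with `check_done` and the retry bookkeeping as placeholders:

    from oslo_service import loopingcall

    def wait_until_done(check_done, interval=10, max_retries=60):
        kwargs = {'retries': 0}

        def _poll():
            kwargs['retries'] += 1
            if check_done():
                # Stop the loop; .wait() below returns this retvalue.
                raise loopingcall.LoopingCallDone(retvalue=0)
            if kwargs['retries'] > max_retries:
                raise loopingcall.LoopingCallDone(retvalue=-1)

        timer = loopingcall.FixedIntervalLoopingCall(_poll)
        return timer.start(interval=interval).wait()
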
@ -457,8 +456,7 @@ class VMAXUtils(object):
raise exception.VolumeBackendAPIException(exceptionMessage)
if kwargs['retries'] > maxJobRetries:
LOG.error(_LE("_wait_for_sync failed after %(retries)d "
"tries."),
LOG.error("_wait_for_sync failed after %(retries)d tries.",
{'retries': retries})
raise loopingcall.LoopingCallDone(retvalue=maxJobRetries)
if kwargs['wait_for_sync_called']:
@ -526,7 +524,7 @@ class VMAXUtils(object):
if len(groups) > 0:
foundStorageSystemInstanceName = groups[0]
else:
LOG.error(_LE("Cannot get storage system."))
LOG.error("Cannot get storage system.")
raise
return foundStorageSystemInstanceName
@ -549,9 +547,9 @@ class VMAXUtils(object):
ResultClass='CIM_DeviceMaskingGroup')
if len(storageGroupInstanceNames) > 1:
LOG.info(_LI(
LOG.info(
"The volume belongs to more than one storage group. "
"Returning storage group %(sgName)s."),
"Returning storage group %(sgName)s.",
{'sgName': sgName})
for storageGroupInstanceName in storageGroupInstanceNames:
instance = self.get_existing_instance(
@ -1001,9 +999,9 @@ class VMAXUtils(object):
poolInstanceName = self.get_pool_by_name(
conn, poolName, storageSystemName)
if poolInstanceName is None:
LOG.error(_LE(
LOG.error(
"Unable to retrieve pool instance of %(poolName)s on "
"array %(array)s."),
"array %(array)s.",
{'poolName': poolName, 'array': storageSystemName})
return (0, 0)
storagePoolInstance = conn.GetInstance(
@ -1241,7 +1239,7 @@ class VMAXUtils(object):
infoDetail = host.split('@')
storageSystem = 'SYMMETRIX+' + infoDetail[0]
except Exception:
LOG.error(_LE("Error parsing array from host capabilities."))
LOG.error("Error parsing array from host capabilities.")
return storageSystem
@ -1292,15 +1290,15 @@ class VMAXUtils(object):
if foundSyncInstanceName:
# Wait for SE_StorageSynchronized_SV_SV to be fully synced.
if waitforsync:
LOG.warning(_LW(
LOG.warning(
"Expect a performance hit as volume is not fully "
"synced on %(deviceId)s."),
"synced on %(deviceId)s.",
{'deviceId': volumeInstance['DeviceID']})
startTime = time.time()
self.wait_for_sync(conn, foundSyncInstanceName, extraSpecs)
LOG.warning(_LW(
LOG.warning(
"Synchronization process took "
"took: %(delta)s H:MM:SS."),
"took: %(delta)s H:MM:SS.",
{'delta': self.get_time_delta(startTime,
time.time())})
@ -1336,9 +1334,9 @@ class VMAXUtils(object):
break
if foundSyncInstanceName is None:
LOG.warning(_LW(
LOG.warning(
"Group sync name not found for target group %(target)s "
"on %(storageSystem)s."),
"on %(storageSystem)s.",
{'target': targetRgInstanceName['InstanceID'],
'storageSystem': storageSystem})
else:
@ -1570,14 +1568,14 @@ class VMAXUtils(object):
break
if not isValidSLO:
LOG.error(_LE(
LOG.error(
"SLO: %(slo)s is not valid. Valid values are Bronze, Silver, "
"Gold, Platinum, Diamond, Optimized, NONE."), {'slo': slo})
"Gold, Platinum, Diamond, Optimized, NONE.", {'slo': slo})
if not isValidWorkload:
LOG.error(_LE(
LOG.error(
"Workload: %(workload)s is not valid. Valid values are "
"DSS_REP, DSS, OLTP, OLTP_REP, NONE."), {'workload': workload})
"DSS_REP, DSS, OLTP, OLTP_REP, NONE.", {'workload': workload})
return isValidSLO, isValidWorkload
@ -1641,8 +1639,8 @@ class VMAXUtils(object):
if len(metaHeads) > 0:
metaHeadInstanceName = metaHeads[0]
if metaHeadInstanceName is None:
LOG.info(_LI(
"Volume %(volume)s does not have meta device members."),
LOG.info(
"Volume %(volume)s does not have meta device members.",
{'volume': volumeInstanceName})
return metaHeadInstanceName
@ -1714,7 +1712,7 @@ class VMAXUtils(object):
instance = None
else:
# Something else that we cannot recover from has happened.
LOG.error(_LE("Exception: %s"), desc)
LOG.error("Exception: %s", desc)
exceptionMessage = (_(
"Cannot verify the existence of object:"
"%(instanceName)s.")
@ -1806,8 +1804,8 @@ class VMAXUtils(object):
{'initiator': initiator, 'rc': rc, 'ret': ret})
hardwareIdList = ret['HardwareID']
else:
LOG.warning(_LW("CreateStorageHardwareID failed. initiator: "
"%(initiator)s, rc=%(rc)d, ret=%(ret)s."),
LOG.warning("CreateStorageHardwareID failed. initiator: "
"%(initiator)s, rc=%(rc)d, ret=%(ret)s.",
{'initiator': initiator, 'rc': rc, 'ret': ret})
return hardwareIdList
@ -1826,7 +1824,7 @@ class VMAXUtils(object):
if 'iqn' in initiator.lower():
hardwareTypeId = 5
if hardwareTypeId == 0:
LOG.warning(_LW("Cannot determine the hardware type."))
LOG.warning("Cannot determine the hardware type.")
return hardwareTypeId
def _process_tag(self, element, tagName):
@ -1976,15 +1974,15 @@ class VMAXUtils(object):
portGroup = self._get_random_portgroup(dom)
serialNumber = self._process_tag(dom, 'Array')
if serialNumber is None:
LOG.error(_LE(
LOG.error(
"Array Serial Number must be in the file "
"%(fileName)s."),
"%(fileName)s.",
{'fileName': fileName})
poolName = self._process_tag(dom, 'Pool')
if poolName is None:
LOG.error(_LE(
LOG.error(
"PoolName must be in the file "
"%(fileName)s."),
"%(fileName)s.",
{'fileName': fileName})
kwargs = self._fill_record(
connargs, serialNumber, poolName, portGroup, dom)
@ -2024,8 +2022,7 @@ class VMAXUtils(object):
% {'poolName': arrayInfoRec['PoolName'],
'array': arrayInfoRec['SerialNumber']})
if compString == pool:
LOG.info(_LI(
"The pool_name from extraSpecs is %(pool)s."),
LOG.info("The pool_name from extraSpecs is %(pool)s.",
{'pool': pool})
foundArrayInfoRec = arrayInfoRec
break
@ -2284,9 +2281,9 @@ class VMAXUtils(object):
break
if foundSyncInstanceName is None:
LOG.info(_LI(
LOG.info(
"No replication synchronization session found associated "
"with source volume %(source)s on %(storageSystem)s."),
"with source volume %(source)s on %(storageSystem)s.",
{'source': sourceDeviceId, 'storageSystem': storageSystem})
return foundSyncInstanceName
@ -2301,16 +2298,13 @@ class VMAXUtils(object):
:returns: volume_model_updates - updated volumes
"""
volume_model_updates = []
LOG.info(_LI(
"Updating status for CG: %(id)s."),
{'id': cgId})
LOG.info("Updaing status for CG: %(id)s.", {'id': cgId})
if volumes:
for volume in volumes:
volume_model_updates.append({'id': volume['id'],
'status': status})
else:
LOG.info(_LI("No volume found for CG: %(cg)s."),
{'cg': cgId})
LOG.info("No volume found for CG: %(cg)s.", {'cg': cgId})
return volume_model_updates
def get_smi_version(self, conn):
@ -2612,7 +2606,7 @@ class VMAXUtils(object):
try:
max_subscription_percent_int = int(max_subscription_percent)
except ValueError:
LOG.error(_LE("Cannot convert max subscription percent to int."))
LOG.error("Cannot convert max subscription percent to int.")
return None
return float(max_subscription_percent_int) / 100
@ -2969,14 +2963,14 @@ class VMAXUtils(object):
if foundSyncInstanceName:
# Wait for SE_StorageSynchronized_SV_SV to be fully synced.
if waitforsync:
LOG.warning(_LW(
LOG.warning(
"Expect a performance hit as volume is not not fully "
"synced on %(deviceId)s."),
"synced on %(deviceId)s.",
{'deviceId': sourceInstance['DeviceID']})
startTime = time.time()
self.wait_for_sync(conn, foundSyncInstanceName, extraSpecs)
LOG.warning(_LW(
"Synchronization process took: %(delta)s H:MM:SS."),
LOG.warning(
"Synchronization process took: %(delta)s H:MM:SS.",
{'delta': self.get_time_delta(startTime,
time.time())})
@ -3011,8 +3005,8 @@ class VMAXUtils(object):
extraSpecs[self.POOL] = poolDetails[2]
extraSpecs[self.ARRAY] = poolDetails[3]
except KeyError:
LOG.error(_LE("Error parsing SLO, workload from "
"the provided extra_specs."))
LOG.error("Error parsing SLO, workload from "
"the provided extra_specs.")
return extraSpecs
def get_default_intervals_retries(self):

View File

@ -27,7 +27,7 @@ if storops:
from storops import exception as storops_ex
from cinder import exception
from cinder.i18n import _, _LI, _LE, _LW
from cinder.i18n import _
from cinder.objects import fields
from cinder.volume.drivers.dell_emc.vnx import client
from cinder.volume.drivers.dell_emc.vnx import common
@ -96,9 +96,9 @@ class CommonAdapter(object):
# empty string.
naviseccli_path = self.config.naviseccli_path
if naviseccli_path is None or len(naviseccli_path.strip()) == 0:
LOG.warning(_LW('[%(group)s] naviseccli_path is not set or set to '
LOG.warning('[%(group)s] naviseccli_path is not set or set to '
'an empty string. None will be passed into '
'storops.'), {'group': self.config.config_group})
'storops.', {'group': self.config.config_group})
self.config.naviseccli_path = None
# Check option `storage_vnx_pool_names`.
@ -133,32 +133,32 @@ class CommonAdapter(object):
self.config.io_port_list = io_port_list
if self.config.ignore_pool_full_threshold:
LOG.warning(_LW('[%(group)s] ignore_pool_full_threshold: True. '
LOG.warning('[%(group)s] ignore_pool_full_threshold: True. '
'LUN creation will still be forced even if the '
'pool full threshold is exceeded.'),
'pool full threshold is exceeded.',
{'group': self.config.config_group})
if self.config.destroy_empty_storage_group:
LOG.warning(_LW('[%(group)s] destroy_empty_storage_group: True. '
LOG.warning('[%(group)s] destroy_empty_storage_group: True. '
'Empty storage group will be deleted after volume '
'is detached.'),
'is detached.',
{'group': self.config.config_group})
if not self.config.initiator_auto_registration:
LOG.info(_LI('[%(group)s] initiator_auto_registration: False. '
LOG.info('[%(group)s] initiator_auto_registration: False. '
'Initiator auto registration is not enabled. '
'Please register initiator manually.'),
'Please register initiator manually.',
{'group': self.config.config_group})
if self.config.force_delete_lun_in_storagegroup:
LOG.warning(_LW(
'[%(group)s] force_delete_lun_in_storagegroup=True'),
LOG.warning(
'[%(group)s] force_delete_lun_in_storagegroup=True',
{'group': self.config.config_group})
if self.config.ignore_pool_full_threshold:
LOG.warning(_LW('[%(group)s] ignore_pool_full_threshold: True. '
LOG.warning('[%(group)s] ignore_pool_full_threshold: True. '
'LUN creation will still be forced even if the '
'pool full threshold is exceeded.'),
'pool full threshold is exceeded.',
{'group': self.config.config_group})
def _build_port_str(self, port):
@ -217,10 +217,10 @@ class CommonAdapter(object):
tier = specs.tier
volume_metadata['snapcopy'] = 'False'
LOG.info(_LI('Create Volume: %(volume)s Size: %(size)s '
LOG.info('Create Volume: %(volume)s Size: %(size)s '
'pool: %(pool)s '
'provision: %(provision)s '
'tier: %(tier)s '),
'tier: %(tier)s ',
{'volume': volume_name,
'size': volume_size,
'pool': pool,
@ -463,7 +463,7 @@ class CommonAdapter(object):
model_update = {}
volumes_model_update = []
model_update['status'] = group.status
LOG.info(_LI('Start to delete consistency group: %(cg_name)s'),
LOG.info('Start to delete consistency group: %(cg_name)s',
{'cg_name': cg_name})
self.client.delete_consistency_group(cg_name)
@ -491,8 +491,8 @@ class CommonAdapter(object):
def do_create_cgsnap(self, group_name, snap_name, snapshots):
model_update = {}
snapshots_model_update = []
LOG.info(_LI('Creating consistency snapshot for group'
': %(group_name)s'),
LOG.info('Creating consistency snapshot for group'
': %(group_name)s',
{'group_name': group_name})
self.client.create_cg_snapshot(snap_name,
@ -516,8 +516,8 @@ class CommonAdapter(object):
model_update = {}
snapshots_model_update = []
model_update['status'] = snap_status
LOG.info(_LI('Deleting consistency snapshot %(snap_name)s for '
'group: %(group_name)s'),
LOG.info('Deleting consistency snapshot %(snap_name)s for '
'group: %(group_name)s',
{'snap_name': snap_name,
'group_name': group_name})
@ -640,10 +640,10 @@ class CommonAdapter(object):
'Non-existent pools: %s') % ','.join(nonexistent_pools)
raise exception.VolumeBackendAPIException(data=msg)
if nonexistent_pools:
LOG.warning(_LW('The following specified storage pools '
LOG.warning('The following specified storage pools '
'do not exist: %(nonexistent)s. '
'This host will only manage the storage '
'pools: %(exist)s'),
'pools: %(exist)s',
{'nonexistent': ','.join(nonexistent_pools),
'exist': ','.join(pool_names)})
else:
@ -651,8 +651,8 @@ class CommonAdapter(object):
','.join(pool_names))
else:
pool_names = [p.name for p in array_pools]
LOG.info(_LI('No storage pool is configured. This host will '
'manage all the pools on the VNX system.'))
LOG.info('No storage pool is configured. This host will '
'manage all the pools on the VNX system.')
return [pool for pool in array_pools if pool.name in pool_names]
@ -684,7 +684,7 @@ class CommonAdapter(object):
# or Deleting.
if pool.state in common.PoolState.VALID_CREATE_LUN_STATE:
pool_stats['free_capacity_gb'] = 0
LOG.warning(_LW('Storage Pool [%(pool)s] is [%(state)s].'),
LOG.warning('Storage Pool [%(pool)s] is [%(state)s].',
{'pool': pool.name,
'state': pool.state})
else:
@ -692,9 +692,9 @@ class CommonAdapter(object):
if (pool_feature.max_pool_luns <=
pool_feature.total_pool_luns):
LOG.warning(_LW('Maximum number of Pool LUNs %(max_luns)s '
LOG.warning('Maximum number of Pool LUNs %(max_luns)s '
'has been created for %(pool_name)s. '
'No more LUN creation can be done.'),
'No more LUN creation can be done.',
{'max_luns': pool_feature.max_pool_luns,
'pool_name': pool.name})
pool_stats['free_capacity_gb'] = 0
@ -1018,15 +1018,14 @@ class CommonAdapter(object):
lun = self.client.get_lun(lun_id=volume.vnx_lun_id)
hostname = host.name
if not sg.existed:
LOG.warning(_LW("Storage Group %s is not found. "
"Nothing can be done in terminate_connection()."),
LOG.warning("Storage Group %s is not found. "
"Nothing can be done in terminate_connection().",
hostname)
else:
try:
sg.detach_alu(lun)
except storops_ex.VNXDetachAluNotFoundError:
LOG.warning(_LW("Volume %(vol)s is not in Storage Group"
" %(sg)s."),
LOG.warning("Volume %(vol)s is not in Storage Group %(sg)s.",
{'vol': volume.name, 'sg': hostname})
def build_terminate_connection_return_data(self, host, sg):
@ -1042,19 +1041,19 @@ class CommonAdapter(object):
def _destroy_empty_sg(self, host, sg):
try:
LOG.info(_LI("Storage Group %s is empty."), sg.name)
LOG.info("Storage Group %s is empty.", sg.name)
sg.disconnect_host(sg.name)
sg.delete()
if self.itor_auto_dereg:
self._deregister_initiator(host)
except storops_ex.StoropsException:
LOG.warning(_LW("Failed to destroy Storage Group %s."),
LOG.warning("Failed to destroy Storage Group %s.",
sg.name)
try:
sg.connect_host(sg.name)
except storops_ex.StoropsException:
LOG.warning(_LW("Failed to connect host %(host)s "
"back to storage group %(sg)s."),
LOG.warning("Failed to connect host %(host)s "
"back to storage group %(sg)s.",
{'host': sg.name, 'sg': sg.name})
def _deregister_initiator(self, host):
@ -1062,7 +1061,7 @@ class CommonAdapter(object):
try:
self.client.deregister_initiators(initiators)
except storops_ex.StoropsException:
LOG.warning(_LW("Failed to deregister the initiators %s"),
LOG.warning("Failed to deregister the initiators %s",
initiators)
def _is_allowed_port(self, port):
@ -1138,7 +1137,7 @@ class CommonAdapter(object):
volume.name, lun_size,
provision, tier)
LOG.info(_LI('Successfully setup replication for %s.'), volume.id)
LOG.info('Successfully setup replication for %s.', volume.id)
rep_update.update({'replication_status':
fields.ReplicationStatus.ENABLED})
return rep_update
@ -1152,7 +1151,7 @@ class CommonAdapter(object):
mirror_view = self.build_mirror_view(self.config, True)
mirror_view.destroy_mirror(mirror_name, volume.name)
LOG.info(
_LI('Successfully destroyed replication for volume: %s'),
'Successfully destroyed replication for volume: %s',
volume.id)
def build_mirror_view(self, configuration, failover=True):
@ -1164,7 +1163,7 @@ class CommonAdapter(object):
"""
rep_devices = configuration.replication_device
if not rep_devices:
LOG.info(_LI('Replication is not configured on backend: %s.'),
LOG.info('Replication is not configured on backend: %s.',
configuration.config_group)
return None
elif len(rep_devices) == 1:
@ -1225,12 +1224,12 @@ class CommonAdapter(object):
try:
mirror_view.promote_image(mirror_name)
except storops_ex.VNXMirrorException as ex:
msg = _LE(
LOG.error(
'Failed to failover volume %(volume_id)s '
'to %(target)s: %(error)s.')
LOG.error(msg, {'volume_id': volume.id,
'to %(target)s: %(error)s.',
{'volume_id': volume.id,
'target': secondary_backend_id,
'error': ex},)
'error': ex})
new_status = fields.ReplicationStatus.ERROR
else:
# Transfer ownership to secondary_backend_id and
@ -1354,8 +1353,7 @@ class ISCSIAdapter(CommonAdapter):
raise exception.InvalidConfigurationValue(
option=option,
value=iscsi_initiators)
LOG.info(_LI("[%(group)s] iscsi_initiators is configured: "
"%(value)s"),
LOG.info("[%(group)s] iscsi_initiators is configured: %(value)s",
{'group': self.config.config_group,
'value': self.config.iscsi_initiators})

View File

@ -22,7 +22,7 @@ if storops:
from storops.lib import tasks as storops_tasks
from cinder import exception
from cinder.i18n import _, _LW, _LE, _LI
from cinder.i18n import _
from cinder import utils as cinder_utils
from cinder.volume.drivers.dell_emc.vnx import common
from cinder.volume.drivers.dell_emc.vnx import const
@ -95,7 +95,7 @@ class Client(object):
if queue_path:
self.queue = storops_tasks.PQueue(path=queue_path)
self.queue.start()
LOG.info(_LI('PQueue[%s] starts now.'), queue_path)
LOG.info('PQueue[%s] starts now.', queue_path)
def create_lun(self, pool, name, size, provision,
tier, cg_id=None, ignore_thresholds=False):
@ -143,8 +143,8 @@ class Client(object):
if smp_attached_snap:
smp_attached_snap.delete()
except storops_ex.VNXLunNotFoundError as ex:
LOG.info(_LI("LUN %(name)s is already deleted. This message can "
"be safely ignored. Message: %(msg)s"),
LOG.info("LUN %(name)s is already deleted. This message can "
"be safely ignored. Message: %(msg)s",
{'name': name, 'msg': ex.message})
def cleanup_async_lun(self, name, force=False):
@ -160,8 +160,8 @@ class Client(object):
def delay_delete_lun(self, name):
"""Delay the deletion by putting it in a storops queue."""
self.queue.put(self.vnx.delete_lun, name=name)
LOG.info(_LI("VNX object has been added to queue for later"
" deletion: %s"), name)
LOG.info("VNX object has been added to queue for later"
" deletion: %s", name)
@cinder_utils.retry(const.VNXLunPreparingError, retries=1,
backoff_rate=1)
@ -173,8 +173,8 @@ class Client(object):
lun.poll = poll
lun.expand(new_size, ignore_thresholds=True)
except storops_ex.VNXLunExpandSizeError as ex:
LOG.warning(_LW("LUN %(name)s is already expanded. "
"Message: %(msg)s."),
LOG.warning("LUN %(name)s is already expanded. "
"Message: %(msg)s.",
{'name': name, 'msg': ex.message})
except storops_ex.VNXLunPreparingError as ex:
@ -182,8 +182,7 @@ class Client(object):
# is 'Preparing'. Wait for a while so that the LUN may get out of
# the transitioning state.
with excutils.save_and_reraise_exception():
LOG.warning(_LW("LUN %(name)s is not ready for extension: "
"%(msg)s"),
LOG.warning("LUN %(name)s is not ready for extension: %(msg)s",
{'name': name, 'msg': ex.message})
utils.wait_until(Condition.is_lun_ops_ready, lun=lun)
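
The `save_and_reraise_exception` context manager used above lets the driver log while the original exception is still pending, then re-raises it on exit so the traceback is preserved. A minimal sketch, assuming a hypothetical `client.expand` call:

    import logging

    from oslo_utils import excutils

    LOG = logging.getLogger(__name__)

    def extend_lun(client, name, new_size):
        try:
            client.expand(name, new_size)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                # The body runs first; the original exception is
                # re-raised automatically when the block exits.
                LOG.warning('LUN %(name)s is not ready for extension: %(msg)s',
                            {'name': name, 'msg': ex})
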
@ -206,7 +205,7 @@ class Client(object):
if not session.existed:
return True
elif session.current_state in ('FAULTED', 'STOPPED'):
LOG.warning(_LW('Session is %s, need to handled then.'),
LOG.warning('Session is %s, needs to be handled.',
session.current_state)
return True
else:
@ -243,15 +242,15 @@ class Client(object):
session = self.vnx.get_migration_session(src_id)
src_lun = self.vnx.get_lun(lun_id=src_id)
if session.existed:
LOG.warning(_LW('Cancelling migration session: '
'%(src_id)s -> %(dst_id)s.'),
LOG.warning('Cancelling migration session: '
'%(src_id)s -> %(dst_id)s.',
{'src_id': src_id,
'dst_id': dst_id})
try:
src_lun.cancel_migrate()
except storops_ex.VNXLunNotMigratingError:
LOG.info(_LI('The LUN is not migrating or completed, '
'this message can be safely ignored'))
LOG.info('The LUN is not migrating or the migration has '
'completed; this message can be safely ignored')
except (storops_ex.VNXLunSyncCompletedError,
storops_ex.VNXMigrationError):
# Wait until session finishes
@ -266,8 +265,8 @@ class Client(object):
snap_name, allow_rw=True, auto_delete=False,
keep_for=keep_for)
except storops_ex.VNXSnapNameInUseError as ex:
LOG.warning(_LW('Snapshot %(name)s already exists. '
'Message: %(msg)s'),
LOG.warning('Snapshot %(name)s already exists. '
'Message: %(msg)s',
{'name': snap_name, 'msg': ex.message})
def delete_snapshot(self, snapshot_name):
@ -277,13 +276,13 @@ class Client(object):
try:
snap.delete()
except storops_ex.VNXSnapNotExistsError as ex:
LOG.warning(_LW("Snapshot %(name)s may be deleted already. "
"Message: %(msg)s"),
LOG.warning("Snapshot %(name)s may be deleted already. "
"Message: %(msg)s",
{'name': snapshot_name, 'msg': ex.message})
except storops_ex.VNXDeleteAttachedSnapError as ex:
with excutils.save_and_reraise_exception():
LOG.warning(_LW("Failed to delete snapshot %(name)s "
"which is in use. Message: %(msg)s"),
LOG.warning("Failed to delete snapshot %(name)s "
"which is in use. Message: %(msg)s",
{'name': snapshot_name, 'msg': ex.message})
def copy_snapshot(self, snap_name, new_snap_name):
@ -295,8 +294,8 @@ class Client(object):
try:
return lun.create_mount_point(name=smp_name)
except storops_ex.VNXLunNameInUseError as ex:
LOG.warning(_LW('Mount point %(name)s already exists. '
'Message: %(msg)s'),
LOG.warning('Mount point %(name)s already exists. '
'Message: %(msg)s',
{'name': smp_name, 'msg': ex.message})
# Ignore the failure that due to retry.
return self.vnx.get_lun(name=smp_name)
@ -306,9 +305,9 @@ class Client(object):
try:
lun.attach_snap(snap=snap_name)
except storops_ex.VNXSnapAlreadyMountedError as ex:
LOG.warning(_LW("Snapshot %(snap_name)s is attached to "
LOG.warning("Snapshot %(snap_name)s is attached to "
"snapshot mount point %(smp_name)s already. "
"Message: %(msg)s"),
"Message: %(msg)s",
{'snap_name': snap_name,
'smp_name': smp_name,
'msg': ex.message})
@ -318,8 +317,8 @@ class Client(object):
try:
lun.detach_snap()
except storops_ex.VNXSnapNotAttachedError as ex:
LOG.warning(_LW("Snapshot mount point %(smp_name)s is not "
"currently attached. Message: %(msg)s"),
LOG.warning("Snapshot mount point %(smp_name)s is not "
"currently attached. Message: %(msg)s",
{'smp_name': smp_name, 'msg': ex.message})
def modify_snapshot(self, snap_name, allow_rw=None,
@ -417,7 +416,7 @@ class Client(object):
try:
lun.enable_compression(ignore_thresholds=True)
except storops_ex.VNXCompressionAlreadyEnabledError:
LOG.warning(_LW("Compression has already been enabled on %s."),
LOG.warning("Compression has already been enabled on %s.",
lun.name)
def get_vnx_enabler_status(self):
@ -433,8 +432,8 @@ class Client(object):
self.sg_cache[name] = self.vnx.create_sg(name)
except storops_ex.VNXStorageGroupNameInUseError as ex:
# Ignore the failure due to retry
LOG.warning(_LW('Storage group %(name)s already exists. '
'Message: %(msg)s'),
LOG.warning('Storage group %(name)s already exists. '
'Message: %(msg)s',
{'name': name, 'msg': ex.message})
self.sg_cache[name] = self.vnx.get_sg(name=name)
@ -469,8 +468,8 @@ class Client(object):
storage_group.connect_hba(port, initiator_id, host.name,
host_ip=host.ip)
except storops_ex.VNXStorageGroupError as ex:
LOG.warning(_LW('Failed to set path to port %(port)s for '
'initiator %(hba_id)s. Message: %(msg)s'),
LOG.warning('Failed to set path to port %(port)s for '
'initiator %(hba_id)s. Message: %(msg)s',
{'port': port, 'hba_id': initiator_id,
'msg': ex.message})
@ -499,9 +498,9 @@ class Client(object):
except storops_ex.VNXNoHluAvailableError as ex:
with excutils.save_and_reraise_exception():
# Reach the max times of retry, fail the attach action.
LOG.error(_LE('Failed to add %(lun)s into %(sg)s after '
LOG.error('Failed to add %(lun)s into %(sg)s after '
'%(tried)s tries. Reach the max retry times. '
'Message: %(msg)s'),
'Message: %(msg)s',
{'lun': lun.lun_id, 'sg': storage_group.name,
'tried': max_retries, 'msg': ex.message})

View File

@ -23,7 +23,7 @@ from oslo_utils import importutils
storops = importutils.try_import('storops')
from cinder import exception
from cinder.i18n import _, _LW
from cinder.i18n import _
from cinder.volume.drivers.dell_emc.vnx import const
from cinder.volume import volume_types
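
`importutils.try_import('storops')` at the top of this file returns None instead of raising when the library is absent, which is why the module can be imported without storops installed. A small sketch of the pattern; the guard function is illustrative, not part of the driver:

    from oslo_utils import importutils

    storops = importutils.try_import('storops')

    def require_storops():
        # Fail at use time, not import time, when the optional
        # dependency is missing.
        if storops is None:
            raise RuntimeError('storops is not installed')
        return storops
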
@ -201,9 +201,9 @@ class ExtraSpecs(object):
:param enabler_status: Instance of VNXEnablerStatus
"""
if "storagetype:pool" in self.specs:
LOG.warning(_LW("Extra spec key 'storagetype:pool' is obsoleted "
LOG.warning("Extra spec key 'storagetype:pool' is obsoleted "
"since driver version 5.1.0. This key will be "
"ignored."))
"ignored.")
if (self._provision == storops.VNXProvisionEnum.DEDUPED and
self._tier is not None):
@ -417,7 +417,7 @@ class ReplicationDeviceList(list):
device = self._device_map[backend_id]
except KeyError:
device = None
LOG.warning(_LW('Unable to find secondary device named: %s'),
LOG.warning('Unable to find secondary device named: %s',
backend_id)
return device
@ -483,7 +483,7 @@ class VNXMirrorView(object):
mv = self.primary_client.get_mirror(mirror_name)
if not mv.existed:
# We will skip the mirror operations if not existed
LOG.warning(_LW('Mirror view %s was deleted already.'),
LOG.warning('Mirror view %s was deleted already.',
mirror_name)
return
self.fracture_image(mirror_name)

View File

@ -24,10 +24,10 @@ from taskflow import task
from taskflow.types import failure
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.dell_emc.vnx import common
from cinder.volume.drivers.dell_emc.vnx import const
from cinder.volume.drivers.dell_emc.vnx import utils
from cinder.i18n import _, _LI, _LW
LOG = logging.getLogger(__name__)
@ -60,8 +60,8 @@ class MigrateLunTask(task.Task):
def revert(self, result, client, src_id, dst_id, *args, **kwargs):
method_name = '%s.revert' % self.__class__.__name__
LOG.warning(_LW('%(method)s: cleanup migration session: '
'%(src_id)s -> %(dst_id)s.'),
LOG.warning('%(method)s: cleanup migration session: '
'%(src_id)s -> %(dst_id)s.',
{'method': method_name,
'src_id': src_id,
'dst_id': dst_id})
@ -98,7 +98,7 @@ class CreateLunTask(task.Task):
if isinstance(result, failure.Failure):
return
else:
LOG.warning(_LW('%(method_name)s: delete lun %(lun_name)s'),
LOG.warning('%(method_name)s: delete lun %(lun_name)s',
{'method_name': method_name, 'lun_name': lun_name})
client.delete_lun(lun_name)
@ -117,9 +117,9 @@ class CopySnapshotTask(task.Task):
def revert(self, result, client, snap_name, new_snap_name,
*args, **kwargs):
method_name = '%s.revert' % self.__class__.__name__
LOG.warning(_LW('%(method_name)s: delete the '
LOG.warning('%(method_name)s: delete the '
'copied snapshot %(new_name)s of '
'%(source_name)s.'),
'%(source_name)s.',
{'method_name': method_name,
'new_name': new_snap_name,
'source_name': snap_name})
@ -146,7 +146,7 @@ class CreateSMPTask(task.Task):
def revert(self, result, client, smp_name, *args, **kwargs):
method_name = '%s.revert' % self.__class__.__name__
LOG.warning(_LW('%(method_name)s: delete mount point %(name)s'),
LOG.warning('%(method_name)s: delete mount point %(name)s',
{'method_name': method_name,
'name': smp_name})
client.delete_lun(smp_name)
@ -164,7 +164,7 @@ class AttachSnapTask(task.Task):
def revert(self, result, client, smp_name, *args, **kwargs):
method_name = '%s.revert' % self.__class__.__name__
LOG.warning(_LW('%(method_name)s: detach mount point %(smp_name)s'),
LOG.warning('%(method_name)s: detach mount point %(smp_name)s',
{'method_name': method_name,
'smp_name': smp_name})
client.detach_snapshot(smp_name)
@ -178,15 +178,15 @@ class CreateSnapshotTask(task.Task):
def execute(self, client, snap_name, lun_id, keep_for=None,
*args, **kwargs):
LOG.debug('%s.execute', self.__class__.__name__)
LOG.info(_LI('Create snapshot: %(snapshot)s: lun: %(lun)s'),
LOG.info('Create snapshot: %(snapshot)s: lun: %(lun)s',
{'snapshot': snap_name,
'lun': lun_id})
client.create_snapshot(lun_id, snap_name, keep_for=keep_for)
def revert(self, result, client, snap_name, *args, **kwargs):
method_name = '%s.revert' % self.__class__.__name__
LOG.warning(_LW('%(method_name)s: '
'delete temp snapshot %(snap_name)s'),
LOG.warning('%(method_name)s: '
'delete temp snapshot %(snap_name)s',
{'method_name': method_name,
'snap_name': snap_name})
client.delete_snapshot(snap_name)
@ -201,8 +201,8 @@ class ModifySnapshotTask(task.Task):
def revert(self, result, client, snap_name, *args, **kwargs):
method_name = '%s.revert' % self.__class__.__name__
LOG.warning(_LW('%(method_name)s: '
'setting snapshot %(snap_name)s to read-only.'),
LOG.warning('%(method_name)s: '
'setting snapshot %(snap_name)s to read-only.',
{'method_name': method_name,
'snap_name': snap_name})
client.modify_snapshot(snap_name, allow_rw=False)
@ -268,8 +268,8 @@ class CreateCGSnapshotTask(task.Task):
def revert(self, client, cg_snap_name, cg_name, *args, **kwargs):
method_name = '%s.revert' % self.__class__.__name__
LOG.warning(_LW('%(method_name)s: '
'deleting CG snapshot %(snap_name)s.'),
LOG.warning('%(method_name)s: '
'deleting CG snapshot %(snap_name)s.',
{'method_name': method_name,
'snap_name': cg_snap_name})
client.delete_cg_snapshot(cg_snap_name)
@ -288,8 +288,8 @@ class CreateMirrorTask(task.Task):
def revert(self, result, mirror, mirror_name,
*args, **kwargs):
method_name = '%s.revert' % self.__class__.__name__
LOG.warning(_LW('%(method)s: removing mirror '
'view %(name)s.'),
LOG.warning('%(method)s: removing mirror '
'view %(name)s.',
{'method': method_name,
'name': mirror_name})
mirror.delete_mirror(mirror_name)
@ -308,8 +308,8 @@ class AddMirrorImageTask(task.Task):
def revert(self, result, mirror, mirror_name,
*args, **kwargs):
method_name = '%s.revert' % self.__class__.__name__
LOG.warning(_LW('%(method)s: removing secondary image '
'from %(name)s.'),
LOG.warning('%(method)s: removing secondary image '
'from %(name)s.',
{'method': method_name,
'name': mirror_name})
mirror.remove_image(mirror_name)
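
The `revert` methods in these tasks are taskflow's compensation hooks: when a later task in a flow fails, the engine reverts the already-completed tasks in reverse order. A self-contained sketch of that behavior, with illustrative task names:

    from taskflow import engines, task
    from taskflow.patterns import linear_flow

    class CreateResourceTask(task.Task):
        def execute(self):
            print('create resource')

        def revert(self, result, *args, **kwargs):
            # Runs only if a later task in the flow fails.
            print('revert: delete resource')

    class FailingTask(task.Task):
        def execute(self):
            raise RuntimeError('boom')

    flow = linear_flow.Flow('demo').add(CreateResourceTask(), FailingTask())
    try:
        engines.run(flow)
    except RuntimeError:
        pass  # CreateResourceTask.revert ran before the error propagated.
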

View File

@ -24,7 +24,7 @@ from oslo_utils import importutils
storops = importutils.try_import('storops')
from cinder import exception
from cinder.i18n import _, _LW
from cinder.i18n import _
from cinder.volume.drivers.dell_emc.vnx import common
from cinder.volume.drivers.san.san import san_opts
from cinder.volume import utils as vol_utils
@ -139,17 +139,17 @@ def wait_until(condition, timeout=None, interval=common.INTERVAL_5_SEC,
def validate_storage_migration(volume, target_host, src_serial, src_protocol):
if 'location_info' not in target_host['capabilities']:
LOG.warning(_LW("Failed to get pool name and "
LOG.warning("Failed to get pool name and "
"serial number. 'location_info' "
"from %s."), target_host['host'])
"from %s.", target_host['host'])
return False
info = target_host['capabilities']['location_info']
LOG.debug("Host for migration is %s.", info)
try:
serial_number = info.split('|')[1]
except AttributeError:
LOG.warning(_LW('Error on getting serial number '
'from %s.'), target_host['host'])
LOG.warning('Error getting serial number '
'from %s.', target_host['host'])
return False
if serial_number != src_serial:
LOG.debug('Skip storage-assisted migration because '
@ -253,8 +253,8 @@ def get_migration_rate(volume):
if rate.lower() in storops.VNXMigrationRate.values():
return storops.VNXMigrationRate.parse(rate.lower())
else:
LOG.warning(_LW('Unknown migration rate specified, '
'using [high] as migration rate.'))
LOG.warning('Unknown migration rate specified, '
'using [high] as migration rate.')
return storops.VNXMigrationRate.HIGH

View File

@ -44,7 +44,7 @@ import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.i18n import _
from cinder import interface
from cinder.objects import fields
from cinder import utils
@ -156,18 +156,18 @@ class XtremIOClient(object):
error = response.json()
err_msg = error.get('message')
if err_msg.endswith(OBJ_NOT_FOUND_ERR):
LOG.warning(_LW("object %(key)s of "
"type %(typ)s not found, %(err_msg)s"),
LOG.warning("object %(key)s of "
"type %(typ)s not found, %(err_msg)s",
{'key': key, 'typ': object_type,
'err_msg': err_msg, })
raise exception.NotFound()
elif err_msg == VOL_NOT_UNIQUE_ERR:
LOG.error(_LE("can't create 2 volumes with the same name, %s"),
LOG.error("can't create 2 volumes with the same name, %s",
err_msg)
msg = (_('Volume by this name already exists'))
msg = _('Volume by this name already exists')
raise exception.VolumeBackendAPIException(data=msg)
elif err_msg == VOL_OBJ_NOT_FOUND_ERR:
LOG.error(_LE("Can't find volume to map %(key)s, %(msg)s"),
LOG.error("Can't find volume to map %(key)s, %(msg)s",
{'key': key, 'msg': err_msg, })
raise exception.VolumeNotFound(volume_id=key)
elif ALREADY_MAPPED_ERR in err_msg:
@ -338,8 +338,7 @@ class XtremIOClient4(XtremIOClient):
self.req(typ, 'PUT', data, idx=int(idx))
except exception.VolumeBackendAPIException:
# reverting
msg = _LE('Failed to rename the created snapshot, reverting.')
LOG.error(msg)
LOG.error('Failed to rename the created snapshot, reverting.')
self.req(typ, 'DELETE', idx=int(idx))
raise
@ -404,7 +403,7 @@ class XtremIOVolumeDriver(san.SanDriver):
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
LOG.info(_LI('XtremIO SW version %s'), version_text)
LOG.info('XtremIO SW version %s', version_text)
if ver[0] >= 4:
self.client = XtremIOClient4(self.configuration, self.cluster_id)
@ -466,8 +465,8 @@ class XtremIOVolumeDriver(san.SanDriver):
try:
self.extend_volume(volume, volume['size'])
except Exception:
LOG.error(_LE('failes to extend volume %s, '
'reverting clone operation'), volume['id'])
LOG.error('Failed to extend volume %s, '
'reverting clone operation', volume['id'])
# remove the volume in case resize failed
self.delete_volume(volume)
raise
@ -481,7 +480,7 @@ class XtremIOVolumeDriver(san.SanDriver):
try:
self.client.req('volumes', 'DELETE', name=volume.name_id)
except exception.NotFound:
LOG.info(_LI("volume %s doesn't exist"), volume.name_id)
LOG.info("volume %s doesn't exist", volume.name_id)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
@ -492,7 +491,7 @@ class XtremIOVolumeDriver(san.SanDriver):
try:
self.client.req('volumes', 'DELETE', name=snapshot.id)
except exception.NotFound:
LOG.info(_LI("snapshot %s doesn't exist"), snapshot.id)
LOG.info("snapshot %s doesn't exist", snapshot.id)
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
@ -505,8 +504,8 @@ class XtremIOVolumeDriver(san.SanDriver):
data = {'name': original_name}
self.client.req('volumes', 'PUT', data, name=current_name)
except exception.VolumeBackendAPIException:
LOG.error(_LE('Unable to rename the logical volume '
'for volume: %s'), original_name)
LOG.error('Unable to rename the logical volume '
'for volume: %s', original_name)
# If the rename fails, _name_id should be set to the new
# volume id and provider_location should be set to the
# one from the new volume as well.
@ -603,8 +602,8 @@ class XtremIOVolumeDriver(san.SanDriver):
self.client.req('volumes', 'PUT', name=volume['id'],
data={'vol-name': volume['name'] + '-unmanged'})
except exception.NotFound:
LOG.info(_LI("%(typ)s with the name %(name)s wasn't found, "
"can't unmanage") %
LOG.info("%(typ)s with the name %(name)s wasn't found, "
"can't unmanage",
{'typ': 'Snapshot' if is_snapshot else 'Volume',
'name': volume['id']})
raise exception.VolumeNotFound(volume_id=volume['id'])
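Note this hunk also swaps eager %-interpolation for the logger's lazy formatting (the trailing % became a comma). The practical difference, in isolation:

    import logging

    LOG = logging.getLogger(__name__)
    name = 'vol-1'

    LOG.info("volume %s wasn't found" % name)  # interpolates even if INFO is off
    LOG.info("volume %s wasn't found", name)   # logger formats only when emitted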
@ -644,7 +643,7 @@ class XtremIOVolumeDriver(san.SanDriver):
try:
self.client.req('lun-maps', 'DELETE', name=lm_name)
except exception.NotFound:
LOG.warning(_LW("terminate_connection: lun map not found"))
LOG.warning("terminate_connection: lun map not found")
def _get_password(self):
return ''.join(RANDOM.choice
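_get_password is truncated by the diff view; it draws a random CHAP secret from a SystemRandom instance. A self-contained equivalent (the character set and the 12-character length are illustrative assumptions, not taken from this diff):

    import random
    import string

    RANDOM = random.SystemRandom()

    def get_password(length=12):  # length assumed for illustration
        chars = string.ascii_uppercase + string.digits
        return ''.join(RANDOM.choice(chars) for _ in range(length))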
@ -659,9 +658,9 @@ class XtremIOVolumeDriver(san.SanDriver):
res = self.client.req('lun-maps', 'POST', data)
lunmap = self._obj_from_result(res)
LOG.info(_LI('Created lun-map:\n%s'), lunmap)
LOG.info('Created lun-map:\n%s', lunmap)
except exception.XtremIOAlreadyMappedError:
LOG.info(_LI('Volume already mapped, retrieving %(ig)s, %(vol)s'),
LOG.info('Volume already mapped, retrieving %(ig)s, %(vol)s',
{'ig': ig, 'vol': volume['id']})
lunmap = self.client.find_lunmap(ig, volume['id'])
return lunmap
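Mapping is idempotent as well: an already-mapped error is caught and the existing record fetched instead of failing the attach. A sketch, with AlreadyMappedError and find_lunmap standing in for the driver's exception and client call:

    def ensure_lunmap(client, ig, volume_id, data, log):
        try:
            lunmap = client.req('lun-maps', 'POST', data)
            log('Created lun-map:\n%s', lunmap)
        except AlreadyMappedError:
            log('Volume already mapped, retrieving %(ig)s, %(vol)s',
                {'ig': ig, 'vol': volume_id})
            lunmap = client.find_lunmap(ig, volume_id)
        return lunmap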
@ -993,8 +992,7 @@ class XtremIOISCSIDriver(XtremIOVolumeDriver, driver.ISCSIDriver):
discovery_chap)
# if CHAP was enabled after the initiator was created
if login_chap and not login_passwd:
LOG.info(_LI('initiator has no password while using chap,'
'adding it'))
LOG.info('Initiator has no password while using chap, adding it.')
data = {}
(login_passwd,
d_passwd) = self._add_auth(data, login_chap, discovery_chap and

View File

@ -26,7 +26,7 @@ import requests
import six
from cinder import exception
from cinder.i18n import _, _LE, _LW, _LI
from cinder.i18n import _
from cinder import utils
LOG = logging.getLogger(__name__)
@ -80,7 +80,7 @@ class DotHillClient(object):
return
except exception.DotHillConnectionError:
not_responding = self._curr_ip_addr
LOG.exception(_LE('session_login failed to connect to %s'),
LOG.exception('session_login failed to connect to %s',
self._curr_ip_addr)
# Loop through the remaining management addresses
# to find one that's up.
@ -92,7 +92,7 @@ class DotHillClient(object):
self._get_session_key()
return
except exception.DotHillConnectionError:
LOG.error(_LE('Failed to connect to %s'),
LOG.error('Failed to connect to %s',
self._curr_ip_addr)
continue
raise exception.DotHillConnectionError(
@ -172,20 +172,20 @@ class DotHillClient(object):
return self._api_request(path, *args, **kargs)
except exception.DotHillConnectionError as e:
if tries_left < 1:
LOG.error(_LE("Array Connection error: "
"%s (no more retries)"), e.msg)
LOG.error("Array Connection error: "
"%s (no more retries)", e.msg)
raise
# Retry on any network connection errors, SSL errors, etc
LOG.error(_LE("Array Connection error: %s (retrying)"), e.msg)
LOG.error("Array Connection error: %s (retrying)", e.msg)
except exception.DotHillRequestError as e:
if tries_left < 1:
LOG.error(_LE("Array Request error: %s (no more retries)"),
LOG.error("Array Request error: %s (no more retries)",
e.msg)
raise
# Retry specific errors which may succeed if we log in again
# -10027 => The user is not recognized on this system.
if '(-10027)' in e.msg:
LOG.error(_LE("Array Request error: %s (retrying)"), e.msg)
LOG.error("Array Request error: %s (retrying)", e.msg)
else:
raise
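The wrapper above bounds its retries and retries only failures that plausibly clear up: any connection/SSL error, plus request error -10027 (the session is no longer recognized). Distilled, with RequestError as a stand-in and the re-login shown explicitly:

    class RequestError(Exception): pass

    def request_with_retries(do_request, relogin, log, tries_left=3):
        while True:
            tries_left -= 1
            try:
                return do_request()
            except ConnectionError as e:
                if tries_left < 1:
                    log("Array Connection error: %s (no more retries)", e)
                    raise
                log("Array Connection error: %s (retrying)", e)
                relogin()  # network blip: start a fresh session
            except RequestError as e:
                if tries_left < 1 or '(-10027)' not in str(e):
                    raise
                log("Array Request error: %s (retrying)", e)
                relogin()  # user not recognized: session expired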
@ -248,7 +248,7 @@ class DotHillClient(object):
# -10186 => The specified name is already in use.
# This can occur during controller failover.
if '(-10186)' in e.msg:
LOG.warning(_LW("Ignoring error in create volume: %s"), e.msg)
LOG.warning("Ignoring error in create volume: %s", e.msg)
return None
raise
@ -261,8 +261,8 @@ class DotHillClient(object):
# -10075 => The specified volume was not found.
# This can occur during controller failover.
if '(-10075)' in e.msg:
LOG.warning(_LW("Ignorning error while deleting %(volume)s:"
" %(reason)s"),
LOG.warning("Ignorning error while deleting %(volume)s:"
" %(reason)s",
{'volume': name, 'reason': e.msg})
return
raise
@ -277,8 +277,8 @@ class DotHillClient(object):
# -10186 => The specified name is already in use.
# This can occur during controller failover.
if '(-10186)' in e.msg:
LOG.warning(_LW("Ignoring error attempting to create snapshot:"
" %s"), e.msg)
LOG.warning("Ignoring error attempting to create snapshot:"
" %s", e.msg)
return None
def delete_snapshot(self, snap_name):
@ -288,7 +288,7 @@ class DotHillClient(object):
# -10050 => The volume was not found on this system.
# This can occur during controller failover.
if '(-10050)' in e.msg:
LOG.warning(_LW("Ignoring unmap error -10050: %s"), e.msg)
LOG.warning("Ignoring unmap error -10050: %s", e.msg)
return None
raise
@ -381,8 +381,8 @@ class DotHillClient(object):
except exception.DotHillRequestError as e:
# -10058: The host identifier or nickname is already in use
if '(-10058)' in e.msg:
LOG.error(_LE("While trying to create host nickname"
" %(nickname)s: %(error_msg)s"),
LOG.error("While trying to create host nickname"
" %(nickname)s: %(error_msg)s",
{'nickname': hostname,
'error_msg': e.msg})
else:
@ -400,9 +400,9 @@ class DotHillClient(object):
except exception.DotHillRequestError as e:
# -3177 => "The specified LUN overlaps a previously defined LUN
if '(-3177)' in e.msg:
LOG.info(_LI("Unable to map volume"
LOG.info("Unable to map volume"
" %(volume_name)s to lun %(lun)d:"
" %(reason)s"),
" %(reason)s",
{'volume_name': volume_name,
'lun': lun, 'reason': e.msg})
lun = self._get_next_available_lun_for_host(host,
@ -410,8 +410,8 @@ class DotHillClient(object):
continue
raise
except Exception as e:
LOG.error(_LE("Error while mapping volume"
" %(volume_name)s to lun %(lun)d:"),
LOG.error("Error while mapping volume"
" %(volume_name)s to lun %(lun)d:",
{'volume_name': volume_name, 'lun': lun},
e)
raise
@ -430,7 +430,7 @@ class DotHillClient(object):
# -10050 => The volume was not found on this system.
# This can occur during controller failover.
if '(-10050)' in e.msg:
LOG.warning(_LW("Ignoring unmap error -10050: %s"), e.msg)
LOG.warning("Ignoring unmap error -10050: %s", e.msg)
return None
raise
@ -481,7 +481,7 @@ class DotHillClient(object):
break
else:
if count >= 5:
LOG.error(_LE('Error in copying volume: %s'), src_name)
LOG.error('Error in copying volume: %s', src_name)
raise exception.DotHillRequestError
time.sleep(1)
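Copy completion is detected by polling once a second with a hard cap on checks, as seen above. The bare pattern:

    import time

    def wait_for_copy(copy_complete, src_name, max_checks=5):
        count = 0
        while not copy_complete():
            count += 1
            if count >= max_checks:
                raise RuntimeError('Error in copying volume: %s' % src_name)
            time.sleep(1)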

View File

@ -26,7 +26,7 @@ from oslo_config import cfg
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _, _LE
from cinder.i18n import _
from cinder.objects import fields
from cinder.volume.drivers.dothill import dothill_client as dothill
@ -176,7 +176,7 @@ class DotHillCommon(object):
self.backend_name,
self.backend_type)
except exception.DotHillRequestError as ex:
LOG.exception(_LE("Creation of volume %s failed."), volume['id'])
LOG.exception("Creation of volume %s failed.", volume['id'])
raise exception.Invalid(ex)
finally:
@ -201,7 +201,7 @@ class DotHillCommon(object):
"""
if (volume['status'] != "available" or
volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED):
LOG.error(_LE("Volume must be detached for clone operation."))
LOG.error("Volume must be detached for clone operation.")
raise exception.VolumeAttached(volume_id=volume['id'])
def create_cloned_volume(self, volume, src_vref):
@ -223,7 +223,7 @@ class DotHillCommon(object):
self.client.copy_volume(orig_name, dest_name,
self.backend_name, self.backend_type)
except exception.DotHillRequestError as ex:
LOG.exception(_LE("Cloning of volume %s failed."),
LOG.exception("Cloning of volume %s failed.",
src_vref['id'])
raise exception.Invalid(ex)
finally:
@ -246,7 +246,7 @@ class DotHillCommon(object):
self.client.copy_volume(orig_name, dest_name,
self.backend_name, self.backend_type)
except exception.DotHillRequestError as ex:
LOG.exception(_LE("Create volume failed from snapshot: %s"),
LOG.exception("Create volume failed from snapshot: %s",
snapshot['id'])
raise exception.Invalid(ex)
finally:
@ -269,7 +269,7 @@ class DotHillCommon(object):
# if the volume wasn't found, ignore the error
if 'The volume was not found on this system.' in ex.args:
return
LOG.exception(_LE("Deletion of volume %s failed."), volume['id'])
LOG.exception("Deletion of volume %s failed.", volume['id'])
raise exception.Invalid(ex)
finally:
self.client_logout()
@ -331,7 +331,7 @@ class DotHillCommon(object):
connector_element)
return data
except exception.DotHillRequestError as ex:
LOG.exception(_LE("Error mapping volume: %s"), volume_name)
LOG.exception("Error mapping volume: %s", volume_name)
raise exception.Invalid(ex)
def unmap_volume(self, volume, connector, connector_element):
@ -347,7 +347,7 @@ class DotHillCommon(object):
connector,
connector_element)
except exception.DotHillRequestError as ex:
LOG.exception(_LE("Error unmapping volume: %s"), volume_name)
LOG.exception("Error unmapping volume: %s", volume_name)
raise exception.Invalid(ex)
finally:
self.client_logout()
@ -356,21 +356,21 @@ class DotHillCommon(object):
try:
return self.client.get_active_fc_target_ports()
except exception.DotHillRequestError as ex:
LOG.exception(_LE("Error getting active FC target ports."))
LOG.exception("Error getting active FC target ports.")
raise exception.Invalid(ex)
def get_active_iscsi_target_iqns(self):
try:
return self.client.get_active_iscsi_target_iqns()
except exception.DotHillRequestError as ex:
LOG.exception(_LE("Error getting active ISCSI target iqns."))
LOG.exception("Error getting active ISCSI target iqns.")
raise exception.Invalid(ex)
def get_active_iscsi_target_portals(self):
try:
return self.client.get_active_iscsi_target_portals()
except exception.DotHillRequestError as ex:
LOG.exception(_LE("Error getting active ISCSI target portals."))
LOG.exception("Error getting active ISCSI target portals.")
raise exception.Invalid(ex)
def create_snapshot(self, snapshot):
@ -387,7 +387,7 @@ class DotHillCommon(object):
try:
self.client.create_snapshot(vol_name, snap_name)
except exception.DotHillRequestError as ex:
LOG.exception(_LE("Creation of snapshot failed for volume: %s"),
LOG.exception("Creation of snapshot failed for volume: %s",
snapshot['volume_id'])
raise exception.Invalid(ex)
finally:
@ -404,7 +404,7 @@ class DotHillCommon(object):
# if the volume wasn't found, ignore the error
if 'The volume was not found on this system.' in ex.args:
return
LOG.exception(_LE("Deleting snapshot %s failed"), snapshot['id'])
LOG.exception("Deleting snapshot %s failed", snapshot['id'])
raise exception.Invalid(ex)
finally:
self.client_logout()
@ -428,7 +428,7 @@ class DotHillCommon(object):
try:
self.client.extend_volume(volume_name, "%dGiB" % growth_size)
except exception.DotHillRequestError as ex:
LOG.exception(_LE("Extension of volume %s failed."), volume['id'])
LOG.exception("Extension of volume %s failed.", volume['id'])
raise exception.Invalid(ex)
finally:
self.client_logout()
@ -437,14 +437,14 @@ class DotHillCommon(object):
try:
return self.client.get_chap_record(initiator_name)
except exception.DotHillRequestError as ex:
LOG.exception(_LE("Error getting chap record."))
LOG.exception("Error getting chap record.")
raise exception.Invalid(ex)
def create_chap_record(self, initiator_name, chap_secret):
try:
self.client.create_chap_record(initiator_name, chap_secret)
except exception.DotHillRequestError as ex:
LOG.exception(_LE("Error creating chap record."))
LOG.exception("Error creating chap record.")
raise exception.Invalid(ex)
def migrate_volume(self, volume, host):
@ -489,7 +489,7 @@ class DotHillCommon(object):
self.client.modify_volume_name(dest_name, source_name)
return (True, None)
except exception.DotHillRequestError as ex:
LOG.exception(_LE("Error migrating volume: %s"), source_name)
LOG.exception("Error migrating volume: %s", source_name)
raise exception.Invalid(ex)
finally:
self.client_logout()
@ -512,7 +512,7 @@ class DotHillCommon(object):
self.client.modify_volume_name(target_vol_name,
modify_target_vol_name)
except exception.DotHillRequestError as ex:
LOG.exception(_LE("Error manage existing volume."))
LOG.exception("Error manage existing volume.")
raise exception.Invalid(ex)
finally:
self.client_logout()
@ -530,7 +530,7 @@ class DotHillCommon(object):
size = self.client.get_volume_size(target_vol_name)
return size
except exception.DotHillRequestError as ex:
LOG.exception(_LE("Error manage existing get volume size."))
LOG.exception("Error manage existing get volume size.")
raise exception.Invalid(ex)
finally:
self.client_logout()

View File

@ -37,7 +37,7 @@ from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LW, _LI, _LE
from cinder.i18n import _
from cinder import interface
from cinder.volume import driver
@ -194,7 +194,7 @@ class DrbdManageBaseDriver(driver.VolumeDriver):
try:
return fn(*args)
except dbus.DBusException as e:
LOG.warning(_LW("Got disconnected; trying to reconnect. (%s)"), e)
LOG.warning("Got disconnected; trying to reconnect. (%s)", e)
self.dbus_connect()
# Old function object is invalid, get new one.
return getattr(self.odm, fn._method_name)(*args)
@ -354,8 +354,8 @@ class DrbdManageBaseDriver(driver.VolumeDriver):
retry += 1
# Not yet
LOG.warning(_LW('Try #%(try)d: Volume "%(res)s"/%(vol)d '
'not yet deployed on "%(host)s", waiting.'),
LOG.warning('Try #%(try)d: Volume "%(res)s"/%(vol)d '
'not yet deployed on "%(host)s", waiting.',
{'try': retry, 'host': nodenames,
'res': res_name, 'vol': vol_nr})
@ -771,9 +771,9 @@ class DrbdManageBaseDriver(driver.VolumeDriver):
if not d_res_name:
# resource already gone?
LOG.warning(_LW("snapshot: %s not found, "
"skipping delete operation"), snapshot['id'])
LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id'])
LOG.warning("snapshot: %s not found, "
"skipping delete operation", snapshot['id'])
LOG.info('Successfully deleted snapshot: %s', snapshot['id'])
return True
res = self.call_or_reconnect(self.odm.remove_snapshot,
@ -1035,7 +1035,7 @@ class DrbdManageDrbdDriver(DrbdManageBaseDriver):
if len(data) < 1:
# already removed?!
LOG.info(_LI('DRBD connection for %s already removed'),
LOG.info('DRBD connection for %s already removed',
volume['id'])
elif len(data) == 1:
__, __, props, __ = data[0]
@ -1062,7 +1062,7 @@ class DrbdManageDrbdDriver(DrbdManageBaseDriver):
self._check_result(res, ignore=[dm_exc.DM_ENOENT])
else:
# more than one assignment?
LOG.error(_LE("DRBDmanage: too many assignments returned."))
LOG.error("DRBDmanage: too many assignments returned.")
return
def remove_export(self, context, volume):

View File

@ -20,7 +20,7 @@ This driver requires FSS-8.00-8865 or later.
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _, _LE
from cinder.i18n import _
from cinder import interface
import cinder.volume.driver
from cinder.volume.drivers.falconstor import fss_common
@ -71,8 +71,8 @@ class FSSFCDriver(fss_common.FalconstorBaseDriver,
def validate_connector(self, connector):
"""Check connector for at least one enabled FC protocol."""
if 'FC' == self._storage_protocol and 'wwpns' not in connector:
LOG.error(_LE('The connector does not contain the required '
'information.'))
LOG.error('The connector does not contain the required '
'information.')
raise exception.InvalidConnectorException(missing='wwpns')
@fczm_utils.add_fc_zone

Some files were not shown because too many files have changed in this diff.