From a55a6b5c717c0802288ab802a8fbff287a4b6b66 Mon Sep 17 00:00:00 2001
From: Sean McGinnis
Date: Thu, 9 Mar 2017 15:49:01 -0600
Subject: [PATCH] Remove log translations

Log messages are no longer being translated. This removes all use of
the _LE, _LI, and _LW translation markers to simplify logging and to
avoid confusion with new contributions.

See:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html

Change-Id: I4c96f3590d46205c45d12ee4ead8c208e11c52c5
---
 cinder/api/__init__.py | 8 +-
 cinder/api/contrib/backups.py | 10 +-
 cinder/api/contrib/cgsnapshots.py | 6 +-
 cinder/api/contrib/consistencygroups.py | 20 +-
 cinder/api/contrib/hosts.py | 4 +-
 cinder/api/contrib/snapshot_actions.py | 4 +-
 cinder/api/contrib/snapshot_unmanage.py | 3 +-
 cinder/api/contrib/types_extra_specs.py | 10 +-
 cinder/api/contrib/volume_transfer.py | 8 +-
 cinder/api/contrib/volume_unmanage.py | 3 +-
 cinder/api/extensions.py | 19 +-
 cinder/api/middleware/fault.py | 6 +-
 cinder/api/openstack/__init__.py | 6 +-
 cinder/api/openstack/wsgi.py | 19 +-
 cinder/api/v2/snapshots.py | 7 +-
 cinder/api/v2/volumes.py | 6 +-
 cinder/api/v3/group_snapshots.py | 6 +-
 cinder/api/v3/groups.py | 20 +-
 cinder/api/v3/volumes.py | 8 +-
 cinder/backup/api.py | 10 +-
 cinder/backup/chunkeddriver.py | 16 +-
 cinder/backup/driver.py | 22 +-
 cinder/backup/drivers/ceph.py | 58 +-
 cinder/backup/drivers/swift.py | 5 +-
 cinder/backup/drivers/tsm.py | 12 +-
 cinder/backup/manager.py | 110 ++--
 cinder/brick/local_dev/lvm.py | 83 ++-
 cinder/cmd/volume.py | 8 +-
 cinder/cmd/volume_usage_audit.py | 26 +-
 cinder/common/sqlalchemyutils.py | 4 +-
 cinder/consistencygroup/api.py | 65 ++-
 cinder/context.py | 6 +-
 cinder/coordination.py | 22 +-
 cinder/db/sqlalchemy/api.py | 33 +-
 cinder/exception.py | 6 +-
 cinder/group/api.py | 75 ++-
 cinder/i18n.py | 10 -
 cinder/image/cache.py | 5 +-
 cinder/image/glance.py | 8 +-
 cinder/image/image_utils.py | 22 +-
 cinder/keymgr/__init__.py | 10 +-
 cinder/keymgr/conf_key_mgr.py | 8 +-
 cinder/manager.py | 22 +-
 cinder/message/api.py | 7 +-
 cinder/objects/qos_specs.py | 4 +-
 cinder/quota.py | 8 +-
 cinder/quota_utils.py | 14 +-
 cinder/rpc.py | 12 +-
 cinder/scheduler/base_filter.py | 25 +-
 cinder/scheduler/filter_scheduler.py | 10 +-
 cinder/scheduler/filters/capacity_filter.py | 29 +-
 cinder/scheduler/filters/driver_filter.py | 5 +-
 .../filters/instance_locality_filter.py | 14 +-
 cinder/scheduler/flows/create_volume.py | 7 +-
 cinder/scheduler/host_manager.py | 12 +-
 cinder/scheduler/manager.py | 30 +-
 cinder/scheduler/scheduler_options.py | 8 +-
 cinder/scheduler/weights/goodness.py | 17 +-
 cinder/service.py | 56 +-
 cinder/ssh_utils.py | 6 +-
 .../tests/unit/scheduler/test_base_filter.py | 31 --
 .../netapp/dataontap/test_block_base.py | 9 +-
 cinder/transfer/api.py | 15 +-
 cinder/utils.py | 14 +-
 cinder/volume/api.py | 162 +++---
 cinder/volume/driver.py | 75 ++-
 cinder/volume/driver_utils.py | 13 +-
 cinder/volume/drivers/block_device.py | 12 +-
 cinder/volume/drivers/coprhd/common.py | 84 ++-
 cinder/volume/drivers/coprhd/scaleio.py | 9 +-
 cinder/volume/drivers/datera/datera_api2.py | 35 +-
 cinder/volume/drivers/datera/datera_api21.py | 33 +-
 cinder/volume/drivers/datera/datera_common.py | 8 +-
 .../drivers/dell/dell_storagecenter_api.py | 234 ++++----
 .../drivers/dell/dell_storagecenter_common.py | 126 +++--
 .../drivers/dell/dell_storagecenter_fc.py | 12 +-
 .../drivers/dell/dell_storagecenter_iscsi.py | 16 +-
cinder/volume/drivers/dell_emc/ps.py | 73 ++- .../volume/drivers/dell_emc/scaleio/driver.py | 205 ++++--- .../volume/drivers/dell_emc/unity/adapter.py | 22 +- .../volume/drivers/dell_emc/unity/client.py | 14 +- cinder/volume/drivers/dell_emc/unity/utils.py | 16 +- cinder/volume/drivers/dell_emc/vmax/common.py | 370 +++++++------ cinder/volume/drivers/dell_emc/vmax/fast.py | 64 ++- cinder/volume/drivers/dell_emc/vmax/fc.py | 3 +- cinder/volume/drivers/dell_emc/vmax/https.py | 6 +- cinder/volume/drivers/dell_emc/vmax/iscsi.py | 19 +- .../volume/drivers/dell_emc/vmax/masking.py | 185 ++++--- .../drivers/dell_emc/vmax/provision_v3.py | 10 +- cinder/volume/drivers/dell_emc/vmax/utils.py | 96 ++-- cinder/volume/drivers/dell_emc/vnx/adapter.py | 110 ++-- cinder/volume/drivers/dell_emc/vnx/client.py | 71 ++- cinder/volume/drivers/dell_emc/vnx/common.py | 12 +- .../volume/drivers/dell_emc/vnx/taskflows.py | 40 +- cinder/volume/drivers/dell_emc/vnx/utils.py | 16 +- cinder/volume/drivers/dell_emc/xtremio.py | 42 +- .../volume/drivers/dothill/dothill_client.py | 46 +- .../volume/drivers/dothill/dothill_common.py | 38 +- cinder/volume/drivers/drbdmanagedrv.py | 18 +- cinder/volume/drivers/falconstor/fc.py | 6 +- .../volume/drivers/falconstor/fss_common.py | 16 +- .../volume/drivers/falconstor/rest_proxy.py | 36 +- .../drivers/fujitsu/eternus_dx_common.py | 81 ++- .../drivers/fujitsu/eternus_dx_iscsi.py | 63 +-- cinder/volume/drivers/fusionstorage/dsware.py | 17 +- .../drivers/fusionstorage/fspythonapi.py | 10 +- cinder/volume/drivers/hgst.py | 14 +- .../volume/drivers/hitachi/hbsd_basiclib.py | 4 +- cinder/volume/drivers/hitachi/hbsd_common.py | 15 +- cinder/volume/drivers/hitachi/hbsd_fc.py | 6 +- cinder/volume/drivers/hitachi/hbsd_horcm.py | 25 +- cinder/volume/drivers/hitachi/hbsd_iscsi.py | 6 +- cinder/volume/drivers/hitachi/hbsd_snm2.py | 9 +- cinder/volume/drivers/hitachi/hnas_backend.py | 4 +- cinder/volume/drivers/hitachi/hnas_nfs.py | 80 +-- cinder/volume/drivers/hitachi/hnas_utils.py | 18 +- cinder/volume/drivers/hitachi/vsp_utils.py | 215 ++++---- cinder/volume/drivers/hpe/hpe_3par_common.py | 361 ++++++------- cinder/volume/drivers/hpe/hpe_3par_fc.py | 20 +- cinder/volume/drivers/hpe/hpe_3par_iscsi.py | 50 +- .../volume/drivers/hpe/hpe_lefthand_iscsi.py | 251 +++++---- cinder/volume/drivers/huawei/huawei_driver.py | 164 +++--- cinder/volume/drivers/huawei/hypermetro.py | 32 +- cinder/volume/drivers/huawei/replication.py | 22 +- cinder/volume/drivers/huawei/rest_client.py | 84 +-- cinder/volume/drivers/huawei/smartx.py | 4 +- .../volume/drivers/ibm/flashsystem_common.py | 39 +- cinder/volume/drivers/ibm/flashsystem_fc.py | 15 +- .../volume/drivers/ibm/flashsystem_iscsi.py | 15 +- cinder/volume/drivers/ibm/gpfs.py | 60 +-- .../drivers/ibm/ibm_storage/certificate.py | 4 +- .../ibm/ibm_storage/ds8k_connection.py | 8 +- .../drivers/ibm/ibm_storage/ds8k_helper.py | 73 ++- .../drivers/ibm/ibm_storage/ds8k_proxy.py | 36 +- .../ibm/ibm_storage/ds8k_replication.py | 69 +-- .../volume/drivers/ibm/ibm_storage/proxy.py | 8 +- .../drivers/ibm/ibm_storage/xiv_proxy.py | 139 +++-- .../drivers/ibm/storwize_svc/replication.py | 14 +- .../ibm/storwize_svc/storwize_svc_common.py | 233 ++++---- .../ibm/storwize_svc/storwize_svc_fc.py | 28 +- .../ibm/storwize_svc/storwize_svc_iscsi.py | 23 +- .../infortrend/raidcmd_cli/cli_factory.py | 13 +- .../infortrend/raidcmd_cli/common_cli.py | 121 +++-- .../drivers/kaminario/kaminario_common.py | 54 +- .../volume/drivers/kaminario/kaminario_fc.py | 8 +- 
.../drivers/kaminario/kaminario_iscsi.py | 4 +- cinder/volume/drivers/lvm.py | 38 +- cinder/volume/drivers/nec/volume_helper.py | 114 ++-- cinder/volume/drivers/netapp/common.py | 12 +- .../drivers/netapp/dataontap/block_7mode.py | 18 +- .../drivers/netapp/dataontap/block_base.py | 75 ++- .../drivers/netapp/dataontap/client/api.py | 4 +- .../netapp/dataontap/client/client_7mode.py | 8 +- .../netapp/dataontap/client/client_base.py | 20 +- .../netapp/dataontap/client/client_cmode.py | 41 +- .../drivers/netapp/dataontap/nfs_base.py | 78 +-- .../drivers/netapp/dataontap/nfs_cmode.py | 42 +- .../dataontap/performance/perf_7mode.py | 11 +- .../netapp/dataontap/performance/perf_base.py | 6 +- .../dataontap/performance/perf_cmode.py | 11 +- .../netapp/dataontap/utils/capabilities.py | 14 +- .../netapp/dataontap/utils/data_motion.py | 8 +- .../volume/drivers/netapp/eseries/client.py | 5 +- .../volume/drivers/netapp/eseries/library.py | 166 +++--- cinder/volume/drivers/netapp/utils.py | 28 +- cinder/volume/drivers/nexenta/iscsi.py | 80 ++- cinder/volume/drivers/nexenta/nfs.py | 117 ++-- cinder/volume/drivers/nexenta/ns5/iscsi.py | 29 +- cinder/volume/drivers/nexenta/ns5/nfs.py | 28 +- cinder/volume/drivers/nfs.py | 42 +- cinder/volume/drivers/nimble.py | 150 +++--- cinder/volume/drivers/prophetstor/dpl_fc.py | 46 +- .../volume/drivers/prophetstor/dpl_iscsi.py | 9 +- .../volume/drivers/prophetstor/dplcommon.py | 106 ++-- cinder/volume/drivers/pure.py | 106 ++-- cinder/volume/drivers/qnap.py | 10 +- cinder/volume/drivers/quobyte.py | 54 +- cinder/volume/drivers/rbd.py | 38 +- cinder/volume/drivers/reduxio/rdx_cli_api.py | 12 +- .../drivers/reduxio/rdx_iscsi_driver.py | 52 +- cinder/volume/drivers/remotefs.py | 72 +-- cinder/volume/drivers/san/san.py | 4 +- cinder/volume/drivers/sheepdog.py | 81 ++- cinder/volume/drivers/solidfire.py | 94 ++-- .../drivers/synology/synology_common.py | 70 ++- .../volume/drivers/synology/synology_iscsi.py | 15 +- cinder/volume/drivers/tegile.py | 14 +- cinder/volume/drivers/tintri.py | 59 +-- cinder/volume/drivers/violin/v7000_common.py | 34 +- cinder/volume/drivers/violin/v7000_fcp.py | 14 +- cinder/volume/drivers/violin/v7000_iscsi.py | 16 +- cinder/volume/drivers/vmware/datastore.py | 3 +- cinder/volume/drivers/vmware/vmdk.py | 162 +++--- cinder/volume/drivers/vmware/volumeops.py | 51 +- cinder/volume/drivers/vzstorage.py | 14 +- cinder/volume/drivers/windows/smbfs.py | 14 +- cinder/volume/drivers/xio.py | 111 ++-- cinder/volume/drivers/zadara.py | 20 +- cinder/volume/drivers/zfssa/restclient.py | 14 +- cinder/volume/drivers/zfssa/webdavclient.py | 12 +- cinder/volume/drivers/zfssa/zfssaiscsi.py | 65 ++- cinder/volume/drivers/zfssa/zfssanfs.py | 66 +-- cinder/volume/drivers/zfssa/zfssarest.py | 28 +- cinder/volume/drivers/zte/zte_ks.py | 15 +- cinder/volume/flows/api/create_volume.py | 33 +- cinder/volume/flows/api/manage_existing.py | 7 +- cinder/volume/flows/common.py | 12 +- cinder/volume/flows/manager/create_volume.py | 90 ++-- .../volume/flows/manager/manage_existing.py | 12 +- .../flows/manager/manage_existing_snapshot.py | 34 +- cinder/volume/group_types.py | 14 +- cinder/volume/manager.py | 501 +++++++++--------- cinder/volume/qos_specs.py | 22 +- cinder/volume/targets/cxt.py | 37 +- cinder/volume/targets/iet.py | 35 +- cinder/volume/targets/iscsi.py | 20 +- cinder/volume/targets/lio.py | 33 +- cinder/volume/targets/scst.py | 46 +- cinder/volume/targets/tgt.py | 53 +- cinder/volume/throttling.py | 11 +- cinder/volume/utils.py | 22 +- 
cinder/volume/volume_types.py | 10 +- .../brocade/brcd_fc_san_lookup_service.py | 6 +- .../brocade/brcd_fc_zone_client_cli.py | 22 +- .../drivers/brocade/brcd_fc_zone_driver.py | 34 +- .../brocade/brcd_http_fc_zone_client.py | 4 +- .../cisco/cisco_fc_san_lookup_service.py | 7 +- .../drivers/cisco/cisco_fc_zone_client_cli.py | 22 +- .../drivers/cisco/cisco_fc_zone_driver.py | 27 +- cinder/zonemanager/drivers/driver_utils.py | 12 +- cinder/zonemanager/fc_san_lookup_service.py | 4 +- cinder/zonemanager/fc_zone_manager.py | 44 +- cinder/zonemanager/utils.py | 15 +- 233 files changed, 4768 insertions(+), 5071 deletions(-) diff --git a/cinder/api/__init__.py b/cinder/api/__init__.py index 083e1c5198c..bef2621207c 100644 --- a/cinder/api/__init__.py +++ b/cinder/api/__init__.py @@ -19,8 +19,6 @@ from oslo_config import cfg from oslo_log import log as logging import paste.urlmap -from cinder.i18n import _LW - CONF = cfg.CONF LOG = logging.getLogger(__name__) @@ -28,7 +26,7 @@ LOG = logging.getLogger(__name__) def root_app_factory(loader, global_conf, **local_conf): if CONF.enable_v1_api: - LOG.warning(_LW('The v1 api is deprecated and is not under active ' - 'development. You should set enable_v1_api=false ' - 'and enable_v3_api=true in your cinder.conf file.')) + LOG.warning('The v1 api is deprecated and is not under active ' + 'development. You should set enable_v1_api=false ' + 'and enable_v3_api=true in your cinder.conf file.') return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf) diff --git a/cinder/api/contrib/backups.py b/cinder/api/contrib/backups.py index e40459c6a14..93b50aef290 100644 --- a/cinder/api/contrib/backups.py +++ b/cinder/api/contrib/backups.py @@ -28,7 +28,7 @@ from cinder.api.openstack import wsgi from cinder.api.views import backups as backup_views from cinder import backup as backupAPI from cinder import exception -from cinder.i18n import _, _LI +from cinder.i18n import _ from cinder import utils LOG = logging.getLogger(__name__) @@ -59,7 +59,7 @@ class BackupsController(wsgi.Controller): LOG.debug('Delete called for member %s.', id) context = req.environ['cinder.context'] - LOG.info(_LI('Delete backup with id: %s'), id) + LOG.info('Delete backup with id: %s', id) try: backup = self.backup_api.get(context, id) @@ -141,8 +141,8 @@ class BackupsController(wsgi.Controller): incremental = backup.get('incremental', False) force = backup.get('force', False) snapshot_id = backup.get('snapshot_id', None) - LOG.info(_LI("Creating backup of volume %(volume_id)s in container" - " %(container)s"), + LOG.info("Creating backup of volume %(volume_id)s in container" + " %(container)s", {'volume_id': volume_id, 'container': container}, context=context) @@ -173,7 +173,7 @@ class BackupsController(wsgi.Controller): volume_id = restore.get('volume_id', None) name = restore.get('name', None) - LOG.info(_LI("Restoring backup %(backup_id)s to volume %(volume_id)s"), + LOG.info("Restoring backup %(backup_id)s to volume %(volume_id)s", {'backup_id': id, 'volume_id': volume_id}, context=context) diff --git a/cinder/api/contrib/cgsnapshots.py b/cinder/api/contrib/cgsnapshots.py index b466f8a135a..a4a027eb564 100644 --- a/cinder/api/contrib/cgsnapshots.py +++ b/cinder/api/contrib/cgsnapshots.py @@ -28,7 +28,7 @@ from cinder.api.views import cgsnapshots as cgsnapshot_views from cinder import consistencygroup as consistencygroup_api from cinder import exception from cinder import group as group_api -from cinder.i18n import _, _LI +from cinder.i18n import _ from cinder.objects import 
cgsnapshot as cgsnap_obj from cinder.objects import consistencygroup as cg_obj from cinder.objects import group as grp_obj @@ -62,7 +62,7 @@ class CgsnapshotsController(wsgi.Controller): LOG.debug('delete called for member %s', id) context = req.environ['cinder.context'] - LOG.info(_LI('Delete cgsnapshot with id: %s'), id) + LOG.info('Delete cgsnapshot with id: %s', id) try: cgsnapshot = self._get_cgsnapshot(context, id) @@ -167,7 +167,7 @@ class CgsnapshotsController(wsgi.Controller): name = cgsnapshot.get('name', None) description = cgsnapshot.get('description', None) - LOG.info(_LI("Creating cgsnapshot %(name)s."), + LOG.info("Creating cgsnapshot %(name)s.", {'name': name}, context=context) diff --git a/cinder/api/contrib/consistencygroups.py b/cinder/api/contrib/consistencygroups.py index d1568ed0a18..02e055fd8ea 100644 --- a/cinder/api/contrib/consistencygroups.py +++ b/cinder/api/contrib/consistencygroups.py @@ -28,7 +28,7 @@ from cinder.api.views import consistencygroups as consistencygroup_views from cinder import consistencygroup as consistencygroup_api from cinder import exception from cinder import group as group_api -from cinder.i18n import _, _LI +from cinder.i18n import _ from cinder.objects import cgsnapshot as cgsnap_obj from cinder.objects import consistencygroup as cg_obj from cinder.objects import group as grp_obj @@ -77,7 +77,7 @@ class ConsistencyGroupsController(wsgi.Controller): msg = _("Invalid value '%s' for force.") % force raise exc.HTTPBadRequest(explanation=msg) - LOG.info(_LI('Delete consistency group with id: %s'), id) + LOG.info('Delete consistency group with id: %s', id) try: group = self._get(context, id) @@ -181,7 +181,7 @@ class ConsistencyGroupsController(wsgi.Controller): group_types.DEFAULT_CGSNAPSHOT_TYPE) raise exc.HTTPBadRequest(explanation=msg) - LOG.info(_LI("Creating consistency group %(name)s."), + LOG.info("Creating consistency group %(name)s.", {'name': name}) try: @@ -232,12 +232,12 @@ class ConsistencyGroupsController(wsgi.Controller): raise exc.HTTPBadRequest(explanation=msg) if cgsnapshot_id: - LOG.info(_LI("Creating consistency group %(name)s from " - "cgsnapshot %(snap)s."), + LOG.info("Creating consistency group %(name)s from " + "cgsnapshot %(snap)s.", {'name': name, 'snap': cgsnapshot_id}) elif source_cgid: - LOG.info(_LI("Creating consistency group %(name)s from " - "source consistency group %(source_cgid)s."), + LOG.info("Creating consistency group %(name)s from " + "source consistency group %(source_cgid)s.", {'name': name, 'source_cgid': source_cgid}) try: @@ -282,9 +282,9 @@ class ConsistencyGroupsController(wsgi.Controller): def _update(self, context, id, name, description, add_volumes, remove_volumes, allow_empty=False): - LOG.info(_LI("Updating consistency group %(id)s with name %(name)s " - "description: %(description)s add_volumes: " - "%(add_volumes)s remove_volumes: %(remove_volumes)s."), + LOG.info("Updating consistency group %(id)s with name %(name)s " + "description: %(description)s add_volumes: " + "%(add_volumes)s remove_volumes: %(remove_volumes)s.", {'id': id, 'name': name, 'description': description, diff --git a/cinder/api/contrib/hosts.py b/cinder/api/contrib/hosts.py index a1855bf8e38..4fc08b0752c 100644 --- a/cinder/api/contrib/hosts.py +++ b/cinder/api/contrib/hosts.py @@ -25,7 +25,7 @@ from cinder.api.openstack import wsgi from cinder.common import constants from cinder import db from cinder import exception -from cinder.i18n import _, _LI +from cinder.i18n import _ from cinder import objects from 
cinder.volume import api as volume_api @@ -120,7 +120,7 @@ class HostController(wsgi.Controller): """Sets the specified host's ability to accept new volumes.""" context = req.environ['cinder.context'] state = "enabled" if enabled else "disabled" - LOG.info(_LI("Setting host %(host)s to %(state)s."), + LOG.info("Setting host %(host)s to %(state)s.", {'host': host, 'state': state}) result = self.api.set_host_enabled(context, host=host, diff --git a/cinder/api/contrib/snapshot_actions.py b/cinder/api/contrib/snapshot_actions.py index e15e9428c1f..fd8cb7a0107 100644 --- a/cinder/api/contrib/snapshot_actions.py +++ b/cinder/api/contrib/snapshot_actions.py @@ -18,7 +18,7 @@ import webob from cinder.api import extensions from cinder.api.openstack import wsgi -from cinder.i18n import _, _LI +from cinder.i18n import _ from cinder import objects from cinder.objects import fields @@ -94,7 +94,7 @@ class SnapshotActionsController(wsgi.Controller): update_dict.update({'progress': progress}) - LOG.info(_LI("Updating snapshot %(id)s with info %(dict)s"), + LOG.info("Updating snapshot %(id)s with info %(dict)s", {'id': id, 'dict': update_dict}) current_snapshot.update(update_dict) diff --git a/cinder/api/contrib/snapshot_unmanage.py b/cinder/api/contrib/snapshot_unmanage.py index edd315d6293..a594a458cd7 100644 --- a/cinder/api/contrib/snapshot_unmanage.py +++ b/cinder/api/contrib/snapshot_unmanage.py @@ -20,7 +20,6 @@ from webob import exc from cinder.api import extensions from cinder.api.openstack import wsgi from cinder import exception -from cinder.i18n import _LI from cinder import volume LOG = logging.getLogger(__name__) @@ -49,7 +48,7 @@ class SnapshotUnmanageController(wsgi.Controller): context = req.environ['cinder.context'] authorize(context) - LOG.info(_LI("Unmanage snapshot with id: %s"), id) + LOG.info("Unmanage snapshot with id: %s", id) try: snapshot = self.volume_api.get_snapshot(context, id) diff --git a/cinder/api/contrib/types_extra_specs.py b/cinder/api/contrib/types_extra_specs.py index 210fbae4f1b..ff81fccb096 100644 --- a/cinder/api/contrib/types_extra_specs.py +++ b/cinder/api/contrib/types_extra_specs.py @@ -27,7 +27,7 @@ from cinder.api.openstack import wsgi from cinder import context as ctxt from cinder import db from cinder import exception -from cinder.i18n import _, _LW +from cinder.i18n import _ from cinder import rpc from cinder import utils from cinder.volume import volume_types @@ -80,10 +80,10 @@ class VolumeTypeExtraSpecsController(wsgi.Controller): expl = _('Volume Type is currently in use.') raise webob.exc.HTTPBadRequest(explanation=expl) else: - msg = _LW("The option 'allow_inuse_volume_type_modification' " - "is deprecated and will be removed in a future " - "release. The default behavior going forward will " - "be to disallow modificaton of in-use types.") + msg = ("The option 'allow_inuse_volume_type_modification' " + "is deprecated and will be removed in a future " + "release. 
The default behavior going forward will " + "be to disallow modificaton of in-use types.") versionutils.report_deprecated_feature(LOG, msg) return diff --git a/cinder/api/contrib/volume_transfer.py b/cinder/api/contrib/volume_transfer.py index e4389173bdd..e55c42a0146 100644 --- a/cinder/api/contrib/volume_transfer.py +++ b/cinder/api/contrib/volume_transfer.py @@ -23,7 +23,7 @@ from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.views import transfers as transfer_view from cinder import exception -from cinder.i18n import _, _LI +from cinder.i18n import _ from cinder import transfer as transferAPI LOG = logging.getLogger(__name__) @@ -95,7 +95,7 @@ class VolumeTransferController(wsgi.Controller): remove_whitespaces=True) name = name.strip() - LOG.info(_LI("Creating transfer of volume %s"), + LOG.info("Creating transfer of volume %s", volume_id) try: @@ -124,7 +124,7 @@ class VolumeTransferController(wsgi.Controller): msg = _("Incorrect request body format") raise exc.HTTPBadRequest(explanation=msg) - LOG.info(_LI("Accepting transfer %s"), transfer_id) + LOG.info("Accepting transfer %s", transfer_id) try: accepted_transfer = self.transfer_api.accept(context, transfer_id, @@ -144,7 +144,7 @@ class VolumeTransferController(wsgi.Controller): """Delete a transfer.""" context = req.environ['cinder.context'] - LOG.info(_LI("Delete transfer with id: %s"), id) + LOG.info("Delete transfer with id: %s", id) # Not found exception will be handled at the wsgi level self.transfer_api.delete(context, transfer_id=id) diff --git a/cinder/api/contrib/volume_unmanage.py b/cinder/api/contrib/volume_unmanage.py index 59bbf55eb29..c16651527c7 100644 --- a/cinder/api/contrib/volume_unmanage.py +++ b/cinder/api/contrib/volume_unmanage.py @@ -18,7 +18,6 @@ import webob from cinder.api import extensions from cinder.api.openstack import wsgi -from cinder.i18n import _LI from cinder import volume LOG = logging.getLogger(__name__) @@ -50,7 +49,7 @@ class VolumeUnmanageController(wsgi.Controller): context = req.environ['cinder.context'] authorize(context) - LOG.info(_LI("Unmanage volume with id: %s"), id) + LOG.info("Unmanage volume with id: %s", id) # Not found exception will be handled at the wsgi level vol = self.volume_api.get(context, id) diff --git a/cinder/api/extensions.py b/cinder/api/extensions.py index bda8d9754db..3733bc30147 100644 --- a/cinder/api/extensions.py +++ b/cinder/api/extensions.py @@ -25,7 +25,6 @@ import webob.exc import cinder.api.openstack from cinder.api.openstack import wsgi from cinder import exception -from cinder.i18n import _LE, _LI, _LW import cinder.policy @@ -123,7 +122,7 @@ class ExtensionManager(object): """ def __init__(self): - LOG.info(_LI('Initializing extension manager.')) + LOG.info('Initializing extension manager.') self.cls_list = CONF.osapi_volume_extension self.extensions = {} @@ -138,7 +137,7 @@ class ExtensionManager(object): return alias = ext.alias - LOG.info(_LI('Loaded extension: %s'), alias) + LOG.info('Loaded extension: %s', alias) if alias in self.extensions: raise exception.Error("Found duplicate extension: %s" % alias) @@ -182,7 +181,7 @@ class ExtensionManager(object): ' '.join(extension.__doc__.strip().split())) LOG.debug('Ext updated: %s', extension.updated) except AttributeError: - LOG.exception(_LE("Exception loading extension.")) + LOG.exception("Exception loading extension.") return False return True @@ -214,8 +213,8 @@ class ExtensionManager(object): try: self.load_extension(ext_factory) except Exception as exc: - 
LOG.warning(_LW('Failed to load extension %(ext_factory)s: ' - '%(exc)s'), + LOG.warning('Failed to load extension %(ext_factory)s: ' + '%(exc)s', {'ext_factory': ext_factory, 'exc': exc}) @@ -288,8 +287,8 @@ def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None): try: ext_mgr.load_extension(classpath) except Exception as exc: - logger.warning(_LW('Failed to load extension %(classpath)s: ' - '%(exc)s'), + logger.warning('Failed to load extension %(classpath)s: ' + '%(exc)s', {'classpath': classpath, 'exc': exc}) # Now, let's consider any subdirectories we may have... @@ -313,8 +312,8 @@ def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None): try: ext(ext_mgr) except Exception as exc: - logger.warning(_LW('Failed to load extension ' - '%(ext_name)s: %(exc)s'), + logger.warning('Failed to load extension ' + '%(ext_name)s: %(exc)s', {'ext_name': ext_name, 'exc': exc}) # Update the list of directories we'll explore... diff --git a/cinder/api/middleware/fault.py b/cinder/api/middleware/fault.py index 9aa9e6456b7..d2158b47b34 100644 --- a/cinder/api/middleware/fault.py +++ b/cinder/api/middleware/fault.py @@ -21,7 +21,7 @@ import webob.exc from cinder.api.openstack import wsgi from cinder import exception -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder import utils from cinder.wsgi import common as base_wsgi @@ -44,7 +44,7 @@ class FaultWrapper(base_wsgi.Middleware): def _error(self, inner, req): if not isinstance(inner, exception.QuotaError): - LOG.exception(_LE("Caught error: %(type)s %(error)s"), + LOG.exception("Caught error: %(type)s %(error)s", {'type': type(inner), 'error': inner}) safe = getattr(inner, 'safe', False) @@ -54,7 +54,7 @@ class FaultWrapper(base_wsgi.Middleware): status = 500 msg_dict = dict(url=req.url, status=status) - LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict) + LOG.info("%(url)s returned with HTTP %(status)d", msg_dict) outer = self.status_to_type(status) if headers: outer.headers = headers diff --git a/cinder/api/openstack/__init__.py b/cinder/api/openstack/__init__.py index 1d7152b5b10..2294135d136 100644 --- a/cinder/api/openstack/__init__.py +++ b/cinder/api/openstack/__init__.py @@ -23,7 +23,7 @@ from oslo_service import wsgi as base_wsgi import routes from cinder.api.openstack import wsgi -from cinder.i18n import _, _LW +from cinder.i18n import _ LOG = logging.getLogger(__name__) @@ -111,8 +111,8 @@ class APIRouter(base_wsgi.Router): controller = extension.controller if collection not in self.resources: - LOG.warning(_LW('Extension %(ext_name)s: Cannot extend ' - 'resource %(collection)s: No such resource'), + LOG.warning('Extension %(ext_name)s: Cannot extend ' + 'resource %(collection)s: No such resource', {'ext_name': extension.extension.name, 'collection': collection}) continue diff --git a/cinder/api/openstack/wsgi.py b/cinder/api/openstack/wsgi.py index f37f2b9f239..2a0040a068d 100644 --- a/cinder/api/openstack/wsgi.py +++ b/cinder/api/openstack/wsgi.py @@ -32,7 +32,7 @@ from cinder.api.openstack import api_version_request as api_version from cinder.api.openstack import versioned_method from cinder import exception from cinder import i18n -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder import policy from cinder import utils from cinder.wsgi import common as wsgi @@ -602,15 +602,14 @@ class ResourceExceptionHandler(object): code=ex_value.code, explanation=six.text_type(ex_value))) elif isinstance(ex_value, TypeError): exc_info = (ex_type, 
ex_value, ex_traceback) - LOG.error(_LE( - 'Exception handling resource: %s'), - ex_value, exc_info=exc_info) + LOG.error('Exception handling resource: %s', + ex_value, exc_info=exc_info) raise Fault(webob.exc.HTTPBadRequest()) elif isinstance(ex_value, Fault): - LOG.info(_LI("Fault thrown: %s"), ex_value) + LOG.info("Fault thrown: %s", ex_value) raise ex_value elif isinstance(ex_value, webob.exc.HTTPException): - LOG.info(_LI("HTTP exception thrown: %s"), ex_value) + LOG.info("HTTP exception thrown: %s", ex_value) raise Fault(ex_value) # We didn't handle the exception @@ -812,7 +811,7 @@ class Resource(wsgi.Application): def __call__(self, request): """WSGI method that controls (de)serialization and method dispatch.""" - LOG.info(_LI("%(method)s %(url)s"), + LOG.info("%(method)s %(url)s", {"method": request.method, "url": request.url}) @@ -934,10 +933,10 @@ class Resource(wsgi.Application): try: msg_dict = dict(url=request.url, status=response.status_int) - msg = _LI("%(url)s returned with HTTP %(status)d") + msg = "%(url)s returned with HTTP %(status)d" except AttributeError as e: msg_dict = dict(url=request.url, e=e) - msg = _LI("%(url)s returned a fault: %(e)s") + msg = "%(url)s returned a fault: %(e)s" LOG.info(msg, msg_dict) @@ -972,7 +971,7 @@ class Resource(wsgi.Application): 'create', 'delete', 'update']): - LOG.exception(_LE('Get method error.')) + LOG.exception('Get method error.') else: ctxt.reraise = False else: diff --git a/cinder/api/v2/snapshots.py b/cinder/api/v2/snapshots.py index b5ed4a27c69..467c911a408 100644 --- a/cinder/api/v2/snapshots.py +++ b/cinder/api/v2/snapshots.py @@ -25,7 +25,7 @@ from cinder.api import common from cinder.api.openstack import wsgi from cinder.api.views import snapshots as snapshot_views from cinder import exception -from cinder.i18n import _, _LI +from cinder.i18n import _ from cinder import utils from cinder import volume from cinder.volume import utils as volume_utils @@ -58,7 +58,7 @@ class SnapshotsController(wsgi.Controller): """Delete a snapshot.""" context = req.environ['cinder.context'] - LOG.info(_LI("Delete snapshot with id: %s"), id) + LOG.info("Delete snapshot with id: %s", id) # Not found exception will be handled at the wsgi level snapshot = self.volume_api.get_snapshot(context, id) @@ -127,8 +127,7 @@ class SnapshotsController(wsgi.Controller): volume = self.volume_api.get(context, volume_id) force = snapshot.get('force', False) - msg = _LI("Create snapshot from volume %s") - LOG.info(msg, volume_id) + LOG.info("Create snapshot from volume %s", volume_id) self.validate_name_and_description(snapshot) # NOTE(thingee): v2 API allows name instead of display_name diff --git a/cinder/api/v2/volumes.py b/cinder/api/v2/volumes.py index 86ae02c74ef..c09ad0c1966 100644 --- a/cinder/api/v2/volumes.py +++ b/cinder/api/v2/volumes.py @@ -28,7 +28,7 @@ from cinder.api.v2.views import volumes as volume_views from cinder import consistencygroup as consistencygroupAPI from cinder import exception from cinder import group as group_api -from cinder.i18n import _, _LI +from cinder.i18n import _ from cinder.image import glance from cinder import objects from cinder import utils @@ -70,7 +70,7 @@ class VolumeController(wsgi.Controller): cascade = utils.get_bool_param('cascade', req.params) - LOG.info(_LI("Delete volume with id: %s"), id) + LOG.info("Delete volume with id: %s", id) # Not found exception will be handled at the wsgi level volume = self.volume_api.get(context, id) @@ -257,7 +257,7 @@ class VolumeController(wsgi.Controller): elif size is 
None and kwargs['source_replica'] is not None: size = kwargs['source_replica']['size'] - LOG.info(_LI("Create volume of %s GB"), size) + LOG.info("Create volume of %s GB", size) if self.ext_mgr.is_loaded('os-image-create'): image_ref = volume.get('imageRef') diff --git a/cinder/api/v3/group_snapshots.py b/cinder/api/v3/group_snapshots.py index adb36a815d9..468a08497b8 100644 --- a/cinder/api/v3/group_snapshots.py +++ b/cinder/api/v3/group_snapshots.py @@ -26,7 +26,7 @@ from cinder.api.openstack import wsgi from cinder.api.v3.views import group_snapshots as group_snapshot_views from cinder import exception from cinder import group as group_api -from cinder.i18n import _, _LI +from cinder.i18n import _ from cinder import rpc from cinder.volume import group_types @@ -72,7 +72,7 @@ class GroupSnapshotsController(wsgi.Controller): LOG.debug('delete called for member %s', id) context = req.environ['cinder.context'] - LOG.info(_LI('Delete group_snapshot with id: %s'), id, context=context) + LOG.info('Delete group_snapshot with id: %s', id, context=context) try: group_snapshot = self.group_snapshot_api.get_group_snapshot( @@ -160,7 +160,7 @@ class GroupSnapshotsController(wsgi.Controller): name = group_snapshot.get('name', None) description = group_snapshot.get('description', None) - LOG.info(_LI("Creating group_snapshot %(name)s."), + LOG.info("Creating group_snapshot %(name)s.", {'name': name}, context=context) diff --git a/cinder/api/v3/groups.py b/cinder/api/v3/groups.py index ad681df3e74..16c8765013a 100644 --- a/cinder/api/v3/groups.py +++ b/cinder/api/v3/groups.py @@ -26,7 +26,7 @@ from cinder.api.openstack import wsgi from cinder.api.v3.views import groups as views_groups from cinder import exception from cinder import group as group_api -from cinder.i18n import _, _LI +from cinder.i18n import _ from cinder import rpc from cinder.volume import group_types @@ -134,7 +134,7 @@ class GroupsController(wsgi.Controller): % del_vol) raise exc.HTTPBadRequest(explanation=msg) - LOG.info(_LI('Delete group with id: %s'), id, + LOG.info('Delete group with id: %s', id, context=context) try: @@ -217,7 +217,7 @@ class GroupsController(wsgi.Controller): raise exc.HTTPBadRequest(explanation=msg) availability_zone = group.get('availability_zone') - LOG.info(_LI("Creating group %(name)s."), + LOG.info("Creating group %(name)s.", {'name': name}, context=context) @@ -268,16 +268,16 @@ class GroupsController(wsgi.Controller): group_type_id = None if group_snapshot_id: - LOG.info(_LI("Creating group %(name)s from group_snapshot " - "%(snap)s."), + LOG.info("Creating group %(name)s from group_snapshot " + "%(snap)s.", {'name': name, 'snap': group_snapshot_id}, context=context) grp_snap = self.group_api.get_group_snapshot(context, group_snapshot_id) group_type_id = grp_snap.group_type_id elif source_group_id: - LOG.info(_LI("Creating group %(name)s from " - "source group %(source_group_id)s."), + LOG.info("Creating group %(name)s from " + "source group %(source_group_id)s.", {'name': name, 'source_group_id': source_group_id}, context=context) source_group = self.group_api.get(context, source_group_id) @@ -341,9 +341,9 @@ class GroupsController(wsgi.Controller): "can not be all empty in the request body.") raise exc.HTTPBadRequest(explanation=msg) - LOG.info(_LI("Updating group %(id)s with name %(name)s " - "description: %(description)s add_volumes: " - "%(add_volumes)s remove_volumes: %(remove_volumes)s."), + LOG.info("Updating group %(id)s with name %(name)s " + "description: %(description)s add_volumes: " + 
"%(add_volumes)s remove_volumes: %(remove_volumes)s.", {'id': id, 'name': name, 'description': description, 'add_volumes': add_volumes, diff --git a/cinder/api/v3/volumes.py b/cinder/api/v3/volumes.py index 27cc590ad7b..16753e0aaea 100644 --- a/cinder/api/v3/volumes.py +++ b/cinder/api/v3/volumes.py @@ -25,8 +25,8 @@ from cinder.api.v2 import volumes as volumes_v2 from cinder.api.v3.views import volumes as volume_views_v3 from cinder import exception from cinder import group as group_api +from cinder.i18n import _ from cinder import objects -from cinder.i18n import _, _LI import cinder.policy from cinder import utils @@ -70,8 +70,8 @@ class VolumeController(volumes_v2.VolumeController): params = "(cascade: %(c)s, force: %(f)s)" % {'c': cascade, 'f': force} - msg = _LI("Delete volume with id: %(id)s %(params)s") - LOG.info(msg, {'id': id, 'params': params}, context=context) + LOG.info("Delete volume with id: %(id)s %(params)s", + {'id': id, 'params': params}, context=context) if force: check_policy(context, 'force_delete') @@ -264,7 +264,7 @@ class VolumeController(volumes_v2.VolumeController): elif size is None and kwargs['source_replica'] is not None: size = kwargs['source_replica']['size'] - LOG.info(_LI("Create volume of %s GB"), size) + LOG.info("Create volume of %s GB", size) if self.ext_mgr.is_loaded('os-image-create'): image_ref = volume.get('imageRef') diff --git a/cinder/backup/api.py b/cinder/backup/api.py index 9c7fc30980b..6ad8a56020b 100644 --- a/cinder/backup/api.py +++ b/cinder/backup/api.py @@ -33,7 +33,7 @@ from cinder.common import constants from cinder import context from cinder.db import base from cinder import exception -from cinder.i18n import _, _LI +from cinder.i18n import _ from cinder import objects from cinder.objects import fields import cinder.policy @@ -354,8 +354,8 @@ class API(base.Base): description = 'auto-created_from_restore_from_backup' - LOG.info(_LI("Creating volume of %(size)s GB for restore of " - "backup %(backup_id)s."), + LOG.info("Creating volume of %(size)s GB for restore of " + "backup %(backup_id)s.", {'size': size, 'backup_id': backup_id}) volume = self.volume_api.create(context, size, name, description) volume_id = volume['id'] @@ -380,8 +380,8 @@ class API(base.Base): {'volume_size': volume['size'], 'size': size}) raise exception.InvalidVolume(reason=msg) - LOG.info(_LI("Overwriting volume %(volume_id)s with restore of " - "backup %(backup_id)s"), + LOG.info("Overwriting volume %(volume_id)s with restore of " + "backup %(backup_id)s", {'volume_id': volume_id, 'backup_id': backup_id}) # Setting the status here rather than setting at start and unrolling diff --git a/cinder/backup/chunkeddriver.py b/cinder/backup/chunkeddriver.py index 595986ace8e..5dc64a30aea 100644 --- a/cinder/backup/chunkeddriver.py +++ b/cinder/backup/chunkeddriver.py @@ -36,7 +36,7 @@ import six from cinder.backup import driver from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder.volume import utils as volume_utils @@ -572,10 +572,9 @@ class ChunkedBackupDriver(driver.BackupDriver): try: self._backup_metadata(backup, object_meta) # Whatever goes wrong, we want to log, cleanup, and re-raise. 
- except Exception as err: + except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Backup volume metadata failed: %s."), - err) + LOG.exception("Backup volume metadata failed.") self.delete(backup) self._finalize_backup(backup, container, object_meta, object_sha256) @@ -635,9 +634,8 @@ class ChunkedBackupDriver(driver.BackupDriver): try: fileno = volume_file.fileno() except IOError: - LOG.info(_LI("volume_file does not support " - "fileno() so skipping " - "fsync()")) + LOG.info("volume_file does not support fileno() so skipping " + "fsync()") else: os.fsync(fileno) @@ -722,8 +720,8 @@ class ChunkedBackupDriver(driver.BackupDriver): try: object_names = self._generate_object_names(backup) except Exception: - LOG.warning(_LW('Error while listing objects, continuing' - ' with delete.')) + LOG.warning('Error while listing objects, continuing' + ' with delete.') for object_name in object_names: self.delete_object(container, object_name) diff --git a/cinder/backup/driver.py b/cinder/backup/driver.py index a42ab979bd7..3d636fa79a2 100644 --- a/cinder/backup/driver.py +++ b/cinder/backup/driver.py @@ -24,7 +24,7 @@ import six from cinder.db import base from cinder import exception -from cinder.i18n import _, _LI, _LW +from cinder.i18n import _ from cinder import keymgr as key_manager service_opts = [ @@ -64,7 +64,7 @@ class BackupMetadataAPI(base.Base): try: jsonutils.dumps(value) except TypeError: - LOG.info(_LI("Value with type=%s is not serializable"), + LOG.info("Value with type=%s is not serializable", type(value)) return False @@ -84,8 +84,8 @@ class BackupMetadataAPI(base.Base): for key, value in meta: # Exclude fields that are "not JSON serializable" if not self._is_serializable(value): - LOG.info(_LI("Unable to serialize field '%s' - excluding " - "from backup"), key) + LOG.info("Unable to serialize field '%s' - excluding " + "from backup", key) continue # Copy the encryption key uuid for backup if key is 'encryption_key_id' and value is not None: @@ -112,8 +112,8 @@ class BackupMetadataAPI(base.Base): for entry in meta: # Exclude fields that are "not JSON serializable" if not self._is_serializable(meta[entry]): - LOG.info(_LI("Unable to serialize field '%s' - excluding " - "from backup"), entry) + LOG.info("Unable to serialize field '%s' - excluding " + "from backup", entry) continue container[type_tag][entry] = meta[entry] @@ -136,8 +136,8 @@ class BackupMetadataAPI(base.Base): for entry in meta: # Exclude fields that are "not JSON serializable" if not self._is_serializable(entry.value): - LOG.info(_LI("Unable to serialize field '%s' - " - "excluding from backup"), entry) + LOG.info("Unable to serialize field '%s' - " + "excluding from backup", entry) continue container[type_tag][entry.key] = entry.value @@ -234,9 +234,9 @@ class BackupMetadataAPI(base.Base): else: # Volume type id's do not match, and destination volume # has a volume type. Throw exception. - LOG.warning(_LW("Destination volume type is different from " - "source volume type for an encrypted volume. " - "Encrypted backup restore has failed.")) + LOG.warning("Destination volume type is different from " + "source volume type for an encrypted volume. 
" + "Encrypted backup restore has failed.") msg = (_("The source volume type '%(src)s' is different " "than the destination volume type '%(dest)s'.") % {'src': src_volume_type_id, diff --git a/cinder/backup/drivers/ceph.py b/cinder/backup/drivers/ceph.py index 2a644dd1538..88a1605b9ea 100644 --- a/cinder/backup/drivers/ceph.py +++ b/cinder/backup/drivers/ceph.py @@ -58,7 +58,7 @@ from six.moves import range from cinder.backup import driver from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import interface from cinder import utils import cinder.volume.drivers.rbd as rbd_driver @@ -181,8 +181,8 @@ class CephBackupDriver(driver.BackupDriver): self.rbd_stripe_unit = CONF.backup_ceph_stripe_unit self.rbd_stripe_count = CONF.backup_ceph_stripe_count else: - LOG.info(_LI("RBD striping not supported - ignoring configuration " - "settings for rbd striping")) + LOG.info("RBD striping not supported - ignoring configuration " + "settings for rbd striping.") self.rbd_stripe_count = 0 self.rbd_stripe_unit = 0 @@ -258,8 +258,8 @@ class CephBackupDriver(driver.BackupDriver): # moved to the driver's initialization so that it can stop # the service from starting when the underyling RBD does not # support the requested features. - LOG.error(_LE("RBD journaling not supported - unable to " - "support per image mirroring in backup pool")) + LOG.error("RBD journaling not supported - unable to " + "support per image mirroring in backup pool") raise exception.BackupInvalidCephArgs( _("Image Journaling set but RBD backend does " "not support journaling") @@ -468,14 +468,14 @@ class CephBackupDriver(driver.BackupDriver): backup.id) if rem: LOG.info( - _LI("Backup base image of volume %(volume)s still " - "has %(snapshots)s snapshots so skipping base " - "image delete."), + "Backup base image of volume %(volume)s still " + "has %(snapshots)s snapshots so skipping base " + "image delete.", {'snapshots': rem, 'volume': volume_id}) return - LOG.info(_LI("Deleting backup base image='%(basename)s' of " - "volume %(volume)s."), + LOG.info("Deleting backup base image='%(basename)s' of " + "volume %(volume)s.", {'basename': base_name, 'volume': volume_id}) # Delete base if no more snapshots try: @@ -483,17 +483,16 @@ class CephBackupDriver(driver.BackupDriver): except self.rbd.ImageBusy: # Allow a retry if the image is busy if retries > 0: - LOG.info(_LI("Backup image of volume %(volume)s is " - "busy, retrying %(retries)s more time(s) " - "in %(delay)ss."), + LOG.info("Backup image of volume %(volume)s is " + "busy, retrying %(retries)s more time(s) " + "in %(delay)ss.", {'retries': retries, 'delay': delay, 'volume': volume_id}) eventlet.sleep(delay) else: - LOG.error(_LE("Max retries reached deleting backup " - "%(basename)s image of volume " - "%(volume)s."), + LOG.error("Max retries reached deleting backup " + "%(basename)s image of volume %(volume)s.", {'volume': volume_id, 'basename': base_name}) raise @@ -527,7 +526,7 @@ class CephBackupDriver(driver.BackupDriver): p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as e: - LOG.error(_LE("Pipe1 failed - %s "), e) + LOG.error("Pipe1 failed - %s ", e) raise # NOTE(dosaboy): ensure that the pipe is blocking. 
This is to work @@ -541,7 +540,7 @@ class CephBackupDriver(driver.BackupDriver): stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as e: - LOG.error(_LE("Pipe2 failed - %s "), e) + LOG.error("Pipe2 failed - %s ", e) raise p1.stdout.close() @@ -1005,8 +1004,7 @@ class CephBackupDriver(driver.BackupDriver): dest_user=rbd_user, dest_conf=rbd_conf, src_snap=restore_point) except exception.BackupRBDOperationFailed: - LOG.exception(_LE("Differential restore failed, trying full " - "restore")) + LOG.exception("Differential restore failed, trying full restore") raise # If the volume we are restoring to is larger than the backup volume, @@ -1108,10 +1106,9 @@ class CephBackupDriver(driver.BackupDriver): else: LOG.debug("Volume file is NOT RBD.") else: - LOG.info(_LI("No restore point found for backup=" - "'%(backup)s' of volume %(volume)s " - "although base image is found - " - "forcing full copy."), + LOG.info("No restore point found for backup='%(backup)s' of " + "volume %(volume)s although base image is found - " + "forcing full copy.", {'backup': backup.id, 'volume': backup.volume_id}) return False, restore_point @@ -1196,8 +1193,8 @@ class CephBackupDriver(driver.BackupDriver): LOG.debug('Restore to volume %s finished successfully.', volume_id) except exception.BackupOperationError as e: - LOG.error(_LE('Restore to volume %(volume)s finished with error - ' - '%(error)s.'), {'error': e, 'volume': volume_id}) + LOG.error('Restore to volume %(volume)s finished with error - ' + '%(error)s.', {'error': e, 'volume': volume_id}) raise def delete(self, backup): @@ -1209,8 +1206,8 @@ class CephBackupDriver(driver.BackupDriver): self._try_delete_base_image(backup) except self.rbd.ImageNotFound: LOG.warning( - _LW("RBD image for backup %(backup)s of volume %(volume)s " - "not found. Deleting backup metadata."), + "RBD image for backup %(backup)s of volume %(volume)s " + "not found. 
Deleting backup metadata.", {'backup': backup.id, 'volume': backup.volume_id}) delete_failed = True @@ -1218,9 +1215,8 @@ class CephBackupDriver(driver.BackupDriver): VolumeMetadataBackup(client, backup.id).remove_if_exists() if delete_failed: - LOG.info(_LI("Delete of backup '%(backup)s' " - "for volume '%(volume)s' " - "finished with warning."), + LOG.info("Delete of backup '%(backup)s' for volume '%(volume)s' " + "finished with warning.", {'backup': backup.id, 'volume': backup.volume_id}) else: LOG.debug("Delete of backup '%(backup)s' for volume " diff --git a/cinder/backup/drivers/swift.py b/cinder/backup/drivers/swift.py index d47b79970fe..e2d3f5150b9 100644 --- a/cinder/backup/drivers/swift.py +++ b/cinder/backup/drivers/swift.py @@ -55,7 +55,6 @@ from swiftclient import client as swift from cinder.backup import chunkeddriver from cinder import exception from cinder.i18n import _ -from cinder.i18n import _LE from cinder import interface LOG = logging.getLogger(__name__) @@ -215,8 +214,8 @@ class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver): self.backup_swift_auth_insecure = CONF.backup_swift_auth_insecure if CONF.backup_swift_auth == 'single_user': if CONF.backup_swift_user is None: - LOG.error(_LE("single_user auth mode enabled, " - "but %(param)s not set"), + LOG.error("single_user auth mode enabled, " + "but %(param)s not set", {'param': 'backup_swift_user'}) raise exception.ParameterNotFound(param='backup_swift_user') os_options = {} diff --git a/cinder/backup/drivers/tsm.py b/cinder/backup/drivers/tsm.py index aab738dc797..ca306e4f79c 100644 --- a/cinder/backup/drivers/tsm.py +++ b/cinder/backup/drivers/tsm.py @@ -35,7 +35,7 @@ from oslo_log import log as logging from cinder.backup import driver from cinder import exception -from cinder.i18n import _LE, _ +from cinder.i18n import _ from cinder import interface from cinder import utils @@ -250,9 +250,9 @@ def _cleanup_device_hardlink(hardlink_path, volume_path, volume_id): hardlink_path, run_as_root=True) except processutils.ProcessExecutionError as exc: - LOG.error(_LE('backup: %(vol_id)s failed to remove backup hardlink ' - 'from %(vpath)s to %(bpath)s.\n' - 'stdout: %(out)s\n stderr: %(err)s.'), + LOG.error('backup: %(vol_id)s failed to remove backup hardlink ' + 'from %(vpath)s to %(bpath)s.\n' + 'stdout: %(out)s\n stderr: %(err)s.', {'vol_id': volume_id, 'vpath': volume_path, 'bpath': hardlink_path, @@ -523,8 +523,8 @@ class TSMBackupDriver(driver.BackupDriver): # log error if tsm cannot delete the backup object # but do not raise exception so that cinder backup # object can be removed. 
- LOG.error(_LE('delete: %(vol_id)s failed with ' - 'stdout: %(out)s\n stderr: %(err)s'), + LOG.error('delete: %(vol_id)s failed with ' + 'stdout: %(out)s\n stderr: %(err)s', {'vol_id': backup.volume_id, 'out': out, 'err': err}) diff --git a/cinder/backup/manager.py b/cinder/backup/manager.py index c3d8a0c1854..d8ec76d02bf 100644 --- a/cinder/backup/manager.py +++ b/cinder/backup/manager.py @@ -42,7 +42,7 @@ from cinder.backup import driver from cinder.backup import rpcapi as backup_rpcapi from cinder import context from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import manager from cinder import objects from cinder.objects import fields @@ -117,7 +117,7 @@ class BackupManager(manager.ThreadPoolManager): LOG.debug("Got backend '%s'.", backend) return backend - LOG.info(_LI("Backend not found in hostname (%s) so using default."), + LOG.info("Backend not found in hostname (%s) so using default.", host) if 'default' not in self.volume_managers: @@ -168,15 +168,15 @@ class BackupManager(manager.ThreadPoolManager): self.volume_managers['default'] = default def _init_volume_driver(self, ctxt, driver): - LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)."), + LOG.info("Starting volume driver %(driver_name)s (%(version)s).", {'driver_name': driver.__class__.__name__, 'version': driver.get_version()}) try: driver.do_setup(ctxt) driver.check_for_setup_error() except Exception: - LOG.exception(_LE("Error encountered during initialization of " - "driver: %(name)s."), + LOG.exception("Error encountered during initialization of " + "driver: %(name)s.", {'name': driver.__class__.__name__}) # we don't want to continue since we failed # to initialize the driver correctly. @@ -213,8 +213,7 @@ class BackupManager(manager.ThreadPoolManager): self._cleanup_incomplete_backup_operations(ctxt) except Exception: # Don't block startup of the backup service. 
- LOG.exception(_LE("Problem cleaning incomplete backup " - "operations.")) + LOG.exception("Problem cleaning incomplete backup operations.") def reset(self): super(BackupManager, self).reset() @@ -222,7 +221,7 @@ class BackupManager(manager.ThreadPoolManager): self.volume_rpcapi = volume_rpcapi.VolumeAPI() def _cleanup_incomplete_backup_operations(self, ctxt): - LOG.info(_LI("Cleaning up incomplete backup operations.")) + LOG.info("Cleaning up incomplete backup operations.") # TODO(smulcahy) implement full resume of backup and restore # operations on restart (rather than simply resetting) @@ -231,35 +230,35 @@ class BackupManager(manager.ThreadPoolManager): try: self._cleanup_one_backup(ctxt, backup) except Exception: - LOG.exception(_LE("Problem cleaning up backup %(bkup)s."), + LOG.exception("Problem cleaning up backup %(bkup)s.", {'bkup': backup['id']}) try: self._cleanup_temp_volumes_snapshots_for_one_backup(ctxt, backup) except Exception: - LOG.exception(_LE("Problem cleaning temp volumes and " - "snapshots for backup %(bkup)s."), + LOG.exception("Problem cleaning temp volumes and " + "snapshots for backup %(bkup)s.", {'bkup': backup['id']}) def _cleanup_one_volume(self, ctxt, volume): if volume['status'] == 'backing-up': self._detach_all_attachments(ctxt, volume) - LOG.info(_LI('Resetting volume %(vol_id)s to previous ' - 'status %(status)s (was backing-up).'), + LOG.info('Resetting volume %(vol_id)s to previous ' + 'status %(status)s (was backing-up).', {'vol_id': volume['id'], 'status': volume['previous_status']}) self.db.volume_update(ctxt, volume['id'], {'status': volume['previous_status']}) elif volume['status'] == 'restoring-backup': self._detach_all_attachments(ctxt, volume) - LOG.info(_LI('setting volume %s to error_restoring ' - '(was restoring-backup).'), volume['id']) + LOG.info('Setting volume %s to error_restoring ' + '(was restoring-backup).', volume['id']) self.db.volume_update(ctxt, volume['id'], {'status': 'error_restoring'}) def _cleanup_one_backup(self, ctxt, backup): if backup['status'] == fields.BackupStatus.CREATING: - LOG.info(_LI('Resetting backup %s to error (was creating).'), + LOG.info('Resetting backup %s to error (was creating).', backup['id']) volume = objects.Volume.get_by_id(ctxt, backup.volume_id) @@ -268,8 +267,8 @@ class BackupManager(manager.ThreadPoolManager): err = 'incomplete backup reset on manager restart' self._update_backup_error(backup, err) elif backup['status'] == fields.BackupStatus.RESTORING: - LOG.info(_LI('Resetting backup %s to ' - 'available (was restoring).'), + LOG.info('Resetting backup %s to ' + 'available (was restoring).', backup['id']) volume = objects.Volume.get_by_id(ctxt, backup.restore_volume_id) self._cleanup_one_volume(ctxt, volume) @@ -277,7 +276,7 @@ class BackupManager(manager.ThreadPoolManager): backup.status = fields.BackupStatus.AVAILABLE backup.save() elif backup['status'] == fields.BackupStatus.DELETING: - LOG.info(_LI('Resuming delete on backup: %s.'), backup['id']) + LOG.info('Resuming delete on backup: %s.', backup['id']) if CONF.backup_service_inithost_offload: # Offload all the pending backup delete operations to the # threadpool to prevent the main backup service thread @@ -296,8 +295,7 @@ class BackupManager(manager.ThreadPoolManager): rpcapi = self.volume_rpcapi rpcapi.detach_volume(ctxt, volume, attachment['id']) except Exception: - LOG.exception(_LE("Detach attachment %(attach_id)s" - " failed."), + LOG.exception("Detach attachment %(attach_id)s failed.", {'attach_id': attachment['id']}, 
resource=volume) @@ -359,8 +357,8 @@ class BackupManager(manager.ThreadPoolManager): volume_id = backup.volume_id volume = objects.Volume.get_by_id(context, volume_id) previous_status = volume.get('previous_status', None) - LOG.info(_LI('Create backup started, backup: %(backup_id)s ' - 'volume: %(volume_id)s.'), + LOG.info('Create backup started, backup: %(backup_id)s ' + 'volume: %(volume_id)s.', {'backup_id': backup.id, 'volume_id': volume_id}) self._notify_about_backup_usage(context, backup, "create.start") @@ -417,7 +415,7 @@ class BackupManager(manager.ThreadPoolManager): backup.parent_id) parent_backup.num_dependent_backups += 1 parent_backup.save() - LOG.info(_LI('Create backup finished. backup: %s.'), backup.id) + LOG.info('Create backup finished. backup: %s.', backup.id) self._notify_about_backup_usage(context, backup, "create.end") def _run_backup(self, context, backup, volume): @@ -457,8 +455,8 @@ class BackupManager(manager.ThreadPoolManager): def restore_backup(self, context, backup, volume_id): """Restore volume backups from configured backup service.""" - LOG.info(_LI('Restore backup started, backup: %(backup_id)s ' - 'volume: %(volume_id)s.'), + LOG.info('Restore backup started, backup: %(backup_id)s ' + 'volume: %(volume_id)s.', {'backup_id': backup.id, 'volume_id': volume_id}) volume = objects.Volume.get_by_id(context, volume_id) @@ -490,9 +488,9 @@ class BackupManager(manager.ThreadPoolManager): raise exception.InvalidBackup(reason=err) if volume['size'] > backup['size']: - LOG.info(_LI('Volume: %(vol_id)s, size: %(vol_size)d is ' - 'larger than backup: %(backup_id)s, ' - 'size: %(backup_size)d, continuing with restore.'), + LOG.info('Volume: %(vol_id)s, size: %(vol_size)d is ' + 'larger than backup: %(backup_id)s, ' + 'size: %(backup_size)d, continuing with restore.', {'vol_id': volume['id'], 'vol_size': volume['size'], 'backup_id': backup['id'], @@ -525,8 +523,8 @@ class BackupManager(manager.ThreadPoolManager): self.db.volume_update(context, volume_id, {'status': 'available'}) backup.status = fields.BackupStatus.AVAILABLE backup.save() - LOG.info(_LI('Restore backup finished, backup %(backup_id)s restored' - ' to volume %(volume_id)s.'), + LOG.info('Restore backup finished, backup %(backup_id)s restored' + ' to volume %(volume_id)s.', {'backup_id': backup.id, 'volume_id': volume_id}) self._notify_about_backup_usage(context, backup, "restore.end") @@ -557,7 +555,7 @@ class BackupManager(manager.ThreadPoolManager): def delete_backup(self, context, backup): """Delete volume backup from configured backup service.""" - LOG.info(_LI('Delete backup started, backup: %s.'), backup.id) + LOG.info('Delete backup started, backup: %s.', backup.id) self._notify_about_backup_usage(context, backup, "delete.start") backup.host = self.host @@ -604,7 +602,7 @@ class BackupManager(manager.ThreadPoolManager): **reserve_opts) except Exception: reservations = None - LOG.exception(_LE("Failed to update usages deleting backup")) + LOG.exception("Failed to update usages deleting backup") backup.destroy() # If this backup is incremental backup, handle the @@ -620,7 +618,7 @@ class BackupManager(manager.ThreadPoolManager): QUOTAS.commit(context, reservations, project_id=backup.project_id) - LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup.id) + LOG.info('Delete backup finished, backup %s deleted.', backup.id) self._notify_about_backup_usage(context, backup, "delete.end") def _notify_about_backup_usage(self, @@ -646,7 +644,7 @@ class BackupManager(manager.ThreadPoolManager): 
:returns: 'backup_service' describing the needed driver. :raises: InvalidBackup """ - LOG.info(_LI('Export record started, backup: %s.'), backup.id) + LOG.info('Export record started, backup: %s.', backup.id) expected_status = fields.BackupStatus.AVAILABLE actual_status = backup.status @@ -680,7 +678,7 @@ class BackupManager(manager.ThreadPoolManager): msg = six.text_type(err) raise exception.InvalidBackup(reason=msg) - LOG.info(_LI('Export record finished, backup %s exported.'), backup.id) + LOG.info('Export record finished, backup %s exported.', backup.id) return backup_record def import_record(self, @@ -699,7 +697,7 @@ class BackupManager(manager.ThreadPoolManager): :raises: InvalidBackup :raises: ServiceNotFound """ - LOG.info(_LI('Import record started, backup_url: %s.'), backup_url) + LOG.info('Import record started, backup_url: %s.', backup_url) # Can we import this backup? if (backup_service != self.driver_name): @@ -783,9 +781,9 @@ class BackupManager(manager.ThreadPoolManager): if isinstance(backup_service, driver.BackupDriverWithVerify): backup_service.verify(backup.id) else: - LOG.warning(_LW('Backup service %(service)s does not ' - 'support verify. Backup id %(id)s is ' - 'not verified. Skipping verify.'), + LOG.warning('Backup service %(service)s does not ' + 'support verify. Backup id %(id)s is ' + 'not verified. Skipping verify.', {'service': self.driver_name, 'id': backup.id}) except exception.InvalidBackup as err: @@ -796,8 +794,8 @@ class BackupManager(manager.ThreadPoolManager): backup.update({"status": fields.BackupStatus.AVAILABLE}) backup.save() - LOG.info(_LI('Import record id %s metadata from driver ' - 'finished.'), backup.id) + LOG.info('Import record id %s metadata from driver ' + 'finished.', backup.id) def reset_status(self, context, backup, status): """Reset volume backup status. @@ -809,13 +807,13 @@ class BackupManager(manager.ThreadPoolManager): :raises: BackupVerifyUnsupportedDriver :raises: AttributeError """ - LOG.info(_LI('Reset backup status started, backup_id: ' - '%(backup_id)s, status: %(status)s.'), + LOG.info('Reset backup status started, backup_id: ' + '%(backup_id)s, status: %(status)s.', {'backup_id': backup.id, 'status': status}) backup_service_name = self._map_service_to_driver(backup.service) - LOG.info(_LI('Backup service: %s.'), backup_service_name) + LOG.info('Backup service: %s.', backup_service_name) if backup_service_name is not None: configured_service = self.driver_name if backup_service_name != configured_service: @@ -857,14 +855,14 @@ class BackupManager(manager.ThreadPoolManager): backup.save() except exception.InvalidBackup: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Backup id %s is not invalid. " - "Skipping reset."), backup.id) + LOG.error("Backup id %s is not invalid. Skipping reset.", + backup.id) except exception.BackupVerifyUnsupportedDriver: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Backup service %(configured_service)s ' - 'does not support verify. Backup id ' - '%(id)s is not verified. ' - 'Skipping verify.'), + LOG.error('Backup service %(configured_service)s ' + 'does not support verify. Backup id ' + '%(id)s is not verified. 
' + 'Skipping verify.', {'configured_service': self.driver_name, 'id': backup.id}) except AttributeError: @@ -882,8 +880,8 @@ class BackupManager(manager.ThreadPoolManager): self._cleanup_temp_volumes_snapshots_for_one_backup( context, backup) except Exception: - LOG.exception(_LE("Problem cleaning temp volumes and " - "snapshots for backup %(bkup)s."), + LOG.exception("Problem cleaning temp volumes and " + "snapshots for backup %(bkup)s.", {'bkup': backup.id}) # send notification to ceilometer @@ -928,9 +926,9 @@ class BackupManager(manager.ThreadPoolManager): properties, force=True) except Exception: - LOG.warning(_LW("Failed to terminate the connection " - "of volume %(volume_id)s, but it is " - "acceptable."), + LOG.warning("Failed to terminate the connection " + "of volume %(volume_id)s, but it is " + "acceptable.", {'volume_id', volume.id}) def _connect_device(self, conn): diff --git a/cinder/brick/local_dev/lvm.py b/cinder/brick/local_dev/lvm.py index dbe7667d217..e4e44bed340 100644 --- a/cinder/brick/local_dev/lvm.py +++ b/cinder/brick/local_dev/lvm.py @@ -28,7 +28,6 @@ from oslo_utils import excutils from six import moves from cinder import exception -from cinder.i18n import _LE, _LI from cinder import utils @@ -97,14 +96,14 @@ class LVM(executor.Executor): try: self._create_vg(physical_volumes) except putils.ProcessExecutionError as err: - LOG.exception(_LE('Error creating Volume Group')) - LOG.error(_LE('Cmd :%s'), err.cmd) - LOG.error(_LE('StdOut :%s'), err.stdout) - LOG.error(_LE('StdErr :%s'), err.stderr) + LOG.exception('Error creating Volume Group') + LOG.error('Cmd :%s', err.cmd) + LOG.error('StdOut :%s', err.stdout) + LOG.error('StdErr :%s', err.stderr) raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name) if self._vg_exists() is False: - LOG.error(_LE('Unable to locate Volume Group %s'), vg_name) + LOG.error('Unable to locate Volume Group %s', vg_name) raise exception.VolumeGroupNotFound(vg_name=vg_name) # NOTE: we assume that the VG has been activated outside of Cinder @@ -180,10 +179,10 @@ class LVM(executor.Executor): free_space = pool_size - consumed_space free_space = round(free_space, 2) except putils.ProcessExecutionError as err: - LOG.exception(_LE('Error querying thin pool about data_percent')) - LOG.error(_LE('Cmd :%s'), err.cmd) - LOG.error(_LE('StdOut :%s'), err.stdout) - LOG.error(_LE('StdErr :%s'), err.stderr) + LOG.exception('Error querying thin pool about data_percent') + LOG.error('Cmd :%s', err.cmd) + LOG.error('StdOut :%s', err.stdout) + LOG.error('StdErr :%s', err.stderr) return free_space @@ -300,8 +299,8 @@ class LVM(executor.Executor): with excutils.save_and_reraise_exception(reraise=True) as ctx: if "not found" in err.stderr or "Failed to find" in err.stderr: ctx.reraise = False - LOG.info(_LI("Logical Volume not found when querying " - "LVM info. (vg_name=%(vg)s, lv_name=%(lv)s"), + LOG.info("Logical Volume not found when querying " + "LVM info. 
(vg_name=%(vg)s, lv_name=%(lv)s", {'vg': vg_name, 'lv': lv_name}) out = None @@ -416,7 +415,7 @@ class LVM(executor.Executor): vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name) if len(vg_list) != 1: - LOG.error(_LE('Unable to find VG: %s'), self.vg_name) + LOG.error('Unable to find VG: %s', self.vg_name) raise exception.VolumeGroupNotFound(vg_name=self.vg_name) self.vg_size = float(vg_list[0]['size']) @@ -503,9 +502,9 @@ class LVM(executor.Executor): """ if not self.supports_thin_provisioning(self._root_helper): - LOG.error(_LE('Requested to setup thin provisioning, ' - 'however current LVM version does not ' - 'support it.')) + LOG.error('Requested to setup thin provisioning, ' + 'however current LVM version does not ' + 'support it.') return None if name is None: @@ -563,11 +562,11 @@ class LVM(executor.Executor): root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: - LOG.exception(_LE('Error creating Volume')) - LOG.error(_LE('Cmd :%s'), err.cmd) - LOG.error(_LE('StdOut :%s'), err.stdout) - LOG.error(_LE('StdErr :%s'), err.stderr) - LOG.error(_LE('Current state: %s'), self.get_all_volume_groups()) + LOG.exception('Error creating Volume') + LOG.error('Cmd :%s', err.cmd) + LOG.error('StdOut :%s', err.stdout) + LOG.error('StdErr :%s', err.stderr) + LOG.error('Current state: %s', self.get_all_volume_groups()) raise @utils.retry(putils.ProcessExecutionError) @@ -581,7 +580,7 @@ class LVM(executor.Executor): """ source_lvref = self.get_volume(source_lv_name) if source_lvref is None: - LOG.error(_LE("Trying to create snapshot by non-existent LV: %s"), + LOG.error("Trying to create snapshot by non-existent LV: %s", source_lv_name) raise exception.VolumeDeviceNotFound(device=source_lv_name) cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '--name', name, '--snapshot', @@ -595,10 +594,10 @@ class LVM(executor.Executor): root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: - LOG.exception(_LE('Error creating snapshot')) - LOG.error(_LE('Cmd :%s'), err.cmd) - LOG.error(_LE('StdOut :%s'), err.stdout) - LOG.error(_LE('StdErr :%s'), err.stderr) + LOG.exception('Error creating snapshot') + LOG.error('Cmd :%s', err.cmd) + LOG.error('StdOut :%s', err.stdout) + LOG.error('StdErr :%s', err.stderr) raise def _mangle_lv_name(self, name): @@ -629,10 +628,10 @@ class LVM(executor.Executor): root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: - LOG.exception(_LE('Error deactivating LV')) - LOG.error(_LE('Cmd :%s'), err.cmd) - LOG.error(_LE('StdOut :%s'), err.stdout) - LOG.error(_LE('StdErr :%s'), err.stderr) + LOG.exception('Error deactivating LV') + LOG.error('Cmd :%s', err.cmd) + LOG.error('StdOut :%s', err.stdout) + LOG.error('StdErr :%s', err.stderr) raise # Wait until lv is deactivated to return in @@ -686,10 +685,10 @@ class LVM(executor.Executor): root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: - LOG.exception(_LE('Error activating LV')) - LOG.error(_LE('Cmd :%s'), err.cmd) - LOG.error(_LE('StdOut :%s'), err.stdout) - LOG.error(_LE('StdErr :%s'), err.stderr) + LOG.exception('Error activating LV') + LOG.error('Cmd :%s', err.cmd) + LOG.error('StdOut :%s', err.stdout) + LOG.error('StdErr :%s', err.stderr) raise @utils.retry(putils.ProcessExecutionError) @@ -813,10 +812,10 @@ class LVM(executor.Executor): self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: - 
LOG.exception(_LE('Error extending Volume')) - LOG.error(_LE('Cmd :%s'), err.cmd) - LOG.error(_LE('StdOut :%s'), err.stdout) - LOG.error(_LE('StdErr :%s'), err.stderr) + LOG.exception('Error extending Volume') + LOG.error('Cmd :%s', err.cmd) + LOG.error('StdOut :%s', err.stdout) + LOG.error('StdErr :%s', err.stderr) raise def vg_mirror_free_space(self, mirror_count): @@ -851,8 +850,8 @@ class LVM(executor.Executor): root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: - LOG.exception(_LE('Error renaming logical volume')) - LOG.error(_LE('Cmd :%s'), err.cmd) - LOG.error(_LE('StdOut :%s'), err.stdout) - LOG.error(_LE('StdErr :%s'), err.stderr) + LOG.exception('Error renaming logical volume') + LOG.error('Cmd :%s', err.cmd) + LOG.error('StdOut :%s', err.stdout) + LOG.error('StdErr :%s', err.stderr) raise diff --git a/cinder/cmd/volume.py b/cinder/cmd/volume.py index b845104b1a2..88e4123c913 100644 --- a/cinder/cmd/volume.py +++ b/cinder/cmd/volume.py @@ -46,7 +46,7 @@ i18n.enable_lazy() # Need to register global_opts from cinder.common import config # noqa from cinder.db import api as session -from cinder.i18n import _, _LE +from cinder.i18n import _ from cinder import service from cinder import utils from cinder import version @@ -109,9 +109,9 @@ def main(): launcher.launch_service(server) service_started = True else: - LOG.error(_LE('Configuration for cinder-volume does not specify ' - '"enabled_backends". Using DEFAULT section to configure ' - 'drivers is not supported since Ocata.')) + LOG.error('Configuration for cinder-volume does not specify ' + '"enabled_backends". Using DEFAULT section to configure ' + 'drivers is not supported since Ocata.') if not service_started: msg = _('No volume service(s) started successfully, terminating.') diff --git a/cinder/cmd/volume_usage_audit.py b/cinder/cmd/volume_usage_audit.py index 8616d2fdd4c..144f3b87dcb 100644 --- a/cinder/cmd/volume_usage_audit.py +++ b/cinder/cmd/volume_usage_audit.py @@ -48,7 +48,7 @@ from oslo_log import log as logging from cinder import i18n i18n.enable_lazy() from cinder import context -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder import objects from cinder import rpc from cinder import utils @@ -104,7 +104,7 @@ def _vol_notify_usage(LOG, volume_ref, extra_info, admin_context): cinder.volume.utils.notify_about_volume_usage( admin_context, volume_ref, 'exists', extra_usage_info=extra_info) except Exception as exc_msg: - LOG.error(_LE("Exists volume notification failed: %s"), + LOG.error("Exists volume notification failed: %s", exc_msg, resource=volume_ref) @@ -119,7 +119,7 @@ def _snap_notify_usage(LOG, snapshot_ref, extra_info, admin_context): cinder.volume.utils.notify_about_snapshot_usage( admin_context, snapshot_ref, 'exists', extra_info) except Exception as exc_msg: - LOG.error(_LE("Exists snapshot notification failed: %s"), + LOG.error("Exists snapshot notification failed: %s", exc_msg, resource=snapshot_ref) @@ -134,7 +134,7 @@ def _backup_notify_usage(LOG, backup_ref, extra_info, admin_context): 'project_id': backup_ref.project_id, 'extra_info': extra_info}) except Exception as exc_msg: - LOG.error(_LE("Exists backups notification failed: %s"), exc_msg) + LOG.error("Exists backups notification failed: %s", exc_msg) def _create_action(obj_ref, admin_context, LOG, notify_about_usage, @@ -155,7 +155,7 @@ def _create_action(obj_ref, admin_context, LOG, notify_about_usage, notify_about_usage(admin_context, obj_ref, 'create.end', 
extra_usage_info=local_extra_info) except Exception as exc_msg: - LOG.error(_LE("Create %(type)s notification failed: %(exc_msg)s"), + LOG.error("Create %(type)s notification failed: %(exc_msg)s", {'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref) @@ -177,7 +177,7 @@ def _delete_action(obj_ref, admin_context, LOG, notify_about_usage, notify_about_usage(admin_context, obj_ref, 'delete.end', extra_usage_info=local_extra_info) except Exception as exc_msg: - LOG.error(_LE("Delete %(type)s notification failed: %(exc_msg)s"), + LOG.error("Delete %(type)s notification failed: %(exc_msg)s", {'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref) @@ -206,9 +206,9 @@ def main(): begin, end = utils.last_completed_audit_period() begin, end = _time_error(LOG, begin, end) - LOG.info(_LI("Starting volume usage audit")) - msg = _LI("Creating usages for %(begin_period)s until %(end_period)s") - LOG.info(msg, {"begin_period": str(begin), "end_period": str(end)}) + LOG.info("Starting volume usage audit") + LOG.info("Creating usages for %(begin_period)s until %(end_period)s", + {"begin_period": begin, "end_period": end}) extra_info = { 'audit_period_beginning': str(begin), @@ -219,7 +219,7 @@ def main(): begin, end) - LOG.info(_LI("Found %d volumes"), len(volumes)) + LOG.info("Found %d volumes", len(volumes)) for volume_ref in volumes: _obj_ref_action(_vol_notify_usage, LOG, volume_ref, extra_info, admin_context, begin, end, @@ -228,7 +228,7 @@ def main(): snapshots = objects.SnapshotList.get_all_active_by_window(admin_context, begin, end) - LOG.info(_LI("Found %d snapshots"), len(snapshots)) + LOG.info("Found %d snapshots", len(snapshots)) for snapshot_ref in snapshots: _obj_ref_action(_snap_notify_usage, LOG, snapshot_ref, extra_info, admin_context, begin, @@ -238,10 +238,10 @@ def main(): backups = objects.BackupList.get_all_active_by_window(admin_context, begin, end) - LOG.info(_LI("Found %d backups"), len(backups)) + LOG.info("Found %d backups", len(backups)) for backup_ref in backups: _obj_ref_action(_backup_notify_usage, LOG, backup_ref, extra_info, admin_context, begin, end, cinder.volume.utils.notify_about_backup_usage, "backup_id", "backup") - LOG.info(_LI("Volume usage audit completed")) + LOG.info("Volume usage audit completed") diff --git a/cinder/common/sqlalchemyutils.py b/cinder/common/sqlalchemyutils.py index c24628cb57c..4b4e211a89b 100644 --- a/cinder/common/sqlalchemyutils.py +++ b/cinder/common/sqlalchemyutils.py @@ -27,7 +27,7 @@ from sqlalchemy.sql import type_api from cinder.db import api from cinder import exception -from cinder.i18n import _, _LW +from cinder.i18n import _ LOG = logging.getLogger(__name__) @@ -96,7 +96,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None, if 'id' not in sort_keys: # TODO(justinsb): If this ever gives a false-positive, check # the actual primary key, rather than assuming its id - LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?')) + LOG.warning('Id not in sort_keys; is sort_keys unique?') assert(not (sort_dir and sort_dirs)) diff --git a/cinder/consistencygroup/api.py b/cinder/consistencygroup/api.py index cf2833e4351..f5d9d3a5271 100644 --- a/cinder/consistencygroup/api.py +++ b/cinder/consistencygroup/api.py @@ -28,7 +28,7 @@ from oslo_utils import timeutils from cinder import db from cinder.db import base from cinder import exception -from cinder.i18n import _, _LE, _LW +from cinder.i18n import _ from cinder import objects from cinder.objects import fields as c_fields import cinder.policy @@ -110,8 +110,7 @@ 
class API(base.Base): valid = self._valid_availability_zone(availability_zone) if not valid: - msg = _LW( - "Availability zone '%s' is invalid") % (availability_zone) + msg = _("Availability zone '%s' is invalid.") % availability_zone LOG.warning(msg) raise exception.InvalidInput(reason=msg) @@ -148,8 +147,8 @@ class API(base.Base): group.create() except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error occurred when creating consistency group" - " %s."), name) + LOG.error("Error occurred when creating consistency group " + "%s.", name) request_spec_list = [] filter_properties_list = [] @@ -189,19 +188,19 @@ class API(base.Base): group.create(cg_snap_id=cgsnapshot_id, cg_id=source_cgid) except exception.ConsistencyGroupNotFound: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Source CG %(source_cg)s not found when " - "creating consistency group %(cg)s from " - "source."), + LOG.error("Source CG %(source_cg)s not found when " + "creating consistency group %(cg)s from " + "source.", {'cg': name, 'source_cg': source_cgid}) except exception.CgSnapshotNotFound: with excutils.save_and_reraise_exception(): - LOG.error(_LE("CG snapshot %(cgsnap)s not found when creating " - "consistency group %(cg)s from source."), + LOG.error("CG snapshot %(cgsnap)s not found when creating " + "consistency group %(cg)s from source.", {'cg': name, 'cgsnap': cgsnapshot_id}) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error occurred when creating consistency group" - " %(cg)s from cgsnapshot %(cgsnap)s."), + LOG.error("Error occurred when creating consistency group" + " %(cg)s from cgsnapshot %(cgsnap)s.", {'cg': name, 'cgsnap': cgsnapshot_id}) # Update quota for consistencygroups @@ -257,10 +256,10 @@ class API(base.Base): **kwargs) except exception.CinderException: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error occurred when creating volume " - "entry from snapshot in the process of " - "creating consistency group %(group)s " - "from cgsnapshot %(cgsnap)s."), + LOG.error("Error occurred when creating volume " + "entry from snapshot in the process of " + "creating consistency group %(group)s " + "from cgsnapshot %(cgsnap)s.", {'group': group.id, 'cgsnap': cgsnapshot.id}) except Exception: @@ -268,9 +267,9 @@ class API(base.Base): try: group.destroy() finally: - LOG.error(_LE("Error occurred when creating consistency " - "group %(group)s from cgsnapshot " - "%(cgsnap)s."), + LOG.error("Error occurred when creating consistency " + "group %(group)s from cgsnapshot " + "%(cgsnap)s.", {'group': group.id, 'cgsnap': cgsnapshot.id}) @@ -321,10 +320,10 @@ class API(base.Base): **kwargs) except exception.CinderException: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error occurred when creating cloned " - "volume in the process of creating " - "consistency group %(group)s from " - "source CG %(source_cg)s."), + LOG.error("Error occurred when creating cloned " + "volume in the process of creating " + "consistency group %(group)s from " + "source CG %(source_cg)s.", {'group': group.id, 'source_cg': source_cg.id}) except Exception: @@ -332,9 +331,9 @@ class API(base.Base): try: group.destroy() finally: - LOG.error(_LE("Error occurred when creating consistency " - "group %(group)s from source CG " - "%(source_cg)s."), + LOG.error("Error occurred when creating consistency " + "group %(group)s from source CG " + "%(source_cg)s.", {'group': group.id, 'source_cg': source_cg.id}) @@ -390,9 +389,9 @@ class API(base.Base): 
try: group.destroy() finally: - LOG.error(_LE("Error occurred when building " - "request spec list for consistency group " - "%s."), group.id) + LOG.error("Error occurred when building " + "request spec list for consistency group " + "%s.", group.id) # Cast to the scheduler and let it handle whatever is needed # to select the target host for this group. @@ -418,8 +417,8 @@ class API(base.Base): quota_utils.process_reserve_over_quota( context, e, resource='groups') finally: - LOG.error(_LE("Failed to update quota for " - "consistency group %s."), group.id) + LOG.error("Failed to update quota for " + "consistency group %s.", group.id) @wrap_check_policy def delete(self, context, group, force=False): @@ -749,8 +748,8 @@ class API(base.Base): if cgsnapshot.obj_attr_is_set('id'): cgsnapshot.destroy() finally: - LOG.error(_LE("Error occurred when creating cgsnapshot" - " %s."), cgsnapshot_id) + LOG.error("Error occurred when creating cgsnapshot" + " %s.", cgsnapshot_id) self.volume_rpcapi.create_cgsnapshot(context, cgsnapshot) diff --git a/cinder/context.py b/cinder/context.py index 6d6274c8304..babfc43dc8f 100644 --- a/cinder/context.py +++ b/cinder/context.py @@ -25,7 +25,7 @@ from oslo_log import log as logging from oslo_utils import timeutils import six -from cinder.i18n import _, _LW +from cinder.i18n import _ from cinder import policy context_opts = [ @@ -214,6 +214,6 @@ def get_internal_tenant_context(): project_id=project_id, is_admin=True) else: - LOG.warning(_LW('Unable to get internal tenant context: Missing ' - 'required config parameters.')) + LOG.warning('Unable to get internal tenant context: Missing ' + 'required config parameters.') return None diff --git a/cinder/coordination.py b/cinder/coordination.py index b4dc212fd6c..8023de94956 100644 --- a/cinder/coordination.py +++ b/cinder/coordination.py @@ -32,7 +32,7 @@ from tooz import coordination from tooz import locking from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ LOG = log.getLogger(__name__) @@ -94,9 +94,9 @@ class Coordinator(object): self._ev = eventlet.spawn( lambda: tpool.execute(self.heartbeat)) except coordination.ToozError: - LOG.exception(_LE('Error starting coordination backend.')) + LOG.exception('Error starting coordination backend.') raise - LOG.info(_LI('Coordination backend started successfully.')) + LOG.info('Coordination backend started successfully.') def stop(self): """Disconnect from coordination backend and stop heartbeat.""" @@ -154,17 +154,17 @@ class Coordinator(object): self.coordinator.heartbeat() return True except coordination.ToozConnectionError: - LOG.exception(_LE('Connection error while sending a heartbeat ' - 'to coordination backend.')) + LOG.exception('Connection error while sending a heartbeat ' + 'to coordination backend.') raise except coordination.ToozError: - LOG.exception(_LE('Error sending a heartbeat to coordination ' - 'backend.')) + LOG.exception('Error sending a heartbeat to coordination ' + 'backend.') return False def _reconnect(self): """Reconnect with jittered exponential backoff increase.""" - LOG.info(_LI('Reconnecting to coordination backend.')) + LOG.info('Reconnecting to coordination backend.') cap = cfg.CONF.coordination.max_reconnect_backoff backoff = base = cfg.CONF.coordination.initial_reconnect_backoff for attempt in itertools.count(1): @@ -173,11 +173,11 @@ class Coordinator(object): break except coordination.ToozError: backoff = min(cap, random.uniform(base, backoff * 3)) - msg = _LW('Reconnect attempt %(attempt)s 
failed. ' - 'Next try in %(backoff).2fs.') + msg = ('Reconnect attempt %(attempt)s failed. ' + 'Next try in %(backoff).2fs.') LOG.warning(msg, {'attempt': attempt, 'backoff': backoff}) self._dead.wait(backoff) - LOG.info(_LI('Reconnected to coordination backend.')) + LOG.info('Reconnected to coordination backend.') COORDINATOR = Coordinator(prefix='cinder-') diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py index 6feaf1e25a0..1149d61c564 100644 --- a/cinder/db/sqlalchemy/api.py +++ b/cinder/db/sqlalchemy/api.py @@ -57,7 +57,7 @@ from cinder.common import sqlalchemyutils from cinder import db from cinder.db.sqlalchemy import models from cinder import exception -from cinder.i18n import _, _LW, _LE, _LI +from cinder.i18n import _ from cinder.objects import fields from cinder import utils @@ -120,7 +120,7 @@ def get_backend(): def is_admin_context(context): """Indicates if the request context is an administrator.""" if not context: - LOG.warning(_LW('Use of empty request context is deprecated'), + LOG.warning('Use of empty request context is deprecated', DeprecationWarning) raise Exception('die') return context.is_admin @@ -234,8 +234,8 @@ def _retry_on_deadlock(f): try: return f(*args, **kwargs) except db_exc.DBDeadlock: - LOG.warning(_LW("Deadlock detected when running " - "'%(func_name)s': Retrying..."), + LOG.warning("Deadlock detected when running " + "'%(func_name)s': Retrying...", dict(func_name=f.__name__)) # Retry! time.sleep(0.5) @@ -1277,8 +1277,8 @@ def quota_reserve(context, resources, quotas, deltas, expire, usages[resource].reserved += delta if unders: - LOG.warning(_LW("Change will make usage less than 0 for the following " - "resources: %s"), unders) + LOG.warning("Change will make usage less than 0 for the following " + "resources: %s", unders) if overs: usages = {k: dict(in_use=v.in_use, reserved=v.reserved, allocated=allocated.get(k, 0)) @@ -3898,8 +3898,7 @@ def volume_type_destroy(context, id): session=session).filter( models.ConsistencyGroup.volume_type_id.contains(id)).count() if results or group_count or cg_count: - LOG.error(_LE('VolumeType %s deletion failed, ' - 'VolumeType in use.'), id) + LOG.error('VolumeType %s deletion failed, VolumeType in use.', id) raise exception.VolumeTypeInUse(volume_type_id=id) updated_values = {'deleted': True, 'deleted_at': utcnow, @@ -3929,8 +3928,8 @@ def group_type_destroy(context, id): # results = model_query(context, models.Group, session=session). 
\ # filter_by(group_type_id=id).all() # if results: - # LOG.error(_LE('GroupType %s deletion failed, ' - # 'GroupType in use.'), id) + # LOG.error('GroupType %s deletion failed, ' + # 'GroupType in use.', id) # raise exception.GroupTypeInUse(group_type_id=id) model_query(context, models.GroupTypes, session=session).\ filter_by(id=id).\ @@ -6086,9 +6085,9 @@ def purge_deleted_rows(context, age_in_days): for table in reversed(metadata.sorted_tables): if 'deleted' not in table.columns.keys(): continue - LOG.info(_LI('Purging deleted rows older than age=%(age)d days ' - 'from table=%(table)s'), {'age': age_in_days, - 'table': table}) + LOG.info('Purging deleted rows older than age=%(age)d days ' + 'from table=%(table)s', {'age': age_in_days, + 'table': table}) deleted_age = timeutils.utcnow() - dt.timedelta(days=age_in_days) try: with session.begin(): @@ -6104,14 +6103,14 @@ def purge_deleted_rows(context, age_in_days): table.delete() .where(table.c.deleted_at < deleted_age)) except db_exc.DBReferenceError as ex: - LOG.error(_LE('DBError detected when purging from ' - '%(tablename)s: %(error)s.'), - {'tablename': table, 'error': six.text_type(ex)}) + LOG.error('DBError detected when purging from ' + '%(tablename)s: %(error)s.', + {'tablename': table, 'error': ex}) raise rows_purged = result.rowcount if rows_purged != 0: - LOG.info(_LI("Deleted %(row)d rows from table=%(table)s"), + LOG.info("Deleted %(row)d rows from table=%(table)s", {'row': rows_purged, 'table': table}) diff --git a/cinder/exception.py b/cinder/exception.py index 1f278fda334..6cc2090ded2 100644 --- a/cinder/exception.py +++ b/cinder/exception.py @@ -32,7 +32,7 @@ import webob.exc from webob.util import status_generic_reasons from webob.util import status_reasons -from cinder.i18n import _, _LE +from cinder.i18n import _ LOG = logging.getLogger(__name__) @@ -108,9 +108,9 @@ class CinderException(Exception): exc_info = sys.exc_info() # kwargs doesn't match a variable in the message # log the issue and the kwargs - LOG.exception(_LE('Exception in string format operation')) + LOG.exception('Exception in string format operation') for name, value in kwargs.items(): - LOG.error(_LE("%(name)s: %(value)s"), + LOG.error("%(name)s: %(value)s", {'name': name, 'value': value}) if CONF.fatal_exception_format_errors: six.reraise(*exc_info) diff --git a/cinder/group/api.py b/cinder/group/api.py index 6c87b6f96db..e21dfa5a38d 100644 --- a/cinder/group/api.py +++ b/cinder/group/api.py @@ -29,7 +29,7 @@ from oslo_utils import uuidutils from cinder import db from cinder.db import base from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import objects from cinder.objects import base as objects_base from cinder.objects import fields as c_fields @@ -117,9 +117,8 @@ class API(base.Base): availability_zone = ( CONF.default_availability_zone or CONF.storage_availability_zone) - LOG.warning(_LW("Availability zone '%(s_az)s' " - "not found, falling back to " - "'%(s_fallback_az)s'."), + LOG.warning("Availability zone '%(s_az)s' not found, falling " + "back to '%(s_fallback_az)s'.", {'s_az': original_az, 's_fallback_az': availability_zone}) else: @@ -159,8 +158,8 @@ class API(base.Base): group.create() except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error occurred when creating group" - " %s."), name) + LOG.error("Error occurred when creating group" + " %s.", name) request_spec_list = [] filter_properties_list = [] @@ -222,19 +221,18 @@ class API(base.Base): 
source_group_id=source_group_id) except exception.GroupNotFound: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Source Group %(source_group)s not found when " - "creating group %(group)s from " - "source."), + LOG.error("Source Group %(source_group)s not found when " + "creating group %(group)s from source.", {'group': name, 'source_group': source_group_id}) except exception.GroupSnapshotNotFound: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Group snapshot %(group_snap)s not found when " - "creating group %(group)s from source."), + LOG.error("Group snapshot %(group_snap)s not found when " + "creating group %(group)s from source.", {'group': name, 'group_snap': group_snapshot_id}) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error occurred when creating group" - " %(group)s from group_snapshot %(grp_snap)s."), + LOG.error("Error occurred when creating group" + " %(group)s from group_snapshot %(grp_snap)s.", {'group': name, 'grp_snap': group_snapshot_id}) # Update quota for groups @@ -286,9 +284,9 @@ class API(base.Base): except exception.GroupVolumeTypeMappingExists: # Only need to create one group volume_type mapping # entry for the same combination, skipping. - LOG.info(_LI("A mapping entry already exists for group" - " %(grp)s and volume type %(vol_type)s. " - "Do not need to create again."), + LOG.info("A mapping entry already exists for group" + " %(grp)s and volume type %(vol_type)s. " + "Do not need to create again.", {'grp': group.id, 'vol_type': volume_type_id}) pass @@ -306,10 +304,10 @@ class API(base.Base): **kwargs) except exception.CinderException: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error occurred when creating volume " - "entry from snapshot in the process of " - "creating group %(group)s " - "from group snapshot %(group_snap)s."), + LOG.error("Error occurred when creating volume " + "entry from snapshot in the process of " + "creating group %(group)s " + "from group snapshot %(group_snap)s.", {'group': group.id, 'group_snap': group_snapshot.id}) except Exception: @@ -317,9 +315,8 @@ class API(base.Base): try: group.destroy() finally: - LOG.error(_LE("Error occurred when creating group " - "%(group)s from group snapshot " - "%(group_snap)s."), + LOG.error("Error occurred when creating group " + "%(group)s from group snapshot %(group_snap)s.", {'group': group.id, 'group_snap': group_snapshot.id}) @@ -364,9 +361,9 @@ class API(base.Base): except exception.GroupVolumeTypeMappingExists: # Only need to create one group volume_type mapping # entry for the same combination, skipping. - LOG.info(_LI("A mapping entry already exists for group" - " %(grp)s and volume type %(vol_type)s. " - "Do not need to create again."), + LOG.info("A mapping entry already exists for group" + " %(grp)s and volume type %(vol_type)s. 
" + "Do not need to create again.", {'grp': group.id, 'vol_type': volume_type_id}) pass @@ -384,10 +381,10 @@ class API(base.Base): **kwargs) except exception.CinderException: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error occurred when creating cloned " - "volume in the process of creating " - "group %(group)s from " - "source group %(source_group)s."), + LOG.error("Error occurred when creating cloned " + "volume in the process of creating " + "group %(group)s from " + "source group %(source_group)s.", {'group': group.id, 'source_group': source_group.id}) except Exception: @@ -395,9 +392,9 @@ class API(base.Base): try: group.destroy() finally: - LOG.error(_LE("Error occurred when creating " - "group %(group)s from source group " - "%(source_group)s."), + LOG.error("Error occurred when creating " + "group %(group)s from source group " + "%(source_group)s.", {'group': group.id, 'source_group': source_group.id}) @@ -467,9 +464,8 @@ class API(base.Base): try: group.destroy() finally: - LOG.error(_LE("Error occurred when building " - "request spec list for group " - "%s."), group.id) + LOG.error("Error occurred when building request spec " + "list for group %s.", group.id) # Cast to the scheduler and let it handle whatever is needed # to select the target host for this group. @@ -497,8 +493,7 @@ class API(base.Base): quota_utils.process_reserve_over_quota( context, e, resource='groups') finally: - LOG.error(_LE("Failed to update quota for " - "group %s."), group.id) + LOG.error("Failed to update quota for group %s.", group.id) @wrap_check_policy def delete(self, context, group, delete_volumes=False): @@ -823,8 +818,8 @@ class API(base.Base): if group_snapshot.obj_attr_is_set('id'): group_snapshot.destroy() finally: - LOG.error(_LE("Error occurred when creating group_snapshot" - " %s."), group_snapshot_id) + LOG.error("Error occurred when creating group_snapshot" + " %s.", group_snapshot_id) self.volume_rpcapi.create_group_snapshot(context, group_snapshot) diff --git a/cinder/i18n.py b/cinder/i18n.py index b798d831437..ceb6fc25c85 100644 --- a/cinder/i18n.py +++ b/cinder/i18n.py @@ -27,16 +27,6 @@ _translators = i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary -# Translators for log levels. -# -# The abbreviated names are meant to reflect the usual use of a short -# name like '_'. The "L" is for "log" and the other letter comes from -# the level. -_LI = _translators.log_info -_LW = _translators.log_warning -_LE = _translators.log_error -_LC = _translators.log_critical - def enable_lazy(enable=True): return i18n.enable_lazy(enable) diff --git a/cinder/image/cache.py b/cinder/image/cache.py index c24b7175bbd..bbea32fe752 100644 --- a/cinder/image/cache.py +++ b/cinder/image/cache.py @@ -19,7 +19,6 @@ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils -from cinder.i18n import _LW from cinder import objects from cinder import rpc from cinder import utils @@ -176,8 +175,8 @@ class ImageVolumeCache(object): # to 0. 
if self.max_cache_size_gb > 0: if current_size > self.max_cache_size_gb > 0: - LOG.warning(_LW('Image-volume cache for %(service)s does ' - 'not have enough space (GB).'), + LOG.warning('Image-volume cache for %(service)s does ' + 'not have enough space (GB).', {'service': volume.service_topic_queue}) return False diff --git a/cinder/image/glance.py b/cinder/image/glance.py index f38dd157042..5da4cf9f8f7 100644 --- a/cinder/image/glance.py +++ b/cinder/image/glance.py @@ -36,7 +36,7 @@ from six.moves import range from six.moves import urllib from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _ glance_opts = [ @@ -198,9 +198,9 @@ class GlanceClientWrapper(object): except retry_excs as e: netloc = self.netloc extra = "retrying" - error_msg = _LE("Error contacting glance server " - "'%(netloc)s' for '%(method)s', " - "%(extra)s.") + error_msg = _("Error contacting glance server " + "'%(netloc)s' for '%(method)s', " + "%(extra)s.") if attempt == num_attempts: extra = 'done trying' LOG.exception(error_msg, {'netloc': netloc, diff --git a/cinder/image/image_utils.py b/cinder/image/image_utils.py index 5cebd9c0a3a..f5c8acbf1df 100644 --- a/cinder/image/image_utils.py +++ b/cinder/image/image_utils.py @@ -42,7 +42,7 @@ from oslo_utils import units import psutil from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import utils from cinder.volume import throttling from cinder.volume import utils as volume_utils @@ -88,7 +88,7 @@ def get_qemu_img_version(): pattern = r"qemu-img version ([0-9\.]*)" version = re.match(pattern, info) if not version: - LOG.warning(_LW("qemu-img is not installed.")) + LOG.warning("qemu-img is not installed.") return None return _get_version_from_string(version.groups()[0]) @@ -149,8 +149,8 @@ def _convert_image(prefix, source, dest, out_format, run_as_root=True): image_size = qemu_img_info(source, run_as_root=run_as_root).virtual_size except ValueError as e: - msg = _LI("The image was successfully converted, but image size " - "is unavailable. src %(src)s, dest %(dest)s. %(error)s") + msg = ("The image was successfully converted, but image size " + "is unavailable. src %(src)s, dest %(dest)s. 
%(error)s") LOG.info(msg, {"src": source, "dest": dest, "error": e}) @@ -165,7 +165,7 @@ def _convert_image(prefix, source, dest, out_format, run_as_root=True): "duration": duration, "dest": dest}) - msg = _LI("Converted %(sz).2f MB image at %(mbps).2f MB/s") + msg = "Converted %(sz).2f MB image at %(mbps).2f MB/s" LOG.info(msg, {"sz": fsz_mb, "mbps": mbps}) @@ -198,9 +198,9 @@ def fetch(context, image_service, image_id, path, _user_id, _project_id): with excutils.save_and_reraise_exception(): if e.errno == errno.ENOSPC: # TODO(eharney): Fire an async error message for this - LOG.error(_LE("No space left in image_conversion_dir " - "path (%(path)s) while fetching " - "image %(image)s."), + LOG.error("No space left in image_conversion_dir " + "path (%(path)s) while fetching " + "image %(image)s.", {'path': os.path.dirname(path), 'image': image_id}) @@ -217,7 +217,7 @@ def fetch(context, image_service, image_id, path, _user_id, _project_id): LOG.debug(msg, {"dest": image_file.name, "sz": fsz_mb, "duration": duration}) - msg = _LI("Image download %(sz).2f MB at %(mbps).2f MB/s") + msg = "Image download %(sz).2f MB at %(mbps).2f MB/s" LOG.info(msg, {"sz": fsz_mb, "mbps": mbps}) @@ -530,8 +530,8 @@ def cleanup_temporary_file(backend_name): path = os.path.join(temp_dir, tmp_file) os.remove(path) except OSError as e: - LOG.warning(_LW("Exception caught while clearing temporary image " - "files: %s"), e) + LOG.warning("Exception caught while clearing temporary image " + "files: %s", e) @contextlib.contextmanager diff --git a/cinder/keymgr/__init__.py b/cinder/keymgr/__init__.py index 8fc375f3042..3795ced3617 100644 --- a/cinder/keymgr/__init__.py +++ b/cinder/keymgr/__init__.py @@ -19,8 +19,6 @@ from oslo_log import log as logging from oslo_log import versionutils from oslo_utils import importutils -from cinder.i18n import _LW - LOG = logging.getLogger(__name__) CONF = cfg.CONF @@ -46,13 +44,13 @@ def set_overrides(conf): try: api_class = conf.key_manager.api_class except cfg.NoSuchOptError: - LOG.warning(_LW("key_manager.api_class is not set, will use deprecated" - " option keymgr.api_class if set")) + LOG.warning("key_manager.api_class is not set, will use deprecated" + " option keymgr.api_class if set") try: api_class = CONF.keymgr.api_class should_override = True except cfg.NoSuchOptError: - LOG.warning(_LW("keymgr.api_class is not set")) + LOG.warning("keymgr.api_class is not set") deprecated_barbican = 'cinder.keymgr.barbican.BarbicanKeyManager' barbican = 'castellan.key_manager.barbican_key_manager.BarbicanKeyManager' @@ -72,7 +70,7 @@ def set_overrides(conf): should_override = True # TODO(kfarr): key_manager.api_class should be set in DevStack, and # this block can be removed - LOG.warning(_LW("key manager not set, using insecure default %s"), + LOG.warning("key manager not set, using insecure default %s", castellan_mock) api_class = castellan_mock diff --git a/cinder/keymgr/conf_key_mgr.py b/cinder/keymgr/conf_key_mgr.py index e90db09ba6f..83771666cd0 100644 --- a/cinder/keymgr/conf_key_mgr.py +++ b/cinder/keymgr/conf_key_mgr.py @@ -39,7 +39,7 @@ from oslo_config import cfg from oslo_log import log as logging from cinder import exception -from cinder.i18n import _, _LW +from cinder.i18n import _ key_mgr_opts = [ @@ -67,8 +67,8 @@ class ConfKeyManager(key_manager.KeyManager): def __init__(self, configuration): if not ConfKeyManager.warning_logged: - LOG.warning(_LW('This key manager is insecure and is not ' - 'recommended for production deployments')) + LOG.warning('This key manager is 
insecure and is not ' + 'recommended for production deployments') ConfKeyManager.warning_logged = True super(ConfKeyManager, self).__init__(configuration) @@ -143,4 +143,4 @@ class ConfKeyManager(key_manager.KeyManager): raise exception.KeyManagerError( reason="cannot delete non-existent key") - LOG.warning(_LW("Not deleting key %s"), managed_object_id) + LOG.warning("Not deleting key %s", managed_object_id) diff --git a/cinder/manager.py b/cinder/manager.py index d5bc6ef25bf..fb7b98c37d2 100644 --- a/cinder/manager.py +++ b/cinder/manager.py @@ -62,7 +62,6 @@ from cinder import context from cinder import db from cinder.db import base from cinder import exception -from cinder.i18n import _LE, _LI, _LW from cinder import objects from cinder import rpc from cinder.scheduler import rpcapi as scheduler_rpcapi @@ -141,7 +140,7 @@ class Manager(base.Base, PeriodicTasks): We're utilizing it to reset RPC API version pins to avoid restart of the service when rolling upgrade is completed. """ - LOG.info(_LI('Resetting cached RPC version pins.')) + LOG.info('Resetting cached RPC version pins.') rpc.LAST_OBJ_VERSIONS = {} rpc.LAST_RPC_VERSIONS = {} @@ -198,9 +197,9 @@ class SchedulerDependentManager(ThreadPoolManager): # This means we have Newton's c-sch in the deployment, so # rpcapi cannot send the message. We can safely ignore the # error. Log it because it shouldn't happen after upgrade. - msg = _LW("Failed to notify about cinder-volume service " - "capabilities for host %(host)s. This is normal " - "during a live upgrade. Error: %(e)s") + msg = ("Failed to notify about cinder-volume service " + "capabilities for host %(host)s. This is normal " + "during a live upgrade. Error: %(e)s") LOG.warning(msg, {'host': self.host, 'e': e}) def reset(self): @@ -210,7 +209,7 @@ class SchedulerDependentManager(ThreadPoolManager): class CleanableManager(object): def do_cleanup(self, context, cleanup_request): - LOG.info(_LI('Initiating service %s cleanup'), + LOG.info('Initiating service %s cleanup', cleanup_request.service_id) # If the 'until' field in the cleanup request is not set, we default to @@ -264,8 +263,8 @@ class CleanableManager(object): 'exp_sts': clean.status, 'found_sts': vo.status}) else: - LOG.info(_LI('Cleaning %(type)s with id %(id)s and status ' - '%(status)s'), + LOG.info('Cleaning %(type)s with id %(id)s and status ' + '%(status)s', {'type': clean.resource_type, 'id': clean.resource_id, 'status': clean.status}, @@ -276,7 +275,7 @@ class CleanableManager(object): # of it keep_entry = self._do_cleanup(context, vo) except Exception: - LOG.exception(_LE('Could not perform cleanup.')) + LOG.exception('Could not perform cleanup.') # Return the worker DB entry to the original service db.worker_update(context, clean.id, service_id=original_service_id, @@ -288,10 +287,9 @@ class CleanableManager(object): # method doesn't want to keep the entry (for example for delayed # deletion). 
if not keep_entry and not db.worker_destroy(context, id=clean.id): - LOG.warning(_LW('Could not remove worker entry %s.'), clean.id) + LOG.warning('Could not remove worker entry %s.', clean.id) - LOG.info(_LI('Service %s cleanup completed.'), - cleanup_request.service_id) + LOG.info('Service %s cleanup completed.', cleanup_request.service_id) def _do_cleanup(self, ctxt, vo_resource): return False diff --git a/cinder/message/api.py b/cinder/message/api.py index ae464e2df40..b8025fa3709 100644 --- a/cinder/message/api.py +++ b/cinder/message/api.py @@ -19,7 +19,6 @@ from oslo_log import log as logging from oslo_utils import timeutils from cinder.db import base -from cinder.i18n import _LE, _LI from cinder.message import defined_messages @@ -39,7 +38,7 @@ class API(base.Base): def create(self, context, event_id, project_id, resource_type=None, resource_uuid=None, level="ERROR"): """Create a message with the specified information.""" - LOG.info(_LI("Creating message record for request_id = %s"), + LOG.info("Creating message record for request_id = %s", context.request_id) # Ensure valid event_id defined_messages.get_message_text(event_id) @@ -57,8 +56,8 @@ class API(base.Base): try: self.db.message_create(context, message_record) except Exception: - LOG.exception(_LE("Failed to create message record " - "for request_id %s"), context.request_id) + LOG.exception("Failed to create message record " + "for request_id %s", context.request_id) def get(self, context, id): """Return message with the specified id.""" diff --git a/cinder/objects/qos_specs.py b/cinder/objects/qos_specs.py index 691129d6128..2a87c090dcd 100644 --- a/cinder/objects/qos_specs.py +++ b/cinder/objects/qos_specs.py @@ -15,7 +15,7 @@ from oslo_log import log as logging from cinder import db from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _ from cinder import objects from cinder.objects import base from cinder.objects import fields as c_fields @@ -149,7 +149,7 @@ class QualityOfServiceSpecs(base.CinderPersistentObject, LOG.exception(msg) raise exception.Invalid(msg) except db_exc.DBError: - LOG.exception(_LE('DB error occurred when creating QoS specs.')) + LOG.exception('DB error occurred when creating QoS specs.') raise exception.QoSSpecsCreateFailed(name=self.name, qos_specs=self.specs) # Save ID with the object diff --git a/cinder/quota.py b/cinder/quota.py index ee88d21b49d..5452f1f0e11 100644 --- a/cinder/quota.py +++ b/cinder/quota.py @@ -29,7 +29,7 @@ import six from cinder import context from cinder import db from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _ from cinder import quota_utils @@ -1044,8 +1044,7 @@ class QuotaEngine(object): # usage resynchronization and the reservation expiration # mechanisms will resolve the issue. The exception is # logged, however, because this is less than optimal. - LOG.exception(_LE("Failed to commit " - "reservations %s"), reservations) + LOG.exception("Failed to commit reservations %s", reservations) def rollback(self, context, reservations, project_id=None): """Roll back reservations. @@ -1065,8 +1064,7 @@ class QuotaEngine(object): # usage resynchronization and the reservation expiration # mechanisms will resolve the issue. The exception is # logged, however, because this is less than optimal. 
- LOG.exception(_LE("Failed to roll back reservations " - "%s"), reservations) + LOG.exception("Failed to roll back reservations %s", reservations) def destroy_by_project(self, context, project_id): """Destroy all quota limits associated with a project. diff --git a/cinder/quota_utils.py b/cinder/quota_utils.py index eee4ddf2809..bda0a7b06a4 100644 --- a/cinder/quota_utils.py +++ b/cinder/quota_utils.py @@ -22,7 +22,7 @@ from keystoneclient import exceptions from cinder import db from cinder import exception -from cinder.i18n import _, _LW +from cinder.i18n import _ CONF = cfg.CONF CONF.import_opt('auth_uri', 'keystonemiddleware.auth_token.__init__', @@ -265,9 +265,9 @@ def process_reserve_over_quota(context, over_quota_exception, for over in overs: if 'gigabytes' in over: - msg = _LW("Quota exceeded for %(s_pid)s, tried to create " - "%(s_size)dG %(s_resource)s (%(d_consumed)dG of " - "%(d_quota)dG already consumed).") + msg = ("Quota exceeded for %(s_pid)s, tried to create " + "%(s_size)dG %(s_resource)s (%(d_consumed)dG of " + "%(d_quota)dG already consumed).") LOG.warning(msg, {'s_pid': context.project_id, 's_size': size, 's_resource': resource[:-1], @@ -284,9 +284,9 @@ def process_reserve_over_quota(context, over_quota_exception, quota=quotas[over]) if (resource in OVER_QUOTA_RESOURCE_EXCEPTIONS.keys() and resource in over): - msg = _LW("Quota exceeded for %(s_pid)s, tried to create " - "%(s_resource)s (%(d_consumed)d %(s_resource)ss " - "already consumed).") + msg = ("Quota exceeded for %(s_pid)s, tried to create " + "%(s_resource)s (%(d_consumed)d %(s_resource)ss " + "already consumed).") LOG.warning(msg, {'s_pid': context.project_id, 'd_consumed': _consumed(over), 's_resource': resource[:-1]}) diff --git a/cinder/rpc.py b/cinder/rpc.py index d58ca1d344e..f1171e035be 100644 --- a/cinder/rpc.py +++ b/cinder/rpc.py @@ -35,7 +35,7 @@ import six import cinder.context import cinder.exception -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder import objects from cinder.objects import base from cinder import utils @@ -93,7 +93,7 @@ def initialized(): def cleanup(): global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER if NOTIFIER is None: - LOG.exception(_LE("RPC cleanup: NOTIFIER is None")) + LOG.exception("RPC cleanup: NOTIFIER is None") TRANSPORT.cleanup() NOTIFICATION_TRANSPORT.cleanup() TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None @@ -256,8 +256,8 @@ class RPCAPI(object): # If there is no service we assume they will come up later and will # have the same version as we do. version_cap = cls.RPC_API_VERSION - LOG.info(_LI('Automatically selected %(binary)s RPC version ' - '%(version)s as minimum service version.'), + LOG.info('Automatically selected %(binary)s RPC version ' + '%(version)s as minimum service version.', {'binary': cls.BINARY, 'version': version_cap}) LAST_RPC_VERSIONS[cls.BINARY] = version_cap return version_cap @@ -274,8 +274,8 @@ class RPCAPI(object): # have the same version as we do. 
if not version_cap: version_cap = base.OBJ_VERSIONS.get_current() - LOG.info(_LI('Automatically selected %(binary)s objects version ' - '%(version)s as minimum service version.'), + LOG.info('Automatically selected %(binary)s objects version ' + '%(version)s as minimum service version.', {'binary': cls.BINARY, 'version': version_cap}) LAST_OBJ_VERSIONS[cls.BINARY] = version_cap return version_cap diff --git a/cinder/scheduler/base_filter.py b/cinder/scheduler/base_filter.py index 6f51b1475ca..07180667a10 100644 --- a/cinder/scheduler/base_filter.py +++ b/cinder/scheduler/base_filter.py @@ -17,9 +17,7 @@ Filter support """ from oslo_log import log as logging -import six -from cinder.i18n import _LI from cinder.scheduler import base_handler LOG = logging.getLogger(__name__) @@ -69,22 +67,17 @@ class BaseFilterHandler(base_handler.BaseHandler): # Log the filtration history rspec = filter_properties.get("request_spec", {}) msg_dict = {"vol_id": rspec.get("volume_id", ""), - "str_results": six.text_type(full_filter_results), - } - full_msg = ("Filtering removed all hosts for the request with " - "volume ID " - "'%(vol_id)s'. Filter results: %(str_results)s" - ) % msg_dict + "str_results": full_filter_results} + LOG.debug("Filtering removed all hosts for the request with " + "volume ID '%(vol_id)s'. Filter results: %(str_results)s", + msg_dict) msg_dict["str_results"] = ', '.join( - _LI("%(cls_name)s: (start: %(start)s, end: %(end)s)") % { + "%(cls_name)s: (start: %(start)s, end: %(end)s)" % { "cls_name": value[0], "start": value[1], "end": value[2]} for value in part_filter_results) - part_msg = _LI("Filtering removed all hosts for the request with " - "volume ID " - "'%(vol_id)s'. Filter results: %(str_results)s" - ) % msg_dict - LOG.debug(full_msg) - LOG.info(part_msg) + LOG.info("Filtering removed all hosts for the request with " + "volume ID '%(vol_id)s'. 
Filter results: %(str_results)s", + msg_dict) def get_filtered_objects(self, filter_classes, objs, filter_properties, index=0): @@ -115,7 +108,7 @@ class BaseFilterHandler(base_handler.BaseHandler): if filter_class.run_filter_for_index(index): objs = filter_class.filter_all(list_objs, filter_properties) if objs is None: - LOG.info(_LI("Filter %s returned 0 hosts"), cls_name) + LOG.info("Filter %s returned 0 hosts", cls_name) full_filter_results.append((cls_name, None)) list_objs = None break diff --git a/cinder/scheduler/filter_scheduler.py b/cinder/scheduler/filter_scheduler.py index 660e1e7763b..3575c68426a 100644 --- a/cinder/scheduler/filter_scheduler.py +++ b/cinder/scheduler/filter_scheduler.py @@ -25,7 +25,7 @@ from oslo_log import log as logging from oslo_serialization import jsonutils from cinder import exception -from cinder.i18n import _, _LE, _LW +from cinder.i18n import _ from cinder.scheduler import driver from cinder.scheduler import scheduler_options from cinder.volume import utils @@ -246,8 +246,8 @@ class FilterScheduler(driver.Scheduler): return # no previously attempted hosts, skip last_backend = backends[-1] - LOG.error(_LE("Error scheduling %(volume_id)s from last vol-service: " - "%(last_backend)s : %(exc)s"), + LOG.error("Error scheduling %(volume_id)s from last vol-service: " + "%(last_backend)s : %(exc)s", {'volume_id': volume_id, 'last_backend': last_backend, 'exc': exc}) @@ -631,8 +631,8 @@ class FilterScheduler(driver.Scheduler): if backend_id != group_backend: weighed_backends.remove(backend) if not weighed_backends: - LOG.warning(_LW('No weighed backend found for volume ' - 'with properties: %s'), + LOG.warning('No weighed backend found for volume ' + 'with properties: %s', filter_properties['request_spec'].get('volume_type')) return None return self._choose_top_backend(weighed_backends, request_spec) diff --git a/cinder/scheduler/filters/capacity_filter.py b/cinder/scheduler/filters/capacity_filter.py index 59359be4e4c..52e704121c3 100644 --- a/cinder/scheduler/filters/capacity_filter.py +++ b/cinder/scheduler/filters/capacity_filter.py @@ -21,7 +21,6 @@ import math from oslo_log import log as logging -from cinder.i18n import _LE, _LW from cinder.scheduler import filters @@ -63,8 +62,8 @@ class CapacityFilter(filters.BaseBackendFilter): if backend_state.free_capacity_gb is None: # Fail Safe - LOG.error(_LE("Free capacity not set: " - "volume node info collection broken.")) + LOG.error("Free capacity not set: " + "volume node info collection broken.") return False free_space = backend_state.free_capacity_gb @@ -88,9 +87,9 @@ class CapacityFilter(filters.BaseBackendFilter): return False total = float(total_space) if total <= 0: - LOG.warning(_LW("Insufficient free space for volume creation. " - "Total capacity is %(total).2f on %(grouping)s " - "%(grouping_name)s."), + LOG.warning("Insufficient free space for volume creation. " + "Total capacity is %(total).2f on %(grouping)s " + "%(grouping_name)s.", {"total": total, "grouping": grouping, "grouping_name": backend_state.backend_id}) @@ -125,12 +124,12 @@ class CapacityFilter(filters.BaseBackendFilter): "grouping": grouping, "grouping_name": backend_state.backend_id, } - LOG.warning(_LW( + LOG.warning( "Insufficient free space for thin provisioning. 
" "The ratio of provisioned capacity over total capacity " "%(provisioned_ratio).2f has exceeded the maximum over " "subscription ratio %(oversub_ratio).2f on %(grouping)s " - "%(grouping_name)s."), msg_args) + "%(grouping_name)s.", msg_args) return False else: # Thin provisioning is enabled and projected over-subscription @@ -143,10 +142,10 @@ class CapacityFilter(filters.BaseBackendFilter): free * backend_state.max_over_subscription_ratio) return adjusted_free_virtual >= requested_size elif thin and backend_state.thin_provisioning_support: - LOG.warning(_LW("Filtering out %(grouping)s %(grouping_name)s " - "with an invalid maximum over subscription ratio " - "of %(oversub_ratio).2f. The ratio should be a " - "minimum of 1.0."), + LOG.warning("Filtering out %(grouping)s %(grouping_name)s " + "with an invalid maximum over subscription ratio " + "of %(oversub_ratio).2f. The ratio should be a " + "minimum of 1.0.", {"oversub_ratio": backend_state.max_over_subscription_ratio, "grouping": grouping, @@ -159,9 +158,9 @@ class CapacityFilter(filters.BaseBackendFilter): "available": free} if free < requested_size: - LOG.warning(_LW("Insufficient free space for volume creation " - "on %(grouping)s %(grouping_name)s (requested / " - "avail): %(requested)s/%(available)s"), + LOG.warning("Insufficient free space for volume creation " + "on %(grouping)s %(grouping_name)s (requested / " + "avail): %(requested)s/%(available)s", msg_args) return False diff --git a/cinder/scheduler/filters/driver_filter.py b/cinder/scheduler/filters/driver_filter.py index dcfcbaecd24..aa0338b5cb2 100644 --- a/cinder/scheduler/filters/driver_filter.py +++ b/cinder/scheduler/filters/driver_filter.py @@ -16,7 +16,6 @@ from oslo_log import log as logging import six -from cinder.i18n import _LW from cinder.scheduler.evaluator import evaluator from cinder.scheduler import filters @@ -60,8 +59,8 @@ class DriverFilter(filters.BaseBackendFilter): except Exception as ex: # Warn the admin for now that there is an error in the # filter function. - LOG.warning(_LW("Error in filtering function " - "'%(function)s' : '%(error)s' :: failing backend"), + LOG.warning("Error in filtering function " + "'%(function)s' : '%(error)s' :: failing backend", {'function': stats['filter_function'], 'error': ex, }) return False diff --git a/cinder/scheduler/filters/instance_locality_filter.py b/cinder/scheduler/filters/instance_locality_filter.py index 303bf974f46..652bd763c85 100644 --- a/cinder/scheduler/filters/instance_locality_filter.py +++ b/cinder/scheduler/filters/instance_locality_filter.py @@ -18,7 +18,7 @@ from oslo_utils import uuidutils from cinder.compute import nova from cinder import exception -from cinder.i18n import _, _LW +from cinder.i18n import _ from cinder.scheduler import filters from cinder.volume import utils as volume_utils @@ -96,8 +96,8 @@ class InstanceLocalityFilter(filters.BaseBackendFilter): return self._cache[instance_uuid] == backend if not self._nova_has_extended_server_attributes(context): - LOG.warning(_LW('Hint "%s" dropped because ' - 'ExtendedServerAttributes not active in Nova.'), + LOG.warning('Hint "%s" dropped because ' + 'ExtendedServerAttributes not active in Nova.', HINT_KEYWORD) raise exception.CinderException(_('Hint "%s" not supported.') % HINT_KEYWORD) @@ -107,10 +107,10 @@ class InstanceLocalityFilter(filters.BaseBackendFilter): timeout=REQUESTS_TIMEOUT) if not hasattr(server, INSTANCE_HOST_PROP): - LOG.warning(_LW('Hint "%s" dropped because Nova did not return ' - 'enough information. 
Either Nova policy needs to ' - 'be changed or a privileged account for Nova ' - 'should be specified in conf.'), HINT_KEYWORD) + LOG.warning('Hint "%s" dropped because Nova did not return ' + 'enough information. Either Nova policy needs to ' + 'be changed or a privileged account for Nova ' + 'should be specified in conf.', HINT_KEYWORD) raise exception.CinderException(_('Hint "%s" not supported.') % HINT_KEYWORD) diff --git a/cinder/scheduler/flows/create_volume.py b/cinder/scheduler/flows/create_volume.py index 7cb8e4dbdf3..1fbe6a22828 100644 --- a/cinder/scheduler/flows/create_volume.py +++ b/cinder/scheduler/flows/create_volume.py @@ -17,7 +17,6 @@ from taskflow.patterns import linear_flow from cinder import exception from cinder import flow_utils -from cinder.i18n import _LE from cinder.message import api as message_api from cinder.message import defined_messages from cinder.message import resource_types @@ -96,7 +95,7 @@ class ScheduleCreateVolumeTask(flow_utils.CinderTask): try: self._notify_failure(context, request_spec, cause) finally: - LOG.error(_LE("Failed to run task %(name)s: %(cause)s"), + LOG.error("Failed to run task %(name)s: %(cause)s", {'cause': cause, 'name': self.name}) @utils.if_notifications_enabled @@ -114,8 +113,8 @@ class ScheduleCreateVolumeTask(flow_utils.CinderTask): rpc.get_notifier('scheduler').error(context, self.FAILURE_TOPIC, payload) except exception.CinderException: - LOG.exception(_LE("Failed notifying on %(topic)s " - "payload %(payload)s"), + LOG.exception("Failed notifying on %(topic)s " + "payload %(payload)s", {'topic': self.FAILURE_TOPIC, 'payload': payload}) def execute(self, context, request_spec, filter_properties, volume): diff --git a/cinder/scheduler/host_manager.py b/cinder/scheduler/host_manager.py index c429aef477d..b96a2228e04 100644 --- a/cinder/scheduler/host_manager.py +++ b/cinder/scheduler/host_manager.py @@ -29,9 +29,8 @@ from cinder.common import constants from cinder import context as cinder_context from cinder import exception from cinder import objects -from cinder import utils -from cinder.i18n import _LI, _LW from cinder.scheduler import filters +from cinder import utils from cinder.volume import utils as vol_utils @@ -484,8 +483,7 @@ class HostManager(object): # Ignore older updates if capab_old['timestamp'] and timestamp < capab_old['timestamp']: - LOG.info(_LI('Ignoring old capability report from %s.'), - backend) + LOG.info('Ignoring old capability report from %s.', backend) return # If the capabilites are not changed and the timestamp is older, @@ -559,7 +557,7 @@ class HostManager(object): for service in volume_services.objects: host = service.host if not service.is_up: - LOG.warning(_LW("volume service is down. (host: %s)"), host) + LOG.warning("volume service is down. (host: %s)", host) continue backend_key = service.service_topic_queue @@ -601,8 +599,8 @@ class HostManager(object): # the map when we are removing it because it has been added to a # cluster. 
if backend_key not in active_hosts: - LOG.info(_LI("Removing non-active backend: %(backend)s from " - "scheduler cache."), {'backend': backend_key}) + LOG.info("Removing non-active backend: %(backend)s from " + "scheduler cache.", {'backend': backend_key}) del self.backend_state_map[backend_key] def get_all_backend_states(self, context): diff --git a/cinder/scheduler/manager.py b/cinder/scheduler/manager.py index bf22b15247d..7b196e62c33 100644 --- a/cinder/scheduler/manager.py +++ b/cinder/scheduler/manager.py @@ -36,7 +36,7 @@ from cinder import context from cinder import db from cinder import exception from cinder import flow_utils -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder import manager from cinder import objects from cinder import quota @@ -141,15 +141,15 @@ class SchedulerManager(manager.CleanableManager, manager.Manager): request_spec_list, filter_properties_list) except exception.NoValidBackend: - LOG.error(_LE("Could not find a backend for consistency group " - "%(group_id)s."), + LOG.error("Could not find a backend for consistency group " + "%(group_id)s.", {'group_id': group.id}) group.status = 'error' group.save() except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Failed to create consistency group " - "%(group_id)s."), + LOG.exception("Failed to create consistency group " + "%(group_id)s.", {'group_id': group.id}) group.status = 'error' group.save() @@ -166,15 +166,15 @@ class SchedulerManager(manager.CleanableManager, manager.Manager): group_filter_properties, filter_properties_list) except exception.NoValidBackend: - LOG.error(_LE("Could not find a backend for group " - "%(group_id)s."), + LOG.error("Could not find a backend for group " + "%(group_id)s.", {'group_id': group.id}) group.status = 'error' group.save() except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Failed to create generic group " - "%(group_id)s."), + LOG.exception("Failed to create generic group " + "%(group_id)s.", {'group_id': group.id}) group.status = 'error' group.save() @@ -370,7 +370,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager): request_spec, msg=None): # TODO(harlowja): move into a task that just does this later. if not msg: - msg = (_LE("Failed to schedule_%(method)s: %(ex)s") % + msg = ("Failed to schedule_%(method)s: %(ex)s" % {'method': method, 'ex': six.text_type(ex)}) LOG.error(msg) @@ -445,7 +445,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager): if self.upgrading_cloud: raise exception.UnavailableDuringUpgrade(action='workers cleanup') - LOG.info(_LI('Workers cleanup request started.')) + LOG.info('Workers cleanup request started.') filters = dict(service_id=cleanup_request.service_id, cluster_name=cleanup_request.cluster_name, @@ -475,7 +475,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager): # If it's a scheduler or the service is up, send the request. if not dest or dest.is_up: - LOG.info(_LI('Sending cleanup for %(binary)s %(dest_name)s.'), + LOG.info('Sending cleanup for %(binary)s %(dest_name)s.', {'binary': service.binary, 'dest_name': dest_name}) cleanup_rpc(context, cleanup_request) @@ -483,11 +483,11 @@ class SchedulerManager(manager.CleanableManager, manager.Manager): # We don't send cleanup requests when there are no services alive # to do the cleanup. 
else: - LOG.info(_LI('No service available to cleanup %(binary)s ' - '%(dest_name)s.'), + LOG.info('No service available to cleanup %(binary)s ' + '%(dest_name)s.', {'binary': service.binary, 'dest_name': dest_name}) not_requested.append(service) - LOG.info(_LI('Cleanup requests completed.')) + LOG.info('Cleanup requests completed.') return requested, not_requested diff --git a/cinder/scheduler/scheduler_options.py b/cinder/scheduler/scheduler_options.py index b6c8da1fa56..f5ab62f5e5f 100644 --- a/cinder/scheduler/scheduler_options.py +++ b/cinder/scheduler/scheduler_options.py @@ -28,8 +28,6 @@ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils -from cinder.i18n import _LE - scheduler_json_config_location_opt = cfg.StrOpt( 'scheduler_json_config_location', @@ -66,8 +64,8 @@ class SchedulerOptions(object): try: return os.path.getmtime(filename) except os.error: - LOG.exception(_LE("Could not stat scheduler options file " - "%(filename)s."), + LOG.exception("Could not stat scheduler options file " + "%(filename)s.", {'filename': filename}) raise @@ -76,7 +74,7 @@ class SchedulerOptions(object): try: return json.load(handle) except ValueError: - LOG.exception(_LE("Could not decode scheduler options.")) + LOG.exception("Could not decode scheduler options.") return {} def _get_time_now(self): diff --git a/cinder/scheduler/weights/goodness.py b/cinder/scheduler/weights/goodness.py index ee45ff00586..72c88ad1c56 100644 --- a/cinder/scheduler/weights/goodness.py +++ b/cinder/scheduler/weights/goodness.py @@ -15,7 +15,6 @@ from oslo_log import log as logging import six -from cinder.i18n import _LW from cinder.scheduler.evaluator import evaluator from cinder.scheduler import weights @@ -56,17 +55,17 @@ class GoodnessWeigher(weights.BaseHostWeigher): goodness_rating = 0 if stats['goodness_function'] is None: - LOG.warning(_LW("Goodness function not set :: defaulting to " - "minimal goodness rating of 0")) + LOG.warning("Goodness function not set :: defaulting to " + "minimal goodness rating of 0") else: try: goodness_result = self._run_evaluator( stats['goodness_function'], stats) except Exception as ex: - LOG.warning(_LW("Error in goodness_function function " - "'%(function)s' : '%(error)s' :: Defaulting " - "to a goodness of 0"), + LOG.warning("Error in goodness_function function " + "'%(function)s' : '%(error)s' :: Defaulting " + "to a goodness of 0", {'function': stats['goodness_function'], 'error': ex, }) return goodness_rating @@ -75,9 +74,9 @@ class GoodnessWeigher(weights.BaseHostWeigher): if goodness_result: goodness_rating = 100 elif goodness_result < 0 or goodness_result > 100: - LOG.warning(_LW("Invalid goodness result. Result must be " - "between 0 and 100. Result generated: '%s' " - ":: Defaulting to a goodness of 0"), + LOG.warning("Invalid goodness result. Result must be " + "between 0 and 100. 
Result generated: '%s' " + ":: Defaulting to a goodness of 0", goodness_result) else: goodness_rating = goodness_result diff --git a/cinder/service.py b/cinder/service.py index 1a5295d8c71..e805021fc84 100644 --- a/cinder/service.py +++ b/cinder/service.py @@ -41,7 +41,7 @@ from cinder.common import constants from cinder import context from cinder import coordination from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import objects from cinder.objects import base as objects_base from cinder.objects import fields @@ -104,15 +104,15 @@ def setup_profiler(binary, host): host=host ) LOG.warning( - _LW("OSProfiler is enabled.\nIt means that person who knows " - "any of hmac_keys that are specified in " - "/etc/cinder/cinder.conf can trace his requests. \n" - "In real life only operator can read this file so there " - "is no security issue. Note that even if person can " - "trigger profiler, only admin user can retrieve trace " - "information.\n" - "To disable OSProfiler set in cinder.conf:\n" - "[profiler]\nenabled=false")) + "OSProfiler is enabled.\nIt means that person who knows " + "any of hmac_keys that are specified in " + "/etc/cinder/cinder.conf can trace his requests. \n" + "In real life only operator can read this file so there " + "is no security issue. Note that even if person can " + "trigger profiler, only admin user can retrieve trace " + "information.\n" + "To disable OSProfiler set in cinder.conf:\n" + "[profiler]\nenabled=false") class Service(service.Service): @@ -183,9 +183,9 @@ class Service(service.Service): # TODO(geguileo): In O - Remove self.is_upgrading_to_n part if (service_ref.cluster_name != cluster and not self.is_upgrading_to_n): - LOG.info(_LI('This service has been moved from cluster ' - '%(cluster_svc)s to %(cluster_cfg)s. Resources ' - 'will %(opt_no)sbe moved to the new cluster'), + LOG.info('This service has been moved from cluster ' + '%(cluster_svc)s to %(cluster_cfg)s. Resources ' + 'will %(opt_no)sbe moved to the new cluster', {'cluster_svc': service_ref.cluster_name, 'cluster_cfg': cluster, 'opt_no': '' if self.added_to_cluster else 'NO '}) @@ -231,7 +231,7 @@ class Service(service.Service): def start(self): version_string = version.version_string() - LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'), + LOG.info('Starting %(topic)s node (version %(version_string)s)', {'topic': self.topic, 'version_string': version_string}) self.model_disconnected = False @@ -270,8 +270,8 @@ class Service(service.Service): # TODO(geguileo): In O - Remove the is_svc_upgrading_to_n part if self.cluster and not self.is_svc_upgrading_to_n(self.binary): - LOG.info(_LI('Starting %(topic)s cluster %(cluster)s (version ' - '%(version)s)'), + LOG.info('Starting %(topic)s cluster %(cluster)s (version ' + '%(version)s)', {'topic': self.topic, 'version': version_string, 'cluster': self.cluster}) target = messaging.Target( @@ -310,11 +310,11 @@ class Service(service.Service): if CONF.service_down_time <= self.report_interval: new_down_time = int(self.report_interval * 2.5) LOG.warning( - _LW("Report interval must be less than service down " - "time. Current config service_down_time: " - "%(service_down_time)s, report_interval for this: " - "service is: %(report_interval)s. Setting global " - "service_down_time to: %(new_down_time)s"), + "Report interval must be less than service down " + "time. 
Current config service_down_time: " + "%(service_down_time)s, report_interval for this: " + "service is: %(report_interval)s. Setting global " + "service_down_time to: %(new_down_time)s", {'service_down_time': CONF.service_down_time, 'report_interval': self.report_interval, 'new_down_time': new_down_time}) @@ -478,9 +478,9 @@ class Service(service.Service): if not self.manager.is_working(): # NOTE(dulek): If manager reports a problem we're not sending # heartbeats - to indicate that service is actually down. - LOG.error(_LE('Manager for service %(binary)s %(host)s is ' - 'reporting problems, not sending heartbeat. ' - 'Service will appear "down".'), + LOG.error('Manager for service %(binary)s %(host)s is ' + 'reporting problems, not sending heartbeat. ' + 'Service will appear "down".', {'binary': self.binary, 'host': self.host}) return @@ -506,24 +506,24 @@ class Service(service.Service): # TODO(termie): make this pattern be more elegant. if getattr(self, 'model_disconnected', False): self.model_disconnected = False - LOG.error(_LE('Recovered model server connection!')) + LOG.error('Recovered model server connection!') except db_exc.DBConnectionError: if not getattr(self, 'model_disconnected', False): self.model_disconnected = True - LOG.exception(_LE('model server went away')) + LOG.exception('model server went away') # NOTE(jsbryant) Other DB errors can happen in HA configurations. # such errors shouldn't kill this thread, so we handle them here. except db_exc.DBError: if not getattr(self, 'model_disconnected', False): self.model_disconnected = True - LOG.exception(_LE('DBError encountered: ')) + LOG.exception('DBError encountered: ') except Exception: if not getattr(self, 'model_disconnected', False): self.model_disconnected = True - LOG.exception(_LE('Exception encountered: ')) + LOG.exception('Exception encountered: ') def reset(self): self.manager.reset() diff --git a/cinder/ssh_utils.py b/cinder/ssh_utils.py index c3a7160ece4..d6e8889085a 100644 --- a/cinder/ssh_utils.py +++ b/cinder/ssh_utils.py @@ -27,7 +27,7 @@ import paramiko import six from cinder import exception -from cinder.i18n import _, _LI +from cinder.i18n import _ LOG = logging.getLogger(__name__) @@ -79,8 +79,8 @@ class SSHPool(pools.Pool): if 'hosts_key_file' in kwargs.keys(): self.hosts_key_file = kwargs.pop('hosts_key_file') - LOG.info(_LI("Secondary ssh hosts key file %(kwargs)s will be " - "loaded along with %(conf)s from /etc/cinder.conf."), + LOG.info("Secondary ssh hosts key file %(kwargs)s will be " + "loaded along with %(conf)s from /etc/cinder.conf.", {'kwargs': self.hosts_key_file, 'conf': CONF.ssh_hosts_key_file}) diff --git a/cinder/tests/unit/scheduler/test_base_filter.py b/cinder/tests/unit/scheduler/test_base_filter.py index 54fd3f78403..cf950b464be 100644 --- a/cinder/tests/unit/scheduler/test_base_filter.py +++ b/cinder/tests/unit/scheduler/test_base_filter.py @@ -16,9 +16,7 @@ import mock from cinder.scheduler import base_filter -from cinder.scheduler import host_manager from cinder import test -from cinder.tests.unit import fake_constants as fake class TestBaseFilter(test.TestCase): @@ -174,32 +172,3 @@ class TestBaseFilterHandler(test.TestCase): result = self._get_filtered_objects(filter_classes, index=2) self.assertEqual(filter_objs_expected, result) self.assertEqual(1, fake5_filter_all.call_count) - - def test_get_filtered_objects_info_and_debug_log_none_returned(self): - - all_filters = [FilterA, FilterA, FilterB] - fake_backends = [host_manager.BackendState('fake_be%s' % x, None) - for x in 
range(1, 4)] - - filt_props = {"request_spec": {'volume_id': fake.VOLUME_ID, - 'volume_properties': {'project_id': fake.PROJECT_ID, - 'size': 2048, - 'host': 'host4'}}} - with mock.patch.object(base_filter, 'LOG') as mock_log: - result = self.handler.get_filtered_objects( - all_filters, fake_backends, filt_props) - self.assertFalse(result) - msg = "with volume ID '%s'" % fake.VOLUME_ID - # FilterA should leave Host1 and Host2; FilterB should leave None. - exp_output = ("FilterA: (start: 3, end: 2), " - "FilterA: (start: 2, end: 1)") - cargs = mock_log.info.call_args[0][0] - self.assertIn(msg, cargs) - self.assertIn(exp_output, cargs) - - exp_output = ("[('FilterA', ['fake_be2', 'fake_be3']), " - "('FilterA', ['fake_be3']), " - + "('FilterB', None)]") - cargs = mock_log.debug.call_args[0][0] - self.assertIn(msg, cargs) - self.assertIn(exp_output, cargs) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py index 48dba2d0128..7e9d0d4e969 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py @@ -33,7 +33,6 @@ import six from cinder import context from cinder import exception -from cinder.i18n import _LW from cinder.objects import fields from cinder import test from cinder.tests.unit import fake_volume @@ -91,10 +90,10 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase): reserved_percentage = 100 * int(reserved_ratio) self.assertEqual(reserved_percentage, result) - msg = _LW('The "netapp_size_multiplier" configuration option is ' - 'deprecated and will be removed in the Mitaka release. ' - 'Please set "reserved_percentage = %d" instead.') % ( - result) + msg = ('The "netapp_size_multiplier" configuration option is ' + 'deprecated and will be removed in the Mitaka release. ' + 'Please set "reserved_percentage = %d" instead.' 
% + result) mock_report.assert_called_once_with(block_base.LOG, msg) @mock.patch.object(block_base.NetAppBlockStorageLibrary, diff --git a/cinder/transfer/api.py b/cinder/transfer/api.py index 6b02cdecac4..4e0a5cabda5 100644 --- a/cinder/transfer/api.py +++ b/cinder/transfer/api.py @@ -29,7 +29,7 @@ import six from cinder.db import base from cinder import exception -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder import objects from cinder import quota from cinder import quota_utils @@ -72,7 +72,7 @@ class API(base.Base): volume_utils.notify_about_volume_usage(context, volume_ref, "transfer.delete.start") if volume_ref['status'] != 'awaiting-transfer': - LOG.error(_LE("Volume in unexpected state")) + LOG.error("Volume in unexpected state") self.db.transfer_destroy(context, transfer_id) volume_utils.notify_about_volume_usage(context, volume_ref, "transfer.delete.end") @@ -115,7 +115,7 @@ class API(base.Base): def create(self, context, volume_id, display_name): """Creates an entry in the transfers table.""" volume_api.check_policy(context, 'create_transfer') - LOG.info(_LI("Generating transfer record for volume %s"), volume_id) + LOG.info("Generating transfer record for volume %s", volume_id) volume_ref = self.db.volume_get(context, volume_id) if volume_ref['status'] != "available": raise exception.InvalidVolume(reason=_("status must be available")) @@ -137,8 +137,7 @@ class API(base.Base): try: transfer = self.db.transfer_create(context, transfer_rec) except Exception: - LOG.error(_LE("Failed to create transfer record " - "for %s"), volume_id) + LOG.error("Failed to create transfer record for %s", volume_id) raise volume_utils.notify_about_volume_usage(context, volume_ref, "transfer.create.end") @@ -200,8 +199,8 @@ class API(base.Base): **reserve_opts) except Exception: donor_reservations = None - LOG.exception(_LE("Failed to update quota donating volume" - " transfer id %s"), transfer_id) + LOG.exception("Failed to update quota donating volume" + " transfer id %s", transfer_id) volume_utils.notify_about_volume_usage(context, vol_ref, "transfer.accept.start") @@ -219,7 +218,7 @@ class API(base.Base): QUOTAS.commit(context, reservations) if donor_reservations: QUOTAS.commit(context, donor_reservations, project_id=donor_id) - LOG.info(_LI("Volume %s has been transferred."), volume_id) + LOG.info("Volume %s has been transferred.", volume_id) except Exception: with excutils.save_and_reraise_exception(): QUOTAS.rollback(context, reservations) diff --git a/cinder/utils.py b/cinder/utils.py index 16614a503db..416ad01001d 100644 --- a/cinder/utils.py +++ b/cinder/utils.py @@ -53,7 +53,7 @@ import six import webob.exc from cinder import exception -from cinder.i18n import _, _LE, _LW +from cinder.i18n import _ from cinder import keymgr @@ -398,7 +398,7 @@ def robust_file_write(directory, filename, data): os.fsync(dirfd) except OSError: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed to write persistence file: %(path)s."), + LOG.error("Failed to write persistence file: %(path)s.", {'path': os.path.join(directory, filename)}) if os.path.isfile(tempname): os.unlink(tempname) @@ -535,7 +535,7 @@ def require_driver_initialized(driver): # we can't do anything if the driver didn't init if not driver.initialized: driver_name = driver.__class__.__name__ - LOG.error(_LE("Volume driver %s not initialized"), driver_name) + LOG.error("Volume driver %s not initialized", driver_name) raise exception.DriverNotInitialized() else: log_unsupported_driver_warning(driver) @@ 
-545,9 +545,9 @@ def log_unsupported_driver_warning(driver): """Annoy the log about unsupported drivers.""" if not driver.supported: # Check to see if the driver is flagged as supported. - LOG.warning(_LW("Volume driver (%(driver_name)s %(version)s) is " - "currently unsupported and may be removed in the " - "next release of OpenStack. Use at your own risk."), + LOG.warning("Volume driver (%(driver_name)s %(version)s) is " + "currently unsupported and may be removed in the " + "next release of OpenStack. Use at your own risk.", {'driver_name': driver.__class__.__name__, 'version': driver.get_version()}, resource={'type': 'driver', @@ -944,7 +944,7 @@ def setup_tracing(trace_flags): except TypeError: # Handle when trace_flags is None or a test mock trace_flags = [] for invalid_flag in (set(trace_flags) - VALID_TRACE_FLAGS): - LOG.warning(_LW('Invalid trace flag: %s'), invalid_flag) + LOG.warning('Invalid trace flag: %s', invalid_flag) TRACE_METHOD = 'method' in trace_flags TRACE_API = 'api' in trace_flags diff --git a/cinder/volume/api.py b/cinder/volume/api.py index 60ccbd12761..d53fd2b5c5e 100644 --- a/cinder/volume/api.py +++ b/cinder/volume/api.py @@ -36,7 +36,7 @@ from cinder import db from cinder.db import base from cinder import exception from cinder import flow_utils -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder.image import cache as image_cache from cinder.image import glance from cinder import keymgr as key_manager @@ -170,7 +170,7 @@ class API(base.Base): seconds=CONF.az_cache_duration)) else: azs = self.availability_zones - LOG.info(_LI("Availability Zones retrieved successfully.")) + LOG.info("Availability Zones retrieved successfully.") return tuple(azs) def _retype_is_possible(self, context, @@ -349,7 +349,7 @@ class API(base.Base): with flow_utils.DynamicLogListener(flow_engine, logger=LOG): flow_engine.run() vref = flow_engine.storage.fetch('volume') - LOG.info(_LI("Volume created successfully."), resource=vref) + LOG.info("Volume created successfully.", resource=vref) return vref @wrap_check_policy @@ -380,8 +380,8 @@ class API(base.Base): project_id=project_id, **reserve_opts) except Exception: - LOG.exception(_LE("Failed to update quota while " - "deleting volume.")) + LOG.exception("Failed to update quota while " + "deleting volume.") volume.destroy() if reservations: @@ -389,7 +389,7 @@ class API(base.Base): volume_utils.notify_about_volume_usage(context, volume, "delete.end") - LOG.info(_LI("Delete volume request issued successfully."), + LOG.info("Delete volume request issued successfully.", resource={'type': 'volume', 'id': volume.id}) return @@ -468,14 +468,14 @@ class API(base.Base): try: self.key_manager.delete(context, encryption_key_id) except Exception as e: - LOG.warning(_LW("Unable to delete encryption key for " - "volume: %s."), e.msg, resource=volume) + LOG.warning("Unable to delete encryption key for " + "volume: %s.", e.msg, resource=volume) self.volume_rpcapi.delete_volume(context, volume, unmanage_only, cascade) - LOG.info(_LI("Delete volume request issued successfully."), + LOG.info("Delete volume request issued successfully.", resource=volume) @wrap_check_policy @@ -488,8 +488,8 @@ class API(base.Base): volume = objects.Volume._from_db_object(context, vol_obj, volume) if volume.status == 'maintenance': - LOG.info(_LI("Unable to update volume, " - "because it is in maintenance."), resource=volume) + LOG.info("Unable to update volume, " + "because it is in maintenance.", resource=volume) msg = _("The volume cannot be 
updated during maintenance.") raise exception.InvalidVolume(reason=msg) @@ -497,7 +497,7 @@ class API(base.Base): volume.update(fields) volume.save() - LOG.info(_LI("Volume updated successfully."), resource=volume) + LOG.info("Volume updated successfully.", resource=volume) def get(self, context, volume_id, viewable_admin_meta=False): volume = objects.Volume.get_by_id(context, volume_id) @@ -516,7 +516,7 @@ class API(base.Base): volume.admin_metadata = admin_metadata volume.obj_reset_changes() - LOG.info(_LI("Volume info retrieved successfully."), resource=volume) + LOG.info("Volume info retrieved successfully.", resource=volume) return volume def get_all(self, context, marker=None, limit=None, sort_keys=None, @@ -565,7 +565,7 @@ class API(base.Base): sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) - LOG.info(_LI("Get all volumes completed successfully.")) + LOG.info("Get all volumes completed successfully.") return volumes def get_volume_summary(self, context, filters=None): @@ -583,7 +583,7 @@ class API(base.Base): volumes = objects.VolumeList.get_volume_summary_by_project( context, context.project_id) - LOG.info(_LI("Get summary completed successfully.")) + LOG.info("Get summary completed successfully.") return volumes def get_snapshot(self, context, snapshot_id): @@ -592,7 +592,7 @@ class API(base.Base): # FIXME(jdg): The objects don't have the db name entries # so build the resource tag manually for now. - LOG.info(_LI("Snapshot retrieved successfully."), + LOG.info("Snapshot retrieved successfully.", resource={'type': 'snapshot', 'id': snapshot.id}) return snapshot @@ -600,7 +600,7 @@ class API(base.Base): def get_volume(self, context, volume_id): check_policy(context, 'get_volume') volume = objects.Volume.get_by_id(context, volume_id) - LOG.info(_LI("Volume retrieved successfully."), resource=volume) + LOG.info("Volume retrieved successfully.", resource=volume) return volume def get_all_snapshots(self, context, search_opts=None, marker=None, @@ -621,7 +621,7 @@ class API(base.Base): context, context.project_id, search_opts, marker, limit, sort_keys, sort_dirs, offset) - LOG.info(_LI("Get all snapshots completed successfully.")) + LOG.info("Get all snapshots completed successfully.") return snapshots @wrap_check_policy @@ -640,7 +640,7 @@ class API(base.Base): LOG.error(msg) raise exception.InvalidVolume(reason=msg) - LOG.info(_LI("Reserve volume completed successfully."), + LOG.info("Reserve volume completed successfully.", resource=volume) @wrap_check_policy @@ -658,7 +658,7 @@ class API(base.Base): resource=volume) return - LOG.info(_LI("Unreserve volume completed successfully."), + LOG.info("Unreserve volume completed successfully.", resource=volume) @wrap_check_policy @@ -678,22 +678,22 @@ class API(base.Base): LOG.error(msg) raise exception.InvalidVolume(reason=msg) - LOG.info(_LI("Begin detaching volume completed successfully."), + LOG.info("Begin detaching volume completed successfully.", resource=volume) @wrap_check_policy def roll_detaching(self, context, volume): volume.conditional_update({'status': 'in-use'}, {'status': 'detaching'}) - LOG.info(_LI("Roll detaching of volume completed successfully."), + LOG.info("Roll detaching of volume completed successfully.", resource=volume) @wrap_check_policy def attach(self, context, volume, instance_uuid, host_name, mountpoint, mode): if volume.status == 'maintenance': - LOG.info(_LI('Unable to attach volume, ' - 'because it is in maintenance.'), resource=volume) + LOG.info('Unable to attach volume, ' + 
'because it is in maintenance.', resource=volume) msg = _("The volume cannot be attached in maintenance mode.") raise exception.InvalidVolume(reason=msg) @@ -712,36 +712,36 @@ class API(base.Base): host_name, mountpoint, mode) - LOG.info(_LI("Attach volume completed successfully."), + LOG.info("Attach volume completed successfully.", resource=volume) return attach_results @wrap_check_policy def detach(self, context, volume, attachment_id): if volume['status'] == 'maintenance': - LOG.info(_LI('Unable to detach volume, ' - 'because it is in maintenance.'), resource=volume) + LOG.info('Unable to detach volume, ' + 'because it is in maintenance.', resource=volume) msg = _("The volume cannot be detached in maintenance mode.") raise exception.InvalidVolume(reason=msg) detach_results = self.volume_rpcapi.detach_volume(context, volume, attachment_id) - LOG.info(_LI("Detach volume completed successfully."), + LOG.info("Detach volume completed successfully.", resource=volume) return detach_results @wrap_check_policy def initialize_connection(self, context, volume, connector): if volume.status == 'maintenance': - LOG.info(_LI('Unable to initialize the connection for ' - 'volume, because it is in ' - 'maintenance.'), resource=volume) + LOG.info('Unable to initialize the connection for ' + 'volume, because it is in ' + 'maintenance.', resource=volume) msg = _("The volume connection cannot be initialized in " "maintenance mode.") raise exception.InvalidVolume(reason=msg) init_results = self.volume_rpcapi.initialize_connection(context, volume, connector) - LOG.info(_LI("Initialize volume connection completed successfully."), + LOG.info("Initialize volume connection completed successfully.", resource=volume) return init_results @@ -751,22 +751,22 @@ class API(base.Base): volume, connector, force) - LOG.info(_LI("Terminate volume connection completed successfully."), + LOG.info("Terminate volume connection completed successfully.", resource=volume) self.unreserve_volume(context, volume) @wrap_check_policy def accept_transfer(self, context, volume, new_user, new_project): if volume['status'] == 'maintenance': - LOG.info(_LI('Unable to accept transfer for volume, ' - 'because it is in maintenance.'), resource=volume) + LOG.info('Unable to accept transfer for volume, ' + 'because it is in maintenance.', resource=volume) msg = _("The volume cannot accept transfer in maintenance mode.") raise exception.InvalidVolume(reason=msg) results = self.volume_rpcapi.accept_transfer(context, volume, new_user, new_project) - LOG.info(_LI("Transfer volume completed successfully."), + LOG.info("Transfer volume completed successfully.", resource=volume) return results @@ -798,8 +798,8 @@ class API(base.Base): raise exception.InvalidVolume(reason=msg) if volume['status'] == 'maintenance': - LOG.info(_LI('Unable to create the snapshot for volume, ' - 'because it is in maintenance.'), resource=volume) + LOG.info('Unable to create the snapshot for volume, ' + 'because it is in maintenance.', resource=volume) msg = _("The snapshot cannot be created when the volume is in " "maintenance mode.") raise exception.InvalidVolume(reason=msg) @@ -911,8 +911,8 @@ class API(base.Base): check_policy(context, 'create_snapshot', volume) if volume['status'] == 'maintenance': - LOG.info(_LI('Unable to create the snapshot for volume, ' - 'because it is in maintenance.'), resource=volume) + LOG.info('Unable to create the snapshot for volume, ' + 'because it is in maintenance.', resource=volume) msg = _("The snapshot cannot be created when the 
volume is in " "maintenance mode.") raise exception.InvalidVolume(reason=msg) @@ -981,7 +981,7 @@ class API(base.Base): result = self._create_snapshot(context, volume, name, description, False, metadata, cgsnapshot_id, group_snapshot_id) - LOG.info(_LI("Snapshot create request issued successfully."), + LOG.info("Snapshot create request issued successfully.", resource=result) return result @@ -990,7 +990,7 @@ class API(base.Base): description, metadata=None): result = self._create_snapshot(context, volume, name, description, True, metadata) - LOG.info(_LI("Snapshot force create request issued successfully."), + LOG.info("Snapshot force create request issued successfully.", resource=result) return result @@ -1021,7 +1021,7 @@ class API(base.Base): raise exception.InvalidSnapshot(reason=msg) self.volume_rpcapi.delete_snapshot(context, snapshot, unmanage_only) - LOG.info(_LI("Snapshot delete request issued successfully."), + LOG.info("Snapshot delete request issued successfully.", resource=snapshot) @wrap_check_policy @@ -1033,7 +1033,7 @@ class API(base.Base): def get_volume_metadata(self, context, volume): """Get all metadata associated with a volume.""" rv = self.db.volume_metadata_get(context, volume['id']) - LOG.info(_LI("Get volume metadata completed successfully."), + LOG.info("Get volume metadata completed successfully.", resource=volume) return dict(rv) @@ -1042,7 +1042,7 @@ class API(base.Base): """Creates volume metadata.""" db_meta = self._update_volume_metadata(context, volume, metadata) - LOG.info(_LI("Create volume metadata completed successfully."), + LOG.info("Create volume metadata completed successfully.", resource=volume) return db_meta @@ -1056,7 +1056,7 @@ class API(base.Base): LOG.info(msg, resource=volume) raise exception.InvalidVolume(reason=msg) self.db.volume_metadata_delete(context, volume.id, key, meta_type) - LOG.info(_LI("Delete volume metadata completed successfully."), + LOG.info("Delete volume metadata completed successfully.", resource=volume) def _update_volume_metadata(self, context, volume, metadata, delete=False, @@ -1084,7 +1084,7 @@ class API(base.Base): # TODO(jdg): Implement an RPC call for drivers that may use this info - LOG.info(_LI("Update volume metadata completed successfully."), + LOG.info("Update volume metadata completed successfully.", resource=volume) return db_meta @@ -1092,7 +1092,7 @@ class API(base.Base): def get_volume_admin_metadata(self, context, volume): """Get all administration metadata associated with a volume.""" rv = self.db.volume_admin_metadata_get(context, volume['id']) - LOG.info(_LI("Get volume admin metadata completed successfully."), + LOG.info("Get volume admin metadata completed successfully.", resource=volume) return dict(rv) @@ -1112,7 +1112,7 @@ class API(base.Base): # TODO(jdg): Implement an RPC call for drivers that may use this info - LOG.info(_LI("Update volume admin metadata completed successfully."), + LOG.info("Update volume admin metadata completed successfully.", resource=volume) return db_meta @@ -1120,7 +1120,7 @@ class API(base.Base): def get_snapshot_metadata(self, context, snapshot): """Get all metadata associated with a snapshot.""" snapshot_obj = self.get_snapshot(context, snapshot.id) - LOG.info(_LI("Get snapshot metadata completed successfully."), + LOG.info("Get snapshot metadata completed successfully.", resource=snapshot) return snapshot_obj.metadata @@ -1129,7 +1129,7 @@ class API(base.Base): """Delete the given metadata item from a snapshot.""" snapshot_obj = self.get_snapshot(context, 
snapshot.id) snapshot_obj.delete_metadata_key(context, key) - LOG.info(_LI("Delete snapshot metadata completed successfully."), + LOG.info("Delete snapshot metadata completed successfully.", resource=snapshot) @wrap_check_policy @@ -1156,12 +1156,12 @@ class API(base.Base): # TODO(jdg): Implement an RPC call for drivers that may use this info - LOG.info(_LI("Update snapshot metadata completed successfully."), + LOG.info("Update snapshot metadata completed successfully.", resource=snapshot) return snapshot.metadata def get_snapshot_metadata_value(self, snapshot, key): - LOG.info(_LI("Get snapshot metadata value not implemented."), + LOG.info("Get snapshot metadata value not implemented.", resource=snapshot) # FIXME(jdg): Huh? Pass? pass @@ -1178,7 +1178,7 @@ class API(base.Base): @wrap_check_policy def get_volume_image_metadata(self, context, volume): db_data = self.db.volume_glance_metadata_get(context, volume['id']) - LOG.info(_LI("Get volume image-metadata completed successfully."), + LOG.info("Get volume image-metadata completed successfully.", resource=volume) return {meta_entry.key: meta_entry.value for meta_entry in db_data} @@ -1195,8 +1195,8 @@ class API(base.Base): def copy_volume_to_image(self, context, volume, metadata, force): """Create a new image from the specified volume.""" if not CONF.enable_force_upload and force: - LOG.info(_LI("Force upload to image is disabled, " - "Force option will be ignored."), + LOG.info("Force upload to image is disabled, " + "Force option will be ignored.", resource={'type': 'volume', 'id': volume['id']}) force = False @@ -1262,7 +1262,7 @@ class API(base.Base): response['is_public'] = recv_metadata.get('is_public') elif 'visibility' in recv_metadata: response['visibility'] = recv_metadata.get('visibility') - LOG.info(_LI("Copy volume to image completed successfully."), + LOG.info("Copy volume to image completed successfully.", resource=volume) return response @@ -1272,7 +1272,7 @@ class API(base.Base): expected = {'status': 'available'} def _roll_back_status(): - msg = _LE('Could not return volume %s to available.') + msg = _('Could not return volume %s to available.') try: if not volume.conditional_update(expected, value): LOG.error(msg, volume.id) @@ -1323,13 +1323,13 @@ class API(base.Base): gb_quotas = exc.kwargs['quotas']['gigabytes'] consumed = gigabytes['reserved'] + gigabytes['in_use'] - msg = _LE("Quota exceeded for %(s_pid)s, tried to extend volume " + LOG.error("Quota exceeded for %(s_pid)s, tried to extend volume " "by %(s_size)sG, (%(d_consumed)dG of %(d_quota)dG " - "already consumed).") - LOG.error(msg, {'s_pid': context.project_id, - 's_size': size_increase, - 'd_consumed': consumed, - 'd_quota': gb_quotas}) + "already consumed).", + {'s_pid': context.project_id, + 's_size': size_increase, + 'd_consumed': consumed, + 'd_quota': gb_quotas}) raise exception.VolumeSizeExceedsAvailableQuota( requested=size_increase, consumed=consumed, quota=gb_quotas) finally: @@ -1357,15 +1357,15 @@ class API(base.Base): # NOTE(erlon): During rolling upgrades scheduler and volume can # have different versions. This check makes sure that a new # version of the volume service won't break. - msg = _LW("Failed to send extend volume request to scheduler. " - "Falling back to old behaviour. This is normal during a " - "live-upgrade. Error: %(e)s") + msg = ("Failed to send extend volume request to scheduler. " + "Falling back to old behaviour. This is normal during a " + "live-upgrade. 
Error: %(e)s") LOG.warning(msg, {'e': e}) # TODO(erlon): Remove in Pike self.volume_rpcapi.extend_volume(context, volume, new_size, reservations) - LOG.info(_LI("Extend volume request issued successfully."), + LOG.info("Extend volume request issued successfully.", resource=volume) @wrap_check_policy @@ -1451,7 +1451,7 @@ class API(base.Base): cluster_name or host, force_copy, request_spec) - LOG.info(_LI("Migrate volume request issued successfully."), + LOG.info("Migrate volume request issued successfully.", resource=volume) @wrap_check_policy @@ -1490,7 +1490,7 @@ class API(base.Base): 'exp': expected_status}) raise exception.InvalidVolume(reason=msg) - LOG.info(_LI("Migrate volume completion issued successfully."), + LOG.info("Migrate volume completion issued successfully.", resource=volume) return self.volume_rpcapi.migrate_volume_completion(context, volume, new_volume, error) @@ -1505,8 +1505,8 @@ class API(base.Base): raise exception.InvalidVolume(reason=msg) self.update_volume_admin_metadata(context.elevated(), volume, {'readonly': six.text_type(flag)}) - LOG.info(_LI("Update readonly setting on volume " - "completed successfully."), + LOG.info("Update readonly setting on volume " + "completed successfully.", resource=volume) @wrap_check_policy @@ -1592,7 +1592,7 @@ class API(base.Base): self.scheduler_rpcapi.retype(context, volume, request_spec=request_spec, filter_properties={}) - LOG.info(_LI("Retype volume request issued successfully."), + LOG.info("Retype volume request issued successfully.", resource=volume) def _get_service_by_host_cluster(self, context, host, cluster_name, @@ -1613,20 +1613,20 @@ class API(base.Base): cluster_name=svc_cluster) except exception.ServiceNotFound: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Unable to find service: %(service)s for ' - 'given host: %(host)s and cluster %(cluster)s.'), + LOG.error('Unable to find service: %(service)s for ' + 'given host: %(host)s and cluster %(cluster)s.', {'service': constants.VOLUME_BINARY, 'host': host, 'cluster': cluster_name}) if service.disabled and (not service.cluster_name or service.cluster.disabled): - LOG.error(_LE('Unable to manage existing %s on a disabled ' - 'service.'), resource) + LOG.error('Unable to manage existing %s on a disabled ' + 'service.', resource) raise exception.ServiceUnavailable() if not service.is_up: - LOG.error(_LE('Unable to manage existing %s on a service that is ' - 'down.'), resource) + LOG.error('Unable to manage existing %s on a service that is ' + 'down.', resource) raise exception.ServiceUnavailable() return service @@ -1673,7 +1673,7 @@ class API(base.Base): with flow_utils.DynamicLogListener(flow_engine, logger=LOG): flow_engine.run() vol_ref = flow_engine.storage.fetch('volume') - LOG.info(_LI("Manage volume request issued successfully."), + LOG.info("Manage volume request issued successfully.", resource=vol_ref) return vol_ref @@ -1791,7 +1791,7 @@ class API(base.Base): cluster.save() raise exception.InvalidInput( reason=_('No service could be changed: %s') % msg) - LOG.warning(_LW('Some services could not be changed: %s'), msg) + LOG.warning('Some services could not be changed: %s', msg) return cluster, services diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py index 1477d114795..e154249dcc3 100644 --- a/cinder/volume/driver.py +++ b/cinder/volume/driver.py @@ -26,7 +26,7 @@ from oslo_utils import excutils import six from cinder import exception -from cinder.i18n import _, _LE, _LW +from cinder.i18n import _ from cinder.image import 
image_utils from cinder import objects from cinder.objects import fields @@ -424,8 +424,8 @@ class BaseVD(object): self._is_non_recoverable(ex.stderr, non_recoverable): raise - LOG.exception(_LE("Recovering from a failed execute. " - "Try number %s"), tries) + LOG.exception("Recovering from a failed execute. " + "Try number %s", tries) time.sleep(tries ** 2) def _detach_volume(self, context, attach_info, volume, properties, @@ -458,8 +458,8 @@ class BaseVD(object): LOG.debug("volume %s: removing export", volume['id']) self.remove_export(context, volume) except Exception as ex: - LOG.exception(_LE("Error detaching volume %(volume)s, " - "due to remove export failure."), + LOG.exception("Error detaching volume %(volume)s, " + "due to remove export failure.", {"volume": volume['id']}) raise exception.RemoveExportException(volume=volume['id'], reason=ex) @@ -480,8 +480,8 @@ class BaseVD(object): # flag in the interface is for anticipation that it will be enabled # in the future. if remote: - LOG.error(_LE("Detaching snapshot from a remote node " - "is not supported.")) + LOG.error("Detaching snapshot from a remote node " + "is not supported.") raise exception.NotSupportedOperation( operation=_("detach snapshot from remote node")) else: @@ -501,8 +501,8 @@ class BaseVD(object): LOG.debug("Snapshot %s: removing export.", snapshot.id) self.remove_export_snapshot(context, snapshot) except Exception as ex: - LOG.exception(_LE("Error detaching snapshot %(snapshot)s, " - "due to remove export failure."), + LOG.exception("Error detaching snapshot %(snapshot)s, " + "due to remove export failure.", {"snapshot": snapshot.id}) raise exception.RemoveExportException(volume=snapshot.id, reason=ex) @@ -532,8 +532,8 @@ class BaseVD(object): self._throttle = throttling.BlkioCgroup(int(bps_limit), cgroup_name) except processutils.ProcessExecutionError as err: - LOG.warning(_LW('Failed to activate volume copy throttling: ' - '%(err)s'), {'err': err}) + LOG.warning('Failed to activate volume copy throttling: ' + '%(err)s', {'err': err}) throttling.Throttle.set_default(self._throttle) def get_version(self): @@ -737,9 +737,9 @@ class BaseVD(object): if ':' in vendor_name: old_name = vendor_name vendor_name = vendor_name.replace(':', '_') - LOG.warning(_LW('The colon in vendor name was replaced ' - 'by underscore. Updated vendor name is ' - '%(name)s".'), {'name': vendor_name}) + LOG.warning('The colon in vendor name was replaced ' + 'by underscore. Updated vendor name is ' + '%(name)s".', {'name': vendor_name}) for key in vendor_prop: # If key has colon in vendor name field, we replace it to @@ -751,10 +751,10 @@ class BaseVD(object): updated_vendor_prop[new_key] = vendor_prop[key] continue if not key.startswith(vendor_name + ':'): - LOG.warning(_LW('Vendor unique property "%(property)s" ' - 'must start with vendor prefix with colon ' - '"%(prefix)s". The property was ' - 'not registered on capabilities list.'), + LOG.warning('Vendor unique property "%(property)s" ' + 'must start with vendor prefix with colon ' + '"%(prefix)s". 
The property was ' + 'not registered on capabilities list.', {'prefix': vendor_name + ':', 'property': key}) continue @@ -952,9 +952,9 @@ class BaseVD(object): rpcapi.terminate_connection(context, volume, properties, force=True) except Exception: - LOG.warning(_LW("Failed terminating the connection " - "of volume %(volume_id)s, but it is " - "acceptable."), + LOG.warning("Failed terminating the connection " + "of volume %(volume_id)s, but it is " + "acceptable.", {'volume_id': volume['id']}) else: # Call local driver's create_export and initialize_connection. @@ -969,9 +969,9 @@ class BaseVD(object): volume.save() except exception.CinderException as ex: if model_update: - LOG.exception(_LE("Failed updating model of volume " - "%(volume_id)s with driver provided " - "model %(model)s"), + LOG.exception("Failed updating model of volume " + "%(volume_id)s with driver provided " + "model %(model)s", {'volume_id': volume['id'], 'model': model_update}) raise exception.ExportFailure(reason=ex) @@ -1008,7 +1008,7 @@ class BaseVD(object): properties, force=True, remote=remote) except Exception: - LOG.exception(_LE('Error detaching volume %s'), + LOG.exception('Error detaching volume %s', volume['id']) raise @@ -1024,8 +1024,8 @@ class BaseVD(object): # flag in the interface is for anticipation that it will be enabled # in the future. if remote: - LOG.error(_LE("Attaching snapshot from a remote node " - "is not supported.")) + LOG.error("Attaching snapshot from a remote node " + "is not supported.") raise exception.NotSupportedOperation( operation=_("attach snapshot from remote node")) else: @@ -1045,9 +1045,9 @@ class BaseVD(object): snapshot.save() except exception.CinderException as ex: if model_update: - LOG.exception(_LE("Failed updating model of snapshot " - "%(snapshot_id)s with driver provided " - "model %(model)s."), + LOG.exception("Failed updating model of snapshot " + "%(snapshot_id)s with driver provided " + "model %(model)s.", {'snapshot_id': snapshot.id, 'model': model_update}) raise exception.ExportFailure(reason=ex) @@ -1094,7 +1094,7 @@ class BaseVD(object): unavailable = not connector.check_valid_device(host_device, root_access) except Exception: - LOG.exception(_LE('Could not validate device %s'), host_device) + LOG.exception('Could not validate device %s', host_device) if unavailable: raise exception.DeviceUnavailable(path=host_device, @@ -2612,8 +2612,7 @@ class ISCSIDriver(VolumeDriver): def _do_iscsi_discovery(self, volume): # TODO(justinsb): Deprecate discovery and use stored info # NOTE(justinsb): Discovery won't work with CHAP-secured targets (?) 
- LOG.warning(_LW("ISCSI provider_location not " - "stored, using discovery")) + LOG.warning("ISCSI provider_location not stored, using discovery") volume_name = volume['name'] @@ -2626,7 +2625,7 @@ class ISCSIDriver(VolumeDriver): volume['host'].split('@')[0], run_as_root=True) except processutils.ProcessExecutionError as ex: - LOG.error(_LE("ISCSI discovery attempt failed for:%s"), + LOG.error("ISCSI discovery attempt failed for:%s", volume['host'].split('@')[0]) LOG.debug("Error from iscsiadm -m discovery: %s", ex.stderr) return None @@ -2815,8 +2814,8 @@ class ISCSIDriver(VolumeDriver): # iSCSI drivers require the initiator information required = 'initiator' if required not in connector: - LOG.error(_LE('The volume driver requires %(data)s ' - 'in the connector.'), {'data': required}) + LOG.error('The volume driver requires %(data)s ' + 'in the connector.', {'data': required}) raise exception.InvalidConnectorException(missing=required) def terminate_connection(self, volume, connector, **kwargs): @@ -2969,9 +2968,9 @@ class FibreChannelDriver(VolumeDriver): def validate_connector_has_setting(connector, setting): """Test for non-empty setting in connector.""" if setting not in connector or not connector[setting]: - LOG.error(_LE( + LOG.error( "FibreChannelDriver validate_connector failed. " - "No '%(setting)s'. Make sure HBA state is Online."), + "No '%(setting)s'. Make sure HBA state is Online.", {'setting': setting}) raise exception.InvalidConnectorException(missing=setting) diff --git a/cinder/volume/driver_utils.py b/cinder/volume/driver_utils.py index 3a8a0bb8656..d22a265b829 100644 --- a/cinder/volume/driver_utils.py +++ b/cinder/volume/driver_utils.py @@ -17,7 +17,6 @@ from oslo_log import log as logging from cinder import context from cinder import exception -from cinder.i18n import _LE LOG = logging.getLogger(__name__) @@ -41,9 +40,9 @@ class VolumeDriverUtils(object): self._data_namespace ) except exception.CinderException: - LOG.exception(_LE("Failed to get driver initiator data for" - " initiator %(initiator)s and namespace" - " %(namespace)s"), + LOG.exception("Failed to get driver initiator data for" + " initiator %(initiator)s and namespace" + " %(namespace)s", {'initiator': initiator, 'namespace': self._data_namespace}) raise @@ -63,9 +62,9 @@ class VolumeDriverUtils(object): value ) except exception.CinderException: - LOG.exception(_LE("Failed to insert initiator data for" - " initiator %(initiator)s and backend" - " %(backend)s for key %(key)s."), + LOG.exception("Failed to insert initiator data for" + " initiator %(initiator)s and backend" + " %(backend)s for key %(key)s.", {'initiator': initiator, 'backend': self._data_namespace, 'key': key}) diff --git a/cinder/volume/drivers/block_device.py b/cinder/volume/drivers/block_device.py index 3c785431071..0f2c8e53900 100644 --- a/cinder/volume/drivers/block_device.py +++ b/cinder/volume/drivers/block_device.py @@ -23,7 +23,7 @@ from oslo_utils import units from cinder import context from cinder import exception -from cinder.i18n import _, _LI, _LW +from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder import objects @@ -87,7 +87,7 @@ class BlockDeviceDriver(driver.BaseVD, @utils.synchronized('block_device', external=True) def create_volume(self, volume): device = self.find_appropriate_size_device(volume.size) - LOG.info(_LI("Creating %(volume)s on %(device)s"), + LOG.info("Creating %(volume)s on %(device)s", {"volume": volume.name, "device": device}) 
self._update_provider_location(volume, device) @@ -109,7 +109,7 @@ class BlockDeviceDriver(driver.BaseVD, volume_clear=self.configuration.volume_clear, volume_clear_size=self.configuration.volume_clear_size) else: - LOG.warning(_LW("The device %s won't be cleared."), device) + LOG.warning("The device %s won't be cleared.", device) if device.status == "error_deleting": msg = _("Failed to delete device.") @@ -141,7 +141,7 @@ class BlockDeviceDriver(driver.BaseVD, @utils.synchronized('block_device', external=True) def create_cloned_volume(self, volume, src_vref): - LOG.info(_LI('Creating clone of volume: %s.'), src_vref.id) + LOG.info('Creating clone of volume: %s.', src_vref.id) device = self.find_appropriate_size_device(src_vref.size) dev_size = self._get_devices_sizes([device]) volutils.copy_volume( @@ -260,7 +260,7 @@ class BlockDeviceDriver(driver.BaseVD, LOG.error(msg, resource=volume) raise exception.CinderException(msg) - LOG.info(_LI('Creating volume snapshot: %s.'), snapshot.id) + LOG.info('Creating volume snapshot: %s.', snapshot.id) device = self.find_appropriate_size_device(snapshot.volume_size) dev_size = self._get_devices_sizes([device]) volutils.copy_volume( @@ -275,7 +275,7 @@ class BlockDeviceDriver(driver.BaseVD, @utils.synchronized('block_device', external=True) def create_volume_from_snapshot(self, volume, snapshot): - LOG.info(_LI('Creating volume %s from snapshot.'), volume.id) + LOG.info('Creating volume %s from snapshot.', volume.id) device = self.find_appropriate_size_device(snapshot.volume_size) dev_size = self._get_devices_sizes([device]) volutils.copy_volume( diff --git a/cinder/volume/drivers/coprhd/common.py b/cinder/volume/drivers/coprhd/common.py index 60793eff5e0..083f48eb706 100644 --- a/cinder/volume/drivers/coprhd/common.py +++ b/cinder/volume/drivers/coprhd/common.py @@ -29,8 +29,6 @@ import six from cinder import context from cinder import exception from cinder.i18n import _ -from cinder.i18n import _LE -from cinder.i18n import _LI from cinder.objects import fields from cinder.volume.drivers.coprhd.helpers import ( authentication as coprhd_auth) @@ -254,7 +252,7 @@ class EMCCoprHDDriverCommon(object): coprhd_err_msg = (_("Volume %(name)s: create failed\n%(err)s") % {'name': name, 'err': six.text_type(e.msg)}) - log_err_msg = (_LE("Volume : %s creation failed") % name) + log_err_msg = ("Volume : %s creation failed" % name) self._raise_or_log_exception( e.err_code, coprhd_err_msg, log_err_msg) @@ -283,7 +281,7 @@ class EMCCoprHDDriverCommon(object): " create failed\n%(err)s") % {'name': name, 'err': six.text_type(e.msg)}) - log_err_msg = (_LE("Consistency Group : %s creation failed") % + log_err_msg = ("Consistency Group : %s creation failed" % name) self._raise_or_log_exception(e.err_code, coprhd_err_msg, log_err_msg) @@ -321,7 +319,7 @@ class EMCCoprHDDriverCommon(object): " update failed\n%(err)s") % {'cg_uri': cg_uri, 'err': six.text_type(e.msg)}) - log_err_msg = (_LE("Consistency Group : %s update failed") % + log_err_msg = ("Consistency Group : %s update failed" % cg_uri) self._raise_or_log_exception(e.err_code, coprhd_err_msg, log_err_msg) @@ -357,7 +355,7 @@ class EMCCoprHDDriverCommon(object): volumes_model_update.append(update_item) - LOG.exception(_LE("Failed to delete the volume %s of CG."), + LOG.exception("Failed to delete the volume %s of CG.", vol['name']) self.consistencygroup_obj.delete( @@ -375,7 +373,7 @@ class EMCCoprHDDriverCommon(object): " delete failed\n%(err)s") % {'name': name, 'err': six.text_type(e.msg)}) - log_err_msg = 
(_LE("Consistency Group : %s deletion failed") % + log_err_msg = ("Consistency Group : %s deletion failed" % name) self._raise_or_log_exception(e.err_code, coprhd_err_msg, log_err_msg) @@ -395,8 +393,8 @@ class EMCCoprHDDriverCommon(object): coprhd_cgid = self._get_coprhd_cgid(cg_id) cg_name = self._get_consistencygroup_name(cg_group) - LOG.info(_LI('Start to create cgsnapshot for consistency group' - ': %(group_name)s'), + LOG.info('Start to create cgsnapshot for consistency group' + ': %(group_name)s', {'group_name': cg_name}) try: @@ -484,8 +482,8 @@ class EMCCoprHDDriverCommon(object): {'cg_name': cg_name, 'err': six.text_type(e.msg)}) - log_err_msg = (_LE("Snapshot %(name)s for Consistency" - " Group: %(cg_name)s creation failed") % + log_err_msg = ("Snapshot %(name)s for Consistency" + " Group: %(cg_name)s creation failed" % {'cg_name': cg_name, 'name': cgsnapshot_name}) self._raise_or_log_exception(e.err_code, coprhd_err_msg, @@ -505,9 +503,9 @@ class EMCCoprHDDriverCommon(object): cg_name = self._get_consistencygroup_name(cg_group) model_update = {} - LOG.info(_LI('Delete cgsnapshot %(snap_name)s for consistency group: ' - '%(group_name)s'), {'snap_name': cgsnapshot['name'], - 'group_name': cg_name}) + LOG.info('Delete cgsnapshot %(snap_name)s for consistency group: ' + '%(group_name)s', {'snap_name': cgsnapshot['name'], + 'group_name': cg_name}) try: uri = None @@ -545,8 +543,8 @@ class EMCCoprHDDriverCommon(object): 'cg_name': cg_name, 'err': six.text_type(e.msg)}) - log_err_msg = (_LE("Snapshot %(name)s for Consistency" - " Group: %(cg_name)s deletion failed") % + log_err_msg = ("Snapshot %(name)s for Consistency" + " Group: %(cg_name)s deletion failed" % {'cg_name': cg_name, 'name': cgsnapshot_name}) self._raise_or_log_exception(e.err_code, coprhd_err_msg, @@ -618,10 +616,9 @@ class EMCCoprHDDriverCommon(object): "%s:%s:%s" % (self.OPENSTACK_TAG, prop, six.text_type(value))) except TypeError: - LOG.error( - _LE("Error tagging the resource property %s"), prop) + LOG.error("Error tagging the resource property %s", prop) except TypeError: - LOG.error(_LE("Error tagging the resource properties")) + LOG.error("Error tagging the resource properties") try: self.tag_obj.tag_resource( @@ -683,13 +680,13 @@ class EMCCoprHDDriverCommon(object): "", full_project_name, name, True) except IndexError: - LOG.exception(_LE("Volume clone detach returned empty task list")) + LOG.exception("Volume clone detach returned empty task list") except coprhd_utils.CoprHdError as e: coprhd_err_msg = (_("Volume %(name)s: clone failed\n%(err)s") % {'name': name, 'err': six.text_type(e.msg)}) - log_err_msg = (_LE("Volume : {%s} clone failed") % name) + log_err_msg = ("Volume : {%s} clone failed" % name) self._raise_or_log_exception(e.err_code, coprhd_err_msg, log_err_msg) @@ -712,7 +709,7 @@ class EMCCoprHDDriverCommon(object): {'volume_name': name, 'err': six.text_type(e.msg)}) - log_err_msg = (_LE("Volume : %s expand failed") % name) + log_err_msg = ("Volume : %s expand failed" % name) self._raise_or_log_exception(e.err_code, coprhd_err_msg, log_err_msg) @@ -735,8 +732,7 @@ class EMCCoprHDDriverCommon(object): {'volume_name': volume_name, 'err': six.text_type(e.msg)}) - log_err_msg = (_LE("Volume : %s expand failed") % - volume_name) + log_err_msg = "Volume : %s expand failed" % volume_name self._raise_or_log_exception(e.err_code, coprhd_err_msg, log_err_msg) @@ -789,8 +785,7 @@ class EMCCoprHDDriverCommon(object): {'src_snapshot_name': src_snapshot_name, 'err': six.text_type(e.msg)}) - log_err_msg = 
(_LE("Snapshot : %s clone failed") % - src_snapshot_name) + log_err_msg = "Snapshot : %s clone failed" % src_snapshot_name self._raise_or_log_exception(e.err_code, coprhd_err_msg, log_err_msg) @@ -809,8 +804,7 @@ class EMCCoprHDDriverCommon(object): {'volume_name': new_volume_name, 'err': six.text_type(e.msg)}) - log_err_msg = (_LE("Volume : %s expand failed") % - new_volume_name) + log_err_msg = "Volume : %s expand failed" % new_volume_name self._raise_or_log_exception(e.err_code, coprhd_err_msg, log_err_msg) @@ -825,16 +819,16 @@ class EMCCoprHDDriverCommon(object): self.volume_obj.delete(full_project_name, name, sync=True) except coprhd_utils.CoprHdError as e: if e.err_code == coprhd_utils.CoprHdError.NOT_FOUND_ERR: - LOG.info(_LI( + LOG.info( "Volume %s" " no longer exists; volume deletion is" - " considered successful."), name) + " considered successful.", name) else: coprhd_err_msg = (_("Volume %(name)s: delete failed" "\n%(err)s") % {'name': name, 'err': six.text_type(e.msg)}) - log_err_msg = (_LE("Volume : %s delete failed") % name) + log_err_msg = "Volume : %s delete failed" % name self._raise_or_log_exception(e.err_code, coprhd_err_msg, log_err_msg) @@ -851,7 +845,7 @@ class EMCCoprHDDriverCommon(object): _("Snapshot can't be taken individually on a volume" " that is part of a Consistency Group")) except KeyError: - LOG.info(_LI("No Consistency Group associated with the volume")) + LOG.info("No Consistency Group associated with the volume") if self.configuration.coprhd_emulate_snapshot: self.create_cloned_volume(snapshot, volume, truncate_name) @@ -899,7 +893,7 @@ class EMCCoprHDDriverCommon(object): "\n%(err)s") % {'snapshotname': snapshotname, 'err': six.text_type(e.msg)}) - log_err_msg = (_LE("Snapshot : %s create failed") % snapshotname) + log_err_msg = "Snapshot : %s create failed" % snapshotname self._raise_or_log_exception(e.err_code, coprhd_err_msg, log_err_msg) @@ -916,7 +910,7 @@ class EMCCoprHDDriverCommon(object): _("Snapshot delete can't be done individually on a volume" " that is part of a Consistency Group")) except KeyError: - LOG.info(_LI("No Consistency Group associated with the volume")) + LOG.info("No Consistency Group associated with the volume") if self.configuration.coprhd_emulate_snapshot: self.delete_volume(snapshot) @@ -936,10 +930,10 @@ class EMCCoprHDDriverCommon(object): project=projectname, tenant=tenantname) if resource_uri is None: - LOG.info(_LI( + LOG.info( "Snapshot %s" " is not found; snapshot deletion" - " is considered successful."), snapshotname) + " is considered successful.", snapshotname) else: snapshotname = self._get_coprhd_snapshot_name( snapshot, resource_uri) @@ -954,7 +948,7 @@ class EMCCoprHDDriverCommon(object): coprhd_err_msg = (_("Snapshot %s : Delete Failed\n") % snapshotname) - log_err_msg = (_LE("Snapshot : %s delete failed") % snapshotname) + log_err_msg = "Snapshot : %s delete failed" % snapshotname self._raise_or_log_exception(e.err_code, coprhd_err_msg, log_err_msg) @@ -976,11 +970,11 @@ class EMCCoprHDDriverCommon(object): foundhostname = self._find_host(initiator_ports[i]) if foundhostname: - LOG.info(_LI("Found host %s"), foundhostname) + LOG.info("Found host %s", foundhostname) break if not foundhostname: - LOG.error(_LE("Auto host creation not supported")) + LOG.error("Auto host creation not supported") # create an export group for this host foundgroupname = foundhostname + 'SG' # create a unique name @@ -1056,9 +1050,9 @@ class EMCCoprHDDriverCommon(object): None, None) else: - LOG.info(_LI( + LOG.info( "No export 
group found for the host: %s" - "; this is considered already detached."), hostname) + "; this is considered already detached.", hostname) return itls @@ -1133,11 +1127,11 @@ class EMCCoprHDDriverCommon(object): if itls is None: # No device number found after 10 tries; return an empty itl - LOG.info(_LI( + LOG.info( "No device number has been found after 10 tries; " "this likely indicates an unsuccessful attach of " "volume volumename=%(volumename)s to" - " initiator initiator_ports=%(initiator_ports)s"), + " initiator initiator_ports=%(initiator_ports)s", {'volumename': volumename, 'initiator_ports': initiator_ports}) @@ -1408,7 +1402,7 @@ class EMCCoprHDDriverCommon(object): except coprhd_utils.CoprHdError: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Update volume stats failed")) + LOG.exception("Update volume stats failed") @retry_wrapper def retype(self, ctxt, volume, new_type, diff, host): @@ -1434,7 +1428,7 @@ class EMCCoprHDDriverCommon(object): "\n%(err)s") % {'volume_name': volume_name, 'err': six.text_type(e.msg)}) - log_err_msg = (_LE("Volume : %s type update failed") % + log_err_msg = ("Volume : %s type update failed" % volume_name) self._raise_or_log_exception(e.err_code, coprhd_err_msg, log_err_msg) diff --git a/cinder/volume/drivers/coprhd/scaleio.py b/cinder/volume/drivers/coprhd/scaleio.py index 20287c6c445..ef65598c223 100644 --- a/cinder/volume/drivers/coprhd/scaleio.py +++ b/cinder/volume/drivers/coprhd/scaleio.py @@ -24,7 +24,6 @@ from six.moves import urllib from cinder import exception from cinder.i18n import _ -from cinder.i18n import _LI from cinder import interface from cinder.volume import driver from cinder.volume.drivers.coprhd import common as coprhd_common @@ -266,7 +265,7 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver): request = ("https://%s:%s/api/types/Sdc/instances/getByIp::%s/" % (server_ip, six.text_type(server_port), ip_double_encoded)) - LOG.info(_LI("ScaleIO get client id by ip request: %s"), request) + LOG.info("ScaleIO get client id by ip request: %s", request) if self.configuration.scaleio_verify_server_certificate: verify_cert = self.configuration.scaleio_server_certificate_path @@ -292,7 +291,7 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver): 'message']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) - LOG.info(_LI("ScaleIO sdc id is %s"), sdc_id) + LOG.info("ScaleIO sdc id is %s", sdc_id) return sdc_id def _check_response(self, response, request, @@ -300,7 +299,7 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver): server_username, server_password): if response.status_code == 401 or response.status_code == 403: LOG.info( - _LI("Token is invalid, going to re-login and get a new one")) + "Token is invalid, going to re-login and get a new one") login_request = ("https://%s:%s/api/login" % (server_ip, six.text_type(server_port))) @@ -317,7 +316,7 @@ class EMCCoprHDScaleIODriver(driver.VolumeDriver): token = r.json() self.server_token = token # repeat request with valid token - LOG.info(_LI("Going to perform request again %s with valid token"), + LOG.info("Going to perform request again %s with valid token", request) res = requests.get( request, auth=(server_username, self.server_token), diff --git a/cinder/volume/drivers/datera/datera_api2.py b/cinder/volume/drivers/datera/datera_api2.py index 1734aa37b86..792c5b20200 100644 --- a/cinder/volume/drivers/datera/datera_api2.py +++ b/cinder/volume/drivers/datera/datera_api2.py @@ -24,8 +24,8 @@ from oslo_log import log as logging from oslo_utils 
import excutils from oslo_utils import units -from cinder.i18n import _, _LI, _LW, _LE from cinder import exception +from cinder.i18n import _ from cinder.volume import utils as volutils import cinder.volume.drivers.datera.datera_common as datc @@ -98,8 +98,8 @@ class DateraApi(object): policies = self._get_policies_for_resource(volume) template = policies['template'] if template: - LOG.warning(_LW("Volume size not extended due to template binding:" - " volume: %(volume)s, template: %(template)s"), + LOG.warning("Volume size not extended due to template binding:" + " volume: %(volume)s, template: %(template)s", volume=volume, template=template) return @@ -164,9 +164,9 @@ class DateraApi(object): method='delete', api_version='2') except exception.NotFound: - msg = _LI("Tried to delete volume %s, but it was not found in the " - "Datera cluster. Continuing with delete.") - LOG.info(msg, datc._get_name(volume['id'])) + LOG.info("Tried to delete volume %s, but it was not found in the " + "Datera cluster. Continuing with delete.", + datc._get_name(volume['id'])) # ================= # = Ensure Export = @@ -341,8 +341,8 @@ class DateraApi(object): self._issue_api_request(url, method='put', body=data, api_version='2') except exception.NotFound: - msg = _LI("Tried to detach volume %s, but it was not found in the " - "Datera cluster. Continuing with detach.") + msg = ("Tried to detach volume %s, but it was not found in the " + "Datera cluster. Continuing with detach.") LOG.info(msg, volume['id']) # TODO(_alastor_): Make acl cleaning multi-attach aware self._clean_acl_2(volume) @@ -436,8 +436,8 @@ class DateraApi(object): else: raise exception.NotFound except exception.NotFound: - msg = _LI("Tried to delete snapshot %s, but was not found in " - "Datera cluster. Continuing with delete.") + msg = ("Tried to delete snapshot %s, but was not found in " + "Datera cluster. Continuing with delete.") LOG.info(msg, datc._get_name(snapshot['id'])) # ======================== @@ -610,8 +610,8 @@ class DateraApi(object): results = self._issue_api_request('system', api_version='2') if 'uuid' not in results: - LOG.error(_LE( - 'Failed to get updated stats from Datera Cluster.')) + LOG.error( + 'Failed to get updated stats from Datera Cluster.') backend_name = self.configuration.safe_get( 'volume_backend_name') @@ -629,8 +629,7 @@ class DateraApi(object): self.cluster_stats = stats except exception.DateraAPIException: - LOG.error(_LE('Failed to get updated stats from Datera ' - 'cluster.')) + LOG.error('Failed to get updated stats from Datera cluster.') return self.cluster_stats def _is_manageable(self, app_inst): @@ -662,10 +661,10 @@ class DateraApi(object): self.datera_api_token = results['key'] except exception.NotAuthorized: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Logging into the Datera cluster failed. Please ' - 'check your username and password set in the ' - 'cinder.conf and start the cinder-volume ' - 'service again.')) + LOG.error('Logging into the Datera cluster failed. 
Please ' + 'check your username and password set in the ' + 'cinder.conf and start the cinder-volume ' + 'service again.') # =========== # = Polling = diff --git a/cinder/volume/drivers/datera/datera_api21.py b/cinder/volume/drivers/datera/datera_api21.py index 5aba15c42aa..3d8a5602823 100644 --- a/cinder/volume/drivers/datera/datera_api21.py +++ b/cinder/volume/drivers/datera/datera_api21.py @@ -23,8 +23,8 @@ from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units -from cinder.i18n import _, _LI, _LW, _LE from cinder import exception +from cinder.i18n import _ from cinder.volume import utils as volutils import cinder.volume.drivers.datera.datera_common as datc @@ -104,8 +104,8 @@ class DateraApi(object): policies = self._get_policies_for_resource(volume) template = policies['template'] if template: - LOG.warning(_LW("Volume size not extended due to template binding:" - " volume: %(volume)s, template: %(template)s"), + LOG.warning("Volume size not extended due to template binding:" + " volume: %(volume)s, template: %(template)s", volume=volume, template=template) return @@ -184,8 +184,8 @@ class DateraApi(object): api_version='2.1', tenant=tenant) except exception.NotFound: - msg = _LI("Tried to delete volume %s, but it was not found in the " - "Datera cluster. Continuing with delete.") + msg = ("Tried to delete volume %s, but it was not found in the " + "Datera cluster. Continuing with delete.") LOG.info(msg, datc._get_name(volume['id'])) # ================= @@ -378,8 +378,8 @@ class DateraApi(object): self._issue_api_request(url, method='put', body=data, api_version='2.1', tenant=tenant) except exception.NotFound: - msg = _LI("Tried to detach volume %s, but it was not found in the " - "Datera cluster. Continuing with detach.") + msg = ("Tried to detach volume %s, but it was not found in the " + "Datera cluster. Continuing with detach.") LOG.info(msg, volume['id']) # TODO(_alastor_): Make acl cleaning multi-attach aware self._clean_acl_2_1(volume, tenant) @@ -481,8 +481,8 @@ class DateraApi(object): else: raise exception.NotFound except exception.NotFound: - msg = _LI("Tried to delete snapshot %s, but was not found in " - "Datera cluster. Continuing with delete.") + msg = ("Tried to delete snapshot %s, but was not found in " + "Datera cluster. Continuing with delete.") LOG.info(msg, datc._get_name(snapshot['id'])) # ======================== @@ -772,10 +772,10 @@ class DateraApi(object): self.datera_api_token = results['key'] except exception.NotAuthorized: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Logging into the Datera cluster failed. Please ' - 'check your username and password set in the ' - 'cinder.conf and start the cinder-volume ' - 'service again.')) + LOG.error('Logging into the Datera cluster failed. 
Please ' + 'check your username and password set in the ' + 'cinder.conf and start the cinder-volume ' + 'service again.') # =========== # = Polling = @@ -834,8 +834,8 @@ class DateraApi(object): 'system', api_version='2.1')['data'] if 'uuid' not in results: - LOG.error(_LE( - 'Failed to get updated stats from Datera Cluster.')) + LOG.error( + 'Failed to get updated stats from Datera Cluster.') backend_name = self.configuration.safe_get( 'volume_backend_name') @@ -854,8 +854,7 @@ class DateraApi(object): self.cluster_stats = stats except exception.DateraAPIException: - LOG.error(_LE('Failed to get updated stats from Datera ' - 'cluster.')) + LOG.error('Failed to get updated stats from Datera cluster.') return self.cluster_stats # ======= diff --git a/cinder/volume/drivers/datera/datera_common.py b/cinder/volume/drivers/datera/datera_common.py index dc2c19035f0..197783512af 100644 --- a/cinder/volume/drivers/datera/datera_common.py +++ b/cinder/volume/drivers/datera/datera_common.py @@ -21,7 +21,7 @@ import time from oslo_log import log as logging from cinder import exception -from cinder.i18n import _, _LI, _LE +from cinder.i18n import _ LOG = logging.getLogger(__name__) @@ -156,7 +156,7 @@ def _api_lookup(func): name = "_" + "_".join( (func.__name__, api_version.replace(".", "_"))) try: - LOG.info(_LI("Trying method: %s"), name) + LOG.info("Trying method: %s", name) return getattr(obj, name)(*args[1:], **kwargs) except AttributeError as e: # If we find the attribute name in the error message @@ -206,6 +206,6 @@ def _get_supported_api_versions(driver): str(resp.json().get("code")) == "99"): results.append(version) else: - LOG.error(_LE("No supported API versions available, " - "Please upgrade your Datera EDF software")) + LOG.error("No supported API versions available, " + "Please upgrade your Datera EDF software") return results diff --git a/cinder/volume/drivers/dell/dell_storagecenter_api.py b/cinder/volume/drivers/dell/dell_storagecenter_api.py index 30a25e0c7d1..45e8d823abe 100644 --- a/cinder/volume/drivers/dell/dell_storagecenter_api.py +++ b/cinder/volume/drivers/dell/dell_storagecenter_api.py @@ -25,7 +25,7 @@ import six import uuid from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import utils LOG = logging.getLogger(__name__) @@ -152,15 +152,15 @@ class HttpClient(object): url = url + id else: # No hope. - LOG.error(_LE('_get_async_url: Bogus return async task %r'), + LOG.error('_get_async_url: Bogus return async task %r', asyncTask) raise exception.VolumeBackendAPIException( message=_('_get_async_url: Invalid URL.')) # Check for an odd error case if url.startswith('<') and url.endswith('>'): - LOG.error(_LE('_get_async_url: Malformed URL ' - '(XML returned). (%r)'), asyncTask) + LOG.error('_get_async_url: Malformed URL (XML returned). (%r)', + asyncTask) raise exception.VolumeBackendAPIException( message=_('_get_async_url: Malformed URL.')) @@ -308,8 +308,8 @@ class StorageCenterApiHelper(object): self.san_login = self.config.secondary_san_login self.san_password = self.config.secondary_san_password else: - LOG.info(_LI('Swapping DSM credentials: Secondary DSM ' - 'credentials are not set or are incomplete.')) + LOG.info('Swapping DSM credentials: Secondary DSM ' + 'credentials are not set or are incomplete.') # Cannot swap. 
return False # Odds on this hasn't changed so no need to make setting this a @@ -322,7 +322,7 @@ class StorageCenterApiHelper(object): self.san_login = self.config.san_login self.san_password = self.config.san_password self.san_port = self.config.dell_sc_api_port - LOG.info(_LI('Swapping DSM credentials: New DSM IP is %r.'), + LOG.info('Swapping DSM credentials: New DSM IP is %r.', self.san_ip) return True @@ -363,7 +363,7 @@ class StorageCenterApiHelper(object): :raises: VolumeBackendAPIException """ connection = None - LOG.info(_LI('open_connection to %(ssn)s at %(ip)s'), + LOG.info('open_connection to %(ssn)s at %(ip)s', {'ssn': self.primaryssn, 'ip': self.config.san_ip}) if self.primaryssn: @@ -376,11 +376,11 @@ class StorageCenterApiHelper(object): connection = self._setup_connection() else: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to connect to the API. ' - 'No backup DSM provided.')) + LOG.error('Failed to connect to the API. ' + 'No backup DSM provided.') # Save our api version for next time. if self.apiversion != connection.apiversion: - LOG.info(_LI('open_connection: Updating API version to %s'), + LOG.info('open_connection: Updating API version to %s', connection.apiversion) self.apiversion = connection.apiversion @@ -488,7 +488,7 @@ class StorageCenterApi(object): 'reason': rest_response.reason, 'text': response_text}) else: - LOG.warning(_LW('Failed to get REST call result.')) + LOG.warning('Failed to get REST call result.') return False @staticmethod @@ -563,12 +563,11 @@ class StorageCenterApi(object): try: return blob.json() except AttributeError: - LOG.error(_LE('Error invalid json: %s'), - blob) + LOG.error('Error invalid json: %s', blob) except TypeError as ex: - LOG.error(_LE('Error TypeError. %s'), ex) + LOG.error('Error TypeError. %s', ex) except scanner.JSONDecodeError as ex: - LOG.error(_LE('Error JSONDecodeError. %s'), ex) + LOG.error('Error JSONDecodeError. %s', ex) # We are here so this went poorly. Log our blob. LOG.debug('_get_json blob %s', blob) return None @@ -583,12 +582,11 @@ class StorageCenterApi(object): if isinstance(blob, dict): return blob.get('instanceId') except AttributeError: - LOG.error(_LE('Invalid API object: %s'), - blob) + LOG.error('Invalid API object: %s', blob) except TypeError as ex: - LOG.error(_LE('Error TypeError. %s'), ex) + LOG.error('Error TypeError. %s', ex) except scanner.JSONDecodeError as ex: - LOG.error(_LE('Error JSONDecodeError. %s'), ex) + LOG.error('Error JSONDecodeError. %s', ex) LOG.debug('_get_id failed: blob %s', blob) return None @@ -617,7 +615,7 @@ class StorageCenterApi(object): except Exception: # We don't care what failed. The clues are already in the logs. # Just log a parsing error and move on. - LOG.error(_LE('_check_version_fail: Parsing error.')) + LOG.error('_check_version_fail: Parsing error.') # Just eat this if it isn't a version error. return response @@ -662,7 +660,7 @@ class StorageCenterApi(object): except Exception: # Good return but not the login response we were expecting. # Log it and error out. 
- LOG.error(_LE('Unrecognized Login Response: %s'), r) + LOG.error('Unrecognized Login Response: %s', r) def close_connection(self): """Logout of Dell REST API.""" @@ -691,7 +689,7 @@ class StorageCenterApi(object): '%(pid)r not valid on %(ssn)r', {'pid': provider_id, 'ssn': self.ssn}) except Exception: - LOG.error(_LE('_use_provider_id: provider_id %s is invalid!'), + LOG.error('_use_provider_id: provider_id %s is invalid!', provider_id) return ret @@ -708,7 +706,7 @@ class StorageCenterApi(object): r = self.client.get('StorageCenter/StorageCenter') result = self._get_result(r, 'scSerialNumber', ssn) if result is None: - LOG.error(_LE('Failed to find %(s)s. Result %(r)s'), + LOG.error('Failed to find %(s)s. Result %(r)s', {'s': ssn, 'r': r}) raise exception.VolumeBackendAPIException( @@ -779,7 +777,7 @@ class StorageCenterApi(object): scfolder = self._create_folder(url, instanceId, folder, ssn) # If we haven't found a folder or created it then leave if scfolder is None: - LOG.error(_LE('Unable to create folder path %s'), folderpath) + LOG.error('Unable to create folder path %s', folderpath) break # Next part of the path will need this instanceId = self._get_id(scfolder) @@ -878,9 +876,9 @@ class StorageCenterApi(object): # has likely been attempted before the volume has been instantiated # on the Storage Center. In the real world no one will snapshot # a volume without first putting some data in that volume. - LOG.warning(_LW('Volume %(name)s initialization failure. ' - 'Operations such as snapshot and clone may fail due ' - 'to inactive volume.)'), {'name': scvolume['name']}) + LOG.warning('Volume %(name)s initialization failure. ' + 'Operations such as snapshot and clone may fail due ' + 'to inactive volume.)', {'name': scvolume['name']}) def _find_storage_profile(self, storage_profile): """Looks for a Storage Profile on the array. @@ -1066,7 +1064,7 @@ class StorageCenterApi(object): # If we actually have a place to put our volume create it if folder is None: - LOG.warning(_LW('Unable to create folder %s'), self.vfname) + LOG.warning('Unable to create folder %s', self.vfname) # Find our replay_profiles. addids, removeids = self._find_replay_profiles(replay_profile_string) @@ -1108,17 +1106,17 @@ class StorageCenterApi(object): # Our volume should be in the return. scvolume = self._get_json(r) if scvolume: - LOG.info(_LI('Created volume %(instanceId)s: %(name)s'), + LOG.info('Created volume %(instanceId)s: %(name)s', {'instanceId': scvolume['instanceId'], 'name': scvolume['name']}) else: - LOG.error(_LE('ScVolume returned success with empty payload.' - ' Attempting to locate volume')) + LOG.error('ScVolume returned success with empty payload.' + ' Attempting to locate volume') # In theory it is there since success was returned. # Try one last time to find it before returning. scvolume = self._search_for_volume(name) else: - LOG.error(_LE('Unable to create volume on SC: %s'), name) + LOG.error('Unable to create volume on SC: %s', name) return scvolume @@ -1170,8 +1168,7 @@ class StorageCenterApi(object): # if there is no live volume then we return our provider_id. 
primary_id = provider_id lv = self.get_live_volume(provider_id, name) - LOG.info(_LI('Volume %(name)r, ' - 'id %(provider)s at primary %(primary)s.'), + LOG.info('Volume %(name)r, id %(provider)s at primary %(primary)s.', {'name': name, 'provider': provider_id, 'primary': primary_id}) @@ -1180,7 +1177,7 @@ class StorageCenterApi(object): if lv and (self.is_swapped(provider_id, lv) and not self.failed_over and self._autofailback(lv)): lv = self.get_live_volume(provider_id) - LOG.info(_LI('After failback %s'), lv) + LOG.info('After failback %s', lv) # Make sure we still have a LV. if lv: # At this point if the secondaryRole is Active we have @@ -1226,7 +1223,7 @@ class StorageCenterApi(object): msg = (_('Unable to complete failover of %s.') % name) raise exception.VolumeBackendAPIException(data=msg) - LOG.info(_LI('Imported %(fail)s to %(guid)s.'), + LOG.info('Imported %(fail)s to %(guid)s.', {'fail': self._repl_name(name), 'guid': name}) else: @@ -1313,8 +1310,8 @@ class StorageCenterApi(object): return self._get_json(r) # If we can't find the volume then it is effectively gone. - LOG.warning(_LW('delete_volume: unable to find volume ' - 'provider_id: %s'), provider_id) + LOG.warning('delete_volume: unable to find volume ' + 'provider_id: %s', provider_id) return True def _find_server_folder(self, create=False, ssn=-1): @@ -1354,7 +1351,7 @@ class StorageCenterApi(object): r = self.client.post('StorageCenter/ScPhysicalServer/%s/AddHba' % self._get_id(scserver), payload, True) if not self._check_result(r): - LOG.error(_LE('_add_hba error: %(wwn)s to %(srvname)s'), + LOG.error('_add_hba error: %(wwn)s to %(srvname)s', {'wwn': wwnoriscsiname, 'srvname': scserver['name']}) return False @@ -1385,7 +1382,7 @@ class StorageCenterApi(object): # Found it return the id return self._get_id(srvos) - LOG.warning(_LW('Unable to find appropriate OS %s'), osname) + LOG.warning('Unable to find appropriate OS %s', osname) return None @@ -1412,7 +1409,7 @@ class StorageCenterApi(object): for wwn in wwnlist: if not self._add_hba(scserver, wwn): # We failed so log it. Delete our server and return None. 
- LOG.error(_LE('Error adding HBA %s to server'), wwn) + LOG.error('Error adding HBA %s to server', wwn) self._delete_server(scserver) return None return scserver @@ -1420,7 +1417,7 @@ class StorageCenterApi(object): def _create_server(self, servername, folder, serveros, ssn): ssn = self._vet_ssn(ssn) - LOG.info(_LI('Creating server %s'), servername) + LOG.info('Creating server %s', servername) payload = {} payload['Name'] = servername payload['StorageCenter'] = ssn @@ -1445,9 +1442,9 @@ class StorageCenterApi(object): if self._check_result(r): # Server was created scserver = self._first_result(r) - LOG.info(_LI('SC server created %s'), scserver) + LOG.info('SC server created %s', scserver) return scserver - LOG.error(_LE('Unable to create SC server %s'), servername) + LOG.error('Unable to create SC server %s', servername) return None def _vet_ssn(self, ssn): @@ -1529,7 +1526,7 @@ class StorageCenterApi(object): domains = self._get_json(r) return domains - LOG.error(_LE('Error getting FaultDomainList for %s'), cportid) + LOG.error('Error getting FaultDomainList for %s', cportid) return None def _find_initiators(self, scserver): @@ -1549,7 +1546,7 @@ class StorageCenterApi(object): wwn is not None): initiators.append(wwn) else: - LOG.error(_LE('Unable to find initiators')) + LOG.error('Unable to find initiators') LOG.debug('_find_initiators: %s', initiators) return initiators @@ -1580,8 +1577,8 @@ class StorageCenterApi(object): if self._check_result(r): mappings = self._get_json(r) else: - LOG.error(_LE('_find_mappings: volume is not active')) - LOG.info(_LI('Volume mappings for %(name)s: %(mappings)s'), + LOG.error('_find_mappings: volume is not active') + LOG.info('Volume mappings for %(name)s: %(mappings)s', {'name': scvolume.get('name'), 'mappings': mappings}) return mappings @@ -1598,7 +1595,7 @@ class StorageCenterApi(object): if self._check_result(r): mapping_profiles = self._get_json(r) else: - LOG.error(_LE('Unable to find mapping profiles: %s'), + LOG.error('Unable to find mapping profiles: %s', scvolume.get('name')) LOG.debug(mapping_profiles) return mapping_profiles @@ -1655,17 +1652,17 @@ class StorageCenterApi(object): if lun is None: lun = mappinglun elif lun != mappinglun: - LOG.warning(_LW('Inconsistent Luns.')) + LOG.warning('Inconsistent Luns.') else: LOG.debug('%s not found in initiator list', hbaname) else: - LOG.warning(_LW('_find_wwn: serverhba is None.')) + LOG.warning('_find_wwn: serverhba is None.') else: - LOG.warning(_LW('_find_wwn: Unable to find port wwn.')) + LOG.warning('_find_wwn: Unable to find port wwn.') else: - LOG.warning(_LW('_find_wwn: controllerport is None.')) - LOG.info(_LI('_find_wwns-lun: %(lun)s wwns: %(wwn)s itmap: %(map)s'), + LOG.warning('_find_wwn: controllerport is None.') + LOG.info('_find_wwns-lun: %(lun)s wwns: %(wwn)s itmap: %(map)s', {'lun': lun, 'wwn': wwns, 'map': itmap}) @@ -1686,7 +1683,7 @@ class StorageCenterApi(object): controller = volconfig.get('controller') actvctrl = self._get_id(controller) else: - LOG.error(_LE('Unable to retrieve VolumeConfiguration: %s'), + LOG.error('Unable to retrieve VolumeConfiguration: %s', self._get_id(scvolume)) LOG.debug('_find_active_controller: %s', actvctrl) return actvctrl @@ -1731,8 +1728,8 @@ class StorageCenterApi(object): if self._check_result(r): controllerport = self._first_result(r) else: - LOG.error(_LE('_find_controller_port_iscsi_config: ' - 'Error finding configuration: %s'), cportid) + LOG.error('_find_controller_port_iscsi_config: ' + 'Error finding configuration: %s', cportid) 
return controllerport def find_iscsi_properties(self, scvolume): @@ -1904,7 +1901,7 @@ class StorageCenterApi(object): mprofiles = self._find_mapping_profiles(scvolume) for mprofile in mprofiles: if self._get_id(mprofile.get('server')) == serverid: - LOG.info(_LI('Volume %(vol)s already mapped to %(srv)s'), + LOG.info('Volume %(vol)s already mapped to %(srv)s', {'vol': scvolume['name'], 'srv': scserver['name']}) return mprofile @@ -1916,13 +1913,13 @@ class StorageCenterApi(object): % volumeid, payload, True) if self._check_result(r): # We just return our mapping - LOG.info(_LI('Volume %(vol)s mapped to %(srv)s'), + LOG.info('Volume %(vol)s mapped to %(srv)s', {'vol': scvolume['name'], 'srv': scserver['name']}) return self._first_result(r) # Error out - LOG.error(_LE('Unable to map %(vol)s to %(srv)s'), + LOG.error('Unable to map %(vol)s to %(srv)s', {'vol': scvolume['name'], 'srv': scserver['name']}) return None @@ -1956,12 +1953,12 @@ class StorageCenterApi(object): if result is True or (type(result) is dict and result.get('result')): LOG.info( - _LI('Volume %(vol)s unmapped from %(srv)s'), + 'Volume %(vol)s unmapped from %(srv)s', {'vol': scvolume['name'], 'srv': scserver['name']}) continue - LOG.error(_LE('Unable to unmap %(vol)s from %(srv)s'), + LOG.error('Unable to unmap %(vol)s from %(srv)s', {'vol': scvolume['name'], 'srv': scserver['name']}) # 1 failed unmap is as good as 100. @@ -2018,7 +2015,7 @@ class StorageCenterApi(object): # Quick double check. if replay is None: - LOG.warning(_LW('Unable to create snapshot %s'), replayid) + LOG.warning('Unable to create snapshot %s', replayid) # Return replay or None. return replay @@ -2052,10 +2049,10 @@ class StorageCenterApi(object): # We found our replay so return it. return replay except Exception: - LOG.error(_LE('Invalid ReplayList return: %s'), + LOG.error('Invalid ReplayList return: %s', r) # If we are here then we didn't find the replay so warn and leave. - LOG.warning(_LW('Unable to find snapshot %s'), + LOG.warning('Unable to find snapshot %s', replayid) return None @@ -2075,7 +2072,7 @@ class StorageCenterApi(object): self._get_id(screplay), payload, True) if self._check_result(r): return True - LOG.error(_LE('Error managing replay %s'), + LOG.error('Error managing replay %s', screplay.get('description')) return False @@ -2092,7 +2089,7 @@ class StorageCenterApi(object): self._get_id(screplay), payload, True) if self._check_result(r): return True - LOG.error(_LE('Error unmanaging replay %s'), + LOG.error('Error unmanaging replay %s', screplay.get('description')) return False @@ -2162,12 +2159,11 @@ class StorageCenterApi(object): # If we have a dr_profile to apply we should do so now. if dr_profile and not self.update_datareduction_profile(volume, dr_profile): - LOG.error(_LE('Unable to apply %s to volume.'), dr_profile) + LOG.error('Unable to apply %s to volume.', dr_profile) volume = None if volume is None: - LOG.error(_LE('Unable to create volume %s from replay'), - volname) + LOG.error('Unable to create volume %s from replay', volname) return volume @@ -2230,7 +2226,7 @@ class StorageCenterApi(object): :returns: The new volume's Dell volume object. :raises: VolumeBackendAPIException if error doing copy. """ - LOG.info(_LI('create_cloned_volume: Creating %(dst)s from %(src)s'), + LOG.info('create_cloned_volume: Creating %(dst)s from %(src)s', {'dst': volumename, 'src': scvolume['name']}) @@ -2273,7 +2269,7 @@ class StorageCenterApi(object): self.delete_volume(volumename, self._get_id(newvol)) raise # Tell the user. 
- LOG.error(_LE('create_cloned_volume: Unable to clone volume')) + LOG.error('create_cloned_volume: Unable to clone volume') return None def expand_volume(self, scvolume, newsize): @@ -2296,7 +2292,7 @@ class StorageCenterApi(object): {'name': vol['name'], 'size': vol['configuredSize']}) else: - LOG.error(_LE('Error expanding volume %s.'), scvolume['name']) + LOG.error('Error expanding volume %s.', scvolume['name']) return vol def rename_volume(self, scvolume, name): @@ -2316,7 +2312,7 @@ class StorageCenterApi(object): if self._check_result(r): return True - LOG.error(_LE('Error renaming volume %(original)s to %(name)s'), + LOG.error('Error renaming volume %(original)s to %(name)s', {'original': scvolume['name'], 'name': name}) return False @@ -2329,13 +2325,13 @@ class StorageCenterApi(object): return False if not prefs.get(allowprefname): - LOG.error(_LE('User does not have permission to change ' - '%s selection.'), profiletype) + LOG.error('User does not have permission to change ' + '%s selection.', profiletype) return False if profilename: if not profile: - LOG.error(_LE('%(ptype)s %(pname)s was not found.'), + LOG.error('%(ptype)s %(pname)s was not found.', {'ptype': profiletype, 'pname': profilename}) return False @@ -2343,10 +2339,10 @@ class StorageCenterApi(object): # Going from specific profile to the user default profile = prefs.get(restname) if not profile and not continuewithoutdefault: - LOG.error(_LE('Default %s was not found.'), profiletype) + LOG.error('Default %s was not found.', profiletype) return False - LOG.info(_LI('Switching volume %(vol)s to profile %(prof)s.'), + LOG.info('Switching volume %(vol)s to profile %(prof)s.', {'vol': scvolume['name'], 'prof': profile.get('name')}) payload = {} @@ -2356,8 +2352,8 @@ class StorageCenterApi(object): if self._check_result(r): return True - LOG.error(_LE('Error changing %(ptype)s for volume ' - '%(original)s to %(name)s'), + LOG.error('Error changing %(ptype)s for volume ' + '%(original)s to %(name)s', {'ptype': profiletype, 'original': scvolume['name'], 'name': profilename}) @@ -2467,7 +2463,7 @@ class StorageCenterApi(object): profilelist = self._get_json(r) if profilelist: if len(profilelist) > 1: - LOG.error(_LE('Multiple replay profiles under name %s'), + LOG.error('Multiple replay profiles under name %s', name) raise exception.VolumeBackendAPIException( data=_('Multiple profiles found.')) @@ -2507,12 +2503,12 @@ class StorageCenterApi(object): r = self.client.delete('StorageCenter/ScReplayProfile/%s' % self._get_id(profile), async=True) if self._check_result(r): - LOG.info(_LI('Profile %s has been deleted.'), + LOG.info('Profile %s has been deleted.', profile.get('name')) else: # We failed due to a failure to delete an existing profile. # This is reason to raise an exception. 
- LOG.error(_LE('Unable to delete profile %s.'), profile.get('name')) + LOG.error('Unable to delete profile %s.', profile.get('name')) raise exception.VolumeBackendAPIException( data=_('Error deleting replay profile.')) @@ -2580,9 +2576,9 @@ class StorageCenterApi(object): if (self._update_volume_profiles(scvolume, addid=profileid, removeid=None)): - LOG.info(_LI('Added %s to cg.'), vol['id']) + LOG.info('Added %s to cg.', vol['id']) else: - LOG.error(_LE('Failed to add %s to cg.'), vol['id']) + LOG.error('Failed to add %s to cg.', vol['id']) return False return True @@ -2599,9 +2595,9 @@ class StorageCenterApi(object): if (self._update_volume_profiles(scvolume, addid=None, removeid=profileid)): - LOG.info(_LI('Removed %s from cg.'), vol['id']) + LOG.info('Removed %s from cg.', vol['id']) else: - LOG.error(_LE('Failed to remove %s from cg.'), vol['id']) + LOG.error('Failed to remove %s from cg.', vol['id']) return False return True @@ -2622,10 +2618,10 @@ class StorageCenterApi(object): ret = True profileid = self._get_id(profile) if add_volumes: - LOG.info(_LI('Adding volumes to cg %s.'), profile['name']) + LOG.info('Adding volumes to cg %s.', profile['name']) ret = self._add_cg_volumes(profileid, add_volumes) if ret and remove_volumes: - LOG.info(_LI('Removing volumes from cg %s.'), profile['name']) + LOG.info('Removing volumes from cg %s.', profile['name']) ret = self._remove_cg_volumes(profileid, remove_volumes) return ret @@ -2666,7 +2662,7 @@ class StorageCenterApi(object): 'CreateReplay' % self._get_id(profile), payload, True) if self._check_result(r): - LOG.info(_LI('CreateReplay success %s'), replayid) + LOG.info('CreateReplay success %s', replayid) return True return False @@ -2716,7 +2712,7 @@ class StorageCenterApi(object): replays = self._get_json(r) else: - LOG.error(_LE('Unable to locate snapshot %s'), replayid) + LOG.error('Unable to locate snapshot %s', replayid) return replays @@ -2780,7 +2776,7 @@ class StorageCenterApi(object): # If we actually have a place to put our volume create it if folder is None: - LOG.warning(_LW('Unable to create folder %s'), self.vfname) + LOG.warning('Unable to create folder %s', self.vfname) # Rename and move our volume. payload = {} @@ -2882,7 +2878,7 @@ class StorageCenterApi(object): r = self.client.put('StorageCenter/ScVolume/%s' % self._get_id(scvolume), payload, True) if self._check_result(r): - LOG.info(_LI('Volume %s unmanaged.'), scvolume['name']) + LOG.info('Volume %s unmanaged.', scvolume['name']) else: msg = _('Unable to rename volume %(existing)s to %(newname)s') % { 'existing': scvolume['name'], @@ -2917,7 +2913,7 @@ class StorageCenterApi(object): if self._check_result(r): return self._get_json(r) - LOG.error(_LE('Unable to find or create QoS Node named %s'), qosnode) + LOG.error('Unable to find or create QoS Node named %s', qosnode) raise exception.VolumeBackendAPIException( data=_('Failed to find QoSnode')) @@ -2961,7 +2957,7 @@ class StorageCenterApi(object): if replication.get('destinationScSerialNumber') == destssn: return replication # Unable to locate replication. 
- LOG.warning(_LW('Unable to locate replication %(vol)s to %(ssn)s'), + LOG.warning('Unable to locate replication %(vol)s to %(ssn)s', {'vol': scvolume.get('name'), 'ssn': destssn}) return None @@ -2985,13 +2981,13 @@ class StorageCenterApi(object): async=True) if self._check_result(r): # check that we whacked the dest volume - LOG.info(_LI('Replication %(vol)s to %(dest)s.'), + LOG.info('Replication %(vol)s to %(dest)s.', {'vol': scvolume.get('name'), 'dest': destssn}) return True - LOG.error(_LE('Unable to delete replication for ' - '%(vol)s to %(dest)s.'), + LOG.error('Unable to delete replication for ' + '%(vol)s to %(dest)s.', {'vol': scvolume.get('name'), 'dest': destssn}) return False @@ -3014,8 +3010,8 @@ class StorageCenterApi(object): diskfolder = self._get_json(r)[0] except Exception: # We just log this as an error and return nothing. - LOG.error(_LE('Unable to find ' - 'disk folder %(name)s on %(ssn)s'), + LOG.error('Unable to find ' + 'disk folder %(name)s on %(ssn)s', {'name': foldername, 'ssn': ssn}) return diskfolder @@ -3061,7 +3057,7 @@ class StorageCenterApi(object): r = self.client.post('StorageCenter/ScReplication', payload, True) # 201 expected. if self._check_result(r): - LOG.info(_LI('Replication created for %(volname)s to %(destsc)s'), + LOG.info('Replication created for %(volname)s to %(destsc)s', {'volname': scvolume.get('name'), 'destsc': destssn}) screpl = self._get_json(r) @@ -3069,7 +3065,7 @@ class StorageCenterApi(object): # Check we did something. if not screpl: # Failed to launch. Inform user. Throw. - LOG.error(_LE('Unable to replicate %(volname)s to %(destsc)s'), + LOG.error('Unable to replicate %(volname)s to %(destsc)s', {'volname': scvolume.get('name'), 'destsc': destssn}) return screpl @@ -3206,8 +3202,8 @@ class StorageCenterApi(object): True) # 201 expected. 
if self._check_result(r): - LOG.info(_LI('Replication created for ' - '%(src)s to %(dest)s'), + LOG.info('Replication created for ' + '%(src)s to %(dest)s', {'src': svolume.get('name'), 'dest': dvolume.get('name')}) screpl = self._get_json(r) @@ -3267,8 +3263,8 @@ class StorageCenterApi(object): if (self.rename_volume(svolume, self._repl_name(name)) and self.rename_volume(dvolume, name)): return True - LOG.warning(_LW('flip_replication: Unable to replicate ' - '%(name)s from %(src)s to %(dst)s'), + LOG.warning('flip_replication: Unable to replicate ' + '%(name)s from %(src)s to %(dst)s', {'name': name, 'src': dvolume['scSerialNumber'], 'dst': svolume['scSerialNumber']}) @@ -3290,8 +3286,8 @@ class StorageCenterApi(object): progress['amountRemaining'].split(' ', 1)[0]) return progress['synced'], remaining except Exception: - LOG.warning(_LW('replication_progress: Invalid replication' - ' progress information returned: %s'), + LOG.warning('replication_progress: Invalid replication' + ' progress information returned: %s', progress) return None, None @@ -3416,14 +3412,14 @@ class StorageCenterApi(object): pscqos = self._find_qos(primaryqos) sscqos = self._find_qos(secondaryqos, destssn) if not destssn: - LOG.error(_LE('create_live_volume: Unable to find remote %s'), + LOG.error('create_live_volume: Unable to find remote %s', remotessn) elif not pscqos: - LOG.error(_LE('create_live_volume: Unable to find or create ' - 'qos node %s'), primaryqos) + LOG.error('create_live_volume: Unable to find or create ' + 'qos node %s', primaryqos) elif not sscqos: - LOG.error(_LE('create_live_volume: Unable to find or create remote' - ' qos node %(qos)s on %(ssn)s'), + LOG.error('create_live_volume: Unable to find or create remote' + ' qos node %(qos)s on %(ssn)s', {'qos': secondaryqos, 'ssn': destssn}) else: payload = {} @@ -3451,12 +3447,12 @@ class StorageCenterApi(object): r = self.client.post('StorageCenter/ScLiveVolume', payload, True) if self._check_result(r): - LOG.info(_LI('create_live_volume: Live Volume created from' - '%(svol)s to %(ssn)s'), + LOG.info('create_live_volume: Live Volume created from' + '%(svol)s to %(ssn)s', {'svol': self._get_id(scvolume), 'ssn': remotessn}) return self._get_json(r) - LOG.error(_LE('create_live_volume: Failed to create Live Volume from' - '%(svol)s to %(ssn)s'), + LOG.error('create_live_volume: Failed to create Live Volume from' + '%(svol)s to %(ssn)s', {'svol': self._get_id(scvolume), 'ssn': remotessn}) return None diff --git a/cinder/volume/drivers/dell/dell_storagecenter_common.py b/cinder/volume/drivers/dell/dell_storagecenter_common.py index 407b0d8eee7..ead80206bf3 100644 --- a/cinder/volume/drivers/dell/dell_storagecenter_common.py +++ b/cinder/volume/drivers/dell/dell_storagecenter_common.py @@ -20,7 +20,7 @@ from oslo_utils import excutils import six from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder.objects import fields from cinder.volume import driver from cinder.volume.drivers.dell import dell_storagecenter_api @@ -88,7 +88,7 @@ class DellCommonDriver(driver.ManageableVD, self.is_direct_connect = False self.active_backend_id = kwargs.get('active_backend_id', None) self.failed_over = True if self.active_backend_id else False - LOG.info(_LI('Loading %(name)s: Failover state is %(state)r'), + LOG.info('Loading %(name)s: Failover state is %(state)r', {'name': self.backend_name, 'state': self.failed_over}) self.storage_protocol = 'iSCSI' @@ -279,7 +279,7 @@ class 
DellCommonDriver(driver.ManageableVD, try: api.delete_volume(volumename) except exception.VolumeBackendAPIException as ex: - LOG.info(_LI('Non fatal cleanup error: %s.'), ex.msg) + LOG.info('Non fatal cleanup error: %s.', ex.msg) def create_volume(self, volume): """Create a volume.""" @@ -324,7 +324,7 @@ class DellCommonDriver(driver.ManageableVD, # clean up the volume now. self._cleanup_failed_create_volume(api, volume_name) with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to create volume %s'), + LOG.error('Failed to create volume %s', volume_name) if scvolume is None: raise exception.VolumeBackendAPIException( @@ -374,16 +374,15 @@ class DellCommonDriver(driver.ManageableVD, if (sclivevolume and sclivevolume.get('secondaryScSerialNumber') == ssn and api.delete_live_volume(sclivevolume, True)): - LOG.info(_LI('%(vname)s\'s replication live volume has ' - 'been deleted from storage Center %(sc)s,'), + LOG.info('%(vname)s\'s replication live volume has ' + 'been deleted from storage Center %(sc)s,', {'vname': volume.get('id'), 'sc': ssn}) return True # If we are here either we do not have a live volume, we do not have # one on our configured SC or we were not able to delete it. # Either way, warn and leave. - LOG.warning(_LW('Unable to delete %s live volume.'), - volume.get('id')) + LOG.warning('Unable to delete %s live volume.', volume.get('id')) return False def _delete_replications(self, api, volume): @@ -409,8 +408,8 @@ class DellCommonDriver(driver.ManageableVD, ssn = int(ssnstring) # Are we a replication or a live volume? if not api.delete_replication(scvol, ssn): - LOG.warning(_LW('Unable to delete replication of Volume ' - '%(vname)s to Storage Center %(sc)s.'), + LOG.warning('Unable to delete replication of Volume ' + '%(vname)s to Storage Center %(sc)s.', {'vname': volume_name, 'sc': ssnstring}) # If none of that worked or there was nothing to do doesn't matter. @@ -439,7 +438,7 @@ class DellCommonDriver(driver.ManageableVD, deleted = api.delete_volume(volume_name, provider_id) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to delete volume %s'), + LOG.error('Failed to delete volume %s', volume_name) # if there was an error we will have raised an @@ -466,8 +465,7 @@ class DellCommonDriver(driver.ManageableVD, return {'status': fields.SnapshotStatus.AVAILABLE, 'provider_id': scvolume['instanceId']} else: - LOG.warning(_LW('Unable to locate volume:%s'), - volume_name) + LOG.warning('Unable to locate volume:%s', volume_name) snapshot['status'] = fields.SnapshotStatus.ERROR msg = _('Failed to create snapshot %s') % snapshot_id @@ -540,8 +538,7 @@ class DellCommonDriver(driver.ManageableVD, # Clean up after ourselves. self._cleanup_failed_create_volume(api, volume_name) with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to create volume %s'), - volume_name) + LOG.error('Failed to create volume %s', volume_name) if scvolume is not None: LOG.debug('Volume %(vol)s created from %(snap)s', {'vol': volume_name, @@ -604,8 +601,7 @@ class DellCommonDriver(driver.ManageableVD, # Clean up after ourselves. 
self._cleanup_failed_create_volume(api, volume_name) with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to create volume %s'), - volume_name) + LOG.error('Failed to create volume %s', volume_name) if scvolume is not None: LOG.debug('Volume %(vol)s cloned from %(src)s', {'vol': volume_name, @@ -656,7 +652,7 @@ class DellCommonDriver(driver.ManageableVD, self._is_live_vol(volume)) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to ensure export of volume %s'), + LOG.error('Failed to ensure export of volume %s', volume_name) if scvolume is None: msg = _('Unable to find volume %s') % volume_name @@ -738,7 +734,7 @@ class DellCommonDriver(driver.ManageableVD, data['free_capacity_gb'] = freespacegb else: # Soldier on. Just return 0 for this iteration. - LOG.error(_LE('Unable to retrieve volume stats.')) + LOG.error('Unable to retrieve volume stats.') data['total_capacity_gb'] = 0 data['free_capacity_gb'] = 0 @@ -782,7 +778,7 @@ class DellCommonDriver(driver.ManageableVD, return model_update # The world was horrible to us so we should error and leave. - LOG.error(_LE('Unable to rename the logical volume for volume: %s'), + LOG.error('Unable to rename the logical volume for volume: %s', original_volume_name) return {'_name_id': new_volume['_name_id'] or new_volume['id']} @@ -799,7 +795,7 @@ class DellCommonDriver(driver.ManageableVD, with self._client.open_connection() as api: cgroup = api.create_replay_profile(gid) if cgroup: - LOG.info(_LI('Created Consistency Group %s'), gid) + LOG.info('Created Consistency Group %s', gid) return msg = _('Unable to create consistency group %s') % gid raise exception.VolumeBackendAPIException(data=msg) @@ -860,11 +856,11 @@ class DellCommonDriver(driver.ManageableVD, with self._client.open_connection() as api: profile = api.find_replay_profile(gid) if not profile: - LOG.error(_LE('Cannot find Consistency Group %s'), gid) + LOG.error('Cannot find Consistency Group %s', gid) elif api.update_cg_volumes(profile, add_volumes, remove_volumes): - LOG.info(_LI('Updated Consistency Group %s'), gid) + LOG.info('Updated Consistency Group %s', gid) # we need nothing updated above us so just return None. return None, None, None # Things did not go well so throw. @@ -900,9 +896,9 @@ class DellCommonDriver(driver.ManageableVD, return model_update, snapshot_updates # That didn't go well. Tell them why. Then bomb out. - LOG.error(_LE('Failed to snap Consistency Group %s'), cgid) + LOG.error('Failed to snap Consistency Group %s', cgid) else: - LOG.error(_LE('Cannot find Consistency Group %s'), cgid) + LOG.error('Cannot find Consistency Group %s', cgid) msg = _('Unable to snap Consistency Group %s') % cgid raise exception.VolumeBackendAPIException(data=msg) @@ -924,7 +920,7 @@ class DellCommonDriver(driver.ManageableVD, with self._client.open_connection() as api: profile = api.find_replay_profile(cgid) if profile: - LOG.info(_LI('Deleting snapshot %(ss)s from %(pro)s'), + LOG.info('Deleting snapshot %(ss)s from %(pro)s', {'ss': snapshotid, 'pro': profile}) if not api.delete_cg_replay(profile, snapshotid): @@ -1058,7 +1054,7 @@ class DellCommonDriver(driver.ManageableVD, 'spec': requested}) return current, requested else: - LOG.info(_LI('Retype was to same Storage Profile.')) + LOG.info('Retype was to same Storage Profile.') return None, None def _retype_replication(self, api, volume, scvolume, new_type, diff): @@ -1104,8 +1100,8 @@ class DellCommonDriver(driver.ManageableVD, dictionary of its reported capabilities (Not Used). 
:returns: Boolean or Boolean, model_update tuple. """ - LOG.info(_LI('retype: volume_name: %(name)s new_type: %(newtype)s ' - 'diff: %(diff)s host: %(host)s'), + LOG.info('retype: volume_name: %(name)s new_type: %(newtype)s ' + 'diff: %(diff)s host: %(host)s', {'name': volume.get('id'), 'newtype': new_type, 'diff': diff, 'host': host}) model_update = None @@ -1118,7 +1114,7 @@ class DellCommonDriver(driver.ManageableVD, # Get our volume scvolume = api.find_volume(volume_name, provider_id) if scvolume is None: - LOG.error(_LE('Retype unable to find volume %s.'), + LOG.error('Retype unable to find volume %s.', volume_name) return False # Check our specs. @@ -1130,7 +1126,7 @@ class DellCommonDriver(driver.ManageableVD, # if there is a change and it didn't work fast fail. if (current != requested and not api.update_storage_profile(scvolume, requested)): - LOG.error(_LE('Failed to update storage profile')) + LOG.error('Failed to update storage profile') return False # Replay profiles. @@ -1141,7 +1137,7 @@ class DellCommonDriver(driver.ManageableVD, # if there is a change and it didn't work fast fail. if requested and not api.update_replay_profiles(scvolume, requested): - LOG.error(_LE('Failed to update replay profiles')) + LOG.error('Failed to update replay profiles') return False # Volume QOS profiles. @@ -1151,8 +1147,7 @@ class DellCommonDriver(driver.ManageableVD, 'storagetype:volumeqos')) if current != requested: if not api.update_qos_profile(scvolume, requested): - LOG.error(_LE('Failed to update volume ' - 'qos profile')) + LOG.error('Failed to update volume qos profile') # Group QOS profiles. current, requested = ( @@ -1162,8 +1157,7 @@ class DellCommonDriver(driver.ManageableVD, if current != requested: if not api.update_qos_profile(scvolume, requested, True): - LOG.error(_LE('Failed to update group ' - 'qos profile')) + LOG.error('Failed to update group qos profile') return False # Data reduction profiles. @@ -1174,8 +1168,8 @@ class DellCommonDriver(driver.ManageableVD, if current != requested: if not api.update_datareduction_profile(scvolume, requested): - LOG.error(_LE('Failed to update data reduction ' - 'profile')) + LOG.error('Failed to update data reduction ' + 'profile') return False # Active Replay @@ -1186,8 +1180,8 @@ class DellCommonDriver(driver.ManageableVD, if current != requested and not ( api.update_replicate_active_replay( scvolume, requested == ' True')): - LOG.error(_LE('Failed to apply ' - 'replication:activereplay setting')) + LOG.error('Failed to apply ' + 'replication:activereplay setting') return False # Deal with replication. @@ -1231,8 +1225,8 @@ class DellCommonDriver(driver.ManageableVD, destssn = ssn break except exception.VolumeBackendAPIException: - LOG.warning(_LW('SSN %s appears to be down.'), ssn) - LOG.info(_LI('replication failover secondary is %(ssn)s'), + LOG.warning('SSN %s appears to be down.', ssn) + LOG.info('replication failover secondary is %(ssn)s', {'ssn': destssn}) return destssn @@ -1309,8 +1303,8 @@ class DellCommonDriver(driver.ManageableVD, ovol, 'org:' + ovol['name']): # Not a reason to fail but will possibly # cause confusion so warn. - LOG.warning(_LW('Unable to locate and rename ' - 'original volume: %s'), + LOG.warning('Unable to locate and rename ' + 'original volume: %s', item['ovol']) item['status'] = 'synced' else: @@ -1329,9 +1323,9 @@ class DellCommonDriver(driver.ManageableVD, if lastremain == currentremain: # One chance down. Warn user. deadcount -= 1 - LOG.warning(_LW('Waiting for replications to complete. 
' - 'No progress for %(timeout)d seconds. ' - 'deadcount = %(cnt)d'), + LOG.warning('Waiting for replications to complete. ' + 'No progress for %(timeout)d seconds. ' + 'deadcount = %(cnt)d', {'timeout': self.failback_timeout, 'cnt': deadcount}) else: @@ -1341,13 +1335,13 @@ class DellCommonDriver(driver.ManageableVD, # If we've used up our 5 chances we error and log.. if deadcount == 0: - LOG.error(_LE('Replication progress has stopped: ' - '%f remaining.'), currentremain) + LOG.error('Replication progress has stopped: %f remaining.', + currentremain) for item in items: if item['status'] == 'inprogress': - LOG.error(_LE('Failback failed for volume: %s. ' - 'Timeout waiting for replication to ' - 'sync with original volume.'), + LOG.error('Failback failed for volume: %s. ' + 'Timeout waiting for replication to ' + 'sync with original volume.', item['volume']['id']) item['status'] = 'error' break @@ -1426,7 +1420,7 @@ class DellCommonDriver(driver.ManageableVD, :param qosnode: Dell QOS node object. :return: replitem dict. """ - LOG.info(_LI('failback_volumes: replicated volume')) + LOG.info('failback_volumes: replicated volume') # Get our current volume. cvol = api.find_volume(volume['id'], volume['provider_id']) # Original volume on the primary. @@ -1446,7 +1440,7 @@ class DellCommonDriver(driver.ManageableVD, nvolid = screpl['destinationVolume']['instanceId'] status = 'inprogress' else: - LOG.error(_LE('Unable to restore %s'), volume['id']) + LOG.error('Unable to restore %s', volume['id']) screplid = None nvolid = None status = 'error' @@ -1481,14 +1475,14 @@ class DellCommonDriver(driver.ManageableVD, sclivevolume = api.get_live_volume(provider_id) # TODO(tswanson): Check swapped state first. if sclivevolume and api.swap_roles_live_volume(sclivevolume): - LOG.info(_LI('Success swapping sclivevolume roles %s'), id) + LOG.info('Success swapping sclivevolume roles %s', id) model_update = { 'status': 'available', 'replication_status': fields.ReplicationStatus.ENABLED, 'provider_id': sclivevolume['secondaryVolume']['instanceId']} else: - LOG.info(_LI('Failure swapping roles %s'), id) + LOG.info('Failure swapping roles %s', id) model_update = {'status': 'error'} return model_update @@ -1509,7 +1503,7 @@ class DellCommonDriver(driver.ManageableVD, :param volumes: List of volumes that need to be failed back. :return: volume_updates for the list of volumes. """ - LOG.info(_LI('failback_volumes')) + LOG.info('failback_volumes') with self._client.open_connection() as api: # Get our qosnode. This is a good way to make sure the backend # is still setup so that we can do this. @@ -1524,7 +1518,7 @@ class DellCommonDriver(driver.ManageableVD, # Trundle through the volumes. Update non replicated to alive again # and reverse the replications for the remaining volumes. 
for volume in volumes: - LOG.info(_LI('failback_volumes: starting volume: %s'), volume) + LOG.info('failback_volumes: starting volume: %s', volume) model_update = {} if volume.get('replication_driver_data'): rspecs = self._get_replication_specs( @@ -1567,12 +1561,12 @@ class DellCommonDriver(driver.ManageableVD, rvol = api.break_replication(id, provider_id, destssn) model_update = {} if rvol: - LOG.info(_LI('Success failing over volume %s'), id) + LOG.info('Success failing over volume %s', id) model_update = {'replication_status': fields.ReplicationStatus.FAILED_OVER, 'provider_id': rvol['instanceId']} else: - LOG.info(_LI('Failed failing over volume %s'), id) + LOG.info('Failed failing over volume %s', id) model_update = {'status': 'error'} return model_update @@ -1585,11 +1579,11 @@ class DellCommonDriver(driver.ManageableVD, swapped = api.is_swapped(provider_id, sclivevolume) # If we aren't swapped try it. If fail error out. if not swapped and not api.swap_roles_live_volume(sclivevolume): - LOG.info(_LI('Failure swapping roles %s'), id) + LOG.info('Failure swapping roles %s', id) model_update = {'status': 'error'} return model_update - LOG.info(_LI('Success swapping sclivevolume roles %s'), id) + LOG.info('Success swapping sclivevolume roles %s', id) sclivevolume = api.get_live_volume(provider_id) model_update = { 'replication_status': @@ -1628,7 +1622,7 @@ class DellCommonDriver(driver.ManageableVD, raise exception.InvalidReplicationTarget( reason=_('Already failed over')) - LOG.info(_LI('Failing backend to %s'), secondary_id) + LOG.info('Failing backend to %s', secondary_id) # basic check if self.replication_enabled: with self._client.open_connection() as api: @@ -1747,9 +1741,9 @@ class DellCommonDriver(driver.ManageableVD, raise exception.VolumeBackendAPIException(data=msg) # Life is good. Let the world know what we've done. - LOG.info(_LI('manage_existing_snapshot: snapshot %(exist)s on ' - 'volume %(volume)s has been renamed to %(id)s and is ' - 'now managed by Cinder.'), + LOG.info('manage_existing_snapshot: snapshot %(exist)s on ' + 'volume %(volume)s has been renamed to %(id)s and is ' + 'now managed by Cinder.', {'exist': screplay.get('description'), 'volume': volume_name, 'id': snapshot_id}) diff --git a/cinder/volume/drivers/dell/dell_storagecenter_fc.py b/cinder/volume/drivers/dell/dell_storagecenter_fc.py index d473a6e15d7..4574159b155 100644 --- a/cinder/volume/drivers/dell/dell_storagecenter_fc.py +++ b/cinder/volume/drivers/dell/dell_storagecenter_fc.py @@ -18,7 +18,7 @@ from oslo_log import log as logging from oslo_utils import excutils from cinder import exception -from cinder.i18n import _, _LE, _LW +from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.dell import dell_storagecenter_common @@ -147,11 +147,11 @@ class DellStorageCenterFCDriver(dell_storagecenter_common.DellCommonDriver, 'discard': True}} LOG.debug('Return FC data: %s', data) return data - LOG.error(_LE('Lun mapping returned null!')) + LOG.error('Lun mapping returned null!') except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to initialize connection.')) + LOG.error('Failed to initialize connection.') # We get here because our mapping is none so blow up. 
raise exception.VolumeBackendAPIException(_('Unable to map volume.')) @@ -187,8 +187,8 @@ class DellStorageCenterFCDriver(dell_storagecenter_common.DellCommonDriver, sclivevolume['secondaryVolume']['instanceId']) if secondaryvol: return api.find_wwns(secondaryvol, secondary) - LOG.warning(_LW('Unable to map live volume secondary volume' - ' %(vol)s to secondary server wwns: %(wwns)r'), + LOG.warning('Unable to map live volume secondary volume' + ' %(vol)s to secondary server wwns: %(wwns)r', {'vol': sclivevolume['secondaryVolume']['instanceName'], 'wwns': wwns}) return None, [], {} @@ -253,7 +253,7 @@ class DellStorageCenterFCDriver(dell_storagecenter_common.DellCommonDriver, except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to terminate connection')) + LOG.error('Failed to terminate connection') raise exception.VolumeBackendAPIException( _('Terminate connection unable to connect to backend.')) diff --git a/cinder/volume/drivers/dell/dell_storagecenter_iscsi.py b/cinder/volume/drivers/dell/dell_storagecenter_iscsi.py index 322a3488cc7..3fdbb4a006d 100644 --- a/cinder/volume/drivers/dell/dell_storagecenter_iscsi.py +++ b/cinder/volume/drivers/dell/dell_storagecenter_iscsi.py @@ -18,7 +18,7 @@ from oslo_log import log as logging from oslo_utils import excutils from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.dell import dell_storagecenter_common @@ -92,8 +92,8 @@ class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver, islivevol = self._is_live_vol(volume) initiator_name = connector.get('initiator') multipath = connector.get('multipath', False) - LOG.info(_LI('initialize_ connection: %(vol)s:%(pid)s:' - '%(intr)s. Multipath is %(mp)r'), + LOG.info('initialize_ connection: %(vol)s:%(pid)s:' + '%(intr)s. Multipath is %(mp)r', {'vol': volume_name, 'pid': provider_id, 'intr': initiator_name, @@ -166,7 +166,7 @@ class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver, # Re-raise any backend exception. except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to initialize connection')) + LOG.error('Failed to initialize connection') # If there is a data structure issue then detail the exception # and bail with a Backend Exception. 
except Exception as error: @@ -211,8 +211,8 @@ class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver, 'target_lun': None, 'target_luns': [], } - LOG.warning(_LW('Unable to map live volume secondary volume' - ' %(vol)s to secondary server intiator: %(init)r'), + LOG.warning('Unable to map live volume secondary volume' + ' %(vol)s to secondary server intiator: %(init)r', {'vol': sclivevolume['secondaryVolume']['instanceName'], 'init': initiatorname}) return data @@ -255,8 +255,8 @@ class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver, return except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to terminate connection ' - '%(initiator)s %(vol)s'), + LOG.error('Failed to terminate connection ' + '%(initiator)s %(vol)s', {'initiator': initiator_name, 'vol': volume_name}) raise exception.VolumeBackendAPIException( diff --git a/cinder/volume/drivers/dell_emc/ps.py b/cinder/volume/drivers/dell_emc/ps.py index a4bb9ccb0cb..fb9434e37c1 100644 --- a/cinder/volume/drivers/dell_emc/ps.py +++ b/cinder/volume/drivers/dell_emc/ps.py @@ -29,7 +29,7 @@ from oslo_utils import excutils from six.moves import range from cinder import exception -from cinder.i18n import _, _LE, _LW, _LI +from cinder.i18n import _ from cinder import interface from cinder import ssh_utils from cinder import utils @@ -199,7 +199,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver): if any(ln.startswith(('% Error', 'Error:')) for ln in out): desc = _("Error executing PS command") cmdout = '\n'.join(out) - LOG.error(_LE("%s"), cmdout) + LOG.error(cmdout) raise processutils.ProcessExecutionError( stdout=cmdout, cmd=command, description=desc) return out @@ -232,12 +232,12 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver): while attempts > 0: attempts -= 1 try: - LOG.info(_LI('PS-driver: executing "%s".'), command) + LOG.info('PS-driver: executing "%s".', command) return self._ssh_execute( ssh, command, timeout=self.configuration.ssh_conn_timeout) except Exception: - LOG.exception(_LE('Error running command.')) + LOG.exception('Error running command.') greenthread.sleep(random.randint(20, 500) / 100.0) msg = (_("SSH Command failed after '%(total_attempts)r' " "attempts : '%(command)s'") % @@ -247,7 +247,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver): except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Error running SSH command: "%s".'), command) + LOG.error('Error running SSH command: "%s".', command) def check_for_setup_error(self): super(PSSeriesISCSIDriver, self).check_for_setup_error() @@ -398,11 +398,11 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver): out_tup = line.rstrip().partition(' ') self._group_ip = out_tup[-1] - LOG.info(_LI('PS-driver: Setup is complete, group IP is "%s".'), + LOG.info('PS-driver: Setup is complete, group IP is "%s".', self._group_ip) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to setup the Dell EMC PS driver.')) + LOG.error('Failed to setup the Dell EMC PS driver.') def create_volume(self, volume): """Create a volume.""" @@ -419,7 +419,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver): return self._get_volume_data(out) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to create volume "%s".'), volume['name']) + LOG.error('Failed to create volume "%s".', volume['name']) def add_multihost_access(self, volume): """Add multihost-access to a volume. 
Needed for live migration.""" @@ -429,8 +429,8 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver): self._eql_execute(*cmd) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to add multihost-access ' - 'for volume "%s".'), + LOG.error('Failed to add multihost-access ' + 'for volume "%s".', volume['name']) def _set_volume_description(self, volume, description): @@ -441,8 +441,8 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver): self._eql_execute(*cmd) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to set description ' - 'for volume "%s".'), + LOG.error('Failed to set description ' + 'for volume "%s".', volume['name']) def delete_volume(self, volume): @@ -452,12 +452,11 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver): self._eql_execute('volume', 'select', volume['name'], 'offline') self._eql_execute('volume', 'delete', volume['name']) except exception.VolumeNotFound: - LOG.warning(_LW('Volume %s was not found while trying to delete ' - 'it.'), volume['name']) + LOG.warning('Volume %s was not found while trying to delete it.', + volume['name']) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to delete ' - 'volume "%s".'), volume['name']) + LOG.error('Failed to delete volume "%s".', volume['name']) def create_snapshot(self, snapshot): """Create snapshot of existing volume on appliance.""" @@ -472,7 +471,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver): snapshot['name']) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to create snapshot of volume "%s".'), + LOG.error('Failed to create snapshot of volume "%s".', snapshot['volume_name']) def create_volume_from_snapshot(self, volume, snapshot): @@ -495,7 +494,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver): return self._get_volume_data(out) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to create volume from snapshot "%s".'), + LOG.error('Failed to create volume from snapshot "%s".', snapshot['name']) def create_cloned_volume(self, volume, src_vref): @@ -513,7 +512,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver): return self._get_volume_data(out) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to create clone of volume "%s".'), + LOG.error('Failed to create clone of volume "%s".', volume['name']) def delete_snapshot(self, snapshot): @@ -526,8 +525,8 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver): LOG.debug('Snapshot %s could not be found.', snapshot['name']) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to delete snapshot %(snap)s of ' - 'volume %(vol)s.'), + LOG.error('Failed to delete snapshot %(snap)s of ' + 'volume %(vol)s.', {'snap': snapshot['name'], 'vol': snapshot['volume_name']}) @@ -548,8 +547,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver): } except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to initialize connection ' - 'to volume "%s".'), + LOG.error('Failed to initialize connection to volume "%s".', volume['name']) def terminate_connection(self, volume, connector, force=False, **kwargs): @@ -563,8 +561,7 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver): 'access', 'delete', connection_id) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to terminate connection ' - 'to volume "%s".'), + LOG.error('Failed to terminate connection to volume "%s".', volume['name']) def create_export(self, 
context, volume, connector): @@ -585,11 +582,11 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver): try: self._check_volume(volume) except exception.VolumeNotFound: - LOG.warning(_LW('Volume %s is not found!, it may have been ' - 'deleted.'), volume['name']) + LOG.warning('Volume %s is not found!, it may have been deleted.', + volume['name']) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to ensure export of volume "%s".'), + LOG.error('Failed to ensure export of volume "%s".', volume['name']) def remove_export(self, context, volume): @@ -606,15 +603,15 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver): try: self._eql_execute('volume', 'select', volume['name'], 'size', "%sG" % new_size) - LOG.info(_LI('Volume %(name)s resized from ' - '%(current_size)sGB to %(new_size)sGB.'), + LOG.info('Volume %(name)s resized from ' + '%(current_size)sGB to %(new_size)sGB.', {'name': volume['name'], 'current_size': volume['size'], 'new_size': new_size}) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to extend_volume %(name)s from ' - '%(current_size)sGB to %(new_size)sGB.'), + LOG.error('Failed to extend_volume %(name)s from ' + '%(current_size)sGB to %(new_size)sGB.', {'name': volume['name'], 'current_size': volume['size'], 'new_size': new_size}) @@ -643,14 +640,14 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver): self.add_multihost_access(volume) data = self._get_volume_info(volume['name']) updates = self._get_model_update(data['iSCSI_Name']) - LOG.info(_LI("Backend volume %(back_vol)s renamed to " - "%(vol)s and is now managed by cinder."), + LOG.info("Backend volume %(back_vol)s renamed to " + "%(vol)s and is now managed by cinder.", {'back_vol': existing_volume_name, 'vol': volume['name']}) return updates except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to manage volume "%s".'), volume['name']) + LOG.error('Failed to manage volume "%s".', volume['name']) def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. @@ -674,13 +671,13 @@ class PSSeriesISCSIDriver(san.SanISCSIDriver): """ try: self._set_volume_description(volume, '"OpenStack UnManaged"') - LOG.info(_LI("Virtual volume %(disp)s '%(vol)s' is no " - "longer managed."), + LOG.info("Virtual volume %(disp)s '%(vol)s' is no " + "longer managed.", {'disp': volume['display_name'], 'vol': volume['name']}) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to unmanage volume "%s".'), + LOG.error('Failed to unmanage volume "%s".', volume['name']) def local_path(self, volume): diff --git a/cinder/volume/drivers/dell_emc/scaleio/driver.py b/cinder/volume/drivers/dell_emc/scaleio/driver.py index a68b32d0a06..9dd5a36f6bd 100644 --- a/cinder/volume/drivers/dell_emc/scaleio/driver.py +++ b/cinder/volume/drivers/dell_emc/scaleio/driver.py @@ -31,7 +31,7 @@ from six.moves import urllib from cinder import context from cinder import exception -from cinder.i18n import _, _LI, _LW, _LE +from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder import utils @@ -135,10 +135,8 @@ class ScaleIODriver(driver.VolumeDriver): if self.verify_server_certificate: self.server_certificate_path = ( self.configuration.sio_server_certificate_path) - LOG.info(_LI( - "REST server IP: %(ip)s, port: %(port)s, username: %(" - "user)s. 
" - "Verify server's certificate: %(verify_cert)s."), + LOG.info("REST server IP: %(ip)s, port: %(port)s, username: %(" + "user)s. Verify server's certificate: %(verify_cert)s.", {'ip': self.server_ip, 'port': self.server_port, 'user': self.server_username, @@ -153,29 +151,25 @@ class ScaleIODriver(driver.VolumeDriver): self.storage_pool_name = self.configuration.sio_storage_pool_name self.storage_pool_id = self.configuration.sio_storage_pool_id if self.storage_pool_name is None and self.storage_pool_id is None: - LOG.warning(_LW("No storage pool name or id was found.")) + LOG.warning("No storage pool name or id was found.") else: - LOG.info(_LI( - "Storage pools names: %(pools)s, " - "storage pool name: %(pool)s, pool id: %(pool_id)s."), + LOG.info("Storage pools names: %(pools)s, " + "storage pool name: %(pool)s, pool id: %(pool_id)s.", {'pools': self.storage_pools, 'pool': self.storage_pool_name, 'pool_id': self.storage_pool_id}) self.protection_domain_name = ( self.configuration.sio_protection_domain_name) - LOG.info(_LI( - "Protection domain name: %(domain_name)s."), + LOG.info("Protection domain name: %(domain_name)s.", {'domain_name': self.protection_domain_name}) self.protection_domain_id = self.configuration.sio_protection_domain_id - LOG.info(_LI( - "Protection domain id: %(domain_id)s."), + LOG.info("Protection domain id: %(domain_id)s.", {'domain_id': self.protection_domain_id}) self.provisioning_type = ( 'thin' if self.configuration.san_thin_provision else 'thick') - LOG.info(_LI( - "Default provisioning type: %(provisioning_type)s."), + LOG.info("Default provisioning type: %(provisioning_type)s.", {'provisioning_type': self.provisioning_type}) self.configuration.max_over_subscription_ratio = ( self.configuration.sio_max_over_subscription_ratio) @@ -199,8 +193,8 @@ class ScaleIODriver(driver.VolumeDriver): def check_for_setup_error(self): if (not self.protection_domain_name and not self.protection_domain_id): - LOG.warning(_LW("No protection domain name or id " - "was specified in configuration.")) + LOG.warning("No protection domain name or id " + "was specified in configuration.") if self.protection_domain_name and self.protection_domain_id: msg = _("Cannot specify both protection domain name " @@ -220,8 +214,8 @@ class ScaleIODriver(driver.VolumeDriver): raise exception.InvalidInput(reason=msg) if not self.verify_server_certificate: - LOG.warning(_LW("Verify certificate is not set, using default of " - "False.")) + LOG.warning("Verify certificate is not set, using default of " + "False.") if self.verify_server_certificate and not self.server_certificate_path: msg = _("Path to REST server's certificate must be specified.") @@ -273,10 +267,10 @@ class ScaleIODriver(driver.VolumeDriver): new_provisioning_type = storage_type.get(PROVISIONING_KEY) old_provisioning_type = storage_type.get(OLD_PROVISIONING_KEY) if new_provisioning_type is None and old_provisioning_type is not None: - LOG.info(_LI("Using sio:provisioning_type for defining " - "thin or thick volume will be deprecated in the " - "Ocata release of OpenStack. Please use " - "provisioning:type configuration option.")) + LOG.info("Using sio:provisioning_type for defining " + "thin or thick volume will be deprecated in the " + "Ocata release of OpenStack. 
Please use " + "provisioning:type configuration option.") provisioning_type = old_provisioning_type else: provisioning_type = new_provisioning_type @@ -298,11 +292,11 @@ class ScaleIODriver(driver.VolumeDriver): if extraspecs_key is not None else None) if extraspecs_limit is not None: if qos_limit is not None: - LOG.warning(_LW("QoS specs are overriding extra_specs.")) + LOG.warning("QoS specs are overriding extra_specs.") else: - LOG.info(_LI("Using extra_specs for defining QoS specs " - "will be deprecated in the N release " - "of OpenStack. Please use QoS specs.")) + LOG.info("Using extra_specs for defining QoS specs " + "will be deprecated in the N release " + "of OpenStack. Please use QoS specs.") return qos_limit if qos_limit is not None else extraspecs_limit @staticmethod @@ -341,11 +335,10 @@ class ScaleIODriver(driver.VolumeDriver): self._find_protection_domain_name_from_storage_type(storage_type)) provisioning_type = self._find_provisioning_type(storage_type) - LOG.info(_LI( - "Volume type: %(volume_type)s, " + LOG.info("Volume type: %(volume_type)s, " "storage pool name: %(pool_name)s, " "storage pool id: %(pool_id)s, protection domain id: " - "%(domain_id)s, protection domain name: %(domain_name)s."), + "%(domain_id)s, protection domain name: %(domain_name)s.", {'volume_type': storage_type, 'pool_name': storage_pool_name, 'pool_id': storage_pool_id, @@ -382,7 +375,7 @@ class ScaleIODriver(driver.VolumeDriver): request = ("https://%(server_ip)s:%(server_port)s" "/api/types/Domain/instances/getByName::" "%(encoded_domain_name)s") % req_vars - LOG.info(_LI("ScaleIO get domain id by name request: %s."), + LOG.info("ScaleIO get domain id by name request: %s.", request) r = requests.get( request, @@ -405,7 +398,7 @@ class ScaleIODriver(driver.VolumeDriver): LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) - LOG.info(_LI("Domain id is %s."), domain_id) + LOG.info("Domain id is %s.", domain_id) pool_name = self.storage_pool_name pool_id = self.storage_pool_id if pool_name: @@ -417,7 +410,7 @@ class ScaleIODriver(driver.VolumeDriver): request = ("https://%(server_ip)s:%(server_port)s" "/api/types/Pool/instances/getByName::" "%(domain_id)s,%(encoded_domain_name)s") % req_vars - LOG.info(_LI("ScaleIO get pool id by name request: %s."), request) + LOG.info("ScaleIO get pool id by name request: %s.", request) r = requests.get( request, auth=( @@ -440,7 +433,7 @@ class ScaleIODriver(driver.VolumeDriver): LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) - LOG.info(_LI("Pool id is %s."), pool_id) + LOG.info("Pool id is %s.", pool_id) if provisioning_type == 'thin': provisioning = "ThinProvisioned" # Default volume type is thick. 
@@ -455,7 +448,7 @@ class ScaleIODriver(driver.VolumeDriver): 'volumeType': provisioning, 'storagePoolId': pool_id} - LOG.info(_LI("Params for add volume request: %s."), params) + LOG.info("Params for add volume request: %s.", params) r = requests.post( "https://" + self.server_ip + @@ -469,14 +462,14 @@ class ScaleIODriver(driver.VolumeDriver): self.server_token), verify=verify_cert) response = r.json() - LOG.info(_LI("Add volume response: %s"), response) + LOG.info("Add volume response: %s", response) if r.status_code != OK_STATUS_CODE and "errorCode" in response: msg = (_("Error creating volume: %s.") % response['message']) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) - LOG.info(_LI("Created volume %(volname)s, volume id %(volid)s."), + LOG.info("Created volume %(volname)s, volume id %(volid)s.", {'volname': volname, 'volid': volume.id}) real_size = int(self._round_to_num_gran(volume.size)) @@ -501,7 +494,7 @@ class ScaleIODriver(driver.VolumeDriver): return self._snapshot_volume(volume_id, snapname) def _snapshot_volume(self, vol_id, snapname): - LOG.info(_LI("Snapshot volume %(vol)s into snapshot %(id)s.") % + LOG.info("Snapshot volume %(vol)s into snapshot %(id)s.", {'vol': vol_id, 'id': snapname}) params = { 'snapshotDefs': [{"volumeId": vol_id, "snapshotName": snapname}]} @@ -510,7 +503,7 @@ class ScaleIODriver(driver.VolumeDriver): request = ("https://%(server_ip)s:%(server_port)s" "/api/instances/System/action/snapshotVolumes") % req_vars r, response = self._execute_scaleio_post_request(params, request) - LOG.info(_LI("Snapshot volume response: %s."), response) + LOG.info("Snapshot volume response: %s.", response) if r.status_code != OK_STATUS_CODE and "errorCode" in response: msg = (_("Failed creating snapshot for volume %(volname)s: " "%(response)s.") % @@ -537,8 +530,8 @@ class ScaleIODriver(driver.VolumeDriver): def _check_response(self, response, request, is_get_request=True, params=None): if response.status_code == 401 or response.status_code == 403: - LOG.info(_LI("Token is invalid, going to re-login and get " - "a new one.")) + LOG.info("Token is invalid, going to re-login and get " + "a new one.") login_request = ( "https://" + self.server_ip + ":" + self.server_port + "/api/login") @@ -552,8 +545,7 @@ class ScaleIODriver(driver.VolumeDriver): token = r.json() self.server_token = token # Repeat request with valid token. 
- LOG.info(_LI( - "Going to perform request again %s with valid token."), + LOG.info("Going to perform request again %s with valid token.", request) if is_get_request: res = requests.get(request, @@ -579,9 +571,8 @@ class ScaleIODriver(driver.VolumeDriver): # exposed by the system volume_id = snapshot.provider_id snapname = self._id_to_base64(volume.id) - LOG.info(_LI( - "ScaleIO create volume from snapshot: snapshot %(snapname)s " - "to volume %(volname)s."), + LOG.info("ScaleIO create volume from snapshot: snapshot %(snapname)s " + "to volume %(volname)s.", {'volname': volume_id, 'snapname': snapname}) @@ -608,8 +599,8 @@ class ScaleIODriver(driver.VolumeDriver): def _extend_volume(self, volume_id, old_size, new_size): vol_id = volume_id - LOG.info(_LI( - "ScaleIO extend volume: volume %(volname)s to size %(new_size)s."), + LOG.info( + "ScaleIO extend volume: volume %(volname)s to size %(new_size)s.", {'volname': vol_id, 'new_size': new_size}) @@ -619,7 +610,7 @@ class ScaleIODriver(driver.VolumeDriver): request = ("https://%(server_ip)s:%(server_port)s" "/api/instances/Volume::%(vol_id)s" "/action/setVolumeSize") % req_vars - LOG.info(_LI("Change volume capacity request: %s."), request) + LOG.info("Change volume capacity request: %s.", request) # Round up the volume size so that it is a granularity of 8 GBs # because ScaleIO only supports volumes with a granularity of 8 GBs. @@ -630,8 +621,8 @@ class ScaleIODriver(driver.VolumeDriver): round_volume_capacity = self.configuration.sio_round_volume_capacity if not round_volume_capacity and not new_size % 8 == 0: - LOG.warning(_LW("ScaleIO only supports volumes with a granularity " - "of 8 GBs. The new volume size is: %d."), + LOG.warning("ScaleIO only supports volumes with a granularity " + "of 8 GBs. The new volume size is: %d.", volume_new_size) params = {'sizeInGB': six.text_type(volume_new_size)} @@ -658,9 +649,8 @@ class ScaleIODriver(driver.VolumeDriver): """Creates a cloned volume.""" volume_id = src_vref['provider_id'] snapname = self._id_to_base64(volume.id) - LOG.info(_LI( - "ScaleIO create cloned volume: source volume %(src)s to " - "target volume %(tgt)s."), + LOG.info("ScaleIO create cloned volume: source volume %(src)s to " + "target volume %(tgt)s.", {'src': volume_id, 'tgt': snapname}) @@ -691,9 +681,8 @@ class ScaleIODriver(driver.VolumeDriver): request = ("https://%(server_ip)s:%(server_port)s" "/api/instances/Volume::%(vol_id)s" "/action/removeMappedSdc") % req_vars - LOG.info(_LI( - "Trying to unmap volume from all sdcs" - " before deletion: %s."), + LOG.info("Trying to unmap volume from all sdcs" + " before deletion: %s.", request) r = requests.post( request, @@ -725,14 +714,12 @@ class ScaleIODriver(driver.VolumeDriver): response = r.json() error_code = response['errorCode'] if error_code == VOLUME_NOT_FOUND_ERROR: - LOG.warning(_LW( - "Ignoring error in delete volume %s:" - " Volume not found."), vol_id) + LOG.warning("Ignoring error in delete volume %s:" + " Volume not found.", vol_id) elif vol_id is None: - LOG.warning(_LW( - "Volume does not have provider_id thus does not " + LOG.warning("Volume does not have provider_id thus does not " "map to a ScaleIO volume. 
" - "Allowing deletion to proceed.")) + "Allowing deletion to proceed.") else: msg = (_("Error deleting volume %(vol)s: %(err)s.") % {'vol': vol_id, @@ -743,7 +730,7 @@ class ScaleIODriver(driver.VolumeDriver): def delete_snapshot(self, snapshot): """Deletes a ScaleIO snapshot.""" snap_id = snapshot.provider_id - LOG.info(_LI("ScaleIO delete snapshot.")) + LOG.info("ScaleIO delete snapshot.") return self._delete_volume(snap_id) def initialize_connection(self, volume, connector, **kwargs): @@ -762,13 +749,13 @@ class ScaleIODriver(driver.VolumeDriver): qos_specs = self._get_volumetype_qos(volume) storage_type = extra_specs.copy() storage_type.update(qos_specs) - LOG.info(_LI("Volume type is %s."), storage_type) + LOG.info("Volume type is %s.", storage_type) round_volume_size = self._round_to_num_gran(volume.size) iops_limit = self._get_iops_limit(round_volume_size, storage_type) bandwidth_limit = self._get_bandwidth_limit(round_volume_size, storage_type) - LOG.info(_LI("iops limit is %s"), iops_limit) - LOG.info(_LI("bandwidth limit is %s"), bandwidth_limit) + LOG.info("iops limit is %s", iops_limit) + LOG.info("bandwidth limit is %s", bandwidth_limit) connection_properties['iopsLimit'] = iops_limit connection_properties['bandwidthLimit'] = bandwidth_limit return {'driver_volume_type': 'scaleio', @@ -782,10 +769,10 @@ class ScaleIODriver(driver.VolumeDriver): max_bandwidth = (self._round_to_num_gran(int(max_bandwidth), units.Ki)) max_bandwidth = six.text_type(max_bandwidth) - LOG.info(_LI("max bandwidth is: %s"), max_bandwidth) + LOG.info("max bandwidth is: %s", max_bandwidth) bw_per_gb = self._find_limit(storage_type, QOS_BANDWIDTH_PER_GB, None) - LOG.info(_LI("bandwidth per gb is: %s"), bw_per_gb) + LOG.info("bandwidth per gb is: %s", bw_per_gb) if bw_per_gb is None: return max_bandwidth # Since ScaleIO volumes size is in 8GB granularity @@ -805,9 +792,9 @@ class ScaleIODriver(driver.VolumeDriver): def _get_iops_limit(self, size, storage_type): max_iops = self._find_limit(storage_type, QOS_IOPS_LIMIT_KEY, IOPS_LIMIT_KEY) - LOG.info(_LI("max iops is: %s"), max_iops) + LOG.info("max iops is: %s", max_iops) iops_per_gb = self._find_limit(storage_type, QOS_IOPS_PER_GB, None) - LOG.info(_LI("iops per gb is: %s"), iops_per_gb) + LOG.info("iops per gb is: %s", iops_per_gb) try: if iops_per_gb is None: if max_iops is not None: @@ -862,9 +849,9 @@ class ScaleIODriver(driver.VolumeDriver): request = ("https://%(server_ip)s:%(server_port)s" "/api/types/Domain/instances/getByName::" "%(encoded_domain_name)s") % req_vars - LOG.info(_LI("ScaleIO get domain id by name request: %s."), + LOG.info("ScaleIO get domain id by name request: %s.", request) - LOG.info(_LI("username: %(username)s, verify_cert: %(verify)s."), + LOG.info("username: %(username)s, verify_cert: %(verify)s.", {'username': self.server_username, 'verify': verify_cert}) r = requests.get( @@ -874,7 +861,7 @@ class ScaleIODriver(driver.VolumeDriver): self.server_token), verify=verify_cert) r = self._check_response(r, request) - LOG.info(_LI("Get domain by name response: %s"), r.text) + LOG.info("Get domain by name response: %s", r.text) domain_id = r.json() if not domain_id: msg = (_("Domain with name %s wasn't found.") @@ -888,7 +875,7 @@ class ScaleIODriver(driver.VolumeDriver): 'err': domain_id['message']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) - LOG.info(_LI("Domain id is %s."), domain_id) + LOG.info("Domain id is %s.", domain_id) # Get pool id from name. 
encoded_pool_name = urllib.parse.quote(pool_name, '') @@ -899,7 +886,7 @@ class ScaleIODriver(driver.VolumeDriver): request = ("https://%(server_ip)s:%(server_port)s" "/api/types/Pool/instances/getByName::" "%(domain_id)s,%(encoded_pool_name)s") % req_vars - LOG.info(_LI("ScaleIO get pool id by name request: %s."), request) + LOG.info("ScaleIO get pool id by name request: %s.", request) r = requests.get( request, auth=( @@ -921,7 +908,7 @@ class ScaleIODriver(driver.VolumeDriver): 'err': pool_id['message']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) - LOG.info(_LI("Pool id is %s."), pool_id) + LOG.info("Pool id is %s.", pool_id) req_vars = {'server_ip': self.server_ip, 'server_port': self.server_port} request = ("https://%(server_ip)s:%(server_port)s" @@ -941,7 +928,7 @@ class ScaleIODriver(driver.VolumeDriver): self.server_token), verify=verify_cert) response = r.json() - LOG.info(_LI("Query capacity stats response: %s."), response) + LOG.info("Query capacity stats response: %s.", response) for res in response.values(): # Divide by two because ScaleIO creates a copy for each volume total_capacity_kb = ( @@ -956,10 +943,9 @@ class ScaleIODriver(driver.VolumeDriver): provisioned_capacity = ( ((res['thickCapacityInUseInKb'] + res['thinCapacityAllocatedInKm']) / 2) / units.Mi) - LOG.info(_LI( - "free capacity of pool %(pool)s is: %(free)s, " + LOG.info("Free capacity of pool %(pool)s is: %(free)s, " "total capacity: %(total)s, " - "provisioned capacity: %(prov)s"), + "provisioned capacity: %(prov)s", {'pool': pool_name, 'free': free_capacity_gb, 'total': total_capacity_gb, @@ -983,15 +969,14 @@ class ScaleIODriver(driver.VolumeDriver): stats['total_capacity_gb'] = total_capacity stats['free_capacity_gb'] = free_capacity - LOG.info(_LI( - "Free capacity for backend is: %(free)s, total capacity: " - "%(total)s."), + LOG.info("Free capacity for backend is: %(free)s, total capacity: " + "%(total)s.", {'free': free_capacity, 'total': total_capacity}) stats['pools'] = pools - LOG.info(_LI("Backend name is %s."), stats["volume_backend_name"]) + LOG.info("Backend name is %s.", stats["volume_backend_name"]) self._stats = stats @@ -1046,7 +1031,7 @@ class ScaleIODriver(driver.VolumeDriver): def _sio_detach_volume(self, volume): """Call the connector.disconnect() """ - LOG.info(_LI("Calling os-brick to detach ScaleIO volume.")) + LOG.info("Calling os-brick to detach ScaleIO volume.") connection_properties = dict(self.connection_properties) connection_properties['scaleIO_volname'] = self._id_to_base64( volume.id) @@ -1055,9 +1040,8 @@ class ScaleIODriver(driver.VolumeDriver): def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" - LOG.info(_LI( - "ScaleIO copy_image_to_volume volume: %(vol)s image service: " - "%(service)s image id: %(id)s."), + LOG.info("ScaleIO copy_image_to_volume volume: %(vol)s image service: " + "%(service)s image id: %(id)s.", {'vol': volume, 'service': six.text_type(image_service), 'id': six.text_type(image_id)}) @@ -1075,9 +1059,8 @@ class ScaleIODriver(driver.VolumeDriver): def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" - LOG.info(_LI( - "ScaleIO copy_volume_to_image volume: %(vol)s image service: " - "%(service)s image meta: %(meta)s."), + LOG.info("ScaleIO copy_volume_to_image volume: %(vol)s image service: " + "%(service)s image meta: %(meta)s.", {'vol': volume, 'service': 
six.text_type(image_service), 'meta': six.text_type(image_meta)}) @@ -1109,8 +1092,8 @@ class ScaleIODriver(driver.VolumeDriver): current_name = new_volume['id'] new_name = volume['id'] vol_id = new_volume['provider_id'] - LOG.info(_LI("Renaming %(id)s from %(current_name)s to " - "%(new_name)s."), + LOG.info("Renaming %(id)s from %(current_name)s to " + "%(new_name)s.", {'id': vol_id, 'current_name': current_name, 'new_name': new_name}) @@ -1134,7 +1117,7 @@ class ScaleIODriver(driver.VolumeDriver): request = ("https://%(server_ip)s:%(server_port)s" "/api/instances/Volume::%(id)s/action/setVolumeName" % req_vars) - LOG.info(_LI("ScaleIO rename volume request: %s."), request) + LOG.info("ScaleIO rename volume request: %s.", request) params = {'newName': new_name} r = requests.post( @@ -1153,8 +1136,8 @@ class ScaleIODriver(driver.VolumeDriver): if ((error_code == VOLUME_NOT_FOUND_ERROR or error_code == OLD_VOLUME_NOT_FOUND_ERROR or error_code == ILLEGAL_SYNTAX)): - LOG.info(_LI("Ignoring renaming action because the volume " - "%(vol)s is not a ScaleIO volume."), + LOG.info("Ignoring renaming action because the volume " + "%(vol)s is not a ScaleIO volume.", {'vol': vol_id}) else: msg = (_("Error renaming volume %(vol)s: %(err)s.") % @@ -1162,14 +1145,14 @@ class ScaleIODriver(driver.VolumeDriver): LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: - LOG.info(_LI("ScaleIO volume %(vol)s was renamed to " - "%(new_name)s."), + LOG.info("ScaleIO volume %(vol)s was renamed to " + "%(new_name)s.", {'vol': vol_id, 'new_name': new_name}) def _query_scaleio_volume(self, volume, existing_ref): request = self._create_scaleio_get_volume_request(volume, existing_ref) r, response = self._execute_scaleio_get_request(request) - LOG.info(_LI("Get Volume response: %(res)s"), + LOG.info("Get Volume response: %(res)s", {'res': response}) self._manage_existing_check_legal_response(r, existing_ref) return response @@ -1258,7 +1241,7 @@ class ScaleIODriver(driver.VolumeDriver): 'id': vol_id} request = ("https://%(server_ip)s:%(server_port)s" "/api/instances/Volume::%(id)s" % req_vars) - LOG.info(_LI("ScaleIO get volume by id request: %s."), request) + LOG.info("ScaleIO get volume by id request: %s.", request) return request @staticmethod @@ -1286,7 +1269,7 @@ class ScaleIODriver(driver.VolumeDriver): ScaleIO won't create CG until cg-snapshot creation, db will maintain the volumes and CG relationship. """ - LOG.info(_LI("Creating Consistency Group")) + LOG.info("Creating Consistency Group") model_update = {'status': 'available'} return model_update @@ -1295,7 +1278,7 @@ class ScaleIODriver(driver.VolumeDriver): ScaleIO will delete the volumes of the CG. """ - LOG.info(_LI("Deleting Consistency Group")) + LOG.info("Deleting Consistency Group") model_update = {'status': 'deleted'} error_statuses = ['error', 'error_deleting'] volumes_model_update = [] @@ -1311,8 +1294,8 @@ class ScaleIODriver(driver.VolumeDriver): volumes_model_update.append(update_item) if model_update['status'] not in error_statuses: model_update['status'] = 'error_deleting' - LOG.error(_LE("Failed to delete the volume %(vol)s of CG. " - "Exception: %(exception)s."), + LOG.error("Failed to delete the volume %(vol)s of CG. 
" + "Exception: %(exception)s.", {'vol': volume['name'], 'exception': err}) return model_update, volumes_model_update @@ -1323,7 +1306,7 @@ class ScaleIODriver(driver.VolumeDriver): 'snapshotName': self._id_to_base64(snapshot['id'])} snapshot_defs = list(map(get_scaleio_snapshot_params, snapshots)) r, response = self._snapshot_volume_group(snapshot_defs) - LOG.info(_LI("Snapshot volume response: %s."), response) + LOG.info("Snapshot volume response: %s.", response) if r.status_code != OK_STATUS_CODE and "errorCode" in response: msg = (_("Failed creating snapshot for group: " "%(response)s.") % @@ -1356,9 +1339,9 @@ class ScaleIODriver(driver.VolumeDriver): snapshot_model_update.append(update_item) if model_update['status'] not in error_statuses: model_update['status'] = 'error_deleting' - LOG.error(_LE("Failed to delete the snapshot %(snap)s " - "of cgsnapshot: %(cgsnapshot_id)s. " - "Exception: %(exception)s."), + LOG.error("Failed to delete the snapshot %(snap)s " + "of cgsnapshot: %(cgsnapshot_id)s. " + "Exception: %(exception)s.", {'snap': snapshot['name'], 'exception': err, 'cgsnapshot_id': cgsnapshot.id}) @@ -1381,7 +1364,7 @@ class ScaleIODriver(driver.VolumeDriver): source_vols, volumes) r, response = self._snapshot_volume_group(list(snapshot_defs)) - LOG.info(_LI("Snapshot volume response: %s."), response) + LOG.info("Snapshot volume response: %s.", response) if r.status_code != OK_STATUS_CODE and "errorCode" in response: msg = (_("Failed creating snapshot for group: " "%(response)s.") % @@ -1407,7 +1390,7 @@ class ScaleIODriver(driver.VolumeDriver): return None, None, None def _snapshot_volume_group(self, snapshot_defs): - LOG.info(_LI("ScaleIO snapshot group of volumes")) + LOG.info("ScaleIO snapshot group of volumes") params = {'snapshotDefs': snapshot_defs} req_vars = {'server_ip': self.server_ip, 'server_port': self.server_port} diff --git a/cinder/volume/drivers/dell_emc/unity/adapter.py b/cinder/volume/drivers/dell_emc/unity/adapter.py index e19b7f57df4..1b2dd29157c 100644 --- a/cinder/volume/drivers/dell_emc/unity/adapter.py +++ b/cinder/volume/drivers/dell_emc/unity/adapter.py @@ -21,8 +21,8 @@ from oslo_log import log as logging from oslo_utils import excutils from cinder import exception +from cinder.i18n import _ from cinder import utils as cinder_utils -from cinder.i18n import _, _LE, _LI from cinder.volume.drivers.dell_emc.unity import client from cinder.volume.drivers.dell_emc.unity import utils from cinder.volume import utils as vol_utils @@ -111,21 +111,21 @@ class CommonAdapter(object): matched, _ignored, unmatched_whitelist = utils.match_any(all_ports.id, whitelist) if not matched: - LOG.error(_LE('No matched ports filtered by all patterns: %s'), + LOG.error('No matched ports filtered by all patterns: %s', whitelist) raise exception.InvalidConfigurationValue( option='%s.unity_io_ports' % self.config.config_group, value=self.config.unity_io_ports) if unmatched_whitelist: - LOG.error(_LE('No matched ports filtered by below patterns: %s'), + LOG.error('No matched ports filtered by below patterns: %s', unmatched_whitelist) raise exception.InvalidConfigurationValue( option='%s.unity_io_ports' % self.config.config_group, value=self.config.unity_io_ports) - LOG.info(_LI('These ports %(matched)s will be used based on ' - 'the option unity_io_ports: %(config)s'), + LOG.info('These ports %(matched)s will be used based on ' + 'the option unity_io_ports: %(config)s', {'matched': matched, 'config': self.config.unity_io_ports}) return matched @@ -174,8 +174,8 @@ class 
CommonAdapter(object): qos_specs = utils.get_backend_qos_specs(volume) limit_policy = self.client.get_io_limit_policy(qos_specs) - LOG.info(_LI('Create Volume: %(volume)s Size: %(size)s ' - 'Pool: %(pool)s Qos: %(qos)s.'), + LOG.info('Create Volume: %(volume)s Size: %(size)s ' + 'Pool: %(pool)s Qos: %(qos)s.', {'volume': volume_name, 'size': volume_size, 'pool': pool.name, @@ -193,8 +193,8 @@ class CommonAdapter(object): def delete_volume(self, volume): lun_id = self.get_lun_id(volume) if lun_id is None: - LOG.info(_LI('Backend LUN not found, skipping the deletion. ' - 'Volume: %(volume_name)s.'), + LOG.info('Backend LUN not found, skipping the deletion. ' + 'Volume: %(volume_name)s.', {'volume_name': volume.name}) else: self.client.delete_lun(lun_id) @@ -457,8 +457,8 @@ class CommonAdapter(object): except Exception: with excutils.save_and_reraise_exception(): utils.ignore_exception(self.delete_volume, volume) - LOG.error(_LE('Failed to create cloned volume: %(vol_id)s, ' - 'from source unity snapshot: %(snap_name)s. '), + LOG.error('Failed to create cloned volume: %(vol_id)s, ' + 'from source unity snapshot: %(snap_name)s.', {'vol_id': volume.id, 'snap_name': snap.name}) return model_update diff --git a/cinder/volume/drivers/dell_emc/unity/client.py b/cinder/volume/drivers/dell_emc/unity/client.py index 7c35ea96083..e404bfd69d1 100644 --- a/cinder/volume/drivers/dell_emc/unity/client.py +++ b/cinder/volume/drivers/dell_emc/unity/client.py @@ -25,7 +25,7 @@ else: storops_ex = None from cinder import exception -from cinder.i18n import _, _LW +from cinder.i18n import _ from cinder.volume.drivers.dell_emc.unity import utils @@ -98,13 +98,13 @@ class UnityClient(object): lun = None if lun_id is None and name is None: LOG.warning( - _LW("Both lun_id and name are None to get LUN. Return None.")) + "Both lun_id and name are None to get LUN. Return None.") else: try: lun = self.system.get_lun(_id=lun_id, name=name) except storops_ex.UnityResourceNotFoundError: LOG.warning( - _LW("LUN id=%(id)s, name=%(name)s doesn't exist."), + "LUN id=%(id)s, name=%(name)s doesn't exist.", {'id': lun_id, 'name': name}) return lun @@ -159,16 +159,16 @@ class UnityClient(object): 'err': err}) except storops_ex.UnityDeleteAttachedSnapError as err: with excutils.save_and_reraise_exception(): - LOG.warning(_LW("Failed to delete snapshot %(snap_name)s " - "which is in use. Message: %(err)s"), + LOG.warning("Failed to delete snapshot %(snap_name)s " + "which is in use. Message: %(err)s", {'snap_name': snap.name, 'err': err}) def get_snap(self, name=None): try: return self.system.get_snap(name=name) except storops_ex.UnityResourceNotFoundError as err: - msg = _LW("Snapshot %(name)s doesn't exist. Message: %(err)s") - LOG.warning(msg, {'name': name, 'err': err}) + LOG.warning("Snapshot %(name)s doesn't exist. 
Message: %(err)s", + {'name': name, 'err': err}) return None def create_host(self, name, uids): diff --git a/cinder/volume/drivers/dell_emc/unity/utils.py b/cinder/volume/drivers/dell_emc/unity/utils.py index a9a1756610b..ba10ade2842 100644 --- a/cinder/volume/drivers/dell_emc/unity/utils.py +++ b/cinder/volume/drivers/dell_emc/unity/utils.py @@ -24,7 +24,7 @@ from oslo_utils import units import six from cinder import exception -from cinder.i18n import _, _LW +from cinder.i18n import _ from cinder.volume import utils as vol_utils from cinder.volume import volume_types from cinder.zonemanager import utils as zm_utils @@ -70,11 +70,11 @@ def extract_provider_location(provider_location, key): if len(fields) == 2 and fields[0] == key: return fields[1] else: - msg = _LW('"%(key)s" is not found in provider ' - 'location "%(location)s."') - LOG.warning(msg, {'key': key, 'location': provider_location}) + LOG.warning('"%(key)s" is not found in provider ' + 'location "%(location)s."', + {'key': key, 'location': provider_location}) else: - LOG.warning(_LW('Empty provider location received.')) + LOG.warning('Empty provider location received.') def byte_to_gib(byte): @@ -186,9 +186,9 @@ def ignore_exception(func, *args, **kwargs): try: func(*args, **kwargs) except Exception as ex: - LOG.warning(_LW('Error occurred but ignored. Function: %(func_name)s, ' - 'args: %(args)s, kwargs: %(kwargs)s, ' - 'exception: %(ex)s.'), + LOG.warning('Error occurred but ignored. Function: %(func_name)s, ' + 'args: %(args)s, kwargs: %(kwargs)s, ' + 'exception: %(ex)s.', {'func_name': func, 'args': args, 'kwargs': kwargs, 'ex': ex}) diff --git a/cinder/volume/drivers/dell_emc/vmax/common.py b/cinder/volume/drivers/dell_emc/vmax/common.py index 53f2c7954a4..7fa3eacfb77 100644 --- a/cinder/volume/drivers/dell_emc/vmax/common.py +++ b/cinder/volume/drivers/dell_emc/vmax/common.py @@ -25,11 +25,11 @@ import six import uuid from cinder import exception -from cinder import utils as cinder_utils -from cinder.i18n import _, _LE, _LI, _LW -from cinder.objects.consistencygroup import ConsistencyGroup +from cinder.i18n import _ +import cinder.objects.consistencygroup as cg_obj from cinder.objects import fields -from cinder.objects.group import Group +import cinder.objects.group as group_obj +from cinder import utils as cinder_utils from cinder.volume.drivers.dell_emc.vmax import fast from cinder.volume.drivers.dell_emc.vmax import https from cinder.volume.drivers.dell_emc.vmax import masking @@ -138,9 +138,8 @@ class VMAXCommon(object): active_backend_id=None): if not pywbemAvailable: - LOG.info(_LI( - "Module PyWBEM not installed. " - "Install PyWBEM using the python-pywbem package.")) + LOG.info("Module PyWBEM not installed. Install PyWBEM using the " + "python-pywbem package.") self.protocol = prtcl self.configuration = configuration @@ -221,9 +220,9 @@ class VMAXCommon(object): LOG.debug("The replication configuration is %(rep_config)s.", {'rep_config': self.rep_config}) elif self.rep_devices and len(self.rep_devices) > 1: - LOG.error(_LE("More than one replication target is configured. " - "EMC VMAX only suppports a single replication " - "target. Replication will not be enabled.")) + LOG.error("More than one replication target is configured. " + "EMC VMAX only suppports a single replication " + "target. Replication will not be enabled.") def _get_slo_workload_combinations(self, arrayInfoList): """Method to query the array for SLO and Workloads. 
@@ -356,9 +355,9 @@ class VMAXCommon(object): volumeName, extraSpecs) - LOG.info(_LI("Leaving create_volume: %(volumeName)s " - "Return code: %(rc)lu " - "volume dict: %(name)s."), + LOG.info("Leaving create_volume: %(volumeName)s " + "Return code: %(rc)lu " + "volume dict: %(name)s.", {'volumeName': volumeName, 'rc': rc, 'name': volumeDict}) @@ -449,12 +448,12 @@ class VMAXCommon(object): :param volume: volume Object """ - LOG.info(_LI("Deleting Volume: %(volume)s"), + LOG.info("Deleting Volume: %(volume)s", {'volume': volume['name']}) rc, volumeName = self._delete_volume(volume) - LOG.info(_LI("Leaving delete_volume: %(volumename)s Return code: " - "%(rc)lu."), + LOG.info("Leaving delete_volume: %(volumename)s Return code: " + "%(rc)lu.", {'volumename': volumeName, 'rc': rc}) @@ -476,7 +475,7 @@ class VMAXCommon(object): :param snapshot: snapshot object :param volume: volume Object to create snapshot from """ - LOG.info(_LI("Delete Snapshot: %(snapshotName)s."), + LOG.info("Delete Snapshot: %(snapshotName)s.", {'snapshotName': snapshot['name']}) self._delete_snapshot(snapshot, volume['host']) @@ -516,12 +515,12 @@ class VMAXCommon(object): extraSpecs = self._get_replication_extraSpecs( extraSpecs, self.rep_config) volumename = volume['name'] - LOG.info(_LI("Unmap volume: %(volume)s."), + LOG.info("Unmap volume: %(volume)s.", {'volume': volumename}) device_info = self.find_device_number(volume, connector['host']) if 'hostlunid' not in device_info: - LOG.info(_LI("Volume %s is not mapped. No volume to unmap."), + LOG.info("Volume %s is not mapped. No volume to unmap.", volumename) return @@ -584,7 +583,7 @@ class VMAXCommon(object): is_multipath = connector.get('multipath', False) volumeName = volume['name'] - LOG.info(_LI("Initialize connection: %(volume)s."), + LOG.info("Initialize connection: %(volume)s.", {'volume': volumeName}) self.conn = self._get_ecom_connection() deviceInfoDict = self._wrap_find_device_number( @@ -603,8 +602,8 @@ class VMAXCommon(object): # the state as is. deviceNumber = deviceInfoDict['hostlunid'] - LOG.info(_LI("Volume %(volume)s is already mapped. " - "The device number is %(deviceNumber)s."), + LOG.info("Volume %(volume)s is already mapped. " + "The device number is %(deviceNumber)s.", {'volume': volumeName, 'deviceNumber': deviceNumber}) # Special case, we still need to get the iscsi ip address. @@ -663,7 +662,7 @@ class VMAXCommon(object): if 'hostlunid' not in deviceInfoDict: # Did not successfully attach to host, # so a rollback for FAST is required. 
- LOG.error(_LE("Error Attaching volume %(vol)s."), + LOG.error("Error Attaching volume %(vol)s.", {'vol': volumeName}) if ((rollbackDict['fastPolicyName'] is not None) or (rollbackDict['isV3'] is not None)): @@ -754,7 +753,7 @@ class VMAXCommon(object): :params connector: the connector Object """ volumename = volume['name'] - LOG.info(_LI("Terminate connection: %(volume)s."), + LOG.info("Terminate connection: %(volume)s.", {'volume': volumename}) self._unmap_lun(volume, connector) @@ -1020,11 +1019,11 @@ class VMAXCommon(object): provisionedManagedSpaceGbs, array_reserve_percent, wlpEnabled) = ( self.provisionv3.get_srp_pool_stats(self.conn, arrayInfo)) - LOG.info(_LI( + LOG.info( "Capacity stats for SRP pool %(poolName)s on array " "%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, " "free_capacity_gb=%(free_capacity_gb)lu, " - "provisioned_capacity_gb=%(provisioned_capacity_gb)lu"), + "provisioned_capacity_gb=%(provisioned_capacity_gb)lu", {'poolName': arrayInfo['PoolName'], 'arrayName': arrayInfo['SerialNumber'], 'total_capacity_gb': totalManagedSpaceGbs, @@ -1055,7 +1054,7 @@ class VMAXCommon(object): volumeName = volume['name'] volumeStatus = volume['status'] - LOG.info(_LI("Migrating using retype Volume: %(volume)s."), + LOG.info("Migrating using retype Volume: %(volume)s.", {'volume': volumeName}) extraSpecs = self._initial_setup(volume) @@ -1063,17 +1062,17 @@ class VMAXCommon(object): volumeInstance = self._find_lun(volume) if volumeInstance is None: - LOG.error(_LE("Volume %(name)s not found on the array. " - "No volume to migrate using retype."), + LOG.error("Volume %(name)s not found on the array. " + "No volume to migrate using retype.", {'name': volumeName}) return False if extraSpecs[ISV3]: if self.utils.is_replication_enabled(extraSpecs): - LOG.error(_LE("Volume %(name)s is replicated - " - "Replicated volumes are not eligible for " - "storage assisted retype. Host assisted " - "retype is supported."), + LOG.error("Volume %(name)s is replicated - " + "Replicated volumes are not eligible for " + "storage assisted retype. Host assisted " + "retype is supported.", {'name': volumeName}) return False @@ -1097,12 +1096,12 @@ class VMAXCommon(object): :returns: boolean -- Always returns True :returns: dict -- Empty dict {} """ - LOG.warning(_LW("The VMAX plugin only supports Retype. " - "If a pool based migration is necessary " - "this will happen on a Retype " - "From the command line: " - "cinder --os-volume-api-version 2 retype " - " --migration-policy on-demand")) + LOG.warning("The VMAX plugin only supports Retype. " + "If a pool based migration is necessary " + "this will happen on a Retype " + "From the command line: " + "cinder --os-volume-api-version 2 retype " + " --migration-policy on-demand") return True, {} def _migrate_volume( @@ -1134,11 +1133,11 @@ class VMAXCommon(object): if moved is False and sourceFastPolicyName is not None: # Return the volume to the default source fast policy storage # group because the migrate was unsuccessful. - LOG.warning(_LW( + LOG.warning( "Failed to migrate: %(volumeName)s from " "default source storage group " "for FAST policy: %(sourceFastPolicyName)s. " - "Attempting cleanup... "), + "Attempting cleanup... 
", {'volumeName': volumeName, 'sourceFastPolicyName': sourceFastPolicyName}) if sourcePoolInstanceName == self.utils.get_assoc_pool_from_volume( @@ -1162,9 +1161,9 @@ class VMAXCommon(object): if not self._migrate_volume_fast_target( volumeInstance, storageSystemName, targetFastPolicyName, volumeName, extraSpecs): - LOG.warning(_LW( + LOG.warning( "Attempting a rollback of: %(volumeName)s to " - "original pool %(sourcePoolInstanceName)s."), + "original pool %(sourcePoolInstanceName)s.", {'volumeName': volumeName, 'sourcePoolInstanceName': sourcePoolInstanceName}) self._migrate_rollback( @@ -1194,7 +1193,7 @@ class VMAXCommon(object): :param extraSpecs: extra specifications """ - LOG.warning(_LW("_migrate_rollback on : %(volumeName)s."), + LOG.warning("_migrate_rollback on : %(volumeName)s.", {'volumeName': volumeName}) storageRelocationService = self.utils.find_storage_relocation_service( @@ -1205,10 +1204,10 @@ class VMAXCommon(object): conn, storageRelocationService, volumeInstance.path, sourcePoolInstanceName, extraSpecs) except Exception: - LOG.error(_LE( + LOG.error( "Failed to return volume %(volumeName)s to " "original storage pool. Please contact your system " - "administrator to return it to the correct location."), + "administrator to return it to the correct location.", {'volumeName': volumeName}) if sourceFastPolicyName is not None: @@ -1230,7 +1229,7 @@ class VMAXCommon(object): :returns: boolean -- True/False """ - LOG.warning(_LW("_migrate_cleanup on : %(volumeName)s."), + LOG.warning("_migrate_cleanup on : %(volumeName)s.", {'volumeName': volumeName}) return_to_default = True controllerConfigurationService = ( @@ -1279,9 +1278,9 @@ class VMAXCommon(object): :returns: boolean -- True/False """ falseRet = False - LOG.info(_LI( + LOG.info( "Adding volume: %(volumeName)s to default storage group " - "for FAST policy: %(fastPolicyName)s."), + "for FAST policy: %(fastPolicyName)s.", {'volumeName': volumeName, 'fastPolicyName': targetFastPolicyName}) @@ -1294,9 +1293,9 @@ class VMAXCommon(object): self.conn, controllerConfigurationService, targetFastPolicyName, volumeInstance, extraSpecs)) if defaultStorageGroupInstanceName is None: - LOG.error(_LE( + LOG.error( "Unable to create or get default storage group for FAST policy" - ": %(fastPolicyName)s."), + ": %(fastPolicyName)s.", {'fastPolicyName': targetFastPolicyName}) return falseRet @@ -1306,9 +1305,9 @@ class VMAXCommon(object): self.conn, controllerConfigurationService, volumeInstance, volumeName, targetFastPolicyName, extraSpecs)) if defaultStorageGroupInstanceName is None: - LOG.error(_LE( + LOG.error( "Failed to verify that volume was added to storage group for " - "FAST policy: %(fastPolicyName)s."), + "FAST policy: %(fastPolicyName)s.", {'fastPolicyName': targetFastPolicyName}) return falseRet @@ -1348,9 +1347,9 @@ class VMAXCommon(object): targetPoolInstanceName = self.utils.get_pool_by_name( self.conn, targetPoolName, storageSystemName) if targetPoolInstanceName is None: - LOG.error(_LE( + LOG.error( "Error finding target pool instance name for pool: " - "%(targetPoolName)s."), + "%(targetPoolName)s.", {'targetPoolName': targetPoolName}) return falseRet try: @@ -1360,9 +1359,9 @@ class VMAXCommon(object): except Exception: # Rollback by deleting the volume if adding the volume to the # default storage group were to fail. - LOG.exception(_LE( + LOG.exception( "Error migrating volume: %(volumename)s. 
" - "to target pool %(targetPoolName)s."), + "to target pool %(targetPoolName)s.", {'volumename': volumeName, 'targetPoolName': targetPoolName}) return falseRet @@ -1375,9 +1374,9 @@ class VMAXCommon(object): if (foundPoolInstanceName is None or (foundPoolInstanceName['InstanceID'] != targetPoolInstanceName['InstanceID'])): - LOG.error(_LE( + LOG.error( "Volume : %(volumeName)s. was not successfully migrated to " - "target pool %(targetPoolName)s."), + "target pool %(targetPoolName)s.", {'volumeName': volumeName, 'targetPoolName': targetPoolName}) return falseRet @@ -1427,10 +1426,10 @@ class VMAXCommon(object): raise exception.VolumeBackendAPIException(data=exceptionMessage) if defaultStorageGroupInstanceName is None: - LOG.warning(_LW( + LOG.warning( "The volume: %(volumename)s " "was not first part of the default storage " - "group for FAST policy %(fastPolicyName)s."), + "group for FAST policy %(fastPolicyName)s.", {'volumename': volumeName, 'fastPolicyName': sourceFastPolicyName}) @@ -1455,10 +1454,10 @@ class VMAXCommon(object): conn, controllerConfigurationService, volumeInstance, volumeName, targetFastPolicyName, extraSpecs)) if assocDefaultStorageGroupName is None: - LOG.error(_LE( + LOG.error( "Failed to add %(volumeName)s " "to default storage group for fast policy " - "%(fastPolicyName)s."), + "%(fastPolicyName)s.", {'volumeName': volumeName, 'fastPolicyName': targetFastPolicyName}) @@ -1483,7 +1482,7 @@ class VMAXCommon(object): """ falseRet = (False, None, None) if 'location_info' not in host['capabilities']: - LOG.error(_LE('Error getting array, pool, SLO and workload.')) + LOG.error('Error getting array, pool, SLO and workload.') return falseRet info = host['capabilities']['location_info'] @@ -1496,24 +1495,24 @@ class VMAXCommon(object): targetSlo = infoDetail[2] targetWorkload = infoDetail[3] except KeyError: - LOG.error(_LE("Error parsing array, pool, SLO and workload.")) + LOG.error("Error parsing array, pool, SLO and workload.") if targetArraySerialNumber not in sourceArraySerialNumber: - LOG.error(_LE( + LOG.error( "The source array : %(sourceArraySerialNumber)s does not " "match the target array: %(targetArraySerialNumber)s " - "skipping storage-assisted migration."), + "skipping storage-assisted migration.", {'sourceArraySerialNumber': sourceArraySerialNumber, 'targetArraySerialNumber': targetArraySerialNumber}) return falseRet if targetPoolName not in sourcePoolName: - LOG.error(_LE( + LOG.error( "Only SLO/workload migration within the same SRP Pool " "is supported in this version " "The source pool : %(sourcePoolName)s does not " "match the target array: %(targetPoolName)s. " - "Skipping storage-assisted migration."), + "Skipping storage-assisted migration.", {'sourcePoolName': sourcePoolName, 'targetPoolName': targetPoolName}) return falseRet @@ -1522,9 +1521,9 @@ class VMAXCommon(object): self.utils.get_storage_group_from_volume( self.conn, volumeInstanceName, sgName)) if foundStorageGroupInstanceName is None: - LOG.warning(_LW( + LOG.warning( "Volume: %(volumeName)s is not currently " - "belonging to any storage group."), + "belonging to any storage group.", {'volumeName': volumeName}) else: @@ -1539,10 +1538,10 @@ class VMAXCommon(object): # Check if migration is from compression to non compression # of vice versa if not doChangeCompression: - LOG.error(_LE( + LOG.error( "No action required. 
Volume: %(volumeName)s is " "already part of slo/workload combination: " - "%(targetCombination)s."), + "%(targetCombination)s.", {'volumeName': volumeName, 'targetCombination': targetCombination}) return falseRet @@ -1566,7 +1565,7 @@ class VMAXCommon(object): """ falseRet = (False, None, None) if 'location_info' not in host['capabilities']: - LOG.error(_LE("Error getting target pool name and array.")) + LOG.error("Error getting target pool name and array.") return falseRet info = host['capabilities']['location_info'] @@ -1578,14 +1577,14 @@ class VMAXCommon(object): targetPoolName = infoDetail[1] targetFastPolicy = infoDetail[2] except KeyError: - LOG.error(_LE( - "Error parsing target pool name, array, and fast policy.")) + LOG.error( + "Error parsing target pool name, array, and fast policy.") if targetArraySerialNumber not in sourceArraySerialNumber: - LOG.error(_LE( + LOG.error( "The source array : %(sourceArraySerialNumber)s does not " "match the target array: %(targetArraySerialNumber)s, " - "skipping storage-assisted migration."), + "skipping storage-assisted migration.", {'sourceArraySerialNumber': sourceArraySerialNumber, 'targetArraySerialNumber': targetArraySerialNumber}) return falseRet @@ -1597,19 +1596,19 @@ class VMAXCommon(object): assocPoolInstance = self.conn.GetInstance( assocPoolInstanceName) if assocPoolInstance['ElementName'] == targetPoolName: - LOG.error(_LE( + LOG.error( "No action required. Volume: %(volumeName)s is " - "already part of pool: %(pool)s."), + "already part of pool: %(pool)s.", {'volumeName': volumeName, 'pool': targetPoolName}) return falseRet - LOG.info(_LI("Volume status is: %s."), volumeStatus) + LOG.info("Volume status is: %s.", volumeStatus) if (host['capabilities']['storage_protocol'] != self.protocol and (volumeStatus != 'available' and volumeStatus != 'retyping')): - LOG.error(_LE( + LOG.error( "Only available volumes can be migrated between " - "different protocols.")) + "different protocols.") return falseRet return (True, targetPoolName, targetFastPolicy) @@ -1799,7 +1798,7 @@ class VMAXCommon(object): foundVolumeinstance['ElementName']): foundVolumeinstance = None except Exception as e: - LOG.info(_LI("Exception in retrieving volume: %(e)s."), + LOG.info("Exception in retrieving volume: %(e)s.", {'e': e}) foundVolumeinstance = None @@ -1944,9 +1943,9 @@ class VMAXCommon(object): if not data: if len(maskedvols) > 0: data = maskedvols[0] - LOG.warning(_LW( + LOG.warning( "Volume is masked but not to host %(host)s as is " - "expected. Assuming live migration."), + "expected. 
Assuming live migration.", {'host': hoststr}) LOG.debug("Device info: %(data)s.", {'data': data}) @@ -1982,15 +1981,15 @@ class VMAXCommon(object): self.utils.get_target_endpoints( self.conn, hardwareIdInstance)) if not targetEndpoints: - LOG.warning(_LW( + LOG.warning( "Unable to get target endpoints for hardwareId " - "%(instance)s."), + "%(instance)s.", {'instance': hardwareIdInstance}) continue except Exception: - LOG.warning(_LW( + LOG.warning( "Unable to get target endpoints for hardwareId " - "%(instance)s."), + "%(instance)s.", {'instance': hardwareIdInstance}, exc_info=True) continue @@ -2447,9 +2446,9 @@ class VMAXCommon(object): volumeInstance.path, appendVolumeInstanceName, compositeType, extraSpecs) else: - LOG.error(_LE( + LOG.error( "Unable to determine whether %(volumeName)s is " - "composite or not."), + "composite or not.", {'volumeName': volumeName}) raise @@ -2497,9 +2496,9 @@ class VMAXCommon(object): sourceName = sourceVolume['name'] cloneName = cloneVolume['name'] - LOG.info(_LI( + LOG.info( "Create a replica from Volume: Clone Volume: %(cloneName)s " - "Source Volume: %(sourceName)s."), + "Source Volume: %(sourceName)s.", {'cloneName': cloneName, 'sourceName': sourceName}) @@ -2555,8 +2554,8 @@ class VMAXCommon(object): self.conn, sourceInstance)) if cloneVolume['size'] != old_size_gbs: - LOG.info(_LI("Extending clone %(cloneName)s to " - "%(newSize)d GBs"), + LOG.info("Extending clone %(cloneName)s to " + "%(newSize)d GBs", {'cloneName': cloneName, 'newSize': cloneVolume['size']}) cloneInstance = self.utils.find_volume_instance( @@ -2638,9 +2637,9 @@ class VMAXCommon(object): volumeInstance = self._find_lun(volume) if volumeInstance is None: - LOG.error(_LE( + LOG.error( "Volume %(name)s not found on the array. " - "No volume to delete."), + "No volume to delete.", {'name': volumeName}) return errorRet @@ -2683,10 +2682,10 @@ class VMAXCommon(object): self.masking.get_associated_masking_groups_from_device( self.conn, volumeInstanceName)) if storageGroupInstanceNames: - LOG.warning(_LW( + LOG.warning( "Pre check for deletion. " "Volume: %(volumeName)s is part of a storage group. " - "Attempting removal from %(storageGroupInstanceNames)s."), + "Attempting removal from %(storageGroupInstanceNames)s.", {'volumeName': volumeName, 'storageGroupInstanceNames': storageGroupInstanceNames}) for storageGroupInstanceName in storageGroupInstanceNames: @@ -2829,8 +2828,8 @@ class VMAXCommon(object): # Delete the target device. 
rc, snapshotname = self._delete_volume(snapshot, True, host) - LOG.info(_LI("Leaving delete_snapshot: %(ssname)s Return code: " - "%(rc)lu."), + LOG.info("Leaving delete_snapshot: %(ssname)s Return code: " + "%(rc)lu.", {'ssname': snapshotname, 'rc': rc}) @@ -2842,7 +2841,7 @@ class VMAXCommon(object): :returns: dict -- modelUpdate = {'status': 'available'} :raises: VolumeBackendAPIException """ - LOG.info(_LI("Create Consistency Group: %(group)s."), + LOG.info("Create Consistency Group: %(group)s.", {'group': group['id']}) modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE} @@ -2876,7 +2875,7 @@ class VMAXCommon(object): :returns: list -- list of volume objects :raises: VolumeBackendAPIException """ - LOG.info(_LI("Delete Consistency Group: %(group)s."), + LOG.info("Delete Consistency Group: %(group)s.", {'group': group['id']}) modelUpdate = {} @@ -2894,7 +2893,7 @@ class VMAXCommon(object): cgInstanceName, cgName = self._find_consistency_group( replicationService, six.text_type(group['id'])) if cgInstanceName is None: - LOG.error(_LE("Cannot find CG group %(cgName)s."), + LOG.error("Cannot find CG group %(cgName)s.", {'cgName': six.text_type(group['id'])}) modelUpdate = {'status': fields.ConsistencyGroupStatus.DELETED} volumes_model_update = self.utils.get_volume_model_updates( @@ -2980,9 +2979,9 @@ class VMAXCommon(object): snapshots_model_update = [] - LOG.info(_LI( + LOG.info( "Create snapshot for Consistency Group %(cgId)s " - "cgsnapshotID: %(cgsnapshot)s."), + "cgsnapshotID: %(cgsnapshot)s.", {'cgsnapshot': cgsnapshot['id'], 'cgId': cgsnapshot['consistencygroup_id']}) @@ -3011,7 +3010,7 @@ class VMAXCommon(object): interval_retries_dict) targetCgInstanceName, targetCgName = self._find_consistency_group( replicationService, cgsnapshot['id']) - LOG.info(_LI("Create target consistency group %(targetCg)s."), + LOG.info("Create target consistency group %(targetCg)s.", {'targetCg': targetCgInstanceName}) for snapshot in snapshots: @@ -3135,9 +3134,9 @@ class VMAXCommon(object): consistencyGroup = cgsnapshot.get('consistencygroup') model_update = {} snapshots_model_update = [] - LOG.info(_LI( + LOG.info( "Delete snapshot for source CG %(cgId)s " - "cgsnapshotID: %(cgsnapshot)s."), + "cgsnapshotID: %(cgsnapshot)s.", {'cgsnapshot': cgsnapshot['id'], 'cgId': cgsnapshot['consistencygroup_id']}) @@ -3278,9 +3277,9 @@ class VMAXCommon(object): # add the volume to the default storage group created for # volumes in pools associated with this fast policy. 
if extraSpecs[FASTPOLICY]: - LOG.info(_LI( + LOG.info( "Adding volume: %(volumeName)s to default storage group" - " for FAST policy: %(fastPolicyName)s."), + " for FAST policy: %(fastPolicyName)s.", {'volumeName': volumeName, 'fastPolicyName': extraSpecs[FASTPOLICY]}) defaultStorageGroupInstanceName = ( @@ -3551,9 +3550,9 @@ class VMAXCommon(object): storageSystemName = volumeInstance['SystemName'] if not isValid: - LOG.error(_LE( + LOG.error( "Volume %(name)s is not suitable for storage " - "assisted migration using retype."), + "assisted migration using retype.", {'name': volumeName}) return False if volume['host'] != host['host'] or doChangeCompression: @@ -3601,9 +3600,9 @@ class VMAXCommon(object): self.utils.get_storage_group_from_volume( self.conn, volumeInstance.path, defaultSgName)) if foundStorageGroupInstanceName is None: - LOG.warning(_LW( + LOG.warning( "Volume : %(volumeName)s is not currently " - "belonging to any storage group."), + "belonging to any storage group.", {'volumeName': volumeName}) else: self.masking.remove_and_reset_members( @@ -3621,8 +3620,8 @@ class VMAXCommon(object): poolName, targetSlo, targetWorkload, isCompressionDisabled, storageSystemName, extraSpecs) if targetSgInstanceName is None: - LOG.error(_LE( - "Failed to get or create storage group %(storageGroupName)s."), + LOG.error( + "Failed to get or create storage group %(storageGroupName)s.", {'storageGroupName': storageGroupName}) return False @@ -3634,9 +3633,9 @@ class VMAXCommon(object): self.utils.get_storage_group_from_volume( self.conn, volumeInstance.path, storageGroupName)) if sgFromVolAddedInstanceName is None: - LOG.error(_LE( + LOG.error( "Volume : %(volumeName)s has not been " - "added to target storage group %(storageGroup)s."), + "added to target storage group %(storageGroup)s.", {'volumeName': volumeName, 'storageGroup': targetSgInstanceName}) return False @@ -3665,9 +3664,9 @@ class VMAXCommon(object): volumeName, volumeStatus)) if not isValid: - LOG.error(_LE( + LOG.error( "Volume %(name)s is not suitable for storage " - "assisted migration using retype."), + "assisted migration using retype.", {'name': volumeName}) return False if volume['host'] != host['host']: @@ -3718,10 +3717,10 @@ class VMAXCommon(object): self.fast.get_capacities_associated_to_policy( self.conn, arrayInfo['SerialNumber'], arrayInfo['FastPolicy'])) - LOG.info(_LI( + LOG.info( "FAST: capacity stats for policy %(fastPolicyName)s on array " "%(arrayName)s. 
total_capacity_gb=%(total_capacity_gb)lu, " - "free_capacity_gb=%(free_capacity_gb)lu."), + "free_capacity_gb=%(free_capacity_gb)lu.", {'fastPolicyName': arrayInfo['FastPolicy'], 'arrayName': arrayInfo['SerialNumber'], 'total_capacity_gb': total_capacity_gb, @@ -3732,10 +3731,10 @@ class VMAXCommon(object): self.utils.get_pool_capacities(self.conn, arrayInfo['PoolName'], arrayInfo['SerialNumber'])) - LOG.info(_LI( + LOG.info( "NON-FAST: capacity stats for pool %(poolName)s on array " "%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, " - "free_capacity_gb=%(free_capacity_gb)lu."), + "free_capacity_gb=%(free_capacity_gb)lu.", {'poolName': arrayInfo['PoolName'], 'arrayName': arrayInfo['SerialNumber'], 'total_capacity_gb': total_capacity_gb, @@ -3813,8 +3812,8 @@ class VMAXCommon(object): sloFromExtraSpec = poolDetails[0] workloadFromExtraSpec = poolDetails[1] except KeyError: - LOG.error(_LE("Error parsing SLO, workload from " - "the provided extra_specs.")) + LOG.error("Error parsing SLO, workload from " + "the provided extra_specs.") else: # Throw an exception as it is compulsory to have # pool_name in the extra specs @@ -3904,10 +3903,10 @@ class VMAXCommon(object): volumeInstance.path, volumeName, fastPolicyName, extraSpecs)) if defaultStorageGroupInstanceName is None: - LOG.warning(_LW( + LOG.warning( "The volume: %(volumename)s. was not first part of the " "default storage group for FAST policy %(fastPolicyName)s" - "."), + ".", {'volumename': volumeName, 'fastPolicyName': fastPolicyName}) # Check if it is part of another storage group. @@ -3946,12 +3945,12 @@ class VMAXCommon(object): volumeInstance, volumeName, fastPolicyName, extraSpecs)) if assocDefaultStorageGroupName is None: - LOG.error(_LE( + LOG.error( "Failed to Roll back to re-add volume %(volumeName)s " "to default storage group for fast policy " "%(fastPolicyName)s. Please contact your sysadmin to " "get the volume returned to the default " - "storage group."), + "storage group.", {'volumeName': volumeName, 'fastPolicyName': fastPolicyName}) @@ -4208,8 +4207,8 @@ class VMAXCommon(object): self._add_clone_to_default_storage_group( fastPolicyName, storageSystemName, cloneDict, cloneName, extraSpecs) - LOG.info(_LI("Snapshot creation %(cloneName)s completed. " - "Source Volume: %(sourceName)s."), + LOG.info("Snapshot creation %(cloneName)s completed. " + "Source Volume: %(sourceName)s.", {'cloneName': cloneName, 'sourceName': sourceName}) @@ -4246,8 +4245,8 @@ class VMAXCommon(object): if mvInstanceName is not None: targetWwns = self.masking.get_target_wwns( self.conn, mvInstanceName) - LOG.info(_LI("Target wwns in masking view %(maskingView)s: " - "%(targetWwns)s."), + LOG.info("Target wwns in masking view %(maskingView)s: " + "%(targetWwns)s.", {'maskingView': mvInstanceName, 'targetWwns': six.text_type(targetWwns)}) return targetWwns @@ -4347,9 +4346,9 @@ class VMAXCommon(object): sourceInstance, extraSpecs, targetInstance, rsdInstance, copyState)) except Exception: - LOG.warning(_LW( + LOG.warning( "Clone failed on V3. Cleaning up the target volume. 
" - "Clone name: %(cloneName)s "), + "Clone name: %(cloneName)s ", {'cloneName': cloneName}) if targetInstance: self._cleanup_target( @@ -4361,7 +4360,7 @@ class VMAXCommon(object): self.conn, job['Job']) targetVolumeInstance = ( self.provisionv3.get_volume_from_job(self.conn, job['Job'])) - LOG.info(_LI("The target instance device id is: %(deviceid)s."), + LOG.info("The target instance device id is: %(deviceid)s.", {'deviceid': targetVolumeInstance['DeviceID']}) if not isSnapshot: @@ -4426,7 +4425,7 @@ class VMAXCommon(object): replicationService, six.text_type(cgsnapshot['id'])) if cgInstanceName is None: - LOG.error(_LE("Cannot find CG group %(cgName)s."), + LOG.error("Cannot find CG group %(cgName)s.", {'cgName': cgsnapshot['id']}) modelUpdate = {'status': fields.ConsistencyGroupStatus.DELETED} return modelUpdate, [] @@ -4579,8 +4578,8 @@ class VMAXCommon(object): # Manage existing volume is not supported if fast enabled. if extraSpecs[FASTPOLICY]: - LOG.warning(_LW( - "FAST is enabled. Policy: %(fastPolicyName)s."), + LOG.warning( + "FAST is enabled. Policy: %(fastPolicyName)s.", {'fastPolicyName': extraSpecs[FASTPOLICY]}) exceptionMessage = (_( "Manage volume is not supported if FAST is enable. " @@ -4743,8 +4742,8 @@ class VMAXCommon(object): :param remove_volumes: the volumes uuids you want to remove from the CG """ - LOG.info(_LI("Update Consistency Group: %(group)s. " - "This adds and/or removes volumes from a CG."), + LOG.info("Update Consistency Group: %(group)s. " + "This adds and/or removes volumes from a CG.", {'group': group['id']}) modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE} @@ -4780,7 +4779,7 @@ class VMAXCommon(object): except exception.ConsistencyGroupNotFound: raise except Exception as ex: - LOG.error(_LE("Exception: %(ex)s"), {'ex': ex}) + LOG.error("Exception: %(ex)s", {'ex': ex}) exceptionMessage = (_("Failed to update consistency group:" " %(cgName)s.") % {'cgName': group['id']}) @@ -4799,7 +4798,7 @@ class VMAXCommon(object): for volume in volumes: volumeInstance = self._find_lun(volume) if volumeInstance is None: - LOG.error(_LE("Volume %(name)s not found on the array."), + LOG.error("Volume %(name)s not found on the array.", {'name': volume['name']}) else: volumeInstanceNames.append(volumeInstance.path) @@ -5136,14 +5135,14 @@ class VMAXCommon(object): extraSpecsDictList = [] isV3 = False - if isinstance(group, Group): + if isinstance(group, group_obj.Group): for volume_type in group.volume_types: extraSpecsDict, storageSystems, isV3 = ( self._update_extra_specs_list( volume_type.extra_specs, len(group.volume_types), volume_type.id)) extraSpecsDictList.append(extraSpecsDict) - elif isinstance(group, ConsistencyGroup): + elif isinstance(group, cg_obj.ConsistencyGroup): volumeTypeIds = group.volume_type_id.split(",") volumeTypeIds = list(filter(None, volumeTypeIds)) for volumeTypeId in volumeTypeIds: @@ -5321,7 +5320,7 @@ class VMAXCommon(object): sourceVolume, sourceInstance, targetInstance, extraSpecs, self.rep_config) - LOG.info(_LI('Successfully setup replication for %s.'), + LOG.info('Successfully setup replication for %s.', sourceVolume['name']) replication_status = REPLICATION_ENABLED replication_driver_data = rdfDict['keybindings'] @@ -5378,19 +5377,19 @@ class VMAXCommon(object): self._cleanup_remote_target( conn, repServiceInstanceName, sourceInstance, targetInstance, extraSpecs, repExtraSpecs) - LOG.info(_LI('Successfully destroyed replication for ' - 'volume: %(volume)s'), + LOG.info('Successfully destroyed replication for ' + 
'volume: %(volume)s', {'volume': volumeName}) else: - LOG.warning(_LW('Replication target not found for ' - 'replication-enabled volume: %(volume)s'), + LOG.warning('Replication target not found for ' + 'replication-enabled volume: %(volume)s', {'volume': volumeName}) except Exception as e: - LOG.error(_LE('Cannot get necessary information to cleanup ' - 'replication target for volume: %(volume)s. ' - 'The exception received was: %(e)s. Manual ' - 'clean-up may be required. Please contact ' - 'your administrator.'), + LOG.error('Cannot get necessary information to cleanup ' + 'replication target for volume: %(volume)s. ' + 'The exception received was: %(e)s. Manual ' + 'clean-up may be required. Please contact ' + 'your administrator.', {'volume': volumeName, 'e': e}) def _cleanup_remote_target( @@ -5438,9 +5437,9 @@ class VMAXCommon(object): :param volumeDict: the source volume dictionary :param extraSpecs: the extra specifications """ - LOG.warning(_LW( + LOG.warning( "Replication failed. Cleaning up the source volume. " - "Volume name: %(sourceName)s "), + "Volume name: %(sourceName)s.", {'sourceName': volumeName}) sourceInstance = self.utils.find_volume_instance( conn, volumeDict, volumeName) @@ -5484,11 +5483,11 @@ class VMAXCommon(object): repServiceInstanceName = self.utils.find_replication_service( conn, storageSystem) RDFGroupName = self.rep_config['rdf_group_label'] - LOG.info(_LI("Replication group: %(RDFGroup)s."), + LOG.info("Replication group: %(RDFGroup)s.", {'RDFGroup': RDFGroupName}) rdfGroupInstance = self.provisionv3.get_rdf_group_instance( conn, repServiceInstanceName, RDFGroupName) - LOG.info(_LI("Found RDF group instance: %(RDFGroup)s."), + LOG.info("Found RDF group instance: %(RDFGroup)s.", {'RDFGroup': rdfGroupInstance}) if rdfGroupInstance is None: exception_message = (_("Cannot find replication group: " @@ -5597,11 +5596,10 @@ class VMAXCommon(object): rep_data = six.text_type(replication_driver_data) except Exception as ex: - msg = _LE( + LOG.error( 'Failed to failover volume %(volume_id)s. 
' - 'Error: %(error)s.') - LOG.error(msg, {'volume_id': vol['id'], - 'error': ex}, ) + 'Error: %(error)s.', + {'volume_id': vol['id'], 'error': ex}) new_status = FAILOVER_ERROR model_update = {'volume_id': vol['id'], @@ -5628,7 +5626,7 @@ class VMAXCommon(object): recovery = self.recover_volumes_on_failback(volume) volume_update_list.append(recovery) - LOG.info(_LI("Failover host complete")) + LOG.info("Failover host complete") return secondary_id, volume_update_list @@ -5733,24 +5731,24 @@ class VMAXCommon(object): targetVolumeInstance, volumeName, repExtraSpecs, None, False) - LOG.info(_LI("Breaking replication relationship...")) + LOG.info("Breaking replication relationship...") self.break_rdf_relationship( self.conn, repServiceInstanceName, storageSynchronizationSv, extraSpecs) # extend the source volume - LOG.info(_LI("Extending source volume...")) + LOG.info("Extending source volume...") rc, volumeDict = self._extend_v3_volume( volumeInstance, volumeName, newSize, extraSpecs) # extend the target volume - LOG.info(_LI("Extending target volume...")) + LOG.info("Extending target volume...") self._extend_v3_volume(targetVolumeInstance, volumeName, newSize, repExtraSpecs) # re-create replication relationship - LOG.info(_LI("Recreating replication relationship...")) + LOG.info("Recreating replication relationship...") self.setup_volume_replication( self.conn, volume, volumeDict, extraSpecs, targetVolumeInstance) @@ -5826,9 +5824,9 @@ class VMAXCommon(object): except Exception as e: LOG.warning( - _LW("Remote replication failed. Cleaning up the target " - "volume and returning source volume to default storage " - "group. Volume name: %(cloneName)s "), + "Remote replication failed. Cleaning up the target " + "volume and returning source volume to default storage " + "group. Volume name: %(cloneName)s ", {'cloneName': volumeName}) self._cleanup_remote_target( @@ -5958,10 +5956,10 @@ class VMAXCommon(object): extraSpecs[WORKLOAD]) except Exception: LOG.warning( - _LW("The target array does not support the storage " - "pool setting for SLO %(slo)s or workload " - "%(workload)s. Not assigning any SLO or " - "workload."), + "The target array does not support the storage " + "pool setting for SLO %(slo)s or workload " + "%(workload)s. Not assigning any SLO or " + "workload.", {'slo': extraSpecs[SLO], 'workload': extraSpecs[WORKLOAD]}) repExtraSpecs[SLO] = None @@ -5969,9 +5967,9 @@ class VMAXCommon(object): repExtraSpecs[WORKLOAD] = None else: - LOG.warning(_LW("Cannot determine storage pool settings of " - "target array. Not assigning any SLO or " - "workload")) + LOG.warning("Cannot determine storage pool settings of " + "target array. Not assigning any SLO or " + "workload") repExtraSpecs[SLO] = None if extraSpecs[WORKLOAD]: repExtraSpecs[WORKLOAD] = None @@ -6004,9 +6002,9 @@ class VMAXCommon(object): arrayInfo['Workload']) except Exception: LOG.info( - _LI("The target array does not support the storage " - "pool setting for SLO %(slo)s or workload " - "%(workload)s. SLO stats will not be reported."), + "The target array does not support the storage " + "pool setting for SLO %(slo)s or workload " + "%(workload)s. SLO stats will not be reported.", {'slo': arrayInfo['SLO'], 'workload': arrayInfo['Workload']}) secondaryInfo['SLO'] = None @@ -6016,8 +6014,8 @@ class VMAXCommon(object): self.multiPoolSupportEnabled = False else: - LOG.info(_LI("Cannot determine storage pool settings of " - "target array. 
SLO stats will not be reported.")) + LOG.info("Cannot determine storage pool settings of " + "target array. SLO stats will not be reported.") secondaryInfo['SLO'] = None if arrayInfo['Workload']: secondaryInfo['Workload'] = None diff --git a/cinder/volume/drivers/dell_emc/vmax/fast.py b/cinder/volume/drivers/dell_emc/vmax/fast.py index 3fa04488f9c..5c8f50600cb 100644 --- a/cinder/volume/drivers/dell_emc/vmax/fast.py +++ b/cinder/volume/drivers/dell_emc/vmax/fast.py @@ -16,7 +16,7 @@ from oslo_log import log as logging from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder.volume.drivers.dell_emc.vmax import provision from cinder.volume.drivers.dell_emc.vmax import utils @@ -50,12 +50,11 @@ class VMAXFast(object): isTieringPolicySupported = self.is_tiering_policy_enabled( conn, tierPolicyServiceInstanceName) if isTieringPolicySupported is None: - LOG.error(_LE("Cannot determine whether " - "Tiering Policy is supported on this array.")) + LOG.error("Cannot determine whether " + "Tiering Policy is supported on this array.") if isTieringPolicySupported is False: - LOG.error(_LE("Tiering Policy is not " - "supported on this array.")) + LOG.error("Tiering Policy is not supported on this array.") return isTieringPolicySupported def is_tiering_policy_enabled(self, conn, tierPolicyServiceInstanceName): @@ -87,8 +86,8 @@ class VMAXFast(object): break if foundIsSupportsTieringPolicies is None: - LOG.error(_LE("Cannot determine if Tiering Policies " - "are supported.")) + LOG.error("Cannot determine if Tiering Policies " + "are supported.") return foundIsSupportsTieringPolicies @@ -113,8 +112,7 @@ class VMAXFast(object): conn, controllerConfigService) if not self._check_if_fast_supported(conn, storageSystemInstanceName): - LOG.error(_LE( - "FAST is not supported on this array.")) + LOG.error("FAST is not supported on this array.") raise defaultSgName = self.format_default_sg_string(fastPolicyName) @@ -127,9 +125,9 @@ class VMAXFast(object): controllerConfigService, defaultSgName)) if defaultStorageGroupInstanceName is None: - LOG.error(_LE( + LOG.error( "Unable to find default storage group " - "for FAST policy : %(fastPolicyName)s."), + "for FAST policy : %(fastPolicyName)s.", {'fastPolicyName': fastPolicyName}) raise @@ -137,9 +135,9 @@ class VMAXFast(object): foundDefaultStorageGroupInstanceName = ( assocStorageGroupInstanceName) else: - LOG.warning(_LW( + LOG.warning( "Volume: %(volumeName)s Does not belong " - "to storage group %(defaultSgName)s."), + "to storage group %(defaultSgName)s.", {'volumeName': volumeName, 'defaultSgName': defaultSgName}) return foundDefaultStorageGroupInstanceName, defaultSgName @@ -177,8 +175,8 @@ class VMAXFast(object): storageGroupInstanceName = self.utils.find_storage_masking_group( conn, controllerConfigService, defaultSgName) if storageGroupInstanceName is None: - LOG.error(_LE( - "Unable to get default storage group %(defaultSgName)s."), + LOG.error( + "Unable to get default storage group %(defaultSgName)s.", {'defaultSgName': defaultSgName}) return failedRet @@ -214,9 +212,9 @@ class VMAXFast(object): firstVolumeInstance = self._create_volume_for_default_volume_group( conn, controllerConfigService, volumeInstance.path, extraSpecs) if firstVolumeInstance is None: - LOG.error(_LE( + LOG.error( "Failed to create a first volume for storage " - "group : %(storageGroupName)s."), + "group : %(storageGroupName)s.", {'storageGroupName': storageGroupName}) return failedRet @@ -225,9 +223,9 @@ class VMAXFast(object): 
conn, controllerConfigService, storageGroupName, firstVolumeInstance.path, extraSpecs)) if defaultStorageGroupInstanceName is None: - LOG.error(_LE( + LOG.error( "Failed to create default storage group for " - "FAST policy : %(fastPolicyName)s."), + "FAST policy : %(fastPolicyName)s.", {'fastPolicyName': fastPolicyName}) return failedRet @@ -240,9 +238,9 @@ class VMAXFast(object): tierPolicyRuleInstanceName = self._get_service_level_tier_policy( conn, tierPolicyServiceInstanceName, fastPolicyName) if tierPolicyRuleInstanceName is None: - LOG.error(_LE( + LOG.error( "Unable to get policy rule for fast policy: " - "%(fastPolicyName)s."), + "%(fastPolicyName)s.", {'fastPolicyName': fastPolicyName}) return failedRet @@ -280,7 +278,7 @@ class VMAXFast(object): poolInstanceName = self.utils.get_assoc_pool_from_volume( conn, volumeInstanceName) if poolInstanceName is None: - LOG.error(_LE("Unable to get associated pool of volume.")) + LOG.error("Unable to get associated pool of volume.") return failedRet volumeName = 'vol1' @@ -408,8 +406,8 @@ class VMAXFast(object): if len(storageTierInstanceNames) == 0: storageTierInstanceNames = None - LOG.warning(_LW( - "Unable to get storage tiers from tier policy rule.")) + LOG.warning( + "Unable to get storage tiers from tier policy rule.") return storageTierInstanceNames @@ -503,8 +501,8 @@ class VMAXFast(object): tierPolicyRuleInstanceName = self._get_service_level_tier_policy( conn, tierPolicyServiceInstanceName, fastPolicyName) if tierPolicyRuleInstanceName is None: - LOG.error(_LE( - "Cannot find the fast policy %(fastPolicyName)s."), + LOG.error( + "Cannot find the fast policy %(fastPolicyName)s.", {'fastPolicyName': fastPolicyName}) return failedRet else: @@ -521,9 +519,9 @@ class VMAXFast(object): storageGroupInstanceName, tierPolicyRuleInstanceName, storageGroupName, fastPolicyName, extraSpecs) except Exception: - LOG.exception(_LE( + LOG.exception( "Failed to add storage group %(storageGroupInstanceName)s " - "to tier policy rule %(tierPolicyRuleInstanceName)s."), + "to tier policy rule %(tierPolicyRuleInstanceName)s.", {'storageGroupInstanceName': storageGroupInstanceName, 'tierPolicyRuleInstanceName': tierPolicyRuleInstanceName}) return failedRet @@ -588,15 +586,15 @@ class VMAXFast(object): rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: - LOG.error(_LE("Error disassociating storage group from " - "policy: %s."), errordesc) + LOG.error("Error disassociating storage group from " + "policy: %s.", errordesc) else: LOG.debug("Disassociated storage group from policy.") else: LOG.debug("ModifyStorageTierPolicyRule completed.") except Exception as e: - LOG.info(_LI("Storage group not associated with the " - "policy. Exception is %s."), e) + LOG.info("Storage group not associated with the " + "policy. 
Exception is %s.", e) def get_pool_associated_to_policy( self, conn, fastPolicyName, arraySN, @@ -664,7 +662,7 @@ class VMAXFast(object): isTieringPolicySupported = self.is_tiering_policy_enabled( conn, tierPolicyServiceInstanceName) except Exception as e: - LOG.error(_LE("Exception: %s."), e) + LOG.error("Exception: %s.", e) return False return isTieringPolicySupported diff --git a/cinder/volume/drivers/dell_emc/vmax/fc.py b/cinder/volume/drivers/dell_emc/vmax/fc.py index 1abadf546fa..4e8dc910bec 100644 --- a/cinder/volume/drivers/dell_emc/vmax/fc.py +++ b/cinder/volume/drivers/dell_emc/vmax/fc.py @@ -18,7 +18,6 @@ import ast from oslo_log import log as logging import six -from cinder.i18n import _LW from cinder import interface from cinder.volume import driver from cinder.volume.drivers.dell_emc.vmax import common @@ -274,7 +273,7 @@ class VMAXFCDriver(driver.FibreChannelDriver): 'target_wwns': target_wwns, 'init_targ_map': init_targ_map} else: - LOG.warning(_LW("Volume %(volume)s is not in any masking view."), + LOG.warning("Volume %(volume)s is not in any masking view.", {'volume': volume['name']}) return zoning_mappings diff --git a/cinder/volume/drivers/dell_emc/vmax/https.py b/cinder/volume/drivers/dell_emc/vmax/https.py index aa9584f4770..6c478c671d6 100644 --- a/cinder/volume/drivers/dell_emc/vmax/https.py +++ b/cinder/volume/drivers/dell_emc/vmax/https.py @@ -30,7 +30,7 @@ import six from six.moves import http_client from six.moves import urllib -from cinder.i18n import _, _LI +from cinder.i18n import _ # Handle case where we are running in a monkey patched environment if OpenSSL and patcher.is_monkey_patched('socket'): @@ -94,9 +94,9 @@ class HTTPSConnection(http_client.HTTPSConnection): def __init__(self, host, port=None, key_file=None, cert_file=None, strict=None, ca_certs=None, no_verification=False): if not pywbemAvailable: - LOG.info(_LI( + LOG.info( 'Module PyWBEM not installed. 
' - 'Install PyWBEM using the python-pywbem package.')) + 'Install PyWBEM using the python-pywbem package.') if six.PY3: excp_lst = (TypeError, ssl.SSLError) else: diff --git a/cinder/volume/drivers/dell_emc/vmax/iscsi.py b/cinder/volume/drivers/dell_emc/vmax/iscsi.py index 77cceb1e31f..d389a6353f7 100644 --- a/cinder/volume/drivers/dell_emc/vmax/iscsi.py +++ b/cinder/volume/drivers/dell_emc/vmax/iscsi.py @@ -20,7 +20,7 @@ from oslo_log import log as logging import six from cinder import exception -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.dell_emc.vmax import common @@ -209,7 +209,7 @@ class VMAXISCSIDriver(driver.ISCSIDriver): iscsi_properties = self.smis_get_iscsi_properties( volume, connector, ip_and_iqn, is_multipath) - LOG.info(_LI("iSCSI properties are: %s"), iscsi_properties) + LOG.info("iSCSI properties are: %s", iscsi_properties) return { 'driver_volume_type': 'iscsi', 'data': iscsi_properties @@ -246,7 +246,7 @@ class VMAXISCSIDriver(driver.ISCSIDriver): isError = True if isError: - LOG.error(_LE("Unable to get the lun id")) + LOG.error("Unable to get the lun id") exception_message = (_("Cannot find device number for volume " "%(volumeName)s.") % {'volumeName': volume['name']}) @@ -265,15 +265,14 @@ class VMAXISCSIDriver(driver.ISCSIDriver): properties['target_lun'] = lun_id properties['volume_id'] = volume['id'] - LOG.info(_LI( - "ISCSI properties: %(properties)s."), {'properties': properties}) - LOG.info(_LI( - "ISCSI volume is: %(volume)s."), {'volume': volume}) + LOG.info( + "ISCSI properties: %(properties)s.", {'properties': properties}) + LOG.info("ISCSI volume is: %(volume)s.", {'volume': volume}) if 'provider_auth' in volume: auth = volume['provider_auth'] - LOG.info(_LI( - "AUTH properties: %(authProps)s."), {'authProps': auth}) + LOG.info( + "AUTH properties: %(authProps)s.", {'authProps': auth}) if auth is not None: (auth_method, auth_username, auth_secret) = auth.split() @@ -282,7 +281,7 @@ class VMAXISCSIDriver(driver.ISCSIDriver): properties['auth_username'] = auth_username properties['auth_password'] = auth_secret - LOG.info(_LI("AUTH properties: %s."), properties) + LOG.info("AUTH properties: %s.", properties) return properties diff --git a/cinder/volume/drivers/dell_emc/vmax/masking.py b/cinder/volume/drivers/dell_emc/vmax/masking.py index 1801a7bbf28..fc60e2687d0 100644 --- a/cinder/volume/drivers/dell_emc/vmax/masking.py +++ b/cinder/volume/drivers/dell_emc/vmax/masking.py @@ -18,7 +18,7 @@ import six from cinder import coordination from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder.volume.drivers.dell_emc.vmax import fast from cinder.volume.drivers.dell_emc.vmax import provision from cinder.volume.drivers.dell_emc.vmax import provision_v3 @@ -125,10 +125,10 @@ class VMAXMasking(object): {'maskingViewInstanceName': maskingViewInstanceName, 'storageGroupInstanceName': storageGroupInstanceName}) except Exception as e: - LOG.exception(_LE( + LOG.exception( "Masking View creation or retrieval was not successful " "for masking view %(maskingViewName)s. 
" - "Attempting rollback."), + "Attempting rollback.", {'maskingViewName': maskingViewDict['maskingViewName']}) errorMessage = e @@ -225,9 +225,9 @@ class VMAXMasking(object): volumeName, maskingviewdict, defaultStorageGroupInstanceName) else: - LOG.warning(_LW( + LOG.warning( "Volume: %(volumeName)s does not belong " - "to storage group %(defaultSgGroupName)s."), + "to storage group %(defaultSgGroupName)s.", {'volumeName': volumeName, 'defaultSgGroupName': defaultSgGroupName}) return defaultStorageGroupInstanceName @@ -283,8 +283,7 @@ class VMAXMasking(object): storageSystemName = maskingViewDict['storageSystemName'] maskingViewName = maskingViewDict['maskingViewName'] pgGroupName = maskingViewDict['pgGroupName'] - LOG.info(_LI("Returning random Port Group: " - "%(portGroupName)s."), + LOG.info("Returning random Port Group: %(portGroupName)s.", {'portGroupName': pgGroupName}) storageGroupInstanceName, errorMessage = ( @@ -376,7 +375,7 @@ class VMAXMasking(object): self._get_storage_group_instance_name( conn, maskingViewDict, storageGroupInstanceName)) if storageGroupInstanceName is None: - # This may be used in exception hence _ instead of _LE. + # This may be used in exception hence the use of _. msg = (_( "Cannot get or create a storage group: %(sgGroupName)s" " for volume %(volumeName)s ") % @@ -404,7 +403,7 @@ class VMAXMasking(object): conn, maskingViewInstanceName)) if sgFromMvInstanceName is None: - # This may be used in exception hence _ instead of _LE. + # This may be used in exception hence the use of _. msg = (_( "Cannot get storage group: %(sgGroupName)s " "from masking view %(maskingViewInstanceName)s. ") % @@ -427,7 +426,7 @@ class VMAXMasking(object): portGroupInstanceName = self._get_port_group_instance_name( conn, controllerConfigService, pgGroupName) if portGroupInstanceName is None: - # This may be used in exception hence _ instead of _LE. + # This may be used in exception hence the use of _. msg = (_( "Cannot get port group: %(pgGroupName)s. ") % {'pgGroupName': pgGroupName}) @@ -455,7 +454,7 @@ class VMAXMasking(object): conn, controllerConfigService, igGroupName, connector, storageSystemName, extraSpecs)) if initiatorGroupInstanceName is None: - # This may be used in exception hence _ instead of _LE. + # This may be used in exception hence the use of _. msg = (_( "Cannot get or create initiator group: " "%(igGroupName)s. ") % @@ -486,7 +485,7 @@ class VMAXMasking(object): conn, controllerConfigService, maskingViewName, connector, storageSystemName, igGroupName, extraSpecs): - # This may be used in exception hence _ instead of _LE. + # This may be used in exception hence the use of _. msg = (_( "Unable to verify initiator group: %(igGroupName)s " "in masking view %(maskingViewName)s. ") % @@ -518,7 +517,7 @@ class VMAXMasking(object): storageGroupInstanceName, portGroupInstanceName, initiatorGroupInstanceName, extraSpecs)) if maskingViewInstanceName is None: - # This may be used in exception hence _ instead of _LE. + # This may be used in exception hence the use of _. msg = (_( "Cannot create masking view: %(maskingViewName)s. 
") % {'maskingViewName': maskingViewName}) @@ -543,9 +542,9 @@ class VMAXMasking(object): if self._is_volume_in_storage_group( conn, storageGroupInstanceName, volumeInstance, sgGroupName): - LOG.warning(_LW( + LOG.warning( "Volume: %(volumeName)s is already part " - "of storage group %(sgGroupName)s."), + "of storage group %(sgGroupName)s.", {'volumeName': volumeName, 'sgGroupName': sgGroupName}) else: @@ -576,7 +575,7 @@ class VMAXMasking(object): volumeInstance, volumeName, sgGroupName, extraSpecs) if not self._is_volume_in_storage_group( conn, storageGroupInstanceName, volumeInstance, sgGroupName): - # This may be used in exception hence _ instead of _LE. + # This may be used in exception hence the use of _. msg = (_( "Volume: %(volumeName)s was not added " "to storage group %(sgGroupName)s.") % @@ -584,8 +583,7 @@ class VMAXMasking(object): 'sgGroupName': sgGroupName}) LOG.error(msg) else: - LOG.info(_LI("Successfully added %(volumeName)s to " - "%(sgGroupName)s."), + LOG.info("Successfully added %(volumeName)s to %(sgGroupName)s.", {'volumeName': volumeName, 'sgGroupName': sgGroupName}) return msg @@ -742,9 +740,9 @@ class VMAXMasking(object): conn, foundMaskingViewInstanceName) if instance is None: foundMaskingViewInstanceName = None - LOG.error(_LE( + LOG.error( "Looks like masking view: %(maskingViewName)s " - "has recently been deleted."), + "has recently been deleted.", {'maskingViewName': maskingViewName}) else: LOG.debug( @@ -800,21 +798,21 @@ class VMAXMasking(object): storageGroupName, fastPolicyName, maskingViewDict['extraSpecs'])) if assocTierPolicyInstanceName is None: - LOG.error(_LE( + LOG.error( "Cannot add and verify tier policy association for " "storage group : %(storageGroupName)s to " - "FAST policy : %(fastPolicyName)s."), + "FAST policy : %(fastPolicyName)s.", {'storageGroupName': storageGroupName, 'fastPolicyName': fastPolicyName}) return failedRet if foundStorageGroupInstanceName is None: - LOG.error(_LE( - "Cannot get storage Group from job : %(storageGroupName)s."), + LOG.error( + "Cannot get storage Group from job : %(storageGroupName)s.", {'storageGroupName': storageGroupName}) return failedRet else: - LOG.info(_LI( - "Created new storage group: %(storageGroupName)s."), + LOG.info( + "Created new storage group: %(storageGroupName)s.", {'storageGroupName': storageGroupName}) return foundStorageGroupInstanceName @@ -843,9 +841,9 @@ class VMAXMasking(object): break if foundPortGroupInstanceName is None: - LOG.error(_LE( + LOG.error( "Could not find port group : %(portGroupName)s. 
Check that " - "the EMC configuration file has the correct port group name."), + "the EMC configuration file has the correct port group name.", {'portGroupName': portGroupName}) return foundPortGroupInstanceName @@ -886,9 +884,9 @@ class VMAXMasking(object): self._get_storage_hardware_id_instance_names( conn, initiatorNames, storageSystemName)) if not storageHardwareIDInstanceNames: - LOG.info(_LI( + LOG.info( "Initiator Name(s) %(initiatorNames)s are not on array " - "%(storageSystemName)s."), + "%(storageSystemName)s.", {'initiatorNames': initiatorNames, 'storageSystemName': storageSystemName}) storageHardwareIDInstanceNames = ( @@ -905,15 +903,13 @@ class VMAXMasking(object): conn, controllerConfigService, igGroupName, storageHardwareIDInstanceNames, extraSpecs) - LOG.info(_LI( - "Created new initiator group name: %(igGroupName)s."), - {'igGroupName': igGroupName}) + LOG.info("Created new initiator group name: %(igGroupName)s.", + {'igGroupName': igGroupName}) else: initiatorGroupInstance = conn.GetInstance( foundInitiatorGroupInstanceName, LocalOnly=False) - LOG.info(_LI( - "Using existing initiator group name: %(igGroupName)s."), - {'igGroupName': initiatorGroupInstance['ElementName']}) + LOG.info("Using existing initiator group name: %(igGroupName)s.", + {'igGroupName': initiatorGroupInstance['ElementName']}) return foundInitiatorGroupInstanceName @@ -1100,9 +1096,8 @@ class VMAXMasking(object): raise exception.VolumeBackendAPIException( data=exceptionMessage) - LOG.info(_LI( - "Created new masking view : %(maskingViewName)s."), - {'maskingViewName': maskingViewName}) + LOG.info("Created new masking view : %(maskingViewName)s.", + {'maskingViewName': maskingViewName}) return rc, job def find_new_masking_view(self, conn, jobDict): @@ -1148,7 +1143,7 @@ class VMAXMasking(object): {'view': maskingViewName, 'masking': foundStorageGroupInstanceName}) else: - LOG.warning(_LW("Unable to find Masking view: %(view)s."), + LOG.warning("Unable to find Masking view: %(view)s.", {'view': maskingViewName}) return foundStorageGroupInstanceName @@ -1221,14 +1216,14 @@ class VMAXMasking(object): foundPortGroupInstanceName = self.find_port_group( conn, controllerConfigService, pgGroupName) if foundPortGroupInstanceName is None: - LOG.error(_LE( + LOG.error( "Cannot find a portGroup with name %(pgGroupName)s. 
" - "The port group for a masking view must be pre-defined."), + "The port group for a masking view must be pre-defined.", {'pgGroupName': pgGroupName}) return foundPortGroupInstanceName - LOG.info(_LI( - "Port group instance name is %(foundPortGroupInstanceName)s."), + LOG.info( + "Port group instance name is %(foundPortGroupInstanceName)s.", {'foundPortGroupInstanceName': foundPortGroupInstanceName}) return foundPortGroupInstanceName @@ -1250,10 +1245,9 @@ class VMAXMasking(object): conn, controllerConfigService, igGroupName, connector, storageSystemName, extraSpecs)) if foundInitiatorGroupInstanceName is None: - LOG.error(_LE( - "Cannot create or find an initiator group with " - "name %(igGroupName)s."), - {'igGroupName': igGroupName}) + LOG.error("Cannot create or find an initiator group with " + "name %(igGroupName)s.", + {'igGroupName': igGroupName}) return foundInitiatorGroupInstanceName def _get_masking_view_instance_name( @@ -1278,9 +1272,9 @@ class VMAXMasking(object): initiatorGroupInstanceName, extraSpecs)) foundMaskingViewInstanceName = self.find_new_masking_view(conn, job) if foundMaskingViewInstanceName is None: - LOG.error(_LE( + LOG.error( "Cannot find the new masking view just created with name " - "%(maskingViewName)s."), + "%(maskingViewName)s.", {'maskingViewName': maskingViewName}) return foundMaskingViewInstanceName @@ -1324,11 +1318,11 @@ class VMAXMasking(object): LOG.error(errorMessage) message = (_("V3 rollback")) else: - LOG.warning(_LW( + LOG.warning( "No storage group found. " "Performing rollback on Volume: %(volumeName)s " "To return it to the default storage group for FAST " - "policy %(fastPolicyName)s."), + "policy %(fastPolicyName)s.", {'volumeName': rollbackDict['volumeName'], 'fastPolicyName': rollbackDict['fastPolicyName']}) assocDefaultStorageGroupName = ( @@ -1341,22 +1335,21 @@ class VMAXMasking(object): rollbackDict['fastPolicyName'], rollbackDict['extraSpecs'])) if assocDefaultStorageGroupName is None: - LOG.error(_LE( + LOG.error( "Failed to Roll back to re-add volume " "%(volumeName)s " "to default storage group for fast policy " "%(fastPolicyName)s: Please contact your sys " - "admin to get the volume re-added manually."), + "admin to get the volume re-added manually.", {'volumeName': rollbackDict['volumeName'], 'fastPolicyName': rollbackDict['fastPolicyName']}) message = (_("V2 rollback, volume is not in any storage " "group.")) else: - LOG.info(_LI( - "The storage group found is " - "%(foundStorageGroupInstanceName)s."), - {'foundStorageGroupInstanceName': - foundStorageGroupInstanceName}) + LOG.info("The storage group found is " + "%(foundStorageGroupInstanceName)s.", + {'foundStorageGroupInstanceName': + foundStorageGroupInstanceName}) # Check the name, see if it is the default storage group # or another. @@ -1422,7 +1415,7 @@ class VMAXMasking(object): {'view': maskingViewName, 'masking': foundInitiatorMaskingGroupInstanceName}) else: - LOG.warning(_LW("Unable to find Masking view: %(view)s."), + LOG.warning("Unable to find Masking view: %(view)s.", {'view': maskingViewName}) return foundInitiatorMaskingGroupInstanceName @@ -1471,18 +1464,18 @@ class VMAXMasking(object): self._get_storage_hardware_id_instance_names( conn, initiatorNames, storageSystemName)) if not storageHardwareIDInstanceNames: - LOG.info(_LI( + LOG.info( "Initiator Name(s) %(initiatorNames)s are not on " - "array %(storageSystemName)s. 
"), + "array %(storageSystemName)s.", {'initiatorNames': initiatorNames, 'storageSystemName': storageSystemName}) storageHardwareIDInstanceNames = ( self._create_hardware_ids(conn, initiatorNames, storageSystemName)) if not storageHardwareIDInstanceNames: - LOG.error(_LE( + LOG.error( "Failed to create hardware id(s) on " - "%(storageSystemName)s."), + "%(storageSystemName)s.", {'storageSystemName': storageSystemName}) return False @@ -1532,11 +1525,11 @@ class VMAXMasking(object): "%(maskingViewName)s.", {'maskingViewName': maskingViewName}) else: - LOG.error(_LE( + LOG.error( "One of the components of the original masking view " "%(maskingViewName)s cannot be retrieved so " "please contact your system administrator to check " - "that the correct initiator(s) are part of masking."), + "that the correct initiator(s) are part of masking.", {'maskingViewName': maskingViewName}) return False return True @@ -1708,9 +1701,9 @@ class VMAXMasking(object): conn, controllerConfigService, storageGroupInstanceName, volumeInstance.path, volumeName, extraSpecs) - LOG.info(_LI( + LOG.info( "Added volume: %(volumeName)s to existing storage group " - "%(sgGroupName)s."), + "%(sgGroupName)s.", {'volumeName': volumeName, 'sgGroupName': sgGroupName}) @@ -1737,9 +1730,9 @@ class VMAXMasking(object): volumeName, fastPolicyName)) if defaultStorageGroupInstanceName is None: - LOG.warning(_LW( + LOG.warning( "Volume %(volumeName)s was not first part of the default " - "storage group for the FAST Policy."), + "storage group for the FAST Policy.", {'volumeName': volumeName}) return failedRet @@ -1775,9 +1768,9 @@ class VMAXMasking(object): defaultSgName)) if emptyStorageGroupInstanceName is not None: - LOG.error(_LE( + LOG.error( "Failed to remove %(volumeName)s from the default storage " - "group for the FAST Policy."), + "group for the FAST Policy.", {'volumeName': volumeName}) return failedRet @@ -1833,7 +1826,7 @@ class VMAXMasking(object): if len(maskingGroupInstanceNames) > 0: return maskingGroupInstanceNames else: - LOG.info(_LI("Volume %(volumeName)s not in any storage group."), + LOG.info("Volume %(volumeName)s not in any storage group.", {'volumeName': volumeInstanceName}) return None @@ -1870,7 +1863,7 @@ class VMAXMasking(object): storageGroupInstanceName, volumeInstance, extraSpecs) else: - LOG.warning(_LW("Cannot get storage from connector.")) + LOG.warning("Cannot get storage from connector.") if reset: self._return_back_to_default_sg( @@ -1895,8 +1888,8 @@ class VMAXMasking(object): if storageGroupInstanceNames: sgNum = len(storageGroupInstanceNames) if len(storageGroupInstanceNames) > 1: - LOG.warning(_LW("Volume %(volumeName)s is belong to " - "%(sgNum)s storage groups."), + LOG.warning("Volume %(volumeName)s is belong to %(sgNum)s " + "storage groups.", {'volumeName': volumeInstance['ElementName'], 'sgNum': sgNum}) for storageGroupInstanceName in storageGroupInstanceNames: @@ -2237,8 +2230,8 @@ class VMAXMasking(object): raise exception.VolumeBackendAPIException( data=exceptionMessage) else: - LOG.info(_LI( - "Masking view %(maskingViewName)s successfully deleted."), + LOG.info( + "Masking view %(maskingViewName)s successfully deleted.", {'maskingViewName': maskingViewName}) def _get_and_remove_rule_association( @@ -2355,8 +2348,8 @@ class VMAXMasking(object): ResultClass='Symm_FCSCSIProtocolEndpoint') numberOfPorts = len(targetPortInstanceNames) if numberOfPorts <= 0: - LOG.warning(_LW("No target ports found in " - "masking view %(maskingView)s."), + LOG.warning("No target ports found in " + 
"masking view %(maskingView)s.", {'numPorts': len(targetPortInstanceNames), 'maskingView': mvInstanceName}) for targetPortInstanceName in targetPortInstanceNames: @@ -2425,7 +2418,7 @@ class VMAXMasking(object): 'mv': maskingViewInstanceName}) return portGroupInstanceNames[0] else: - LOG.warning(_LW("No port group found in masking view %(mv)s."), + LOG.warning("No port group found in masking view %(mv)s.", {'mv': maskingViewInstanceName}) def get_initiator_group_from_masking_view( @@ -2444,8 +2437,8 @@ class VMAXMasking(object): 'mv': maskingViewInstanceName}) return initiatorGroupInstanceNames[0] else: - LOG.warning(_LW("No Initiator group found in masking view " - "%(mv)s."), {'mv': maskingViewInstanceName}) + LOG.warning("No Initiator group found in masking view " + "%(mv)s.", {'mv': maskingViewInstanceName}) def _get_sg_or_mv_associated_with_initiator( self, conn, controllerConfigService, volumeInstanceName, @@ -2656,8 +2649,8 @@ class VMAXMasking(object): LOG.debug("Deletion of initiator path %(hardwareIdPath)s " "is successful.", {'hardwareIdPath': hardwareIdPath}) else: - LOG.warning(_LW("Deletion of initiator path %(hardwareIdPath)s " - "is failed."), {'hardwareIdPath': hardwareIdPath}) + LOG.warning("Deletion of initiator path %(hardwareIdPath)s " + "is failed.", {'hardwareIdPath': hardwareIdPath}) def _delete_initiators_from_initiator_group(self, conn, controllerConfigService, @@ -2740,16 +2733,16 @@ class VMAXMasking(object): initiatorGroupInstanceName, initiatorGroupName, extraSpecs) else: - LOG.warning(_LW("Initiator group %(initiatorGroupName)s is " - "associated with masking views and can't be " - "deleted. Number of associated masking view " - "is: %(nmv)d."), + LOG.warning("Initiator group %(initiatorGroupName)s is " + "associated with masking views and can't be " + "deleted. Number of associated masking view " + "is: %(nmv)d.", {'initiatorGroupName': initiatorGroupName, 'nmv': len(maskingViewInstanceNames)}) else: - LOG.warning(_LW("Initiator group %(initiatorGroupName)s was " - "not created by the VMAX driver so will " - "not be deleted by the VMAX driver."), + LOG.warning("Initiator group %(initiatorGroupName)s was " + "not created by the VMAX driver so will " + "not be deleted by the VMAX driver.", {'initiatorGroupName': initiatorGroupName}) def _create_hardware_ids( @@ -2793,9 +2786,9 @@ class VMAXMasking(object): self._get_port_group_from_masking_view( conn, maskingViewName, storageSystemName)) if portGroupInstanceName is None: - LOG.error(_LE( + LOG.error( "Cannot get port group from masking view: " - "%(maskingViewName)s. 
"), + "%(maskingViewName)s.", {'maskingViewName': maskingViewName}) else: try: @@ -2804,8 +2797,8 @@ class VMAXMasking(object): portGroupName = ( portGroupInstance['ElementName']) except Exception: - LOG.error(_LE( - "Cannot get port group name.")) + LOG.error( + "Cannot get port group name.") return portGroupName, errorMessage @coordination.synchronized('emc-sg-' diff --git a/cinder/volume/drivers/dell_emc/vmax/provision_v3.py b/cinder/volume/drivers/dell_emc/vmax/provision_v3.py index 0be17ec3738..06cf39a35e4 100644 --- a/cinder/volume/drivers/dell_emc/vmax/provision_v3.py +++ b/cinder/volume/drivers/dell_emc/vmax/provision_v3.py @@ -20,7 +20,7 @@ import six from cinder import coordination from cinder import exception -from cinder.i18n import _, _LE, _LW +from cinder.i18n import _ from cinder.volume.drivers.dell_emc.vmax import utils LOG = logging.getLogger(__name__) @@ -515,9 +515,9 @@ class VMAXProvisionV3(object): rc, errordesc = self.utils.wait_for_job_complete( conn, job, extraSpecs) if rc != 0: - LOG.error(_LE( + LOG.error( "Error Create Group: %(groupName)s. " - "Return code: %(rc)lu. Error: %(error)s."), + "Return code: %(rc)lu. Error: %(error)s.", {'groupName': groupName, 'rc': rc, 'error': errordesc}) @@ -863,11 +863,11 @@ class VMAXProvisionV3(object): remainingCapacityGb = remainingSLOCapacityGb wlpEnabled = True else: - LOG.warning(_LW( + LOG.warning( "Remaining capacity %(remainingCapacityGb)s " "GBs is determined from SRP pool capacity " "and not the SLO capacity. Performance may " - "not be what you expect."), + "not be what you expect.", {'remainingCapacityGb': remainingCapacityGb}) return (totalCapacityGb, remainingCapacityGb, subscribedCapacityGb, diff --git a/cinder/volume/drivers/dell_emc/vmax/utils.py b/cinder/volume/drivers/dell_emc/vmax/utils.py index 255da5507f9..e88f07c21ea 100644 --- a/cinder/volume/drivers/dell_emc/vmax/utils.py +++ b/cinder/volume/drivers/dell_emc/vmax/utils.py @@ -30,7 +30,7 @@ import six from cinder import context from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder.objects import fields from cinder.volume import volume_types @@ -85,9 +85,9 @@ class VMAXUtils(object): def __init__(self, prtcl): if not pywbemAvailable: - LOG.info(_LI( + LOG.info( "Module PyWBEM not installed. 
" - "Install PyWBEM using the python-pywbem package.")) + "Install PyWBEM using the python-pywbem package.") self.protocol = prtcl def find_storage_configuration_service(self, conn, storageSystemName): @@ -319,9 +319,8 @@ class VMAXUtils(object): if retries > maxJobRetries: kwargs['rc'], kwargs['errordesc'] = ( self._verify_job_state(conn, job)) - LOG.error(_LE("_wait_for_job_complete " - "failed after %(retries)d " - "tries."), + LOG.error("_wait_for_job_complete failed after %(retries)d " + "tries.", {'retries': retries}) raise loopingcall.LoopingCallDone() @@ -457,8 +456,7 @@ class VMAXUtils(object): raise exception.VolumeBackendAPIException(exceptionMessage) if kwargs['retries'] > maxJobRetries: - LOG.error(_LE("_wait_for_sync failed after %(retries)d " - "tries."), + LOG.error("_wait_for_sync failed after %(retries)d tries.", {'retries': retries}) raise loopingcall.LoopingCallDone(retvalue=maxJobRetries) if kwargs['wait_for_sync_called']: @@ -526,7 +524,7 @@ class VMAXUtils(object): if len(groups) > 0: foundStorageSystemInstanceName = groups[0] else: - LOG.error(_LE("Cannot get storage system.")) + LOG.error("Cannot get storage system.") raise return foundStorageSystemInstanceName @@ -549,9 +547,9 @@ class VMAXUtils(object): ResultClass='CIM_DeviceMaskingGroup') if len(storageGroupInstanceNames) > 1: - LOG.info(_LI( + LOG.info( "The volume belongs to more than one storage group. " - "Returning storage group %(sgName)s."), + "Returning storage group %(sgName)s.", {'sgName': sgName}) for storageGroupInstanceName in storageGroupInstanceNames: instance = self.get_existing_instance( @@ -1001,9 +999,9 @@ class VMAXUtils(object): poolInstanceName = self.get_pool_by_name( conn, poolName, storageSystemName) if poolInstanceName is None: - LOG.error(_LE( + LOG.error( "Unable to retrieve pool instance of %(poolName)s on " - "array %(array)s."), + "array %(array)s.", {'poolName': poolName, 'array': storageSystemName}) return (0, 0) storagePoolInstance = conn.GetInstance( @@ -1241,7 +1239,7 @@ class VMAXUtils(object): infoDetail = host.split('@') storageSystem = 'SYMMETRIX+' + infoDetail[0] except Exception: - LOG.error(_LE("Error parsing array from host capabilities.")) + LOG.error("Error parsing array from host capabilities.") return storageSystem @@ -1292,15 +1290,15 @@ class VMAXUtils(object): if foundSyncInstanceName: # Wait for SE_StorageSynchronized_SV_SV to be fully synced. if waitforsync: - LOG.warning(_LW( + LOG.warning( "Expect a performance hit as volume is not fully " - "synced on %(deviceId)s."), + "synced on %(deviceId)s.", {'deviceId': volumeInstance['DeviceID']}) startTime = time.time() self.wait_for_sync(conn, foundSyncInstanceName, extraSpecs) - LOG.warning(_LW( + LOG.warning( "Synchronization process took " - "took: %(delta)s H:MM:SS."), + "took: %(delta)s H:MM:SS.", {'delta': self.get_time_delta(startTime, time.time())}) @@ -1336,9 +1334,9 @@ class VMAXUtils(object): break if foundSyncInstanceName is None: - LOG.warning(_LW( + LOG.warning( "Group sync name not found for target group %(target)s " - "on %(storageSystem)s."), + "on %(storageSystem)s.", {'target': targetRgInstanceName['InstanceID'], 'storageSystem': storageSystem}) else: @@ -1570,14 +1568,14 @@ class VMAXUtils(object): break if not isValidSLO: - LOG.error(_LE( + LOG.error( "SLO: %(slo)s is not valid. 
Valid values are Bronze, Silver, " - "Gold, Platinum, Diamond, Optimized, NONE."), {'slo': slo}) + "Gold, Platinum, Diamond, Optimized, NONE.", {'slo': slo}) if not isValidWorkload: - LOG.error(_LE( + LOG.error( "Workload: %(workload)s is not valid. Valid values are " - "DSS_REP, DSS, OLTP, OLTP_REP, NONE."), {'workload': workload}) + "DSS_REP, DSS, OLTP, OLTP_REP, NONE.", {'workload': workload}) return isValidSLO, isValidWorkload @@ -1641,8 +1639,8 @@ class VMAXUtils(object): if len(metaHeads) > 0: metaHeadInstanceName = metaHeads[0] if metaHeadInstanceName is None: - LOG.info(_LI( - "Volume %(volume)s does not have meta device members."), + LOG.info( + "Volume %(volume)s does not have meta device members.", {'volume': volumeInstanceName}) return metaHeadInstanceName @@ -1714,7 +1712,7 @@ class VMAXUtils(object): instance = None else: # Something else that we cannot recover from has happened. - LOG.error(_LE("Exception: %s"), desc) + LOG.error("Exception: %s", desc) exceptionMessage = (_( "Cannot verify the existence of object:" "%(instanceName)s.") @@ -1806,8 +1804,8 @@ class VMAXUtils(object): {'initiator': initiator, 'rc': rc, 'ret': ret}) hardwareIdList = ret['HardwareID'] else: - LOG.warning(_LW("CreateStorageHardwareID failed. initiator: " - "%(initiator)s, rc=%(rc)d, ret=%(ret)s."), + LOG.warning("CreateStorageHardwareID failed. initiator: " + "%(initiator)s, rc=%(rc)d, ret=%(ret)s.", {'initiator': initiator, 'rc': rc, 'ret': ret}) return hardwareIdList @@ -1826,7 +1824,7 @@ class VMAXUtils(object): if 'iqn' in initiator.lower(): hardwareTypeId = 5 if hardwareTypeId == 0: - LOG.warning(_LW("Cannot determine the hardware type.")) + LOG.warning("Cannot determine the hardware type.") return hardwareTypeId def _process_tag(self, element, tagName): @@ -1976,15 +1974,15 @@ class VMAXUtils(object): portGroup = self._get_random_portgroup(dom) serialNumber = self._process_tag(dom, 'Array') if serialNumber is None: - LOG.error(_LE( + LOG.error( "Array Serial Number must be in the file " - "%(fileName)s."), + "%(fileName)s.", {'fileName': fileName}) poolName = self._process_tag(dom, 'Pool') if poolName is None: - LOG.error(_LE( + LOG.error( "PoolName must be in the file " - "%(fileName)s."), + "%(fileName)s.", {'fileName': fileName}) kwargs = self._fill_record( connargs, serialNumber, poolName, portGroup, dom) @@ -2024,9 +2022,8 @@ class VMAXUtils(object): % {'poolName': arrayInfoRec['PoolName'], 'array': arrayInfoRec['SerialNumber']}) if compString == pool: - LOG.info(_LI( - "The pool_name from extraSpecs is %(pool)s."), - {'pool': pool}) + LOG.info("The pool_name from extraSpecs is %(pool)s.", + {'pool': pool}) foundArrayInfoRec = arrayInfoRec break else: @@ -2284,9 +2281,9 @@ class VMAXUtils(object): break if foundSyncInstanceName is None: - LOG.info(_LI( + LOG.info( "No replication synchronization session found associated " - "with source volume %(source)s on %(storageSystem)s."), + "with source volume %(source)s on %(storageSystem)s.", {'source': sourceDeviceId, 'storageSystem': storageSystem}) return foundSyncInstanceName @@ -2301,16 +2298,13 @@ class VMAXUtils(object): :returns: volume_model_updates - updated volumes """ volume_model_updates = [] - LOG.info(_LI( - "Updating status for CG: %(id)s."), - {'id': cgId}) + LOG.info("Updating status for CG: %(id)s.", {'id': cgId}) if volumes: for volume in volumes: volume_model_updates.append({'id': volume['id'], 'status': status}) else: - LOG.info(_LI("No volume found for CG: %(cg)s."), - {'cg': cgId}) + LOG.info("No volume found for CG: 
%(cg)s.", {'cg': cgId}) return volume_model_updates def get_smi_version(self, conn): @@ -2612,7 +2606,7 @@ class VMAXUtils(object): try: max_subscription_percent_int = int(max_subscription_percent) except ValueError: - LOG.error(_LE("Cannot convert max subscription percent to int.")) + LOG.error("Cannot convert max subscription percent to int.") return None return float(max_subscription_percent_int) / 100 @@ -2969,14 +2963,14 @@ class VMAXUtils(object): if foundSyncInstanceName: # Wait for SE_StorageSynchronized_SV_SV to be fully synced. if waitforsync: - LOG.warning(_LW( + LOG.warning( "Expect a performance hit as volume is not not fully " - "synced on %(deviceId)s."), + "synced on %(deviceId)s.", {'deviceId': sourceInstance['DeviceID']}) startTime = time.time() self.wait_for_sync(conn, foundSyncInstanceName, extraSpecs) - LOG.warning(_LW( - "Synchronization process took: %(delta)s H:MM:SS."), + LOG.warning( + "Synchronization process took: %(delta)s H:MM:SS.", {'delta': self.get_time_delta(startTime, time.time())}) @@ -3011,8 +3005,8 @@ class VMAXUtils(object): extraSpecs[self.POOL] = poolDetails[2] extraSpecs[self.ARRAY] = poolDetails[3] except KeyError: - LOG.error(_LE("Error parsing SLO, workload from " - "the provided extra_specs.")) + LOG.error("Error parsing SLO, workload from " + "the provided extra_specs.") return extraSpecs def get_default_intervals_retries(self): diff --git a/cinder/volume/drivers/dell_emc/vnx/adapter.py b/cinder/volume/drivers/dell_emc/vnx/adapter.py index 39ad9de5c2d..fe3b2d5ae18 100644 --- a/cinder/volume/drivers/dell_emc/vnx/adapter.py +++ b/cinder/volume/drivers/dell_emc/vnx/adapter.py @@ -27,7 +27,7 @@ if storops: from storops import exception as storops_ex from cinder import exception -from cinder.i18n import _, _LI, _LE, _LW +from cinder.i18n import _ from cinder.objects import fields from cinder.volume.drivers.dell_emc.vnx import client from cinder.volume.drivers.dell_emc.vnx import common @@ -96,9 +96,9 @@ class CommonAdapter(object): # empty string. naviseccli_path = self.config.naviseccli_path if naviseccli_path is None or len(naviseccli_path.strip()) == 0: - LOG.warning(_LW('[%(group)s] naviseccli_path is not set or set to ' - 'an empty string. None will be passed into ' - 'storops.'), {'group': self.config.config_group}) + LOG.warning('[%(group)s] naviseccli_path is not set or set to ' + 'an empty string. None will be passed into ' + 'storops.', {'group': self.config.config_group}) self.config.naviseccli_path = None # Check option `storage_vnx_pool_names`. @@ -133,32 +133,32 @@ class CommonAdapter(object): self.config.io_port_list = io_port_list if self.config.ignore_pool_full_threshold: - LOG.warning(_LW('[%(group)s] ignore_pool_full_threshold: True. ' - 'LUN creation will still be forced even if the ' - 'pool full threshold is exceeded.'), + LOG.warning('[%(group)s] ignore_pool_full_threshold: True. ' + 'LUN creation will still be forced even if the ' + 'pool full threshold is exceeded.', {'group': self.config.config_group}) if self.config.destroy_empty_storage_group: - LOG.warning(_LW('[%(group)s] destroy_empty_storage_group: True. ' - 'Empty storage group will be deleted after volume ' - 'is detached.'), + LOG.warning('[%(group)s] destroy_empty_storage_group: True. ' + 'Empty storage group will be deleted after volume ' + 'is detached.', {'group': self.config.config_group}) if not self.config.initiator_auto_registration: - LOG.info(_LI('[%(group)s] initiator_auto_registration: False. ' - 'Initiator auto registration is not enabled. 
' - 'Please register initiator manually.'), + LOG.info('[%(group)s] initiator_auto_registration: False. ' + 'Initiator auto registration is not enabled. ' + 'Please register initiator manually.', {'group': self.config.config_group}) if self.config.force_delete_lun_in_storagegroup: - LOG.warning(_LW( - '[%(group)s] force_delete_lun_in_storagegroup=True'), + LOG.warning( + '[%(group)s] force_delete_lun_in_storagegroup=True', {'group': self.config.config_group}) if self.config.ignore_pool_full_threshold: - LOG.warning(_LW('[%(group)s] ignore_pool_full_threshold: True. ' - 'LUN creation will still be forced even if the ' - 'pool full threshold is exceeded.'), + LOG.warning('[%(group)s] ignore_pool_full_threshold: True. ' + 'LUN creation will still be forced even if the ' + 'pool full threshold is exceeded.', {'group': self.config.config_group}) def _build_port_str(self, port): @@ -217,10 +217,10 @@ class CommonAdapter(object): tier = specs.tier volume_metadata['snapcopy'] = 'False' - LOG.info(_LI('Create Volume: %(volume)s Size: %(size)s ' - 'pool: %(pool)s ' - 'provision: %(provision)s ' - 'tier: %(tier)s '), + LOG.info('Create Volume: %(volume)s Size: %(size)s ' + 'pool: %(pool)s ' + 'provision: %(provision)s ' + 'tier: %(tier)s ', {'volume': volume_name, 'size': volume_size, 'pool': pool, @@ -463,7 +463,7 @@ class CommonAdapter(object): model_update = {} volumes_model_update = [] model_update['status'] = group.status - LOG.info(_LI('Start to delete consistency group: %(cg_name)s'), + LOG.info('Start to delete consistency group: %(cg_name)s', {'cg_name': cg_name}) self.client.delete_consistency_group(cg_name) @@ -491,8 +491,8 @@ class CommonAdapter(object): def do_create_cgsnap(self, group_name, snap_name, snapshots): model_update = {} snapshots_model_update = [] - LOG.info(_LI('Creating consistency snapshot for group' - ': %(group_name)s'), + LOG.info('Creating consistency snapshot for group' + ': %(group_name)s', {'group_name': group_name}) self.client.create_cg_snapshot(snap_name, @@ -516,8 +516,8 @@ class CommonAdapter(object): model_update = {} snapshots_model_update = [] model_update['status'] = snap_status - LOG.info(_LI('Deleting consistency snapshot %(snap_name)s for ' - 'group: %(group_name)s'), + LOG.info('Deleting consistency snapshot %(snap_name)s for ' + 'group: %(group_name)s', {'snap_name': snap_name, 'group_name': group_name}) @@ -640,10 +640,10 @@ class CommonAdapter(object): 'Non-existent pools: %s') % ','.join(nonexistent_pools) raise exception.VolumeBackendAPIException(data=msg) if nonexistent_pools: - LOG.warning(_LW('The following specified storage pools ' - 'do not exist: %(nonexistent)s. ' - 'This host will only manage the storage ' - 'pools: %(exist)s'), + LOG.warning('The following specified storage pools ' + 'do not exist: %(nonexistent)s. ' + 'This host will only manage the storage ' + 'pools: %(exist)s', {'nonexistent': ','.join(nonexistent_pools), 'exist': ','.join(pool_names)}) else: @@ -651,8 +651,8 @@ class CommonAdapter(object): ','.join(pool_names)) else: pool_names = [p.name for p in array_pools] - LOG.info(_LI('No storage pool is configured. This host will ' - 'manage all the pools on the VNX system.')) + LOG.info('No storage pool is configured. This host will ' + 'manage all the pools on the VNX system.') return [pool for pool in array_pools if pool.name in pool_names] @@ -684,7 +684,7 @@ class CommonAdapter(object): # or Deleting. 
if pool.state in common.PoolState.VALID_CREATE_LUN_STATE: pool_stats['free_capacity_gb'] = 0 - LOG.warning(_LW('Storage Pool [%(pool)s] is [%(state)s].'), + LOG.warning('Storage Pool [%(pool)s] is [%(state)s].', {'pool': pool.name, 'state': pool.state}) else: @@ -692,9 +692,9 @@ class CommonAdapter(object): if (pool_feature.max_pool_luns <= pool_feature.total_pool_luns): - LOG.warning(_LW('Maximum number of Pool LUNs %(max_luns)s ' - 'have been created for %(pool_name)s. ' - 'No more LUN creation can be done.'), + LOG.warning('Maximum number of Pool LUNs %(max_luns)s ' + 'have been created for %(pool_name)s. ' + 'No more LUN creation can be done.', {'max_luns': pool_feature.max_pool_luns, 'pool_name': pool.name}) pool_stats['free_capacity_gb'] = 0 @@ -1018,15 +1018,14 @@ class CommonAdapter(object): lun = self.client.get_lun(lun_id=volume.vnx_lun_id) hostname = host.name if not sg.existed: - LOG.warning(_LW("Storage Group %s is not found. " - "Nothing can be done in terminate_connection()."), + LOG.warning("Storage Group %s is not found. " + "Nothing can be done in terminate_connection().", hostname) else: try: sg.detach_alu(lun) except storops_ex.VNXDetachAluNotFoundError: - LOG.warning(_LW("Volume %(vol)s is not in Storage Group" - " %(sg)s."), + LOG.warning("Volume %(vol)s is not in Storage Group %(sg)s.", {'vol': volume.name, 'sg': hostname}) def build_terminate_connection_return_data(self, host, sg): @@ -1042,19 +1041,19 @@ class CommonAdapter(object): def _destroy_empty_sg(self, host, sg): try: - LOG.info(_LI("Storage Group %s is empty."), sg.name) + LOG.info("Storage Group %s is empty.", sg.name) sg.disconnect_host(sg.name) sg.delete() if self.itor_auto_dereg: self._deregister_initiator(host) except storops_ex.StoropsException: - LOG.warning(_LW("Failed to destroy Storage Group %s."), + LOG.warning("Failed to destroy Storage Group %s.", sg.name) try: sg.connect_host(sg.name) except storops_ex.StoropsException: - LOG.warning(_LW("Failed to connect host %(host)s " - "back to storage group %(sg)s."), + LOG.warning("Failed to connect host %(host)s " + "back to storage group %(sg)s.", {'host': sg.name, 'sg': sg.name}) def _deregister_initiator(self, host): @@ -1062,7 +1061,7 @@ class CommonAdapter(object): try: self.client.deregister_initiators(initiators) except storops_ex: - LOG.warning(_LW("Failed to deregister the initiators %s"), + LOG.warning("Failed to deregister the initiators %s", initiators) def _is_allowed_port(self, port): @@ -1138,7 +1137,7 @@ class CommonAdapter(object): volume.name, lun_size, provision, tier) - LOG.info(_LI('Successfully setup replication for %s.'), volume.id) + LOG.info('Successfully setup replication for %s.', volume.id) rep_update.update({'replication_status': fields.ReplicationStatus.ENABLED}) return rep_update @@ -1152,7 +1151,7 @@ class CommonAdapter(object): mirror_view = self.build_mirror_view(self.config, True) mirror_view.destroy_mirror(mirror_name, volume.name) LOG.info( - _LI('Successfully destroyed replication for volume: %s'), + 'Successfully destroyed replication for volume: %s', volume.id) def build_mirror_view(self, configuration, failover=True): @@ -1164,7 +1163,7 @@ class CommonAdapter(object): """ rep_devices = configuration.replication_device if not rep_devices: - LOG.info(_LI('Replication is not configured on backend: %s.'), + LOG.info('Replication is not configured on backend: %s.', configuration.config_group) return None elif len(rep_devices) == 1: @@ -1225,12 +1224,12 @@ class CommonAdapter(object): try: 
mirror_view.promote_image(mirror_name) except storops_ex.VNXMirrorException as ex: - msg = _LE( + LOG.error( 'Failed to failover volume %(volume_id)s ' - 'to %(target)s: %(error)s.') - LOG.error(msg, {'volume_id': volume.id, - 'target': secondary_backend_id, - 'error': ex},) + 'to %(target)s: %(error)s.', + {'volume_id': volume.id, + 'target': secondary_backend_id, + 'error': ex}) new_status = fields.ReplicationStatus.ERROR else: # Transfer ownership to secondary_backend_id and @@ -1354,8 +1353,7 @@ class ISCSIAdapter(CommonAdapter): raise exception.InvalidConfigurationValue( option=option, value=iscsi_initiators) - LOG.info(_LI("[%(group)s] iscsi_initiators is configured: " - "%(value)s"), + LOG.info("[%(group)s] iscsi_initiators is configured: %(value)s", {'group': self.config.config_group, 'value': self.config.iscsi_initiators}) diff --git a/cinder/volume/drivers/dell_emc/vnx/client.py b/cinder/volume/drivers/dell_emc/vnx/client.py index a6f8c55f10c..21d74118cff 100644 --- a/cinder/volume/drivers/dell_emc/vnx/client.py +++ b/cinder/volume/drivers/dell_emc/vnx/client.py @@ -22,7 +22,7 @@ if storops: from storops.lib import tasks as storops_tasks from cinder import exception -from cinder.i18n import _, _LW, _LE, _LI +from cinder.i18n import _ from cinder import utils as cinder_utils from cinder.volume.drivers.dell_emc.vnx import common from cinder.volume.drivers.dell_emc.vnx import const @@ -95,7 +95,7 @@ class Client(object): if queue_path: self.queue = storops_tasks.PQueue(path=queue_path) self.queue.start() - LOG.info(_LI('PQueue[%s] starts now.'), queue_path) + LOG.info('PQueue[%s] starts now.', queue_path) def create_lun(self, pool, name, size, provision, tier, cg_id=None, ignore_thresholds=False): @@ -143,8 +143,8 @@ class Client(object): if smp_attached_snap: smp_attached_snap.delete() except storops_ex.VNXLunNotFoundError as ex: - LOG.info(_LI("LUN %(name)s is already deleted. This message can " - "be safely ignored. Message: %(msg)s"), + LOG.info("LUN %(name)s is already deleted. This message can " + "be safely ignored. Message: %(msg)s", {'name': name, 'msg': ex.message}) def cleanup_async_lun(self, name, force=False): @@ -160,8 +160,8 @@ class Client(object): def delay_delete_lun(self, name): """Delay the deletion by putting it in a storops queue.""" self.queue.put(self.vnx.delete_lun, name=name) - LOG.info(_LI("VNX object has been added to queue for later" - " deletion: %s"), name) + LOG.info("VNX object has been added to queue for later" + " deletion: %s", name) @cinder_utils.retry(const.VNXLunPreparingError, retries=1, backoff_rate=1) @@ -173,8 +173,8 @@ class Client(object): lun.poll = poll lun.expand(new_size, ignore_thresholds=True) except storops_ex.VNXLunExpandSizeError as ex: - LOG.warning(_LW("LUN %(name)s is already expanded. " - "Message: %(msg)s."), + LOG.warning("LUN %(name)s is already expanded. " + "Message: %(msg)s.", {'name': name, 'msg': ex.message}) except storops_ex.VNXLunPreparingError as ex: @@ -182,8 +182,7 @@ class Client(object): # is 'Preparing'. Wait for a while so that the LUN may get out of # the transitioning state. 
with excutils.save_and_reraise_exception(): - LOG.warning(_LW("LUN %(name)s is not ready for extension: " - "%(msg)s"), + LOG.warning("LUN %(name)s is not ready for extension: %(msg)s", {'name': name, 'msg': ex.message}) utils.wait_until(Condition.is_lun_ops_ready, lun=lun) @@ -206,7 +205,7 @@ class Client(object): if not session.existed: return True elif session.current_state in ('FAULTED', 'STOPPED'): - LOG.warning(_LW('Session is %s, need to handled then.'), + LOG.warning('Session is %s, need to handled then.', session.current_state) return True else: @@ -243,15 +242,15 @@ class Client(object): session = self.vnx.get_migration_session(src_id) src_lun = self.vnx.get_lun(lun_id=src_id) if session.existed: - LOG.warning(_LW('Cancelling migration session: ' - '%(src_id)s -> %(dst_id)s.'), + LOG.warning('Cancelling migration session: ' + '%(src_id)s -> %(dst_id)s.', {'src_id': src_id, 'dst_id': dst_id}) try: src_lun.cancel_migrate() except storops_ex.VNXLunNotMigratingError: - LOG.info(_LI('The LUN is not migrating or completed, ' - 'this message can be safely ignored')) + LOG.info('The LUN is not migrating or completed, ' + 'this message can be safely ignored') except (storops_ex.VNXLunSyncCompletedError, storops_ex.VNXMigrationError): # Wait until session finishes @@ -266,8 +265,8 @@ class Client(object): snap_name, allow_rw=True, auto_delete=False, keep_for=keep_for) except storops_ex.VNXSnapNameInUseError as ex: - LOG.warning(_LW('Snapshot %(name)s already exists. ' - 'Message: %(msg)s'), + LOG.warning('Snapshot %(name)s already exists. ' + 'Message: %(msg)s', {'name': snap_name, 'msg': ex.message}) def delete_snapshot(self, snapshot_name): @@ -277,13 +276,13 @@ class Client(object): try: snap.delete() except storops_ex.VNXSnapNotExistsError as ex: - LOG.warning(_LW("Snapshot %(name)s may be deleted already. " - "Message: %(msg)s"), + LOG.warning("Snapshot %(name)s may be deleted already. " + "Message: %(msg)s", {'name': snapshot_name, 'msg': ex.message}) except storops_ex.VNXDeleteAttachedSnapError as ex: with excutils.save_and_reraise_exception(): - LOG.warning(_LW("Failed to delete snapshot %(name)s " - "which is in use. Message: %(msg)s"), + LOG.warning("Failed to delete snapshot %(name)s " + "which is in use. Message: %(msg)s", {'name': snapshot_name, 'msg': ex.message}) def copy_snapshot(self, snap_name, new_snap_name): @@ -295,8 +294,8 @@ class Client(object): try: return lun.create_mount_point(name=smp_name) except storops_ex.VNXLunNameInUseError as ex: - LOG.warning(_LW('Mount point %(name)s already exists. ' - 'Message: %(msg)s'), + LOG.warning('Mount point %(name)s already exists. ' + 'Message: %(msg)s', {'name': smp_name, 'msg': ex.message}) # Ignore the failure that due to retry. return self.vnx.get_lun(name=smp_name) @@ -306,9 +305,9 @@ class Client(object): try: lun.attach_snap(snap=snap_name) except storops_ex.VNXSnapAlreadyMountedError as ex: - LOG.warning(_LW("Snapshot %(snap_name)s is attached to " - "snapshot mount point %(smp_name)s already. " - "Message: %(msg)s"), + LOG.warning("Snapshot %(snap_name)s is attached to " + "snapshot mount point %(smp_name)s already. " + "Message: %(msg)s", {'snap_name': snap_name, 'smp_name': smp_name, 'msg': ex.message}) @@ -318,8 +317,8 @@ class Client(object): try: lun.detach_snap() except storops_ex.VNXSnapNotAttachedError as ex: - LOG.warning(_LW("Snapshot mount point %(smp_name)s is not " - "currently attached. Message: %(msg)s"), + LOG.warning("Snapshot mount point %(smp_name)s is not " + "currently attached. 
Message: %(msg)s", {'smp_name': smp_name, 'msg': ex.message}) def modify_snapshot(self, snap_name, allow_rw=None, @@ -417,7 +416,7 @@ class Client(object): try: lun.enable_compression(ignore_thresholds=True) except storops_ex.VNXCompressionAlreadyEnabledError: - LOG.warning(_LW("Compression has already been enabled on %s."), + LOG.warning("Compression has already been enabled on %s.", lun.name) def get_vnx_enabler_status(self): @@ -433,8 +432,8 @@ class Client(object): self.sg_cache[name] = self.vnx.create_sg(name) except storops_ex.VNXStorageGroupNameInUseError as ex: # Ignore the failure due to retry - LOG.warning(_LW('Storage group %(name)s already exists. ' - 'Message: %(msg)s'), + LOG.warning('Storage group %(name)s already exists. ' + 'Message: %(msg)s', {'name': name, 'msg': ex.message}) self.sg_cache[name] = self.vnx.get_sg(name=name) @@ -469,8 +468,8 @@ class Client(object): storage_group.connect_hba(port, initiator_id, host.name, host_ip=host.ip) except storops_ex.VNXStorageGroupError as ex: - LOG.warning(_LW('Failed to set path to port %(port)s for ' - 'initiator %(hba_id)s. Message: %(msg)s'), + LOG.warning('Failed to set path to port %(port)s for ' + 'initiator %(hba_id)s. Message: %(msg)s', {'port': port, 'hba_id': initiator_id, 'msg': ex.message}) @@ -499,9 +498,9 @@ class Client(object): except storops_ex.VNXNoHluAvailableError as ex: with excutils.save_and_reraise_exception(): # Reach the max times of retry, fail the attach action. - LOG.error(_LE('Failed to add %(lun)s into %(sg)s after ' - '%(tried)s tries. Reach the max retry times. ' - 'Message: %(msg)s'), + LOG.error('Failed to add %(lun)s into %(sg)s after ' + '%(tried)s tries. Reach the max retry times. ' + 'Message: %(msg)s', {'lun': lun.lun_id, 'sg': storage_group.name, 'tried': max_retries, 'msg': ex.message}) diff --git a/cinder/volume/drivers/dell_emc/vnx/common.py b/cinder/volume/drivers/dell_emc/vnx/common.py index 3a1d723fa9e..cd3ec459f98 100644 --- a/cinder/volume/drivers/dell_emc/vnx/common.py +++ b/cinder/volume/drivers/dell_emc/vnx/common.py @@ -23,7 +23,7 @@ from oslo_utils import importutils storops = importutils.try_import('storops') from cinder import exception -from cinder.i18n import _, _LW +from cinder.i18n import _ from cinder.volume.drivers.dell_emc.vnx import const from cinder.volume import volume_types @@ -201,9 +201,9 @@ class ExtraSpecs(object): :param enabler_status: Instance of VNXEnablerStatus """ if "storagetype:pool" in self.specs: - LOG.warning(_LW("Extra spec key 'storagetype:pool' is obsoleted " - "since driver version 5.1.0. This key will be " - "ignored.")) + LOG.warning("Extra spec key 'storagetype:pool' is obsoleted " + "since driver version 5.1.0. 
This key will be " + "ignored.") if (self._provision == storops.VNXProvisionEnum.DEDUPED and self._tier is not None): @@ -417,7 +417,7 @@ class ReplicationDeviceList(list): device = self._device_map[backend_id] except KeyError: device = None - LOG.warning(_LW('Unable to find secondary device named: %s'), + LOG.warning('Unable to find secondary device named: %s', backend_id) return device @@ -483,7 +483,7 @@ class VNXMirrorView(object): mv = self.primary_client.get_mirror(mirror_name) if not mv.existed: # We will skip the mirror operations if not existed - LOG.warning(_LW('Mirror view %s was deleted already.'), + LOG.warning('Mirror view %s was deleted already.', mirror_name) return self.fracture_image(mirror_name) diff --git a/cinder/volume/drivers/dell_emc/vnx/taskflows.py b/cinder/volume/drivers/dell_emc/vnx/taskflows.py index 340badfbc3f..21717e9c9fa 100644 --- a/cinder/volume/drivers/dell_emc/vnx/taskflows.py +++ b/cinder/volume/drivers/dell_emc/vnx/taskflows.py @@ -24,10 +24,10 @@ from taskflow import task from taskflow.types import failure from cinder import exception +from cinder.i18n import _ from cinder.volume.drivers.dell_emc.vnx import common from cinder.volume.drivers.dell_emc.vnx import const from cinder.volume.drivers.dell_emc.vnx import utils -from cinder.i18n import _, _LI, _LW LOG = logging.getLogger(__name__) @@ -60,8 +60,8 @@ class MigrateLunTask(task.Task): def revert(self, result, client, src_id, dst_id, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ - LOG.warning(_LW('%(method)s: cleanup migration session: ' - '%(src_id)s -> %(dst_id)s.'), + LOG.warning('%(method)s: cleanup migration session: ' + '%(src_id)s -> %(dst_id)s.', {'method': method_name, 'src_id': src_id, 'dst_id': dst_id}) @@ -98,7 +98,7 @@ class CreateLunTask(task.Task): if isinstance(result, failure.Failure): return else: - LOG.warning(_LW('%(method_name)s: delete lun %(lun_name)s'), + LOG.warning('%(method_name)s: delete lun %(lun_name)s', {'method_name': method_name, 'lun_name': lun_name}) client.delete_lun(lun_name) @@ -117,9 +117,9 @@ class CopySnapshotTask(task.Task): def revert(self, result, client, snap_name, new_snap_name, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ - LOG.warning(_LW('%(method_name)s: delete the ' - 'copied snapshot %(new_name)s of ' - '%(source_name)s.'), + LOG.warning('%(method_name)s: delete the ' + 'copied snapshot %(new_name)s of ' + '%(source_name)s.', {'method_name': method_name, 'new_name': new_snap_name, 'source_name': snap_name}) @@ -146,7 +146,7 @@ class CreateSMPTask(task.Task): def revert(self, result, client, smp_name, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ - LOG.warning(_LW('%(method_name)s: delete mount point %(name)s'), + LOG.warning('%(method_name)s: delete mount point %(name)s', {'method_name': method_name, 'name': smp_name}) client.delete_lun(smp_name) @@ -164,7 +164,7 @@ class AttachSnapTask(task.Task): def revert(self, result, client, smp_name, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ - LOG.warning(_LW('%(method_name)s: detach mount point %(smp_name)s'), + LOG.warning('%(method_name)s: detach mount point %(smp_name)s', {'method_name': method_name, 'smp_name': smp_name}) client.detach_snapshot(smp_name) @@ -178,15 +178,15 @@ class CreateSnapshotTask(task.Task): def execute(self, client, snap_name, lun_id, keep_for=None, *args, **kwargs): LOG.debug('%s.execute', self.__class__.__name__) - LOG.info(_LI('Create snapshot: %(snapshot)s: lun: %(lun)s'), + 
LOG.info('Create snapshot: %(snapshot)s: lun: %(lun)s', {'snapshot': snap_name, 'lun': lun_id}) client.create_snapshot(lun_id, snap_name, keep_for=keep_for) def revert(self, result, client, snap_name, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ - LOG.warning(_LW('%(method_name)s: ' - 'delete temp snapshot %(snap_name)s'), + LOG.warning('%(method_name)s: ' + 'delete temp snapshot %(snap_name)s', {'method_name': method_name, 'snap_name': snap_name}) client.delete_snapshot(snap_name) @@ -201,8 +201,8 @@ class ModifySnapshotTask(task.Task): def revert(self, result, client, snap_name, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ - LOG.warning(_LW('%(method_name)s: ' - 'setting snapshot %(snap_name)s to read-only.'), + LOG.warning('%(method_name)s: ' + 'setting snapshot %(snap_name)s to read-only.', {'method_name': method_name, 'snap_name': snap_name}) client.modify_snapshot(snap_name, allow_rw=False) @@ -268,8 +268,8 @@ class CreateCGSnapshotTask(task.Task): def revert(self, client, cg_snap_name, cg_name, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ - LOG.warning(_LW('%(method_name)s: ' - 'deleting CG snapshot %(snap_name)s.'), + LOG.warning('%(method_name)s: ' + 'deleting CG snapshot %(snap_name)s.', {'method_name': method_name, 'snap_name': cg_snap_name}) client.delete_cg_snapshot(cg_snap_name) @@ -288,8 +288,8 @@ class CreateMirrorTask(task.Task): def revert(self, result, mirror, mirror_name, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ - LOG.warning(_LW('%(method)s: removing mirror ' - 'view %(name)s.'), + LOG.warning('%(method)s: removing mirror ' + 'view %(name)s.', {'method': method_name, 'name': mirror_name}) mirror.delete_mirror(mirror_name) @@ -308,8 +308,8 @@ class AddMirrorImageTask(task.Task): def revert(self, result, mirror, mirror_name, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ - LOG.warning(_LW('%(method)s: removing secondary image ' - 'from %(name)s.'), + LOG.warning('%(method)s: removing secondary image ' + 'from %(name)s.', {'method': method_name, 'name': mirror_name}) mirror.remove_image(mirror_name) diff --git a/cinder/volume/drivers/dell_emc/vnx/utils.py b/cinder/volume/drivers/dell_emc/vnx/utils.py index 9af43fd422c..853925544b6 100644 --- a/cinder/volume/drivers/dell_emc/vnx/utils.py +++ b/cinder/volume/drivers/dell_emc/vnx/utils.py @@ -24,7 +24,7 @@ from oslo_utils import importutils storops = importutils.try_import('storops') from cinder import exception -from cinder.i18n import _, _LW +from cinder.i18n import _ from cinder.volume.drivers.dell_emc.vnx import common from cinder.volume.drivers.san.san import san_opts from cinder.volume import utils as vol_utils @@ -139,17 +139,17 @@ def wait_until(condition, timeout=None, interval=common.INTERVAL_5_SEC, def validate_storage_migration(volume, target_host, src_serial, src_protocol): if 'location_info' not in target_host['capabilities']: - LOG.warning(_LW("Failed to get pool name and " - "serial number. 'location_info' " - "from %s."), target_host['host']) + LOG.warning("Failed to get pool name and " + "serial number. 
'location_info' " + "from %s.", target_host['host']) return False info = target_host['capabilities']['location_info'] LOG.debug("Host for migration is %s.", info) try: serial_number = info.split('|')[1] except AttributeError: - LOG.warning(_LW('Error on getting serial number ' - 'from %s.'), target_host['host']) + LOG.warning('Error on getting serial number ' + 'from %s.', target_host['host']) return False if serial_number != src_serial: LOG.debug('Skip storage-assisted migration because ' @@ -253,8 +253,8 @@ def get_migration_rate(volume): if rate.lower() in storops.VNXMigrationRate.values(): return storops.VNXMigrationRate.parse(rate.lower()) else: - LOG.warning(_LW('Unknown migration rate specified, ' - 'using [high] as migration rate.')) + LOG.warning('Unknown migration rate specified, ' + 'using [high] as migration rate.') return storops.VNXMigrationRate.HIGH diff --git a/cinder/volume/drivers/dell_emc/xtremio.py b/cinder/volume/drivers/dell_emc/xtremio.py index 6e5c9f7ef24..377be6c0e0a 100644 --- a/cinder/volume/drivers/dell_emc/xtremio.py +++ b/cinder/volume/drivers/dell_emc/xtremio.py @@ -44,7 +44,7 @@ import six from cinder import context from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import interface from cinder.objects import fields from cinder import utils @@ -156,18 +156,18 @@ class XtremIOClient(object): error = response.json() err_msg = error.get('message') if err_msg.endswith(OBJ_NOT_FOUND_ERR): - LOG.warning(_LW("object %(key)s of " - "type %(typ)s not found, %(err_msg)s"), + LOG.warning("object %(key)s of " + "type %(typ)s not found, %(err_msg)s", {'key': key, 'typ': object_type, 'err_msg': err_msg, }) raise exception.NotFound() elif err_msg == VOL_NOT_UNIQUE_ERR: - LOG.error(_LE("can't create 2 volumes with the same name, %s"), + LOG.error("can't create 2 volumes with the same name, %s", err_msg) - msg = (_('Volume by this name already exists')) + msg = _('Volume by this name already exists') raise exception.VolumeBackendAPIException(data=msg) elif err_msg == VOL_OBJ_NOT_FOUND_ERR: - LOG.error(_LE("Can't find volume to map %(key)s, %(msg)s"), + LOG.error("Can't find volume to map %(key)s, %(msg)s", {'key': key, 'msg': err_msg, }) raise exception.VolumeNotFound(volume_id=key) elif ALREADY_MAPPED_ERR in err_msg: @@ -338,8 +338,7 @@ class XtremIOClient4(XtremIOClient): self.req(typ, 'PUT', data, idx=int(idx)) except exception.VolumeBackendAPIException: # reverting - msg = _LE('Failed to rename the created snapshot, reverting.') - LOG.error(msg) + LOG.error('Failed to rename the created snapshot, reverting.') self.req(typ, 'DELETE', idx=int(idx)) raise @@ -404,7 +403,7 @@ class XtremIOVolumeDriver(san.SanDriver): LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: - LOG.info(_LI('XtremIO SW version %s'), version_text) + LOG.info('XtremIO SW version %s', version_text) if ver[0] >= 4: self.client = XtremIOClient4(self.configuration, self.cluster_id) @@ -466,8 +465,8 @@ class XtremIOVolumeDriver(san.SanDriver): try: self.extend_volume(volume, volume['size']) except Exception: - LOG.error(_LE('failes to extend volume %s, ' - 'reverting clone operation'), volume['id']) + LOG.error('failes to extend volume %s, ' + 'reverting clone operation', volume['id']) # remove the volume in case resize failed self.delete_volume(volume) raise @@ -481,7 +480,7 @@ class XtremIOVolumeDriver(san.SanDriver): try: self.client.req('volumes', 'DELETE', name=volume.name_id) except exception.NotFound: - 
LOG.info(_LI("volume %s doesn't exist"), volume.name_id) + LOG.info("volume %s doesn't exist", volume.name_id) def create_snapshot(self, snapshot): """Creates a snapshot.""" @@ -492,7 +491,7 @@ class XtremIOVolumeDriver(san.SanDriver): try: self.client.req('volumes', 'DELETE', name=snapshot.id) except exception.NotFound: - LOG.info(_LI("snapshot %s doesn't exist"), snapshot.id) + LOG.info("snapshot %s doesn't exist", snapshot.id) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): @@ -505,8 +504,8 @@ class XtremIOVolumeDriver(san.SanDriver): data = {'name': original_name} self.client.req('volumes', 'PUT', data, name=current_name) except exception.VolumeBackendAPIException: - LOG.error(_LE('Unable to rename the logical volume ' - 'for volume: %s'), original_name) + LOG.error('Unable to rename the logical volume ' + 'for volume: %s', original_name) # If the rename fails, _name_id should be set to the new # volume id and provider_location should be set to the # one from the new volume as well. @@ -603,8 +602,8 @@ class XtremIOVolumeDriver(san.SanDriver): self.client.req('volumes', 'PUT', name=volume['id'], data={'vol-name': volume['name'] + '-unmanged'}) except exception.NotFound: - LOG.info(_LI("%(typ)s with the name %(name)s wasn't found, " - "can't unmanage") % + LOG.info("%(typ)s with the name %(name)s wasn't found, " + "can't unmanage", {'typ': 'Snapshot' if is_snapshot else 'Volume', 'name': volume['id']}) raise exception.VolumeNotFound(volume_id=volume['id']) @@ -644,7 +643,7 @@ class XtremIOVolumeDriver(san.SanDriver): try: self.client.req('lun-maps', 'DELETE', name=lm_name) except exception.NotFound: - LOG.warning(_LW("terminate_connection: lun map not found")) + LOG.warning("terminate_connection: lun map not found") def _get_password(self): return ''.join(RANDOM.choice @@ -659,9 +658,9 @@ class XtremIOVolumeDriver(san.SanDriver): res = self.client.req('lun-maps', 'POST', data) lunmap = self._obj_from_result(res) - LOG.info(_LI('Created lun-map:\n%s'), lunmap) + LOG.info('Created lun-map:\n%s', lunmap) except exception.XtremIOAlreadyMappedError: - LOG.info(_LI('Volume already mapped, retrieving %(ig)s, %(vol)s'), + LOG.info('Volume already mapped, retrieving %(ig)s, %(vol)s', {'ig': ig, 'vol': volume['id']}) lunmap = self.client.find_lunmap(ig, volume['id']) return lunmap @@ -993,8 +992,7 @@ class XtremIOISCSIDriver(XtremIOVolumeDriver, driver.ISCSIDriver): discovery_chap) # if CHAP was enabled after the initiator was created if login_chap and not login_passwd: - LOG.info(_LI('initiator has no password while using chap,' - 'adding it')) + LOG.info('Initiator has no password while using chap, adding it.') data = {} (login_passwd, d_passwd) = self._add_auth(data, login_chap, discovery_chap and diff --git a/cinder/volume/drivers/dothill/dothill_client.py b/cinder/volume/drivers/dothill/dothill_client.py index 4f9fb271d3e..340c865b00a 100644 --- a/cinder/volume/drivers/dothill/dothill_client.py +++ b/cinder/volume/drivers/dothill/dothill_client.py @@ -26,7 +26,7 @@ import requests import six from cinder import exception -from cinder.i18n import _, _LE, _LW, _LI +from cinder.i18n import _ from cinder import utils LOG = logging.getLogger(__name__) @@ -80,7 +80,7 @@ class DotHillClient(object): return except exception.DotHillConnectionError: not_responding = self._curr_ip_addr - LOG.exception(_LE('session_login failed to connect to %s'), + LOG.exception('session_login failed to connect to %s', self._curr_ip_addr) # Loop through the remaining management 
addresses # to find one that's up. @@ -92,7 +92,7 @@ class DotHillClient(object): self._get_session_key() return except exception.DotHillConnectionError: - LOG.error(_LE('Failed to connect to %s'), + LOG.error('Failed to connect to %s', self._curr_ip_addr) continue raise exception.DotHillConnectionError( @@ -172,20 +172,20 @@ class DotHillClient(object): return self._api_request(path, *args, **kargs) except exception.DotHillConnectionError as e: if tries_left < 1: - LOG.error(_LE("Array Connection error: " - "%s (no more retries)"), e.msg) + LOG.error("Array Connection error: " + "%s (no more retries)", e.msg) raise # Retry on any network connection errors, SSL errors, etc - LOG.error(_LE("Array Connection error: %s (retrying)"), e.msg) + LOG.error("Array Connection error: %s (retrying)", e.msg) except exception.DotHillRequestError as e: if tries_left < 1: - LOG.error(_LE("Array Request error: %s (no more retries)"), + LOG.error("Array Request error: %s (no more retries)", e.msg) raise # Retry specific errors which may succeed if we log in again # -10027 => The user is not recognized on this system. if '(-10027)' in e.msg: - LOG.error(_LE("Array Request error: %s (retrying)"), e.msg) + LOG.error("Array Request error: %s (retrying)", e.msg) else: raise @@ -248,7 +248,7 @@ class DotHillClient(object): # -10186 => The specified name is already in use. # This can occur during controller failover. if '(-10186)' in e.msg: - LOG.warning(_LW("Ignoring error in create volume: %s"), e.msg) + LOG.warning("Ignoring error in create volume: %s", e.msg) return None raise @@ -261,8 +261,8 @@ class DotHillClient(object): # -10075 => The specified volume was not found. # This can occur during controller failover. if '(-10075)' in e.msg: - LOG.warning(_LW("Ignorning error while deleting %(volume)s:" - " %(reason)s"), + LOG.warning("Ignorning error while deleting %(volume)s:" + " %(reason)s", {'volume': name, 'reason': e.msg}) return raise @@ -277,8 +277,8 @@ class DotHillClient(object): # -10186 => The specified name is already in use. # This can occur during controller failover. if '(-10186)' in e.msg: - LOG.warning(_LW("Ignoring error attempting to create snapshot:" - " %s"), e.msg) + LOG.warning("Ignoring error attempting to create snapshot:" + " %s", e.msg) return None def delete_snapshot(self, snap_name): @@ -288,7 +288,7 @@ class DotHillClient(object): # -10050 => The volume was not found on this system. # This can occur during controller failover. 
if '(-10050)' in e.msg: - LOG.warning(_LW("Ignoring unmap error -10050: %s"), e.msg) + LOG.warning("Ignoring unmap error -10050: %s", e.msg) return None raise @@ -381,8 +381,8 @@ class DotHillClient(object): except exception.DotHillRequestError as e: # -10058: The host identifier or nickname is already in use if '(-10058)' in e.msg: - LOG.error(_LE("While trying to create host nickname" - " %(nickname)s: %(error_msg)s"), + LOG.error("While trying to create host nickname" + " %(nickname)s: %(error_msg)s", {'nickname': hostname, 'error_msg': e.msg}) else: @@ -400,9 +400,9 @@ class DotHillClient(object): except exception.DotHillRequestError as e: # -3177 => "The specified LUN overlaps a previously defined LUN if '(-3177)' in e.msg: - LOG.info(_LI("Unable to map volume" - " %(volume_name)s to lun %(lun)d:" - " %(reason)s"), + LOG.info("Unable to map volume" + " %(volume_name)s to lun %(lun)d:" + " %(reason)s", {'volume_name': volume_name, 'lun': lun, 'reason': e.msg}) lun = self._get_next_available_lun_for_host(host, @@ -410,8 +410,8 @@ class DotHillClient(object): continue raise except Exception as e: - LOG.error(_LE("Error while mapping volume" - " %(volume_name)s to lun %(lun)d:"), + LOG.error("Error while mapping volume" + " %(volume_name)s to lun %(lun)d:", {'volume_name': volume_name, 'lun': lun}, e) raise @@ -430,7 +430,7 @@ class DotHillClient(object): # -10050 => The volume was not found on this system. # This can occur during controller failover. if '(-10050)' in e.msg: - LOG.warning(_LW("Ignoring unmap error -10050: %s"), e.msg) + LOG.warning("Ignoring unmap error -10050: %s", e.msg) return None raise @@ -481,7 +481,7 @@ class DotHillClient(object): break else: if count >= 5: - LOG.error(_LE('Error in copying volume: %s'), src_name) + LOG.error('Error in copying volume: %s', src_name) raise exception.DotHillRequestError time.sleep(1) diff --git a/cinder/volume/drivers/dothill/dothill_common.py b/cinder/volume/drivers/dothill/dothill_common.py index c56a83f4e54..12ac6e15fa4 100644 --- a/cinder/volume/drivers/dothill/dothill_common.py +++ b/cinder/volume/drivers/dothill/dothill_common.py @@ -26,7 +26,7 @@ from oslo_config import cfg from oslo_log import log as logging from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _ from cinder.objects import fields from cinder.volume.drivers.dothill import dothill_client as dothill @@ -176,7 +176,7 @@ class DotHillCommon(object): self.backend_name, self.backend_type) except exception.DotHillRequestError as ex: - LOG.exception(_LE("Creation of volume %s failed."), volume['id']) + LOG.exception("Creation of volume %s failed.", volume['id']) raise exception.Invalid(ex) finally: @@ -201,7 +201,7 @@ class DotHillCommon(object): """ if (volume['status'] != "available" or volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED): - LOG.error(_LE("Volume must be detached for clone operation.")) + LOG.error("Volume must be detached for clone operation.") raise exception.VolumeAttached(volume_id=volume['id']) def create_cloned_volume(self, volume, src_vref): @@ -223,7 +223,7 @@ class DotHillCommon(object): self.client.copy_volume(orig_name, dest_name, self.backend_name, self.backend_type) except exception.DotHillRequestError as ex: - LOG.exception(_LE("Cloning of volume %s failed."), + LOG.exception("Cloning of volume %s failed.", src_vref['id']) raise exception.Invalid(ex) finally: @@ -246,7 +246,7 @@ class DotHillCommon(object): self.client.copy_volume(orig_name, dest_name, self.backend_name, self.backend_type) 
except exception.DotHillRequestError as ex: - LOG.exception(_LE("Create volume failed from snapshot: %s"), + LOG.exception("Create volume failed from snapshot: %s", snapshot['id']) raise exception.Invalid(ex) finally: @@ -269,7 +269,7 @@ class DotHillCommon(object): # if the volume wasn't found, ignore the error if 'The volume was not found on this system.' in ex.args: return - LOG.exception(_LE("Deletion of volume %s failed."), volume['id']) + LOG.exception("Deletion of volume %s failed.", volume['id']) raise exception.Invalid(ex) finally: self.client_logout() @@ -331,7 +331,7 @@ class DotHillCommon(object): connector_element) return data except exception.DotHillRequestError as ex: - LOG.exception(_LE("Error mapping volume: %s"), volume_name) + LOG.exception("Error mapping volume: %s", volume_name) raise exception.Invalid(ex) def unmap_volume(self, volume, connector, connector_element): @@ -347,7 +347,7 @@ class DotHillCommon(object): connector, connector_element) except exception.DotHillRequestError as ex: - LOG.exception(_LE("Error unmapping volume: %s"), volume_name) + LOG.exception("Error unmapping volume: %s", volume_name) raise exception.Invalid(ex) finally: self.client_logout() @@ -356,21 +356,21 @@ class DotHillCommon(object): try: return self.client.get_active_fc_target_ports() except exception.DotHillRequestError as ex: - LOG.exception(_LE("Error getting active FC target ports.")) + LOG.exception("Error getting active FC target ports.") raise exception.Invalid(ex) def get_active_iscsi_target_iqns(self): try: return self.client.get_active_iscsi_target_iqns() except exception.DotHillRequestError as ex: - LOG.exception(_LE("Error getting active ISCSI target iqns.")) + LOG.exception("Error getting active ISCSI target iqns.") raise exception.Invalid(ex) def get_active_iscsi_target_portals(self): try: return self.client.get_active_iscsi_target_portals() except exception.DotHillRequestError as ex: - LOG.exception(_LE("Error getting active ISCSI target portals.")) + LOG.exception("Error getting active ISCSI target portals.") raise exception.Invalid(ex) def create_snapshot(self, snapshot): @@ -387,7 +387,7 @@ class DotHillCommon(object): try: self.client.create_snapshot(vol_name, snap_name) except exception.DotHillRequestError as ex: - LOG.exception(_LE("Creation of snapshot failed for volume: %s"), + LOG.exception("Creation of snapshot failed for volume: %s", snapshot['volume_id']) raise exception.Invalid(ex) finally: @@ -404,7 +404,7 @@ class DotHillCommon(object): # if the volume wasn't found, ignore the error if 'The volume was not found on this system.' 
in ex.args: return - LOG.exception(_LE("Deleting snapshot %s failed"), snapshot['id']) + LOG.exception("Deleting snapshot %s failed", snapshot['id']) raise exception.Invalid(ex) finally: self.client_logout() @@ -428,7 +428,7 @@ class DotHillCommon(object): try: self.client.extend_volume(volume_name, "%dGiB" % growth_size) except exception.DotHillRequestError as ex: - LOG.exception(_LE("Extension of volume %s failed."), volume['id']) + LOG.exception("Extension of volume %s failed.", volume['id']) raise exception.Invalid(ex) finally: self.client_logout() @@ -437,14 +437,14 @@ class DotHillCommon(object): try: return self.client.get_chap_record(initiator_name) except exception.DotHillRequestError as ex: - LOG.exception(_LE("Error getting chap record.")) + LOG.exception("Error getting chap record.") raise exception.Invalid(ex) def create_chap_record(self, initiator_name, chap_secret): try: self.client.create_chap_record(initiator_name, chap_secret) except exception.DotHillRequestError as ex: - LOG.exception(_LE("Error creating chap record.")) + LOG.exception("Error creating chap record.") raise exception.Invalid(ex) def migrate_volume(self, volume, host): @@ -489,7 +489,7 @@ class DotHillCommon(object): self.client.modify_volume_name(dest_name, source_name) return (True, None) except exception.DotHillRequestError as ex: - LOG.exception(_LE("Error migrating volume: %s"), source_name) + LOG.exception("Error migrating volume: %s", source_name) raise exception.Invalid(ex) finally: self.client_logout() @@ -512,7 +512,7 @@ class DotHillCommon(object): self.client.modify_volume_name(target_vol_name, modify_target_vol_name) except exception.DotHillRequestError as ex: - LOG.exception(_LE("Error manage existing volume.")) + LOG.exception("Error manage existing volume.") raise exception.Invalid(ex) finally: self.client_logout() @@ -530,7 +530,7 @@ class DotHillCommon(object): size = self.client.get_volume_size(target_vol_name) return size except exception.DotHillRequestError as ex: - LOG.exception(_LE("Error manage existing get volume size.")) + LOG.exception("Error manage existing get volume size.") raise exception.Invalid(ex) finally: self.client_logout() diff --git a/cinder/volume/drivers/drbdmanagedrv.py b/cinder/volume/drivers/drbdmanagedrv.py index 2fb74307b38..65779ef6073 100644 --- a/cinder/volume/drivers/drbdmanagedrv.py +++ b/cinder/volume/drivers/drbdmanagedrv.py @@ -37,7 +37,7 @@ from oslo_utils import units from cinder import exception -from cinder.i18n import _, _LW, _LI, _LE +from cinder.i18n import _ from cinder import interface from cinder.volume import driver @@ -194,7 +194,7 @@ class DrbdManageBaseDriver(driver.VolumeDriver): try: return fn(*args) except dbus.DBusException as e: - LOG.warning(_LW("Got disconnected; trying to reconnect. (%s)"), e) + LOG.warning("Got disconnected; trying to reconnect. (%s)", e) self.dbus_connect() # Old function object is invalid, get new one. return getattr(self.odm, fn._method_name)(*args) @@ -354,8 +354,8 @@ class DrbdManageBaseDriver(driver.VolumeDriver): retry += 1 # Not yet - LOG.warning(_LW('Try #%(try)d: Volume "%(res)s"/%(vol)d ' - 'not yet deployed on "%(host)s", waiting.'), + LOG.warning('Try #%(try)d: Volume "%(res)s"/%(vol)d ' + 'not yet deployed on "%(host)s", waiting.', {'try': retry, 'host': nodenames, 'res': res_name, 'vol': vol_nr}) @@ -771,9 +771,9 @@ class DrbdManageBaseDriver(driver.VolumeDriver): if not d_res_name: # resource already gone? 
- LOG.warning(_LW("snapshot: %s not found, " - "skipping delete operation"), snapshot['id']) - LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id']) + LOG.warning("snapshot: %s not found, " + "skipping delete operation", snapshot['id']) + LOG.info('Successfully deleted snapshot: %s', snapshot['id']) return True res = self.call_or_reconnect(self.odm.remove_snapshot, @@ -1035,7 +1035,7 @@ class DrbdManageDrbdDriver(DrbdManageBaseDriver): if len(data) < 1: # already removed?! - LOG.info(_LI('DRBD connection for %s already removed'), + LOG.info('DRBD connection for %s already removed', volume['id']) elif len(data) == 1: __, __, props, __ = data[0] @@ -1062,7 +1062,7 @@ class DrbdManageDrbdDriver(DrbdManageBaseDriver): self._check_result(res, ignore=[dm_exc.DM_ENOENT]) else: # more than one assignment? - LOG.error(_LE("DRBDmanage: too many assignments returned.")) + LOG.error("DRBDmanage: too many assignments returned.") return def remove_export(self, context, volume): diff --git a/cinder/volume/drivers/falconstor/fc.py b/cinder/volume/drivers/falconstor/fc.py index 3b2a6b5ae4e..b2af0d94ca9 100644 --- a/cinder/volume/drivers/falconstor/fc.py +++ b/cinder/volume/drivers/falconstor/fc.py @@ -20,7 +20,7 @@ This driver requires FSS-8.00-8865 or later. from oslo_log import log as logging from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _ from cinder import interface import cinder.volume.driver from cinder.volume.drivers.falconstor import fss_common @@ -71,8 +71,8 @@ class FSSFCDriver(fss_common.FalconstorBaseDriver, def validate_connector(self, connector): """Check connector for at least one enabled FC protocol.""" if 'FC' == self._storage_protocol and 'wwpns' not in connector: - LOG.error(_LE('The connector does not contain the required ' - 'information.')) + LOG.error('The connector does not contain the required ' + 'information.') raise exception.InvalidConnectorException(missing='wwpns') @fczm_utils.add_fc_zone diff --git a/cinder/volume/drivers/falconstor/fss_common.py b/cinder/volume/drivers/falconstor/fss_common.py index dc96a64a69b..6b7cc5b2b19 100644 --- a/cinder/volume/drivers/falconstor/fss_common.py +++ b/cinder/volume/drivers/falconstor/fss_common.py @@ -27,7 +27,7 @@ from oslo_utils import units import six from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder.volume.drivers.falconstor import rest_proxy from cinder.volume.drivers.san import san @@ -67,7 +67,7 @@ class FalconstorBaseDriver(san.SanDriver): def do_setup(self, context): self.proxy.do_setup() - LOG.info(_LI('Activate FalconStor cinder volume driver.')) + LOG.info('Activate FalconStor cinder volume driver.') def check_for_setup_error(self): if self.proxy.session_id is None: @@ -196,7 +196,7 @@ class FalconstorBaseDriver(san.SanDriver): except rest_proxy.FSSHTTPError as err: with excutils.save_and_reraise_exception() as ctxt: ctxt.reraise = False - LOG.warning(_LW("Volume deletion failed with message: %s"), + LOG.warning("Volume deletion failed with message: %s", err.reason) def create_snapshot(self, snapshot): @@ -214,7 +214,7 @@ class FalconstorBaseDriver(san.SanDriver): with excutils.save_and_reraise_exception() as ctxt: ctxt.reraise = False LOG.error( - _LE("Snapshot deletion failed with message: %s"), + "Snapshot deletion failed with message: %s", err.reason) def create_volume_from_snapshot(self, volume, snapshot): @@ -231,10 +231,10 @@ class FalconstorBaseDriver(san.SanDriver): except rest_proxy.FSSHTTPError as err: 
with excutils.save_and_reraise_exception() as ctxt: ctxt.reraise = False - LOG.error(_LE( + LOG.error( "Resizing %(id)s failed with message: %(msg)s. " - "Cleaning volume."), {'id': volume["id"], - 'msg': err.reason}) + "Cleaning volume.", {'id': volume["id"], + 'msg': err.reason}) if type(volume['metadata']) is dict: fss_metadata.update(volume['metadata']) @@ -286,7 +286,7 @@ class FalconstorBaseDriver(san.SanDriver): self._stats = data except Exception as exc: - LOG.error(_LE('Cannot get volume status %(exc)s.'), + LOG.error('Cannot get volume status %(exc)s.', {'exc': exc}) return self._stats diff --git a/cinder/volume/drivers/falconstor/rest_proxy.py b/cinder/volume/drivers/falconstor/rest_proxy.py index 2eb3375d951..2d11837a410 100644 --- a/cinder/volume/drivers/falconstor/rest_proxy.py +++ b/cinder/volume/drivers/falconstor/rest_proxy.py @@ -25,7 +25,7 @@ from oslo_utils import units from six.moves import http_client from cinder import exception -from cinder.i18n import _, _LI, _LW +from cinder.i18n import _ FSS_BATCH = 'batch' FSS_PHYSICALRESOURCE = 'physicalresource' @@ -760,7 +760,7 @@ class RESTProxy(object): if (err.code == 2415984845 and "XML_ERROR_CLIENT_EXIST" in err.text): ctxt.reraise = False - LOG.warning(_LW('Assign volume failed with message: %(msg)s.'), + LOG.warning('Assign volume failed with message: %(msg)s.', {"msg": err.reason}) finally: lun = self.FSS._get_fc_client_info(client_id, vid) @@ -804,8 +804,8 @@ class RESTProxy(object): "XML_ERROR_VIRTUAL_DEV_NOT_ASSIGNED_TO_iSCSI_TARGET" in err.text): ctxt.reraise = False - LOG.warning(_LW('Disconnection failed with message: ' - "%(msg)s."), {"msg": err.reason}) + LOG.warning('Disconnection failed with message: %(msg)s.', + {"msg": err.reason}) return client_id def initialize_connection_iscsi(self, volume, connector, fss_hosts): @@ -842,7 +842,7 @@ class RESTProxy(object): "XML_ERROR_VIRTUAL_DEV_ASSIGNED_TO_iSCSI_TARGET" in err.text): ctxt.reraise = False - LOG.warning(_LW("Assign volume failed with message: %(msg)s."), + LOG.warning("Assign volume failed with message: %(msg)s.", {"msg": err.reason}) finally: (lun, target_name) = self.FSS._get_iscsi_target_info(client_id, @@ -872,8 +872,8 @@ class RESTProxy(object): "XML_ERROR_VIRTUAL_DEV_NOT_ASSIGNED_TO_iSCSI_TARGET" in err.text): ctxt.reraise = False - LOG.warning(_LW("Disconnection failed with message: " - "%(msg)s."), {"msg": err.reason}) + LOG.warning("Disconnection failed with message: %(msg)s.", + {"msg": err.reason}) finally: is_empty = self.FSS._check_host_mapping_status(client_id, target_id) @@ -914,8 +914,8 @@ class RESTProxy(object): except FSSHTTPError as err: with excutils.save_and_reraise_exception() as ctxt: ctxt.reraise = False - LOG.warning(_LW("Volume manage_existing_volume was unable " - "to rename the volume, error message: %s."), + LOG.warning("Volume manage_existing_volume was unable " + "to rename the volume, error message: %s.", err.reason) def unmanage(self, volume): @@ -925,8 +925,8 @@ class RESTProxy(object): vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) self.rename_vdev(vid, unmanaged_vol_name) except FSSHTTPError as err: - LOG.warning(_LW("Volume unmanage was unable to rename the volume," - " error message: %(msg)s."), {"msg": err.reason}) + LOG.warning("Volume unmanage was unable to rename the volume," + " error message: %(msg)s.", {"msg": err.reason}) class FSSRestCommon(object): @@ -956,11 +956,11 @@ class FSSRestCommon(object): connection = http_client.HTTPConnection(self.hostip, 80, timeout=60) if self.fss_debug: - 
LOG.info(_LI("[FSS_RESTAPI]====%(method)s@url=%(url)s ====" - "@request_body=%(body)s===") % { - "method": method, - "url": url, - "body": request_body}) + LOG.info("[FSS_RESTAPI]====%(method)s@url=%(url)s ====" + "@request_body=%(body)s===", + {"method": method, + "url": url, + "body": request_body}) attempt = 1 while True: @@ -976,7 +976,7 @@ class FSSRestCommon(object): pass if self.fss_debug: - LOG.info(_LI("[FSS_RESTAPI]==@json_data: %s =="), json_data) + LOG.info("[FSS_RESTAPI]==@json_data: %s ==", json_data) if response.status == 200: return json_data @@ -1002,7 +1002,7 @@ class FSSRestCommon(object): ) raise FSSHTTPError(err_target, err) attempt += 1 - LOG.warning(_LW("Retry with rc: %s."), err_code) + LOG.warning("Retry with rc: %s.", err_code) self._random_sleep(RETRY_INTERVAL) if err_code == 107: self.fss_login() diff --git a/cinder/volume/drivers/fujitsu/eternus_dx_common.py b/cinder/volume/drivers/fujitsu/eternus_dx_common.py index c0283a19377..9e9c653f2e4 100644 --- a/cinder/volume/drivers/fujitsu/eternus_dx_common.py +++ b/cinder/volume/drivers/fujitsu/eternus_dx_common.py @@ -27,7 +27,7 @@ import time from xml.etree.ElementTree import parse from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging @@ -155,8 +155,8 @@ class FJDXCommon(object): def __init__(self, prtcl, configuration=None): if not pywbemAvailable: - LOG.error(_LE('import pywbem failed!! ' - 'pywbem is necessary for this volume driver.')) + LOG.error('import pywbem failed!! ' + 'pywbem is necessary for this volume driver.') self.protocol = prtcl self.configuration = configuration @@ -227,9 +227,9 @@ class FJDXCommon(object): Size=self._pywbem_uint(volumesize, '64')) if rc == VOLUMENAME_IN_USE: # Element Name is in use - LOG.warning(_LW('create_volume, ' - 'volumename: %(volumename)s, ' - 'Element Name is in use.'), + LOG.warning('create_volume, ' + 'volumename: %(volumename)s, ' + 'Element Name is in use.', {'volumename': volumename}) vol_instance = self._find_lun(volume) element = vol_instance @@ -452,8 +452,8 @@ class FJDXCommon(object): vol_instance = self._find_lun(volume) if vol_instance is None: - LOG.info(_LI('_delete_volume_setting, volumename:%(volumename)s, ' - 'volume not found on ETERNUS. 
'), + LOG.info('_delete_volume_setting, volumename:%(volumename)s, ' + 'volume not found on ETERNUS.', {'volumename': volumename}) return False @@ -694,11 +694,11 @@ class FJDXCommon(object): target_lun = mapdata.get('target_lun', None) target_luns = mapdata.get('target_luns', None) - LOG.info(_LI('initialize_connection, ' - 'volume: %(volume)s, ' - 'target_lun: %(target_lun)s, ' - 'target_luns: %(target_luns)s, ' - 'Volume is already mapped.'), + LOG.info('initialize_connection, ' + 'volume: %(volume)s, ' + 'target_lun: %(target_lun)s, ' + 'target_luns: %(target_luns)s, ' + 'Volume is already mapped.', {'volume': volume['name'], 'target_lun': target_lun, 'target_luns': target_luns}) @@ -896,9 +896,9 @@ class FJDXCommon(object): pool['RemainingManagedSpace'] / units.Gi) else: # if pool information is unknown, set 0 GB to capacity information - LOG.warning(_LW('update_volume_stats, ' - 'eternus_pool:%(eternus_pool)s, ' - 'specified pool is not found.'), + LOG.warning('update_volume_stats, ' + 'eternus_pool:%(eternus_pool)s, ' + 'specified pool is not found.', {'eternus_pool': eternus_pool}) self.stats['total_capacity_gb'] = 0 self.stats['free_capacity_gb'] = 0 @@ -1527,8 +1527,8 @@ class FJDXCommon(object): cpsession_instance = self._get_eternus_instance( cpsession, LocalOnly=False) except Exception: - LOG.info(_LI('_delete_copysession, ' - 'The copysession was already completed.')) + LOG.info('_delete_copysession, ' + 'the copysession was already completed.') return copytype = cpsession_instance['CopyType'] @@ -1721,12 +1721,12 @@ class FJDXCommon(object): 'rc': rc}) if rc != 0 and rc != LUNAME_IN_USE: - LOG.warning(_LW('_map_lun, ' - 'lun_name: %(volume_uid)s, ' - 'Initiator: %(initiator)s, ' - 'target: %(target)s, ' - 'Return code: %(rc)lu, ' - 'Error: %(errordesc)s.'), + LOG.warning('_map_lun, ' + 'lun_name: %(volume_uid)s, ' + 'Initiator: %(initiator)s, ' + 'target: %(target)s, ' + 'Return code: %(rc)lu, ' + 'Error: %(errordesc)s.', {'volume_uid': [volume_uid], 'initiator': initiatorlist, 'target': target['Name'], @@ -1754,12 +1754,12 @@ class FJDXCommon(object): 'rc': rc}) if rc != 0 and rc != LUNAME_IN_USE: - LOG.warning(_LW('_map_lun, ' - 'lun_name: %(volume_uid)s, ' - 'Initiator: %(initiator)s, ' - 'ag: %(ag)s, ' - 'Return code: %(rc)lu, ' - 'Error: %(errordesc)s.'), + LOG.warning('_map_lun, ' + 'lun_name: %(volume_uid)s, ' + 'Initiator: %(initiator)s, ' + 'ag: %(ag)s, ' + 'Return code: %(rc)lu, ' + 'Error: %(errordesc)s.', {'volume_uid': [volume_uid], 'initiator': initiatorlist, 'ag': ag, @@ -1881,9 +1881,9 @@ class FJDXCommon(object): volumename = self._create_volume_name(volume['id']) vol_instance = self._find_lun(volume) if vol_instance is None: - LOG.info(_LI('_unmap_lun, ' - 'volumename:%(volumename)s, ' - 'volume not found.'), + LOG.info('_unmap_lun, ' + 'volumename:%(volumename)s, ' + 'volume not found.', {'volumename': volumename}) return False @@ -1892,9 +1892,9 @@ class FJDXCommon(object): if not force: aglist = self._find_affinity_group(connector, vol_instance) if not aglist: - LOG.info(_LI('_unmap_lun, ' - 'volumename: %(volumename)s, ' - 'volume is not mapped.'), + LOG.info('_unmap_lun, ' + 'volumename: %(volumename)s, ' + 'volume is not mapped.', {'volumename': volumename}) return False else: @@ -2091,8 +2091,8 @@ class FJDXCommon(object): if self._is_job_finished(conn, job): raise loopingcall.LoopingCallDone() if self.retries > JOB_RETRIES: - LOG.error(_LE("_wait_for_job_complete, " - "failed after %(retries)d tries."), + LOG.error("_wait_for_job_complete, " + 
"failed after %(retries)d tries.", {'retries': self.retries}) raise loopingcall.LoopingCallDone() @@ -2101,10 +2101,9 @@ class FJDXCommon(object): if not self.wait_for_job_called: if self._is_job_finished(conn, job): self.wait_for_job_called = True - except Exception as e: - LOG.error(_LE("Exception: %s"), e) + except Exception: exceptionMessage = _("Issue encountered waiting for job.") - LOG.error(exceptionMessage) + LOG.exception(exceptionMessage) raise exception.VolumeBackendAPIException(exceptionMessage) self.wait_for_job_called = False diff --git a/cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py b/cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py index ca878227780..50362015a6f 100644 --- a/cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py +++ b/cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py @@ -21,7 +21,6 @@ iSCSI Cinder Volume driver for Fujitsu ETERNUS DX S3 series. """ import six -from cinder.i18n import _LI from cinder import interface from cinder.volume import driver from cinder.volume.drivers.fujitsu import eternus_dx_common @@ -51,8 +50,7 @@ class FJDXISCSIDriver(driver.ISCSIDriver): def create_volume(self, volume): """Create volume.""" - LOG.info(_LI('create_volume, ' - 'volume id: %s, Enter method.'), volume['id']) + LOG.info('create_volume, volume id: %s, Enter method.', volume['id']) element_path, metadata = self.common.create_volume(volume) @@ -64,14 +62,14 @@ class FJDXISCSIDriver(driver.ISCSIDriver): v_metadata = volume.get('metadata', {}) metadata.update(v_metadata) - LOG.info(_LI('create_volume, info: %s, Exit method.'), metadata) + LOG.info('create_volume, info: %s, Exit method.', metadata) return {'provider_location': six.text_type(element_path), 'metadata': metadata} def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" - LOG.info(_LI('create_volume_from_snapshot, ' - 'volume id: %(vid)s, snap id: %(sid)s, Enter method.'), + LOG.info('create_volume_from_snapshot, ' + 'volume id: %(vid)s, snap id: %(sid)s, Enter method.', {'vid': volume['id'], 'sid': snapshot['id']}) element_path, metadata = ( @@ -85,16 +83,16 @@ class FJDXISCSIDriver(driver.ISCSIDriver): v_metadata = volume.get('metadata', {}) metadata.update(v_metadata) - LOG.info(_LI('create_volume_from_snapshot, ' - 'info: %s, Exit method.'), metadata) + LOG.info('create_volume_from_snapshot, ' + 'info: %s, Exit method.', metadata) return {'provider_location': six.text_type(element_path), 'metadata': metadata} def create_cloned_volume(self, volume, src_vref): """Create cloned volume.""" - LOG.info(_LI('create_cloned_volume, ' - 'target volume id: %(tid)s, ' - 'source volume id: %(sid)s, Enter method.'), + LOG.info('create_cloned_volume, ' + 'target volume id: %(tid)s, ' + 'source volume id: %(sid)s, Enter method.', {'tid': volume['id'], 'sid': src_vref['id']}) element_path, metadata = ( @@ -108,43 +106,39 @@ class FJDXISCSIDriver(driver.ISCSIDriver): v_metadata = volume.get('metadata', {}) metadata.update(v_metadata) - LOG.info(_LI('create_cloned_volume, ' - 'info: %s, Exit method.'), metadata) + LOG.info('create_cloned_volume, info: %s, Exit method.', metadata) return {'provider_location': six.text_type(element_path), 'metadata': metadata} def delete_volume(self, volume): """Delete volume on ETERNUS.""" - LOG.info(_LI('delete_volume, ' - 'volume id: %s, Enter method.'), volume['id']) + LOG.info('delete_volume, volume id: %s, Enter method.', volume['id']) vol_exist = self.common.delete_volume(volume) - LOG.info(_LI('delete_volume, ' - 'delete: %s, Exit method.'), 
vol_exist) + LOG.info('delete_volume, delete: %s, Exit method.', vol_exist) return def create_snapshot(self, snapshot): """Creates a snapshot.""" - LOG.info(_LI('create_snapshot, ' - 'snap id: %(sid)s, volume id: %(vid)s, Enter method.'), + LOG.info('create_snapshot, snap id: %(sid)s, volume id: %(vid)s, ' + 'Enter method.', {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) element_path, metadata = self.common.create_snapshot(snapshot) - LOG.info(_LI('create_snapshot, info: %s, Exit method.'), metadata) + LOG.info('create_snapshot, info: %s, Exit method.', metadata) return {'provider_location': six.text_type(element_path)} def delete_snapshot(self, snapshot): """Deletes a snapshot.""" - LOG.info(_LI('delete_snapshot, ' - 'snap id: %(sid)s, volume id: %(vid)s, Enter method.'), + LOG.info('delete_snapshot, snap id: %(sid)s, volume id: %(vid)s, ' + 'Enter method.', {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) vol_exist = self.common.delete_snapshot(snapshot) - LOG.info(_LI('delete_snapshot, ' - 'delete: %s, Exit method.'), vol_exist) + LOG.info('delete_snapshot, delete: %s, Exit method.', vol_exist) return def ensure_export(self, context, volume): @@ -161,26 +155,24 @@ class FJDXISCSIDriver(driver.ISCSIDriver): def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" - LOG.info(_LI('initialize_connection, volume id: %(vid)s, ' - 'initiator: %(initiator)s, Enter method.'), + LOG.info('initialize_connection, volume id: %(vid)s, ' + 'initiator: %(initiator)s, Enter method.', {'vid': volume['id'], 'initiator': connector['initiator']}) info = self.common.initialize_connection(volume, connector) - LOG.info(_LI('initialize_connection, ' - 'info: %s, Exit method.'), info) + LOG.info('initialize_connection, info: %s, Exit method.', info) return info def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" - LOG.info(_LI('terminate_connection, volume id: %(vid)s, ' - 'initiator: %(initiator)s, Enter method.'), + LOG.info('terminate_connection, volume id: %(vid)s, ' + 'initiator: %(initiator)s, Enter method.', {'vid': volume['id'], 'initiator': connector['initiator']}) map_exist = self.common.terminate_connection(volume, connector) - LOG.info(_LI('terminate_connection, ' - 'unmap: %s, Exit method.'), map_exist) + LOG.info('terminate_connection, unmap: %s, Exit method.', map_exist) return def get_volume_stats(self, refresh=False): @@ -201,10 +193,9 @@ class FJDXISCSIDriver(driver.ISCSIDriver): def extend_volume(self, volume, new_size): """Extend volume.""" - LOG.info(_LI('extend_volume, ' - 'volume id: %s, Enter method.'), volume['id']) + LOG.info('extend_volume, volume id: %s, Enter method.', volume['id']) used_pool_name = self.common.extend_volume(volume, new_size) - LOG.info(_LI('extend_volume, ' - 'used pool name: %s, Exit method.'), used_pool_name) + LOG.info('extend_volume, used pool name: %s, Exit method.', + used_pool_name) diff --git a/cinder/volume/drivers/fusionstorage/dsware.py b/cinder/volume/drivers/fusionstorage/dsware.py index 0ff4a84351d..2a562837cef 100644 --- a/cinder/volume/drivers/fusionstorage/dsware.py +++ b/cinder/volume/drivers/fusionstorage/dsware.py @@ -24,7 +24,7 @@ from oslo_log import log as logging from oslo_service import loopingcall from cinder import exception -from cinder.i18n import _, _LE, _LW +from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder.volume import driver @@ -86,7 +86,7 @@ class 
DSWAREDriver(driver.VolumeDriver): # lrk: check config file here. if not os.path.exists(fspythonapi.fsc_conf_file): msg = _("Dsware config file not exists!") - LOG.error(_LE("Dsware config file: %s not exists!"), + LOG.error("Dsware config file: %s not exists!", fspythonapi.fsc_conf_file) raise exception.VolumeBackendAPIException(data=msg) @@ -140,7 +140,7 @@ class DSWAREDriver(driver.VolumeDriver): pool_id = int(pool_info) # Query Dsware version failed! else: - LOG.error(_LE("Query Dsware version fail!")) + LOG.error("Query Dsware version fail!") msg = (_("Query Dsware version failed! Retcode is %s.") % retcode) raise exception.VolumeBackendAPIException(data=msg) @@ -149,7 +149,7 @@ class DSWAREDriver(driver.VolumeDriver): result = self.dsware_client.create_volume( volume_id, pool_id, volume_size, int(isThin)) except Exception as e: - LOG.exception(_LE("Create volume error, details is: %s."), e) + LOG.exception("Create volume error, details is: %s.", e) raise if result != 0: @@ -254,7 +254,7 @@ class DSWAREDriver(driver.VolumeDriver): 'status': current_volume['status']}) raise loopingcall.LoopingCallDone(retvalue=False) else: - LOG.warning(_LW('Can not find volume %s from Dsware.'), + LOG.warning('Can not find volume %s from Dsware.', new_volume_name) self.count += 1 if self.count > 10: @@ -386,7 +386,7 @@ class DSWAREDriver(driver.VolumeDriver): image_meta, volume_attach_path) except Exception as e: - LOG.error(_LE("Upload volume error, details: %s."), e) + LOG.error("Upload volume error, details: %s.", e) raise finally: if not already_attached: @@ -433,7 +433,7 @@ class DSWAREDriver(driver.VolumeDriver): snapshot_info = self.dsware_client.query_snap(snapshot_name) LOG.debug("Get snapshot, snapshot_info is : %s.", snapshot_info) if snapshot_info['result'] == SNAP_NOT_EXIST: - LOG.error(_LE('Snapshot: %s not found!'), snapshot_name) + LOG.error('Snapshot: %s not found!', snapshot_name) return False elif snapshot_info['result'] == 0: return True @@ -462,8 +462,7 @@ class DSWAREDriver(driver.VolumeDriver): vol_id = 'volume-%s' % snapshot['volume_id'] snapshot_id = snapshot['name'] if not self._get_volume(vol_id): - msg = _LE('Create Snapshot, but volume: %s not found!') - LOG.error(msg, vol_id) + LOG.error('Create Snapshot, but volume: %s not found!', vol_id) raise exception.VolumeNotFound(volume_id=vol_id) else: self._create_snapshot(snapshot_id, vol_id) diff --git a/cinder/volume/drivers/fusionstorage/fspythonapi.py b/cinder/volume/drivers/fusionstorage/fspythonapi.py index 95739202ee9..a88687028da 100644 --- a/cinder/volume/drivers/fusionstorage/fspythonapi.py +++ b/cinder/volume/drivers/fusionstorage/fspythonapi.py @@ -22,7 +22,6 @@ import six from oslo_log import log as logging -from cinder.i18n import _LE from cinder import utils LOG = logging.getLogger(__name__) @@ -291,8 +290,7 @@ class FSPythonApi(object): elif re.search('^create_time=', line): local_volume_info['create_time'] = line[len('create_time='):] else: - LOG.error(_LE("Analyze key not exist, key=%s."), - six.text_type(line)) + LOG.error("Analyze key not exist, key=%s.", line) return local_volume_info def query_volume(self, vol_name): @@ -375,8 +373,7 @@ class FSPythonApi(object): elif re.search('^create_time=', line): local_snap_info['create_time'] = line[len('create_time='):] else: - LOG.error(_LE("Analyze key not exist, key=%s."), - line) + LOG.error("Analyze key not exist, key=%s.", line) return local_snap_info @@ -435,8 +432,7 @@ class FSPythonApi(object): local_pool_info['alloc_capacity'] = line[ 
len('alloc_capacity='):] else: - LOG.error(_LE("Analyze key not exist, key=%s."), - six.text_type(line)) + LOG.error("Analyze key not exist, key=%s.", line) return local_pool_info def query_pool_info(self, pool_id): diff --git a/cinder/volume/drivers/hgst.py b/cinder/volume/drivers/hgst.py index d9639628281..b306b8e15d7 100644 --- a/cinder/volume/drivers/hgst.py +++ b/cinder/volume/drivers/hgst.py @@ -35,8 +35,6 @@ from oslo_utils import units from cinder import exception from cinder.i18n import _ -from cinder.i18n import _LE -from cinder.i18n import _LW from cinder.image import image_utils from cinder import interface from cinder.volume import driver @@ -112,8 +110,8 @@ class HGSTDriver(driver.VolumeDriver): def _log_cli_err(self, err): """Dumps the full command output to a logfile in error cases.""" - LOG.error(_LE("CLI fail: '%(cmd)s' = %(code)s\nout: %(stdout)s\n" - "err: %(stderr)s"), + LOG.error("CLI fail: '%(cmd)s' = %(code)s\nout: %(stdout)s\n" + "err: %(stderr)s", {'cmd': err.cmd, 'code': err.exit_code, 'stdout': err.stdout, 'stderr': err.stderr}) @@ -256,7 +254,7 @@ class HGSTDriver(driver.VolumeDriver): self._execute(*params, run_as_root=True) # Cancel succeeded, the command was aborted # Send initial exception up the stack - LOG.error(_LE("VGC-CLUSTER command blocked and cancelled.")) + LOG.error("VGC-CLUSTER command blocked and cancelled.") # Can't throw it here, the except below would catch it! throw_err = True except Exception: @@ -347,7 +345,7 @@ class HGSTDriver(driver.VolumeDriver): avail = avail - 1 except processutils.ProcessExecutionError as err: # Could be cluster still starting up, return unknown for now - LOG.warning(_LW("Unable to poll cluster free space.")) + LOG.warning("Unable to poll cluster free space.") self._log_cli_err(err) cap = 'unknown' avail = 'unknown' @@ -413,12 +411,12 @@ class HGSTDriver(driver.VolumeDriver): try: self._execute(*params, run_as_root=True) except processutils.ProcessExecutionError as err: - LOG.warning(_LW("Unable to delete space %(space)s"), + LOG.warning("Unable to delete space %(space)s", {'space': volname}) self._log_cli_err(err) else: # This can be benign when we are deleting a snapshot - LOG.warning(_LW("Attempted to delete a space that's not there.")) + LOG.warning("Attempted to delete a space that's not there.") def _check_host_storage(self, server): if ":" not in server: diff --git a/cinder/volume/drivers/hitachi/hbsd_basiclib.py b/cinder/volume/drivers/hitachi/hbsd_basiclib.py index ccb1a3cd19d..9b22e21739c 100644 --- a/cinder/volume/drivers/hitachi/hbsd_basiclib.py +++ b/cinder/volume/drivers/hitachi/hbsd_basiclib.py @@ -23,7 +23,7 @@ from oslo_utils import excutils import six from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _ from cinder import utils SMPL = 1 @@ -168,7 +168,7 @@ def set_msg(msg_id, **kwargs): def output_err(msg_id, **kwargs): msg = HBSD_ERR_MSG.get(msg_id) % kwargs - LOG.error(_LE("MSGID%(id)04d-E: %(msg)s"), {'id': msg_id, 'msg': msg}) + LOG.error("MSGID%(id)04d-E: %(msg)s", {'id': msg_id, 'msg': msg}) return msg diff --git a/cinder/volume/drivers/hitachi/hbsd_common.py b/cinder/volume/drivers/hitachi/hbsd_common.py index 60e0b7c227f..5e4e5186c36 100644 --- a/cinder/volume/drivers/hitachi/hbsd_common.py +++ b/cinder/volume/drivers/hitachi/hbsd_common.py @@ -25,7 +25,6 @@ from oslo_utils import excutils import six from cinder import exception -from cinder.i18n import _LE, _LI, _LW from cinder import utils from cinder.volume.drivers.hitachi import hbsd_basiclib as 
basic_lib from cinder.volume.drivers.hitachi import hbsd_horcm as horcm @@ -252,21 +251,21 @@ class HBSDCommon(object): prefix = 'HSNM2 version' else: prefix = 'RAID Manager version' - LOG.info(_LI('\t%(prefix)-35s : %(version)s'), + LOG.info('\t%(prefix)-35s : %(version)s', {'prefix': prefix, 'version': version}) for param in essential_inherited_param: value = conf.safe_get(param) - LOG.info(_LI('\t%(param)-35s : %(value)s'), + LOG.info('\t%(param)-35s : %(value)s', {'param': param, 'value': value}) for opt in volume_opts: if not opt.secret: value = getattr(conf, opt.name) - LOG.info(_LI('\t%(name)-35s : %(value)s'), + LOG.info('\t%(name)-35s : %(value)s', {'name': opt.name, 'value': value}) if storage_protocol == 'iSCSI': value = getattr(conf, 'hitachi_group_request') - LOG.info(_LI('\t%(request)-35s : %(value)s'), + LOG.info('\t%(request)-35s : %(value)s', {'request': 'hitachi_group_request', 'value': value}) def check_param(self): @@ -407,13 +406,13 @@ class HBSDCommon(object): try: self.command.restart_pair_horcm() except Exception as e: - LOG.warning(_LW('Failed to restart horcm: %s'), e) + LOG.warning('Failed to restart horcm: %s', e) else: if (all_split or is_vvol) and restart: try: self.command.restart_pair_horcm() except Exception as e: - LOG.warning(_LW('Failed to restart horcm: %s'), e) + LOG.warning('Failed to restart horcm: %s', e) def copy_async_data(self, pvol, svol, is_vvol): path_list = [] @@ -727,7 +726,7 @@ class HBSDCommon(object): total_gb, free_gb = self.command.comm_get_dp_pool( self.configuration.hitachi_pool_id) except Exception as ex: - LOG.error(_LE('Failed to update volume status: %s'), ex) + LOG.error('Failed to update volume status: %s', ex) return None data['total_capacity_gb'] = total_gb diff --git a/cinder/volume/drivers/hitachi/hbsd_fc.py b/cinder/volume/drivers/hitachi/hbsd_fc.py index d6a3e5417e0..7a8ff9e19b2 100644 --- a/cinder/volume/drivers/hitachi/hbsd_fc.py +++ b/cinder/volume/drivers/hitachi/hbsd_fc.py @@ -26,7 +26,7 @@ from oslo_utils import excutils import six from cinder import exception -from cinder.i18n import _, _LI, _LW +from cinder.i18n import _ from cinder import interface from cinder import utils import cinder.volume.driver @@ -89,7 +89,7 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver): for opt in volume_opts: if not opt.secret: value = getattr(self.configuration, opt.name) - LOG.info(_LI('\t%(name)-35s : %(value)s'), + LOG.info('\t%(name)-35s : %(value)s', {'name': opt.name, 'value': value}) self.common.command.output_param_to_log(self.configuration) @@ -184,7 +184,7 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver): try: self._fill_group(hgs, port, host_grp_name, wwns_copy) except Exception as ex: - LOG.warning(_LW('Failed to add host group: %s'), ex) + LOG.warning('Failed to add host group: %s', ex) LOG.warning(basic_lib.set_msg( 308, port=port, name=host_grp_name)) diff --git a/cinder/volume/drivers/hitachi/hbsd_horcm.py b/cinder/volume/drivers/hitachi/hbsd_horcm.py index 9001b05492a..b694d8a7e95 100644 --- a/cinder/volume/drivers/hitachi/hbsd_horcm.py +++ b/cinder/volume/drivers/hitachi/hbsd_horcm.py @@ -28,7 +28,6 @@ from oslo_utils import units import six from cinder import exception -from cinder.i18n import _LE, _LI, _LW from cinder import utils from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib @@ -225,7 +224,7 @@ class HBSDHORCM(basic_lib.HBSDBasicLib): raise loopingcall.LoopingCallDone() if self.shutdown_horcm(inst): - LOG.error(_LE("Failed to shutdown horcm.")) + LOG.error("Failed 
to shutdown horcm.") raise loopingcall.LoopingCallDone() @horcm_synchronized @@ -293,14 +292,14 @@ class HBSDHORCM(basic_lib.HBSDBasicLib): raise loopingcall.LoopingCallDone((ret, stdout, stderr)) if time.time() - start >= EXEC_MAX_WAITTIME: - LOG.error(_LE("horcm command timeout.")) + LOG.error("horcm command timeout.") raise loopingcall.LoopingCallDone((ret, stdout, stderr)) if (ret == EX_ENAUTH and not re.search("-login %s %s" % (user, passwd), args)): _ret, _stdout, _stderr = self.comm_login() if _ret: - LOG.error(_LE("Failed to authenticate user.")) + LOG.error("Failed to authenticate user.") raise loopingcall.LoopingCallDone((ret, stdout, stderr)) elif ret in HORCM_ERROR: @@ -309,11 +308,11 @@ class HBSDHORCM(basic_lib.HBSDBasicLib): if self.check_horcm(inst) != HORCM_RUNNING: _ret, _stdout, _stderr = self.start_horcm(inst) if _ret and _ret != HORCM_RUNNING: - LOG.error(_LE("Failed to start horcm.")) + LOG.error("Failed to start horcm.") raise loopingcall.LoopingCallDone((ret, stdout, stderr)) elif ret not in COMMAND_IO_TO_RAID: - LOG.error(_LE("Unexpected error occurs in horcm.")) + LOG.error("Unexpected error occurs in horcm.") raise loopingcall.LoopingCallDone((ret, stdout, stderr)) def exec_raidcom(self, cmd, args, printflag=True): @@ -847,7 +846,7 @@ class HBSDHORCM(basic_lib.HBSDBasicLib): try: self.comm_modify_ldev(ldev) except Exception as ex: - LOG.warning(_LW('Failed to discard zero page: %s'), ex) + LOG.warning('Failed to discard zero page: %s', ex) def comm_add_snapshot(self, pvol, svol): pool = self.conf.hitachi_thin_pool_id @@ -1316,7 +1315,7 @@ HORCM_CMD [basic_lib.PSUS], timeout, interval, check_svol=True) except Exception as ex: - LOG.warning(_LW('Failed to create pair: %s'), ex) + LOG.warning('Failed to create pair: %s', ex) try: self.comm_pairsplit(copy_group, ldev_name) @@ -1325,20 +1324,20 @@ HORCM_CMD [basic_lib.SMPL], timeout, self.conf.hitachi_async_copy_check_interval) except Exception as ex: - LOG.warning(_LW('Failed to create pair: %s'), ex) + LOG.warning('Failed to create pair: %s', ex) if self.is_smpl(copy_group, ldev_name): try: self.delete_pair_config(pvol, svol, copy_group, ldev_name) except Exception as ex: - LOG.warning(_LW('Failed to create pair: %s'), ex) + LOG.warning('Failed to create pair: %s', ex) if restart: try: self.restart_pair_horcm() except Exception as ex: - LOG.warning(_LW('Failed to restart horcm: %s'), ex) + LOG.warning('Failed to restart horcm: %s', ex) else: self.check_snap_count(pvol) @@ -1356,7 +1355,7 @@ HORCM_CMD pvol, svol, [basic_lib.SMPL], timeout, self.conf.hitachi_async_copy_check_interval) except Exception as ex: - LOG.warning(_LW('Failed to create pair: %s'), ex) + LOG.warning('Failed to create pair: %s', ex) def delete_pair(self, pvol, svol, is_vvol): timeout = basic_lib.DEFAULT_PROCESS_WAITTIME @@ -1395,7 +1394,7 @@ HORCM_CMD for opt in volume_opts: if not opt.secret: value = getattr(conf, opt.name) - LOG.info(_LI('\t%(name)-35s : %(value)s'), + LOG.info('\t%(name)-35s : %(value)s', {'name': opt.name, 'value': value}) def create_lock_file(self): diff --git a/cinder/volume/drivers/hitachi/hbsd_iscsi.py b/cinder/volume/drivers/hitachi/hbsd_iscsi.py index f33c15340d5..852ba16d459 100644 --- a/cinder/volume/drivers/hitachi/hbsd_iscsi.py +++ b/cinder/volume/drivers/hitachi/hbsd_iscsi.py @@ -25,7 +25,7 @@ from oslo_log import versionutils import six from cinder import exception -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder import interface from cinder import utils import cinder.volume.driver @@ 
-100,7 +100,7 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver): for opt in volume_opts: if not opt.secret: value = getattr(self.configuration, opt.name) - LOG.info(_LI('\t%(name)-35s : %(value)s'), + LOG.info('\t%(name)-35s : %(value)s', {'name': opt.name, 'value': value}) def _delete_lun_iscsi(self, hostgroups, ldev): @@ -185,7 +185,7 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver): {'port': port, 'gid': gid}) break if gid is None: - LOG.error(_LE('Failed to add target(port: %s)'), port) + LOG.error('Failed to add target(port: %s)', port) continue try: if added_hostgroup: diff --git a/cinder/volume/drivers/hitachi/hbsd_snm2.py b/cinder/volume/drivers/hitachi/hbsd_snm2.py index e2b4417ddde..e1b54ef0471 100644 --- a/cinder/volume/drivers/hitachi/hbsd_snm2.py +++ b/cinder/volume/drivers/hitachi/hbsd_snm2.py @@ -24,7 +24,6 @@ from oslo_utils import units import six from cinder import exception -from cinder.i18n import _LE, _LW from cinder import utils from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib @@ -73,7 +72,7 @@ class HBSDSNM2(basic_lib.HBSDBasicLib): raise loopingcall.LoopingCallDone((ret, stdout, stderr)) if time.time() - start >= timeout: - LOG.error(_LE("snm2 command timeout.")) + LOG.error("snm2 command timeout.") raise loopingcall.LoopingCallDone((ret, stdout, stderr)) if (re.search('DMEC002047', stderr) @@ -87,7 +86,7 @@ class HBSDSNM2(basic_lib.HBSDBasicLib): or re.search('DMER0800CF', stderr) or re.search('DMER0800D[0-6D]', stderr) or re.search('DMES052602', stderr)): - LOG.error(_LE("Unexpected error occurs in snm2.")) + LOG.error("Unexpected error occurs in snm2.") raise loopingcall.LoopingCallDone((ret, stdout, stderr)) def exec_hsnm(self, command, args, printflag=True, noretry=False, @@ -142,8 +141,8 @@ class HBSDSNM2(basic_lib.HBSDBasicLib): used_list.append(int(line[2])) if int(line[3]) == ldev: hlu = int(line[2]) - LOG.warning(_LW('ldev(%(ldev)d) is already mapped ' - '(hlun: %(hlu)d)'), + LOG.warning('ldev(%(ldev)d) is already mapped ' + '(hlun: %(hlu)d)', {'ldev': ldev, 'hlu': hlu}) return hlu return None diff --git a/cinder/volume/drivers/hitachi/hnas_backend.py b/cinder/volume/drivers/hitachi/hnas_backend.py index bae111bf11a..fa34b9b0f13 100644 --- a/cinder/volume/drivers/hitachi/hnas_backend.py +++ b/cinder/volume/drivers/hitachi/hnas_backend.py @@ -23,8 +23,8 @@ from oslo_log import log as logging from oslo_utils import units import six -from cinder.i18n import _, _LE from cinder import exception +from cinder.i18n import _ from cinder import ssh_utils from cinder import utils @@ -256,7 +256,7 @@ class HNASSSHBackend(object): fs_list = self._get_fs_list() fs = fs_list.get(fs_label) if not fs: - LOG.error(_LE("Can't find file %(file)s in FS %(label)s"), + LOG.error("Can't find file %(file)s in FS %(label)s", {'file': src, 'label': fs_label}) msg = _('FS label: %(fs_label)s') % {'fs_label': fs_label} raise exception.InvalidParameterValue(err=msg) diff --git a/cinder/volume/drivers/hitachi/hnas_nfs.py b/cinder/volume/drivers/hitachi/hnas_nfs.py index 9a58c6ab712..683deb4c35c 100644 --- a/cinder/volume/drivers/hitachi/hnas_nfs.py +++ b/cinder/volume/drivers/hitachi/hnas_nfs.py @@ -28,7 +28,7 @@ from oslo_utils import units import six from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder import utils as cutils @@ -139,9 +139,9 @@ class HNASNFSDriver(nfs.NfsDriver): {'lbl': label, 'svc': svc['export']['fs']}) 
service = (svc['hdp'], svc['export']['path'], svc['export']['fs']) else: - LOG.info(_LI("Available services: %(svc)s"), + LOG.info("Available services: %(svc)s", {'svc': self.config['services'].keys()}) - LOG.error(_LE("No configuration found for service: %(lbl)s"), + LOG.error("No configuration found for service: %(lbl)s", {'lbl': label}) raise exception.ParameterNotFound(param=label) @@ -165,14 +165,14 @@ class HNASNFSDriver(nfs.NfsDriver): path = self._get_file_path(nfs_mount, volume.name) # Resize the image file on share to new size. - LOG.info(_LI("Checking file for resize.")) + LOG.info("Checking file for resize.") if not self._is_file_size_equal(path, new_size): - LOG.info(_LI("Resizing file to %(sz)sG"), {'sz': new_size}) + LOG.info("Resizing file to %(sz)sG", {'sz': new_size}) image_utils.resize_image(path, new_size) if self._is_file_size_equal(path, new_size): - LOG.info(_LI("LUN %(id)s extended to %(size)s GB."), + LOG.info("LUN %(id)s extended to %(size)s GB.", {'id': volume.id, 'size': new_size}) else: msg = _("Resizing image file failed.") @@ -201,8 +201,8 @@ class HNASNFSDriver(nfs.NfsDriver): snapshot_name = self._get_snapshot_name(snapshot) if self._file_not_present(nfs_mount, snapshot_name): - LOG.info(_LI("Creating volume %(vol)s from legacy " - "snapshot %(snap)s."), + LOG.info("Creating volume %(vol)s from legacy " + "snapshot %(snap)s.", {'vol': volume.name, 'snap': snapshot.name}) snapshot_name = snapshot.name @@ -321,7 +321,7 @@ class HNASNFSDriver(nfs.NfsDriver): 'thin_provisioning_support': thin support (True), } """ - LOG.info(_LI("Getting volume stats")) + LOG.info("Getting volume stats") _stats = super(HNASNFSDriver, self).get_volume_stats(refresh) _stats["vendor_name"] = 'Hitachi' @@ -349,18 +349,18 @@ class HNASNFSDriver(nfs.NfsDriver): def do_setup(self, context): """Perform internal driver setup.""" version_info = self.backend.get_version() - LOG.info(_LI("HNAS NFS driver.")) - LOG.info(_LI("HNAS model: %(mdl)s"), {'mdl': version_info['model']}) - LOG.info(_LI("HNAS version: %(ver)s"), + LOG.info("HNAS NFS driver.") + LOG.info("HNAS model: %(mdl)s", {'mdl': version_info['model']}) + LOG.info("HNAS version: %(ver)s", {'ver': version_info['version']}) - LOG.info(_LI("HNAS hardware: %(hw)s"), + LOG.info("HNAS hardware: %(hw)s", {'hw': version_info['hardware']}) - LOG.info(_LI("HNAS S/N: %(sn)s"), {'sn': version_info['serial']}) + LOG.info("HNAS S/N: %(sn)s", {'sn': version_info['serial']}) self.context = context self._load_shares_config( getattr(self.configuration, self.driver_prefix + '_shares_config')) - LOG.info(_LI("Review shares: %(shr)s"), {'shr': self.shares}) + LOG.info("Review shares: %(shr)s", {'shr': self.shares}) elist = self.backend.get_export_list() @@ -380,7 +380,7 @@ class HNASNFSDriver(nfs.NfsDriver): try: out, err = self._execute('showmount', '-e', server_ip) except processutils.ProcessExecutionError: - LOG.exception(_LE("NFS server %(srv)s not reachable!"), + LOG.exception("NFS server %(srv)s not reachable!", {'srv': server_ip}) raise @@ -391,8 +391,8 @@ class HNASNFSDriver(nfs.NfsDriver): if (len(export_list) < 1 or mountpoint_not_found or not export_configured): - LOG.error(_LE("Configured share %(share)s is not present" - "in %(srv)s."), + LOG.error("Configured share %(share)s is not present " + "in %(srv)s.", {'share': mountpoint, 'srv': server_ip}) msg = _('Section: %(svc_name)s') % {'svc_name': svc_name} raise exception.InvalidParameterValue(err=msg) @@ -411,7 +411,7 @@ class HNASNFSDriver(nfs.NfsDriver): self.pools.append(pool)
LOG.debug("Configured pools: %(pool)s", {'pool': self.pools}) - LOG.info(_LI("HNAS NFS Driver loaded successfully.")) + LOG.info("HNAS NFS Driver loaded successfully.") def _clone_volume(self, src_vol, clone_name, src_name=None): """Clones mounted volume using the HNAS file_clone. @@ -428,8 +428,8 @@ class HNASNFSDriver(nfs.NfsDriver): src_name = src_vol.name # volume-ID snapshot-ID, /cinder - LOG.info(_LI("Cloning with volume_name %(vname)s, clone_name %(cname)s" - " ,export_path %(epath)s"), + LOG.info("Cloning with volume_name %(vname)s, clone_name %(cname)s" + " ,export_path %(epath)s", {'vname': src_name, 'cname': clone_name, 'epath': src_vol.provider_location}) @@ -453,7 +453,7 @@ class HNASNFSDriver(nfs.NfsDriver): volume.provider_location = fs_id - LOG.info(_LI("Volume service: %(label)s. Casted to: %(loc)s"), + LOG.info("Volume service: %(label)s. Casted to: %(loc)s", {'label': fslabel, 'loc': volume.provider_location}) self._do_create_volume(volume) @@ -478,7 +478,7 @@ class HNASNFSDriver(nfs.NfsDriver): try: vol_ref_share_ip = cutils.resolve_hostname(share_split[0]) except socket.gaierror as e: - LOG.exception(_LE('Invalid hostname %(host)s'), + LOG.exception('Invalid hostname %(host)s', {'host': share_split[0]}) LOG.debug('error: %(err)s', {'err': e.strerror}) raise @@ -563,8 +563,8 @@ class HNASNFSDriver(nfs.NfsDriver): (nfs_share, nfs_mount, vol_name ) = self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref) - LOG.info(_LI("Asked to manage NFS volume %(vol)s, " - "with vol ref %(ref)s."), + LOG.info("Asked to manage NFS volume %(vol)s, " + "with vol ref %(ref)s.", {'vol': volume.id, 'ref': existing_vol_ref['source-name']}) @@ -676,13 +676,13 @@ class HNASNFSDriver(nfs.NfsDriver): self._try_execute("mv", vol_path, new_path, run_as_root=False, check_exit_code=True) - LOG.info(_LI("The volume with path %(old)s is no longer being " - "managed by Cinder. However, it was not deleted " - "and can be found in the new path %(cr)s."), + LOG.info("The volume with path %(old)s is no longer being " + "managed by Cinder. 
However, it was not deleted " + "and can be found in the new path %(cr)s.", {'old': vol_path, 'cr': new_path}) except (OSError, ValueError): - LOG.exception(_LE("The NFS Volume %(cr)s does not exist."), + LOG.exception("The NFS Volume %(cr)s does not exist.", {'cr': new_path}) def _get_file_size(self, file_path): @@ -746,8 +746,8 @@ class HNASNFSDriver(nfs.NfsDriver): (nfs_share, nfs_mount, src_snapshot_name ) = self._get_share_mount_and_vol_from_vol_ref(existing_ref) - LOG.info(_LI("Asked to manage NFS snapshot %(snap)s for volume " - "%(vol)s, with vol ref %(ref)s."), + LOG.info("Asked to manage NFS snapshot %(snap)s for volume " + "%(vol)s, with vol ref %(ref)s.", {'snap': snapshot.id, 'vol': snapshot.volume_id, 'ref': existing_ref['source-name']}) @@ -776,8 +776,8 @@ class HNASNFSDriver(nfs.NfsDriver): try: self._try_execute("mv", src_snap, dst_snap, run_as_root=False, check_exit_code=True) - LOG.info(_LI("Setting newly managed Cinder snapshot name " - "to %(snap)s."), {'snap': snapshot_name}) + LOG.info("Setting newly managed Cinder snapshot name " + "to %(snap)s.", {'snap': snapshot_name}) self._set_rw_permissions_for_all(dst_snap) except (OSError, processutils.ProcessExecutionError) as err: msg = (_("Failed to manage existing snapshot " @@ -806,7 +806,7 @@ class HNASNFSDriver(nfs.NfsDriver): snapshot_name = self._get_snapshot_name(snapshot) if self._file_not_present(snapshot.provider_location, snapshot_name): - LOG.info(_LI("Unmanaging legacy snapshot %(snap)s."), + LOG.info("Unmanaging legacy snapshot %(snap)s.", {'snap': snapshot.name}) snapshot_name = snapshot.name @@ -818,13 +818,13 @@ class HNASNFSDriver(nfs.NfsDriver): try: self._execute("mv", old_path, new_path, run_as_root=False, check_exit_code=True) - LOG.info(_LI("The snapshot with path %(old)s is no longer being " - "managed by Cinder. However, it was not deleted and " - "can be found in the new path %(cr)s."), + LOG.info("The snapshot with path %(old)s is no longer being " + "managed by Cinder. 
However, it was not deleted and " + "can be found in the new path %(cr)s.", {'old': old_path, 'cr': new_path}) except (OSError, ValueError): - LOG.exception(_LE("The NFS snapshot %(old)s does not exist."), + LOG.exception("The NFS snapshot %(old)s does not exist.", {'old': old_path}) def _get_volumes_from_export(self, export_path): @@ -971,8 +971,8 @@ class HNASNFSDriver(nfs.NfsDriver): origin = utils.extract_id_from_volume_name(origin) rsrc_inf['source_reference'] = {'id': origin} else: - LOG.warning(_LW("Could not determine the volume " - "that owns the snapshot %(snap)s"), + LOG.warning("Could not determine the volume " + "that owns the snapshot %(snap)s", {'snap': resource}) rsrc_inf['source_reference'] = {'id': 'unknown'} rsrc_inf['extra_info'] = ('Could not determine ' diff --git a/cinder/volume/drivers/hitachi/hnas_utils.py b/cinder/volume/drivers/hitachi/hnas_utils.py index 72d051e41b7..f18346211be 100644 --- a/cinder/volume/drivers/hitachi/hnas_utils.py +++ b/cinder/volume/drivers/hitachi/hnas_utils.py @@ -26,7 +26,7 @@ import six from xml.etree import ElementTree as ETree from cinder import exception -from cinder.i18n import _, _LW, _LE +from cinder.i18n import _ from cinder.volume import volume_types LOG = logging.getLogger(__name__) @@ -147,7 +147,7 @@ def _xml_read(root, element, check=None): # mandatory parameter not found if val is None and check: - LOG.error(_LE("Mandatory parameter not found: %(p)s"), {'p': element}) + LOG.error("Mandatory parameter not found: %(p)s", {'p': element}) raise exception.ParameterNotFound(param=element) # tag not found @@ -159,7 +159,7 @@ def _xml_read(root, element, check=None): if not val.strip(): if svc_tag_pattern.search(element): return "" - LOG.error(_LE("Parameter not found: %(param)s"), {'param': element}) + LOG.error("Parameter not found: %(param)s", {'param': element}) raise exception.ParameterNotFound(param=element) LOG.debug("%(element)s: %(val)s", @@ -185,11 +185,11 @@ def read_xml_config(xml_config_file, svc_params, optional_params): LOG.error(msg) raise exception.ConfigNotFound(message=msg) else: - LOG.warning(_LW("This XML configuration file %(xml)s is deprecated. " - "Please, move all the configurations to the " - "cinder.conf file. If you keep both configuration " - "files, the options set on cinder.conf will be " - "used."), {'xml': xml_config_file}) + LOG.warning("This XML configuration file %(xml)s is deprecated. " + "Please, move all the configurations to the " + "cinder.conf file. If you keep both configuration " + "files, the options set on cinder.conf will be " + "used.", {'xml': xml_config_file}) try: root = ETree.parse(xml_config_file).getroot() @@ -245,7 +245,7 @@ def read_xml_config(xml_config_file, svc_params, optional_params): # at least one service required! 
if not config['services'].keys(): - LOG.error(_LE("No service found in xml config file")) + LOG.error("No service found in xml config file") raise exception.ParameterNotFound(param="svc_0") return config diff --git a/cinder/volume/drivers/hitachi/vsp_utils.py b/cinder/volume/drivers/hitachi/vsp_utils.py index 5978b499256..93c887a858a 100644 --- a/cinder/volume/drivers/hitachi/vsp_utils.py +++ b/cinder/volume/drivers/hitachi/vsp_utils.py @@ -32,9 +32,6 @@ from oslo_utils import units import six from cinder import exception -from cinder.i18n import _LE -from cinder.i18n import _LI -from cinder.i18n import _LW from cinder import utils as cinder_utils @@ -75,410 +72,410 @@ class VSPMsg(enum.Enum): METHOD_START = { 'msg_id': 0, 'loglevel': base_logging.INFO, - 'msg': _LI('%(method)s starts. (config_group: %(config_group)s)'), + 'msg': '%(method)s starts. (config_group: %(config_group)s)', 'suffix': INFO_SUFFIX } OUTPUT_PARAMETER_VALUES = { 'msg_id': 1, 'loglevel': base_logging.INFO, - 'msg': _LI('The parameter of the storage backend. (config_group: ' - '%(config_group)s)'), + 'msg': 'The parameter of the storage backend. (config_group: ' + '%(config_group)s)', 'suffix': INFO_SUFFIX } METHOD_END = { 'msg_id': 2, 'loglevel': base_logging.INFO, - 'msg': _LI('%(method)s ended. (config_group: %(config_group)s)'), + 'msg': '%(method)s ended. (config_group: %(config_group)s)', 'suffix': INFO_SUFFIX } DRIVER_READY_FOR_USE = { 'msg_id': 3, 'loglevel': base_logging.INFO, - 'msg': _LI('The storage backend can be used. (config_group: ' - '%(config_group)s)'), + 'msg': 'The storage backend can be used. (config_group: ' + '%(config_group)s)', 'suffix': INFO_SUFFIX } DRIVER_INITIALIZATION_START = { 'msg_id': 4, 'loglevel': base_logging.INFO, - 'msg': _LI('Initialization of %(driver)s %(version)s started.'), + 'msg': 'Initialization of %(driver)s %(version)s started.', 'suffix': INFO_SUFFIX } SET_CONFIG_VALUE = { 'msg_id': 5, 'loglevel': base_logging.INFO, - 'msg': _LI('Set %(object)s to %(value)s.'), + 'msg': 'Set %(object)s to %(value)s.', 'suffix': INFO_SUFFIX } OBJECT_CREATED = { 'msg_id': 6, 'loglevel': base_logging.INFO, - 'msg': _LI('Created %(object)s. (%(details)s)'), + 'msg': 'Created %(object)s. (%(details)s)', 'suffix': INFO_SUFFIX } INVALID_LDEV_FOR_UNMAPPING = { 'msg_id': 302, 'loglevel': base_logging.WARNING, - 'msg': _LW('Failed to specify a logical device for the volume ' - '%(volume_id)s to be unmapped.'), + 'msg': 'Failed to specify a logical device for the volume ' + '%(volume_id)s to be unmapped.', 'suffix': WARNING_SUFFIX } INVALID_LDEV_FOR_DELETION = { 'msg_id': 304, 'loglevel': base_logging.WARNING, - 'msg': _LW('Failed to specify a logical device to be deleted. ' - '(method: %(method)s, id: %(id)s)'), + 'msg': 'Failed to specify a logical device to be deleted. ' + '(method: %(method)s, id: %(id)s)', 'suffix': WARNING_SUFFIX } DELETE_TARGET_FAILED = { 'msg_id': 306, 'loglevel': base_logging.WARNING, - 'msg': _LW('A host group or an iSCSI target could not be deleted. ' - '(port: %(port)s, gid: %(id)s)'), + 'msg': 'A host group or an iSCSI target could not be deleted. ' + '(port: %(port)s, gid: %(id)s)', 'suffix': WARNING_SUFFIX } CREATE_HOST_GROUP_FAILED = { 'msg_id': 308, 'loglevel': base_logging.WARNING, - 'msg': _LW('A host group could not be added. (port: %(port)s)'), + 'msg': 'A host group could not be added. (port: %(port)s)', 'suffix': WARNING_SUFFIX } CREATE_ISCSI_TARGET_FAILED = { 'msg_id': 309, 'loglevel': base_logging.WARNING, - 'msg': _LW('An iSCSI target could not be added. 
(port: %(port)s)'), + 'msg': 'An iSCSI target could not be added. (port: %(port)s)', 'suffix': WARNING_SUFFIX } UNMAP_LDEV_FAILED = { 'msg_id': 310, 'loglevel': base_logging.WARNING, - 'msg': _LW('Failed to unmap a logical device. (LDEV: %(ldev)s)'), + 'msg': 'Failed to unmap a logical device. (LDEV: %(ldev)s)', 'suffix': WARNING_SUFFIX } DELETE_LDEV_FAILED = { 'msg_id': 313, 'loglevel': base_logging.WARNING, - 'msg': _LW('Failed to delete a logical device. (LDEV: %(ldev)s)'), + 'msg': 'Failed to delete a logical device. (LDEV: %(ldev)s)', 'suffix': WARNING_SUFFIX } MAP_LDEV_FAILED = { 'msg_id': 314, 'loglevel': base_logging.WARNING, - 'msg': _LW('Failed to map a logical device. (LDEV: %(ldev)s, port: ' - '%(port)s, id: %(id)s, lun: %(lun)s)'), + 'msg': 'Failed to map a logical device. (LDEV: %(ldev)s, port: ' + '%(port)s, id: %(id)s, lun: %(lun)s)', 'suffix': WARNING_SUFFIX } DISCARD_ZERO_PAGE_FAILED = { 'msg_id': 315, 'loglevel': base_logging.WARNING, - 'msg': _LW('Failed to perform a zero-page reclamation. (LDEV: ' - '%(ldev)s)'), + 'msg': 'Failed to perform a zero-page reclamation. (LDEV: ' + '%(ldev)s)', 'suffix': WARNING_SUFFIX } ADD_HBA_WWN_FAILED = { 'msg_id': 317, 'loglevel': base_logging.WARNING, - 'msg': _LW('Failed to assign the WWN. (port: %(port)s, gid: %(gid)s, ' - 'wwn: %(wwn)s)'), + 'msg': 'Failed to assign the WWN. (port: %(port)s, gid: %(gid)s, ' + 'wwn: %(wwn)s)', 'suffix': WARNING_SUFFIX } LDEV_NOT_EXIST = { 'msg_id': 319, 'loglevel': base_logging.WARNING, - 'msg': _LW('The logical device does not exist in the storage system. ' - '(LDEV: %(ldev)s)'), + 'msg': 'The logical device does not exist in the storage system. ' + '(LDEV: %(ldev)s)', 'suffix': WARNING_SUFFIX } HORCM_START_FAILED = { 'msg_id': 320, 'loglevel': base_logging.WARNING, - 'msg': _LW('Failed to start HORCM. (inst: %(inst)s)'), + 'msg': 'Failed to start HORCM. (inst: %(inst)s)', 'suffix': WARNING_SUFFIX } HORCM_RESTART_FOR_SI_FAILED = { 'msg_id': 322, 'loglevel': base_logging.WARNING, - 'msg': _LW('Failed to reload the configuration of full copy pair. ' - '(inst: %(inst)s)'), + 'msg': 'Failed to reload the configuration of full copy pair. ' + '(inst: %(inst)s)', 'suffix': WARNING_SUFFIX } HORCM_LOGIN_FAILED = { 'msg_id': 323, 'loglevel': base_logging.WARNING, - 'msg': _LW('Failed to perform user authentication of HORCM. ' - '(user: %(user)s)'), + 'msg': 'Failed to perform user authentication of HORCM. ' + '(user: %(user)s)', 'suffix': WARNING_SUFFIX } DELETE_SI_PAIR_FAILED = { 'msg_id': 324, 'loglevel': base_logging.WARNING, - 'msg': _LW('Failed to delete full copy pair. (P-VOL: %(pvol)s, S-VOL: ' - '%(svol)s)'), + 'msg': 'Failed to delete full copy pair. (P-VOL: %(pvol)s, S-VOL: ' + '%(svol)s)', 'suffix': WARNING_SUFFIX } DELETE_TI_PAIR_FAILED = { 'msg_id': 325, 'loglevel': base_logging.WARNING, - 'msg': _LW('Failed to delete thin copy pair. (P-VOL: %(pvol)s, S-VOL: ' - '%(svol)s)'), + 'msg': 'Failed to delete thin copy pair. (P-VOL: %(pvol)s, S-VOL: ' + '%(svol)s)', 'suffix': WARNING_SUFFIX } WAIT_SI_PAIR_STATUS_FAILED = { 'msg_id': 326, 'loglevel': base_logging.WARNING, - 'msg': _LW('Failed to change the status of full copy pair. (P-VOL: ' - '%(pvol)s, S-VOL: %(svol)s)'), + 'msg': 'Failed to change the status of full copy pair. (P-VOL: ' + '%(pvol)s, S-VOL: %(svol)s)', 'suffix': WARNING_SUFFIX } DELETE_DEVICE_GRP_FAILED = { 'msg_id': 327, 'loglevel': base_logging.WARNING, - 'msg': _LW('Failed to delete the configuration of full copy pair. 
' - '(P-VOL: %(pvol)s, S-VOL: %(svol)s)'), + 'msg': 'Failed to delete the configuration of full copy pair. ' + '(P-VOL: %(pvol)s, S-VOL: %(svol)s)', 'suffix': WARNING_SUFFIX } DISCONNECT_VOLUME_FAILED = { 'msg_id': 329, 'loglevel': base_logging.WARNING, - 'msg': _LW('Failed to detach the logical device. (LDEV: %(ldev)s, ' - 'reason: %(reason)s)'), + 'msg': 'Failed to detach the logical device. (LDEV: %(ldev)s, ' + 'reason: %(reason)s)', 'suffix': WARNING_SUFFIX } STORAGE_COMMAND_FAILED = { 'msg_id': 600, 'loglevel': base_logging.ERROR, - 'msg': _LE('The command %(cmd)s failed. (ret: %(ret)s, stdout: ' - '%(out)s, stderr: %(err)s)'), + 'msg': 'The command %(cmd)s failed. (ret: %(ret)s, stdout: ' + '%(out)s, stderr: %(err)s)', 'suffix': ERROR_SUFFIX } INVALID_PARAMETER = { 'msg_id': 601, 'loglevel': base_logging.ERROR, - 'msg': _LE('A parameter is invalid. (%(param)s)'), + 'msg': 'A parameter is invalid. (%(param)s)', 'suffix': ERROR_SUFFIX } INVALID_PARAMETER_VALUE = { 'msg_id': 602, 'loglevel': base_logging.ERROR, - 'msg': _LE('A parameter value is invalid. (%(meta)s)'), + 'msg': 'A parameter value is invalid. (%(meta)s)', 'suffix': ERROR_SUFFIX } HORCM_SHUTDOWN_FAILED = { 'msg_id': 608, 'loglevel': base_logging.ERROR, - 'msg': _LE('Failed to shutdown HORCM. (inst: %(inst)s)'), + 'msg': 'Failed to shutdown HORCM. (inst: %(inst)s)', 'suffix': ERROR_SUFFIX } HORCM_RESTART_FAILED = { 'msg_id': 609, 'loglevel': base_logging.ERROR, - 'msg': _LE('Failed to restart HORCM. (inst: %(inst)s)'), + 'msg': 'Failed to restart HORCM. (inst: %(inst)s)', 'suffix': ERROR_SUFFIX } SI_PAIR_STATUS_WAIT_TIMEOUT = { 'msg_id': 610, 'loglevel': base_logging.ERROR, - 'msg': _LE('The status change of full copy pair could not be ' - 'completed. (S-VOL: %(svol)s)'), + 'msg': 'The status change of full copy pair could not be ' + 'completed. (S-VOL: %(svol)s)', 'suffix': ERROR_SUFFIX } TI_PAIR_STATUS_WAIT_TIMEOUT = { 'msg_id': 611, 'loglevel': base_logging.ERROR, - 'msg': _LE('The status change of thin copy pair could not be ' - 'completed. (S-VOL: %(svol)s)'), + 'msg': 'The status change of thin copy pair could not be ' + 'completed. (S-VOL: %(svol)s)', 'suffix': ERROR_SUFFIX } INVALID_LDEV_STATUS_FOR_COPY = { 'msg_id': 612, 'loglevel': base_logging.ERROR, - 'msg': _LE('The source logical device to be replicated does not exist ' - 'in the storage system. (LDEV: %(ldev)s)'), + 'msg': 'The source logical device to be replicated does not exist ' + 'in the storage system. (LDEV: %(ldev)s)', 'suffix': ERROR_SUFFIX } INVALID_LDEV_FOR_EXTENSION = { 'msg_id': 613, 'loglevel': base_logging.ERROR, - 'msg': _LE('The volume %(volume_id)s to be extended was not found.'), + 'msg': 'The volume %(volume_id)s to be extended was not found.', 'suffix': ERROR_SUFFIX } NO_HBA_WWN_ADDED_TO_HOST_GRP = { 'msg_id': 614, 'loglevel': base_logging.ERROR, - 'msg': _LE('No WWN is assigned. (port: %(port)s, gid: %(gid)s)'), + 'msg': 'No WWN is assigned. (port: %(port)s, gid: %(gid)s)', 'suffix': ERROR_SUFFIX } NO_AVAILABLE_MIRROR_UNIT = { 'msg_id': 615, 'loglevel': base_logging.ERROR, - 'msg': _LE('A pair could not be created. The maximum number of pair ' - 'is exceeded. (copy method: %(copy_method)s, P-VOL: ' - '%(pvol)s)'), + 'msg': 'A pair could not be created. The maximum number of pair ' + 'is exceeded. (copy method: %(copy_method)s, P-VOL: ' + '%(pvol)s)', 'suffix': ERROR_SUFFIX } UNABLE_TO_DELETE_PAIR = { 'msg_id': 616, 'loglevel': base_logging.ERROR, - 'msg': _LE('A pair cannot be deleted. 
(P-VOL: %(pvol)s, S-VOL: ' - '%(svol)s)'), + 'msg': 'A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: ' + '%(svol)s)', 'suffix': ERROR_SUFFIX } INVALID_VOLUME_SIZE_FOR_COPY = { 'msg_id': 617, 'loglevel': base_logging.ERROR, - 'msg': _LE('Failed to create a volume from a %(type)s. The size of ' - 'the new volume must be equal to or greater than the size ' - 'of the original %(type)s. (new volume: %(volume_id)s)'), + 'msg': 'Failed to create a volume from a %(type)s. The size of ' + 'the new volume must be equal to or greater than the size ' + 'of the original %(type)s. (new volume: %(volume_id)s)', 'suffix': ERROR_SUFFIX } INVALID_VOLUME_TYPE_FOR_EXTEND = { 'msg_id': 618, 'loglevel': base_logging.ERROR, - 'msg': _LE('The volume %(volume_id)s could not be extended. The ' - 'volume type must be Normal.'), + 'msg': 'The volume %(volume_id)s could not be extended. The ' + 'volume type must be Normal.', 'suffix': ERROR_SUFFIX } INVALID_LDEV_FOR_CONNECTION = { 'msg_id': 619, 'loglevel': base_logging.ERROR, - 'msg': _LE('The volume %(volume_id)s to be mapped was not found.'), + 'msg': 'The volume %(volume_id)s to be mapped was not found.', 'suffix': ERROR_SUFFIX } POOL_INFO_RETRIEVAL_FAILED = { 'msg_id': 620, 'loglevel': base_logging.ERROR, - 'msg': _LE('Failed to provide information about a pool. (pool: ' - '%(pool)s)'), + 'msg': 'Failed to provide information about a pool. (pool: ' + '%(pool)s)', 'suffix': ERROR_SUFFIX } INVALID_VOLUME_SIZE_FOR_TI = { 'msg_id': 621, 'loglevel': base_logging.ERROR, - 'msg': _LE('Failed to create a volume from a %(type)s. The size of ' - 'the new volume must be equal to the size of the original ' - '%(type)s when the new volume is created by ' - '%(copy_method)s. (new volume: %(volume_id)s)'), + 'msg': 'Failed to create a volume from a %(type)s. The size of ' + 'the new volume must be equal to the size of the original ' + '%(type)s when the new volume is created by ' + '%(copy_method)s. (new volume: %(volume_id)s)', 'suffix': ERROR_SUFFIX } INVALID_LDEV_FOR_VOLUME_COPY = { 'msg_id': 624, 'loglevel': base_logging.ERROR, - 'msg': _LE('The %(type)s %(id)s source to be replicated was not ' - 'found.'), + 'msg': 'The %(type)s %(id)s source to be replicated was not ' + 'found.', 'suffix': ERROR_SUFFIX } CREATE_HORCM_CONF_FILE_FAILED = { 'msg_id': 632, 'loglevel': base_logging.ERROR, - 'msg': _LE('Failed to open a file. (file: %(file)s, ret: %(ret)s, ' - 'stderr: %(err)s)'), + 'msg': 'Failed to open a file. (file: %(file)s, ret: %(ret)s, ' + 'stderr: %(err)s)', 'suffix': ERROR_SUFFIX } CONNECT_VOLUME_FAILED = { 'msg_id': 634, 'loglevel': base_logging.ERROR, - 'msg': _LE('Failed to attach the logical device. (LDEV: %(ldev)s, ' - 'reason: %(reason)s)'), + 'msg': 'Failed to attach the logical device. (LDEV: %(ldev)s, ' + 'reason: %(reason)s)', 'suffix': ERROR_SUFFIX } CREATE_LDEV_FAILED = { 'msg_id': 636, 'loglevel': base_logging.ERROR, - 'msg': _LE('Failed to add the logical device.'), + 'msg': 'Failed to add the logical device.', 'suffix': ERROR_SUFFIX } ADD_PAIR_TARGET_FAILED = { 'msg_id': 638, 'loglevel': base_logging.ERROR, - 'msg': _LE('Failed to add the pair target.'), + 'msg': 'Failed to add the pair target.', 'suffix': ERROR_SUFFIX } NO_MAPPING_FOR_LDEV = { 'msg_id': 639, 'loglevel': base_logging.ERROR, - 'msg': _LE('Failed to map a logical device to any pair targets. ' - '(LDEV: %(ldev)s)'), + 'msg': 'Failed to map a logical device to any pair targets. 
' + '(LDEV: %(ldev)s)', 'suffix': ERROR_SUFFIX } POOL_NOT_FOUND = { 'msg_id': 640, 'loglevel': base_logging.ERROR, - 'msg': _LE('A pool could not be found. (pool: %(pool)s)'), + 'msg': 'A pool could not be found. (pool: %(pool)s)', 'suffix': ERROR_SUFFIX } NO_AVAILABLE_RESOURCE = { 'msg_id': 648, 'loglevel': base_logging.ERROR, - 'msg': _LE('There are no resources available for use. (resource: ' - '%(resource)s)'), + 'msg': 'There are no resources available for use. (resource: ' + '%(resource)s)', 'suffix': ERROR_SUFFIX } NO_CONNECTED_TARGET = { 'msg_id': 649, 'loglevel': base_logging.ERROR, - 'msg': _LE('The host group or iSCSI target was not found.'), + 'msg': 'The host group or iSCSI target was not found.', 'suffix': ERROR_SUFFIX } RESOURCE_NOT_FOUND = { 'msg_id': 650, 'loglevel': base_logging.ERROR, - 'msg': _LE('The resource %(resource)s was not found.'), + 'msg': 'The resource %(resource)s was not found.', 'suffix': ERROR_SUFFIX } LDEV_DELETION_WAIT_TIMEOUT = { 'msg_id': 652, 'loglevel': base_logging.ERROR, - 'msg': _LE('Failed to delete a logical device. (LDEV: %(ldev)s)'), + 'msg': 'Failed to delete a logical device. (LDEV: %(ldev)s)', 'suffix': ERROR_SUFFIX } LDEV_CREATION_WAIT_TIMEOUT = { 'msg_id': 653, 'loglevel': base_logging.ERROR, - 'msg': _LE('The creation of a logical device could not be completed. ' - '(LDEV: %(ldev)s)'), + 'msg': 'The creation of a logical device could not be completed. ' + '(LDEV: %(ldev)s)', 'suffix': ERROR_SUFFIX } INVALID_LDEV_ATTR_FOR_MANAGE = { 'msg_id': 702, 'loglevel': base_logging.ERROR, - 'msg': _LE('Failed to manage the specified LDEV (%(ldev)s). The LDEV ' - 'must be an unpaired %(ldevtype)s.'), + 'msg': 'Failed to manage the specified LDEV (%(ldev)s). The LDEV ' + 'must be an unpaired %(ldevtype)s.', 'suffix': ERROR_SUFFIX } INVALID_LDEV_SIZE_FOR_MANAGE = { 'msg_id': 703, 'loglevel': base_logging.ERROR, - 'msg': _LE('Failed to manage the specified LDEV (%(ldev)s). The LDEV ' - 'size must be expressed in gigabytes.'), + 'msg': 'Failed to manage the specified LDEV (%(ldev)s). The LDEV ' + 'size must be expressed in gigabytes.', 'suffix': ERROR_SUFFIX } INVALID_LDEV_PORT_FOR_MANAGE = { 'msg_id': 704, 'loglevel': base_logging.ERROR, - 'msg': _LE('Failed to manage the specified LDEV (%(ldev)s). The LDEV ' - 'must not be mapped.'), + 'msg': 'Failed to manage the specified LDEV (%(ldev)s). The LDEV ' + 'must not be mapped.', 'suffix': ERROR_SUFFIX } INVALID_LDEV_TYPE_FOR_UNMANAGE = { 'msg_id': 706, 'loglevel': base_logging.ERROR, - 'msg': _LE('Failed to unmanage the volume %(volume_id)s. The volume ' - 'type must be %(volume_type)s.'), + 'msg': 'Failed to unmanage the volume %(volume_id)s. The volume ' + 'type must be %(volume_type)s.', 'suffix': ERROR_SUFFIX } INVALID_LDEV_FOR_MANAGE = { 'msg_id': 707, 'loglevel': base_logging.ERROR, - 'msg': _LE('No valid value is specified for "source-id". A valid LDEV ' - 'number must be specified in "source-id" to manage the ' - 'volume.'), + 'msg': 'No valid value is specified for "source-id". A valid LDEV ' + 'number must be specified in "source-id" to manage the ' + 'volume.', 'suffix': ERROR_SUFFIX } VOLUME_COPY_FAILED = { 'msg_id': 722, 'loglevel': base_logging.ERROR, - 'msg': _LE('Failed to copy a volume. (copy method: %(copy_method)s, ' - 'P-VOL: %(pvol)s, S-VOL: %(svol)s)'), + 'msg': 'Failed to copy a volume. 
(copy method: %(copy_method)s, ' + 'P-VOL: %(pvol)s, S-VOL: %(svol)s)', 'suffix': ERROR_SUFFIX } @@ -615,14 +612,14 @@ def check_opt_value(conf, names): def output_storage_cli_info(name, version): """Output storage CLI info to the log file.""" - LOG.info(_LI('\t%(name)-35s%(version)s'), + LOG.info('\t%(name)-35s%(version)s', {'name': name + ' version: ', 'version': version}) def output_opt_info(conf, names): """Output parameter names and values to the log file.""" for name in names: - LOG.info(_LI('\t%(name)-35s%(attr)s'), + LOG.info('\t%(name)-35s%(attr)s', {'name': name + ': ', 'attr': getattr(conf, name)}) diff --git a/cinder/volume/drivers/hpe/hpe_3par_common.py b/cinder/volume/drivers/hpe/hpe_3par_common.py index fb935f1cfa2..ec5045d898a 100644 --- a/cinder/volume/drivers/hpe/hpe_3par_common.py +++ b/cinder/volume/drivers/hpe/hpe_3par_common.py @@ -60,7 +60,7 @@ from oslo_utils import units from cinder import context from cinder import exception from cinder import flow_utils -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder.objects import fields from cinder.volume import qos_specs from cinder.volume import utils as volume_utils @@ -434,13 +434,12 @@ class HPE3PARCommon(object): if self._replication_enabled and ( self.API_VERSION < REMOTE_COPY_API_VERSION): self._replication_enabled = False - msg = (_LE("The primary array must have an API version of " - "%(min_ver)s or higher, but is only on " - "%(current_ver)s, therefore replication is not " - "supported.") % - {'min_ver': REMOTE_COPY_API_VERSION, - 'current_ver': self.API_VERSION}) - LOG.error(msg) + LOG.error("The primary array must have an API version of " + "%(min_ver)s or higher, but is only on " + "%(current_ver)s, therefore replication is not " + "supported.", + {'min_ver': REMOTE_COPY_API_VERSION, + 'current_ver': self.API_VERSION}) except hpeexceptions.UnsupportedVersion as ex: # In the event we cannot contact the configured primary array, # we want to allow a failover if replication is enabled. @@ -451,17 +450,17 @@ class HPE3PARCommon(object): if context: # The context is None except at driver startup. 
- LOG.info(_LI("HPE3PARCommon %(common_ver)s," - "hpe3parclient %(rest_ver)s"), + LOG.info("HPE3PARCommon %(common_ver)s," + "hpe3parclient %(rest_ver)s", {"common_ver": self.VERSION, "rest_ver": hpe3parclient.get_version_string()}) if self.config.hpe3par_debug: self.client.debug_rest(True) if self.API_VERSION < SRSTATLD_API_VERSION: # Firmware version not compatible with srstatld - LOG.warning(_LW("srstatld requires " - "WSAPI version '%(srstatld_version)s' " - "version '%(version)s' is installed.") % + LOG.warning("srstatld requires " + "WSAPI version '%(srstatld_version)s' " + "version '%(version)s' is installed.", {'srstatld_version': SRSTATLD_API_VERSION, 'version': self.API_VERSION}) @@ -585,13 +584,11 @@ class HPE3PARCommon(object): cg_name = self._get_3par_vvs_name(group.id) self.client.deleteVolumeSet(cg_name) except hpeexceptions.HTTPNotFound: - err = (_LW("Virtual Volume Set '%s' doesn't exist on array.") % - cg_name) - LOG.warning(err) + LOG.warning("Virtual Volume Set '%s' doesn't exist on array.", + cg_name) except hpeexceptions.HTTPConflict as e: - err = (_LE("Conflict detected in Virtual Volume Set" - " %(volume_set)s: %(error)s")) - LOG.error(err, + LOG.error("Conflict detected in Virtual Volume Set" + " %(volume_set)s: %(error)s", {"volume_set": cg_name, "error": e}) @@ -602,10 +599,10 @@ class HPE3PARCommon(object): self.delete_volume(volume) volume_update['status'] = 'deleted' except Exception as ex: - LOG.error(_LE("There was an error deleting volume %(id)s: " - "%(error)s."), + LOG.error("There was an error deleting volume %(id)s: " + "%(error)s.", {'id': volume.id, - 'error': six.text_type(ex)}) + 'error': ex}) volume_update['status'] = 'error' volume_model_updates.append(volume_update) @@ -623,7 +620,7 @@ class HPE3PARCommon(object): try: self.client.addVolumeToVolumeSet(volume_set_name, volume_name) except hpeexceptions.HTTPNotFound: - msg = (_LE('Virtual Volume Set %s does not exist.') % + msg = (_('Virtual Volume Set %s does not exist.') % volume_set_name) LOG.error(msg) raise exception.InvalidInput(reason=msg) @@ -634,7 +631,7 @@ class HPE3PARCommon(object): self.client.removeVolumeFromVolumeSet( volume_set_name, volume_name) except hpeexceptions.HTTPNotFound: - msg = (_LE('Virtual Volume Set %s does not exist.') % + msg = (_('Virtual Volume Set %s does not exist.') % volume_set_name) LOG.error(msg) raise exception.InvalidInput(reason=msg) @@ -697,13 +694,13 @@ class HPE3PARCommon(object): except hpeexceptions.HTTPNotFound as ex: # We'll let this act as if it worked # it helps clean up the cinder entries. - LOG.warning(_LW("Delete Snapshot id not found. Removing from " - "cinder: %(id)s Ex: %(msg)s"), + LOG.warning("Delete Snapshot id not found. 
Removing from " + "cinder: %(id)s Ex: %(msg)s", {'id': snapshot['id'], 'msg': ex}) snapshot_update['status'] = fields.SnapshotStatus.ERROR except Exception as ex: - LOG.error(_LE("There was an error deleting snapshot %(id)s: " - "%(error)s."), + LOG.error("There was an error deleting snapshot %(id)s: " + "%(error)s.", {'id': snapshot['id'], 'error': six.text_type(ex)}) snapshot_update['status'] = fields.SnapshotStatus.ERROR @@ -771,35 +768,34 @@ class HPE3PARCommon(object): # Ensure that snapCPG is set if 'snapCPG' not in vol: new_vals['snapCPG'] = vol['userCPG'] - LOG.info(_LI("Virtual volume %(disp)s '%(new)s' snapCPG " - "is empty so it will be set to: %(cpg)s"), + LOG.info("Virtual volume %(disp)s '%(new)s' snapCPG " + "is empty so it will be set to: %(cpg)s", {'disp': display_name, 'new': new_vol_name, 'cpg': new_vals['snapCPG']}) # Update the existing volume with the new name and comments. self.client.modifyVolume(target_vol_name, new_vals) - LOG.info(_LI("Virtual volume '%(ref)s' renamed to '%(new)s'."), + LOG.info("Virtual volume '%(ref)s' renamed to '%(new)s'.", {'ref': existing_ref['source-name'], 'new': new_vol_name}) retyped = False model_update = None if volume_type: - LOG.info(_LI("Virtual volume %(disp)s '%(new)s' is " - "being retyped."), + LOG.info("Virtual volume %(disp)s '%(new)s' is being retyped.", {'disp': display_name, 'new': new_vol_name}) try: retyped, model_update = self._retype_from_no_type(volume, volume_type) - LOG.info(_LI("Virtual volume %(disp)s successfully retyped to " - "%(new_type)s."), + LOG.info("Virtual volume %(disp)s successfully retyped to " + "%(new_type)s.", {'disp': display_name, 'new_type': volume_type.get('name')}) except Exception: with excutils.save_and_reraise_exception(): - LOG.warning(_LW("Failed to manage virtual volume %(disp)s " - "due to error during retype."), + LOG.warning("Failed to manage virtual volume %(disp)s " + "due to error during retype.", {'disp': display_name}) # Try to undo the rename and clear the new comment. self.client.modifyVolume( @@ -811,8 +807,7 @@ class HPE3PARCommon(object): if retyped and model_update: updates.update(model_update) - LOG.info(_LI("Virtual volume %(disp)s '%(new)s' is " - "now being managed."), + LOG.info("Virtual volume %(disp)s '%(new)s' is now being managed.", {'disp': display_name, 'new': new_vol_name}) # Return display name to update the name displayed in the GUI and @@ -884,12 +879,12 @@ class HPE3PARCommon(object): # Update the existing snapshot with the new name and comments. self.client.modifyVolume(target_snap_name, new_vals) - LOG.info(_LI("Snapshot '%(ref)s' renamed to '%(new)s'."), + LOG.info("Snapshot '%(ref)s' renamed to '%(new)s'.", {'ref': existing_ref['source-name'], 'new': new_snap_name}) updates = {'display_name': display_name} - LOG.info(_LI("Snapshot %(disp)s '%(new)s' is now being managed."), + LOG.info("Snapshot %(disp)s '%(new)s' is now being managed.", {'disp': display_name, 'new': new_snap_name}) # Return display name to update the name displayed in the GUI. @@ -956,8 +951,8 @@ class HPE3PARCommon(object): new_vol_name = self._get_3par_unm_name(volume['id']) self.client.modifyVolume(vol_name, {'newName': new_vol_name}) - LOG.info(_LI("Virtual volume %(disp)s '%(vol)s' is no longer managed. " - "Volume renamed to '%(new)s'."), + LOG.info("Virtual volume %(disp)s '%(vol)s' is no longer managed. 
" + "Volume renamed to '%(new)s'.", {'disp': volume['display_name'], 'vol': vol_name, 'new': new_vol_name}) @@ -982,8 +977,8 @@ class HPE3PARCommon(object): new_snap_name = self._get_3par_ums_name(snapshot['id']) self.client.modifyVolume(snap_name, {'newName': new_snap_name}) - LOG.info(_LI("Snapshot %(disp)s '%(vol)s' is no longer managed. " - "Snapshot renamed to '%(new)s'."), + LOG.info("Snapshot %(disp)s '%(vol)s' is no longer managed. " + "Snapshot renamed to '%(new)s'.", {'disp': snapshot['display_name'], 'vol': snap_name, 'new': new_snap_name}) @@ -1049,8 +1044,8 @@ class HPE3PARCommon(object): growth_size_mib, _convert_to_base=True) else: - LOG.error(_LE("Error extending volume: %(vol)s. " - "Exception: %(ex)s"), + LOG.error("Error extending volume: %(vol)s. " + "Exception: %(ex)s", {'vol': volume_name, 'ex': ex}) return model_update @@ -1280,9 +1275,9 @@ class HPE3PARCommon(object): interval, history) except Exception as ex: - LOG.warning(_LW("Exception at getCPGStatData() " - "for cpg: '%(cpg_name)s' " - "Reason: '%(reason)s'") % + LOG.warning("Exception at getCPGStatData() " + "for cpg: '%(cpg_name)s' " + "Reason: '%(reason)s'", {'cpg_name': cpg_name, 'reason': ex}) if 'numTDVVs' in cpg: total_volumes = int( @@ -1401,7 +1396,7 @@ class HPE3PARCommon(object): break if found_vlun is None: - LOG.info(_LI("3PAR vlun %(name)s not found on host %(host)s"), + LOG.info("3PAR vlun %(name)s not found on host %(host)s", {'name': volume_name, 'host': hostname}) return found_vlun @@ -1435,10 +1430,8 @@ class HPE3PARCommon(object): volume_vluns.append(vlun) if not volume_vluns: - msg = ( - _LW("3PAR vlun for volume %(name)s not found on " - "host %(host)s"), {'name': volume_name, 'host': hostname}) - LOG.warning(msg) + LOG.warning("3PAR vlun for volume %(name)s not found on host " + "%(host)s", {'name': volume_name, 'host': hostname}) return # VLUN Type of MATCHED_SET 4 requires the port to be provided @@ -1484,9 +1477,9 @@ class HPE3PARCommon(object): # for future needs (e.g. export volume to host set). # The log info explains why the host was left alone. - LOG.info(_LI("3PAR vlun for volume '%(name)s' was deleted, " - "but the host '%(host)s' was not deleted " - "because: %(reason)s"), + LOG.info("3PAR vlun for volume '%(name)s' was deleted, " + "but the host '%(host)s' was not deleted " + "because: %(reason)s", {'name': volume_name, 'host': hostname, 'reason': ex.get_description()}) @@ -1573,7 +1566,7 @@ class HPE3PARCommon(object): self.client.createQoSRules(vvs_name, qosRule) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error creating QOS rule %s"), qosRule) + LOG.error("Error creating QOS rule %s", qosRule) def get_flash_cache_policy(self, hpe3par_keys): if hpe3par_keys is not None: @@ -1603,11 +1596,11 @@ class HPE3PARCommon(object): try: self.client.modifyVolumeSet(vvs_name, flashCachePolicy=flash_cache) - LOG.info(_LI("Flash Cache policy set to %s"), flash_cache) + LOG.info("Flash Cache policy set to %s", flash_cache) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error setting Flash Cache policy " - "to %s - exception"), flash_cache) + LOG.error("Error setting Flash Cache policy " + "to %s - exception", flash_cache) def _add_volume_to_volume_set(self, volume, volume_name, cpg, vvs_name, qos, flash_cache): @@ -1731,13 +1724,13 @@ class HPE3PARCommon(object): # The cpg was specified in a volume type extra spec so it # needs to be validated that it's in the correct domain. 
# log warning here - msg = _LW("'hpe3par:cpg' is not supported as an extra spec " - "in a volume type. CPG's are chosen by " - "the cinder scheduler, as a pool, from the " - "cinder.conf entry 'hpe3par_cpg', which can " - "be a list of CPGs.") + msg = ("'hpe3par:cpg' is not supported as an extra spec " + "in a volume type. CPG's are chosen by " + "the cinder scheduler, as a pool, from the " + "cinder.conf entry 'hpe3par_cpg', which can " + "be a list of CPGs.") versionutils.report_deprecated_feature(LOG, msg) - LOG.info(_LI("Using pool %(pool)s instead of %(cpg)s"), + LOG.info("Using pool %(pool)s instead of %(cpg)s", {'pool': pool, 'cpg': cpg}) cpg = pool @@ -1875,7 +1868,7 @@ class HPE3PARCommon(object): except exception.InvalidInput as ex: # Delete the volume if unable to add it to the volume set self.client.deleteVolume(volume_name) - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise exception.CinderException(ex) # v2 replication check @@ -1889,16 +1882,16 @@ class HPE3PARCommon(object): LOG.error(msg) raise exception.Duplicate(msg) except hpeexceptions.HTTPBadRequest as ex: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise exception.Invalid(ex.get_description()) except exception.InvalidInput as ex: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise except exception.CinderException as ex: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise except Exception as ex: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise exception.CinderException(ex) return self._get_model_update(volume['host'], cpg, @@ -1990,7 +1983,7 @@ class HPE3PARCommon(object): # let the snapshot die in an hour optional['expirationHours'] = 1 - LOG.info(_LI("Creating temp snapshot %(snap)s from volume %(vol)s"), + LOG.info("Creating temp snapshot %(snap)s from volume %(vol)s", {'snap': snap_name, 'vol': vol_name}) self.client.createSnapshot(snap_name, vol_name, optional) @@ -2076,7 +2069,7 @@ class HPE3PARCommon(object): except hpeexceptions.HTTPNotFound: raise exception.NotFound() except Exception as ex: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise exception.CinderException(ex) def delete_volume(self, volume): @@ -2108,10 +2101,10 @@ class HPE3PARCommon(object): # the volume once it stops the copy. self.client.stopOnlinePhysicalCopy(volume_name) else: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise else: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise except hpeexceptions.HTTPConflict as ex: if ex.get_code() == 34: @@ -2158,7 +2151,7 @@ class HPE3PARCommon(object): if snap.startswith('tss-'): # looks like we found a temp snapshot. LOG.info( - _LI("Found a temporary snapshot %(name)s"), + "Found a temporary snapshot %(name)s", {'name': snap}) try: self.client.deleteVolume(snap) @@ -2177,23 +2170,23 @@ class HPE3PARCommon(object): msg = _("Volume has children and cannot be deleted!") raise exception.VolumeIsBusy(message=msg) else: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise exception.VolumeIsBusy(message=ex.get_description()) except hpeexceptions.HTTPNotFound as ex: # We'll let this act as if it worked # it helps clean up the cinder entries. - LOG.warning(_LW("Delete volume id not found. Removing from " - "cinder: %(id)s Ex: %(msg)s"), + LOG.warning("Delete volume id not found. 
Removing from " + "cinder: %(id)s Ex: %(msg)s", {'id': volume['id'], 'msg': ex}) except hpeexceptions.HTTPForbidden as ex: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise exception.NotAuthorized(ex.get_description()) except hpeexceptions.HTTPConflict as ex: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise exception.VolumeIsBusy(message=ex.get_description()) except Exception as ex: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise exception.CinderException(ex) def create_volume_from_snapshot(self, volume, snapshot, snap_name=None, @@ -2252,8 +2245,8 @@ class HPE3PARCommon(object): {'id': volume['id'], 'size': growth_size}) self.client.growVolume(volume_name, growth_size_mib) except Exception as ex: - LOG.error(_LE("Error extending volume %(id)s. " - "Ex: %(ex)s"), + LOG.error("Error extending volume %(id)s. " + "Ex: %(ex)s", {'id': volume['id'], 'ex': ex}) # Delete the volume if unable to grow it self.client.deleteVolume(volume_name) @@ -2272,7 +2265,7 @@ class HPE3PARCommon(object): except Exception as ex: # Delete the volume if unable to add it to the volume set self.client.deleteVolume(volume_name) - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise exception.CinderException(ex) # v2 replication check @@ -2282,13 +2275,13 @@ class HPE3PARCommon(object): model_update['provider_location'] = self.client.id except hpeexceptions.HTTPForbidden as ex: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise exception.NotAuthorized() except hpeexceptions.HTTPNotFound as ex: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise exception.NotFound() except Exception as ex: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise exception.CinderException(ex) return model_update @@ -2326,10 +2319,10 @@ class HPE3PARCommon(object): self.client.createSnapshot(snap_name, vol_name, optional) except hpeexceptions.HTTPForbidden as ex: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise exception.NotAuthorized() except hpeexceptions.HTTPNotFound as ex: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise exception.NotFound() def migrate_volume(self, volume, host): @@ -2360,8 +2353,8 @@ class HPE3PARCommon(object): try: ret = self.retype(volume, volume_type, None, host) except Exception as e: - LOG.info(_LI('3PAR driver cannot perform migration. ' - 'Retype exception: %s'), e) + LOG.info('3PAR driver cannot perform migration. 
' + 'Retype exception: %s', e) LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s, ' 'status=%(status)s.', dbg) @@ -2389,11 +2382,11 @@ class HPE3PARCommon(object): try: volumeMods = {'newName': original_name} self.client.modifyVolume(current_name, volumeMods) - LOG.info(_LI("Volume name changed from %(tmp)s to %(orig)s"), + LOG.info("Volume name changed from %(tmp)s to %(orig)s", {'tmp': current_name, 'orig': original_name}) except Exception as e: - LOG.error(_LE("Changing the volume name from %(tmp)s to " - "%(orig)s failed because %(reason)s"), + LOG.error("Changing the volume name from %(tmp)s to " + "%(orig)s failed because %(reason)s", {'tmp': current_name, 'orig': original_name, 'reason': e}) name_id = new_volume['_name_id'] or new_volume['id'] @@ -2473,23 +2466,20 @@ class HPE3PARCommon(object): # Rename the new volume to the original name self.client.modifyVolume(temp_vol_name, {'newName': volume_name}) - LOG.info(_LI('Completed: convert_to_base_volume: ' - 'id=%s.'), volume['id']) + LOG.info('Completed: convert_to_base_volume: ' + 'id=%s.', volume['id']) except hpeexceptions.HTTPConflict: msg = _("Volume (%s) already exists on array.") % volume_name LOG.error(msg) raise exception.Duplicate(msg) except hpeexceptions.HTTPBadRequest as ex: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise exception.Invalid(ex.get_description()) - except exception.InvalidInput as ex: - LOG.error(_LE("Exception: %s"), ex) - raise except exception.CinderException as ex: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise except Exception as ex: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise exception.CinderException(ex) return self._get_model_update(volume['host'], cpg) @@ -2502,13 +2492,13 @@ class HPE3PARCommon(object): snap_name = self._get_3par_snap_name(snapshot['id']) self.client.deleteVolume(snap_name) except hpeexceptions.HTTPForbidden as ex: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise exception.NotAuthorized() except hpeexceptions.HTTPNotFound as ex: # We'll let this act as if it worked # it helps clean up the cinder entries. - LOG.warning(_LW("Delete Snapshot id not found. Removing from " - "cinder: %(id)s Ex: %(msg)s"), + LOG.warning("Delete Snapshot id not found. Removing from " + "cinder: %(id)s Ex: %(msg)s", {'id': snapshot['id'], 'msg': ex}) except hpeexceptions.HTTPConflict as ex: if (ex.get_code() == 32): @@ -2518,7 +2508,7 @@ class HPE3PARCommon(object): for snap in snaps: if snap.startswith('tss-'): LOG.info( - _LI("Found a temporary snapshot %(name)s"), + "Found a temporary snapshot %(name)s", {'name': snap}) try: self.client.deleteVolume(snap) @@ -2537,7 +2527,7 @@ class HPE3PARCommon(object): msg = _("Snapshot has children and cannot be deleted!") raise exception.SnapshotIsBusy(message=msg) else: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) raise exception.SnapshotIsBusy(message=ex.get_description()) def _get_3par_hostname_from_wwn_iqn(self, wwns, iqns): @@ -2585,24 +2575,24 @@ class HPE3PARCommon(object): # return out of the terminate connection in order for things # to be updated correctly. if self._active_backend_id: - LOG.warning(_LW("Because the host is currently in a " - "failed-over state, the volume will not " - "be properly detached from the primary " - "array. The detach will be considered a " - "success as far as Cinder is concerned. 
" - "The volume can now be attached to the " - "secondary target.")) + LOG.warning("Because the host is currently in a " + "failed-over state, the volume will not " + "be properly detached from the primary " + "array. The detach will be considered a " + "success as far as Cinder is concerned. " + "The volume can now be attached to the " + "secondary target.") return else: # use the wwn to see if we can find the hostname hostname = self._get_3par_hostname_from_wwn_iqn(wwn, iqn) # no 3par host, re-throw if hostname is None: - LOG.error(_LE("Exception: %s"), e) + LOG.error("Exception: %s", e) raise else: # not a 'host does not exist' HTTPNotFound exception, re-throw - LOG.error(_LE("Exception: %s"), e) + LOG.error("Exception: %s", e) raise # try again with name retrieved from 3par @@ -2635,9 +2625,9 @@ class HPE3PARCommon(object): if old_tpvv == new_tpvv and old_tdvv == new_tdvv: if new_cpg != old_cpg: - LOG.info(_LI("Modifying %(volume_name)s userCPG " - "from %(old_cpg)s" - " to %(new_cpg)s"), + LOG.info("Modifying %(volume_name)s userCPG " + "from %(old_cpg)s" + " to %(new_cpg)s", {'volume_name': volume_name, 'old_cpg': old_cpg, 'new_cpg': new_cpg}) _response, body = self.client.modifyVolume( @@ -2656,18 +2646,18 @@ class HPE3PARCommon(object): else: if new_tpvv: cop = self.CONVERT_TO_THIN - LOG.info(_LI("Converting %(volume_name)s to thin provisioning " - "with userCPG=%(new_cpg)s"), + LOG.info("Converting %(volume_name)s to thin provisioning " + "with userCPG=%(new_cpg)s", {'volume_name': volume_name, 'new_cpg': new_cpg}) elif new_tdvv: cop = self.CONVERT_TO_DEDUP - LOG.info(_LI("Converting %(volume_name)s to thin dedup " - "provisioning with userCPG=%(new_cpg)s"), + LOG.info("Converting %(volume_name)s to thin dedup " + "provisioning with userCPG=%(new_cpg)s", {'volume_name': volume_name, 'new_cpg': new_cpg}) else: cop = self.CONVERT_TO_FULL - LOG.info(_LI("Converting %(volume_name)s to full provisioning " - "with userCPG=%(new_cpg)s"), + LOG.info("Converting %(volume_name)s to full provisioning " + "with userCPG=%(new_cpg)s", {'volume_name': volume_name, 'new_cpg': new_cpg}) try: @@ -2682,8 +2672,8 @@ class HPE3PARCommon(object): # Cannot retype with snapshots because we don't want to # use keepVV and have straggling volumes. Log additional # info and then raise. - LOG.info(_LI("tunevv failed because the volume '%s' " - "has snapshots."), volume_name) + LOG.info("tunevv failed because the volume '%s' " + "has snapshots.", volume_name) raise task_id = body['taskid'] @@ -2940,8 +2930,8 @@ class HPE3PARCommon(object): """Force failover to a secondary replication target.""" # Ensure replication is enabled before we try and failover. if not self._replication_enabled: - msg = _LE("Issuing a fail-over failed because replication is " - "not properly configured.") + msg = _("Issuing a fail-over failed because replication is " + "not properly configured.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) @@ -2988,12 +2978,12 @@ class HPE3PARCommon(object): {'volume_id': volume['id'], 'updates': {'replication_status': 'failed-over'}}) except Exception as ex: - msg = (_LE("There was a problem with the failover " - "(%(error)s) and it was unsuccessful. " - "Volume '%(volume)s will not be available " - "on the failed over target."), - {'error': six.text_type(ex), - 'volume': volume['id']}) + LOG.error("There was a problem with the failover " + "(%(error)s) and it was unsuccessful. 
" + "Volume '%(volume)s will not be available " + "on the failed over target.", + {'error': ex, + 'volume': volume['id']}) LOG.error(msg) volume_update_list.append( {'volume_id': volume['id'], @@ -3112,32 +3102,29 @@ class HPE3PARCommon(object): wsapi_version = cl.getWsApiVersion()['build'] if wsapi_version < REMOTE_COPY_API_VERSION: - msg = (_LW("The secondary array must have an API " - "version of %(min_ver)s or higher. Array " - "'%(target)s' is on %(target_ver)s, " - "therefore it will not be added as a valid " - "replication target.") % - {'target': array_name, - 'min_ver': REMOTE_COPY_API_VERSION, - 'target_ver': wsapi_version}) - LOG.warning(msg) + LOG.warning("The secondary array must have an API " + "version of %(min_ver)s or higher. Array " + "'%(target)s' is on %(target_ver)s, " + "therefore it will not be added as a " + "valid replication target.", + {'target': array_name, + 'min_ver': REMOTE_COPY_API_VERSION, + 'target_ver': wsapi_version}) elif not self._is_valid_replication_array(remote_array): - msg = (_LW("'%s' is not a valid replication array. " - "In order to be valid, backend_id, " - "replication_mode, " - "hpe3par_api_url, hpe3par_username, " - "hpe3par_password, cpg_map, san_ip, " - "san_login, and san_password " - "must be specified. If the target is " - "managed, managed_backend_name must be set " - "as well.") % array_name) - LOG.warning(msg) + LOG.warning("'%s' is not a valid replication array. " + "In order to be valid, backend_id, " + "replication_mode, " + "hpe3par_api_url, hpe3par_username, " + "hpe3par_password, cpg_map, san_ip, " + "san_login, and san_password " + "must be specified. If the target is " + "managed, managed_backend_name must be " + "set as well.", array_name) else: replication_targets.append(remote_array) except Exception: - msg = (_LE("Could not log in to 3PAR array (%s) with the " - "provided credentials.") % array_name) - LOG.error(msg) + LOG.error("Could not log in to 3PAR array (%s) with the " + "provided credentials.", array_name) finally: self._destroy_replication_client(cl) @@ -3160,8 +3147,8 @@ class HPE3PARCommon(object): rep_flag = True # Make sure there is at least one replication target. if len(self._replication_targets) < 1: - LOG.error(_LE("There must be at least one valid replication " - "device configured.")) + LOG.error("There must be at least one valid replication " + "device configured.") rep_flag = False return rep_flag @@ -3170,17 +3157,17 @@ class HPE3PARCommon(object): # Make sure replication_mode is set to either sync|periodic. mode = self._get_remote_copy_mode_num(mode) if not mode: - LOG.error(_LE("Extra spec replication:mode must be set and must " - "be either 'sync' or 'periodic'.")) + LOG.error("Extra spec replication:mode must be set and must " + "be either 'sync' or 'periodic'.") rep_flag = False else: # If replication:mode is periodic, replication_sync_period must be # set between 300 - 31622400 seconds. if mode == self.PERIODIC and ( sync_num < 300 or sync_num > 31622400): - LOG.error(_LE("Extra spec replication:sync_period must be " - "greater than 299 and less than 31622401 " - "seconds.")) + LOG.error("Extra spec replication:sync_period must be " + "greater than 299 and less than 31622401 " + "seconds.") rep_flag = False return rep_flag @@ -3590,8 +3577,8 @@ class ModifyVolumeTask(flow_utils.CinderTask): if new_snap_cpg != old_snap_cpg: # Modify the snap_cpg. This will fail with snapshots. 
- LOG.info(_LI("Modifying %(volume_name)s snap_cpg from " - "%(old_snap_cpg)s to %(new_snap_cpg)s."), + LOG.info("Modifying %(volume_name)s snap_cpg from " + "%(old_snap_cpg)s to %(new_snap_cpg)s.", {'volume_name': volume_name, 'old_snap_cpg': old_snap_cpg, 'new_snap_cpg': new_snap_cpg}) @@ -3601,7 +3588,7 @@ class ModifyVolumeTask(flow_utils.CinderTask): 'comment': json.dumps(comment_dict)}) self.needs_revert = True else: - LOG.info(_LI("Modifying %s comments."), volume_name) + LOG.info("Modifying %s comments.", volume_name) common.client.modifyVolume( volume_name, {'comment': json.dumps(comment_dict)}) @@ -3610,8 +3597,8 @@ class ModifyVolumeTask(flow_utils.CinderTask): def revert(self, common, volume_name, old_snap_cpg, new_snap_cpg, old_comment, **kwargs): if self.needs_revert: - LOG.info(_LI("Retype revert %(volume_name)s snap_cpg from " - "%(new_snap_cpg)s back to %(old_snap_cpg)s."), + LOG.info("Retype revert %(volume_name)s snap_cpg from " + "%(new_snap_cpg)s back to %(old_snap_cpg)s.", {'volume_name': volume_name, 'new_snap_cpg': new_snap_cpg, 'old_snap_cpg': old_snap_cpg}) @@ -3620,7 +3607,7 @@ class ModifyVolumeTask(flow_utils.CinderTask): volume_name, {'snapCPG': old_snap_cpg, 'comment': old_comment}) except Exception as ex: - LOG.error(_LE("Exception during snapCPG revert: %s"), ex) + LOG.error("Exception during snapCPG revert: %s", ex) class TuneVolumeTask(flow_utils.CinderTask): @@ -3692,8 +3679,8 @@ class ModifySpecsTask(flow_utils.CinderTask): except hpeexceptions.HTTPNotFound as ex: # HTTPNotFound(code=102) is OK. Set does not exist. if ex.get_code() != 102: - LOG.error(_LE("Unexpected error when retype() tried to " - "deleteVolumeSet(%s)"), vvs_name) + LOG.error("Unexpected error when retype() tried to " + "deleteVolumeSet(%s)", vvs_name) raise if new_vvs or new_qos or new_flash_cache: @@ -3714,21 +3701,21 @@ class ModifySpecsTask(flow_utils.CinderTask): except hpeexceptions.HTTPNotFound as ex: # HTTPNotFound(code=102) is OK. Set does not exist. if ex.get_code() != 102: - LOG.error(_LE("Unexpected error when retype() revert " - "tried to deleteVolumeSet(%s)"), vvs_name) + LOG.error("Unexpected error when retype() revert " + "tried to deleteVolumeSet(%s)", vvs_name) except Exception: - LOG.error(_LE("Unexpected error when retype() revert " - "tried to deleteVolumeSet(%s)"), vvs_name) + LOG.error("Unexpected error when retype() revert " + "tried to deleteVolumeSet(%s)", vvs_name) if old_vvs is not None or old_qos is not None: try: common._add_volume_to_volume_set( volume, volume_name, old_cpg, old_vvs, old_qos) except Exception as ex: - LOG.error(_LE("%(exception)s: Exception during revert of " - "retype for volume %(volume_name)s. " - "Original volume set/QOS settings may not " - "have been fully restored."), + LOG.error("%(exception)s: Exception during revert of " + "retype for volume %(volume_name)s. " + "Original volume set/QOS settings may not " + "have been fully restored.", {'exception': ex, 'volume_name': volume_name}) if new_vvs is not None and old_vvs != new_vvs: @@ -3736,10 +3723,10 @@ class ModifySpecsTask(flow_utils.CinderTask): common.client.removeVolumeFromVolumeSet( new_vvs, volume_name) except Exception as ex: - LOG.error(_LE("%(exception)s: Exception during revert of " - "retype for volume %(volume_name)s. " - "Failed to remove from new volume set " - "%(new_vvs)s."), + LOG.error("%(exception)s: Exception during revert of " + "retype for volume %(volume_name)s. 
" + "Failed to remove from new volume set " + "%(new_vvs)s.", {'exception': ex, 'volume_name': volume_name, 'new_vvs': new_vvs}) diff --git a/cinder/volume/drivers/hpe/hpe_3par_fc.py b/cinder/volume/drivers/hpe/hpe_3par_fc.py index b9676929c31..9b9be24ea27 100644 --- a/cinder/volume/drivers/hpe/hpe_3par_fc.py +++ b/cinder/volume/drivers/hpe/hpe_3par_fc.py @@ -38,7 +38,7 @@ from oslo_log import log as logging from oslo_utils.excutils import save_and_reraise_exception from cinder import exception -from cinder.i18n import _, _LI, _LW, _LE +from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume import driver @@ -137,10 +137,10 @@ class HPE3PARFCDriver(driver.ManageableVD, common.client_login() except Exception: if common._replication_enabled: - LOG.warning(_LW("The primary array is not reachable at this " - "time. Since replication is enabled, " - "listing replication targets and failing over " - "a volume can still be performed.")) + LOG.warning("The primary array is not reachable at this " + "time. Since replication is enabled, " + "listing replication targets and failing over " + "a volume can still be performed.") pass else: raise @@ -378,8 +378,8 @@ class HPE3PARFCDriver(driver.ManageableVD, common.client.getHostVLUNs(hostname) except hpeexceptions.HTTPNotFound: # No more exports for this host. - LOG.info(_LI("Need to remove FC Zone, building initiator " - "target map")) + LOG.info("Need to remove FC Zone, building initiator " + "target map") target_wwns, init_targ_map, _numPaths = \ self._build_initiator_target_map(common, connector) @@ -455,7 +455,7 @@ class HPE3PARFCDriver(driver.ManageableVD, optional={'domain': domain, 'persona': persona_id}) except hpeexceptions.HTTPConflict as path_conflict: - msg = _LE("Create FC host caught HTTP conflict code: %s") + msg = "Create FC host caught HTTP conflict code: %s" LOG.exception(msg, path_conflict.get_code()) with save_and_reraise_exception(reraise=False) as ctxt: if path_conflict.get_code() is EXISTENT_PATH: @@ -480,8 +480,8 @@ class HPE3PARFCDriver(driver.ManageableVD, try: common.client.modifyHost(hostname, mod_request) except hpeexceptions.HTTPConflict as path_conflict: - msg = _LE("Modify FC Host %(hostname)s caught " - "HTTP conflict code: %(code)s") + msg = ("Modify FC Host %(hostname)s caught " + "HTTP conflict code: %(code)s") LOG.exception(msg, {'hostname': hostname, 'code': path_conflict.get_code()}) diff --git a/cinder/volume/drivers/hpe/hpe_3par_iscsi.py b/cinder/volume/drivers/hpe/hpe_3par_iscsi.py index c1530265d1b..50aa0e084df 100644 --- a/cinder/volume/drivers/hpe/hpe_3par_iscsi.py +++ b/cinder/volume/drivers/hpe/hpe_3par_iscsi.py @@ -39,7 +39,7 @@ from oslo_log import log as logging from oslo_utils.excutils import save_and_reraise_exception from cinder import exception -from cinder.i18n import _, _LE, _LW +from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume import driver @@ -148,10 +148,10 @@ class HPE3PARISCSIDriver(driver.ManageableVD, common.client_login() except Exception: if common._replication_enabled: - LOG.warning(_LW("The primary array is not reachable at this " - "time. Since replication is enabled, " - "listing replication targets and failing over " - "a volume can still be performed.")) + LOG.warning("The primary array is not reachable at this " + "time. 
Since replication is enabled, " + "listing replication targets and failing over " + "a volume can still be performed.") pass else: raise @@ -218,7 +218,7 @@ class HPE3PARISCSIDriver(driver.ManageableVD, elif len(ip) == 2: temp_iscsi_ip[ip[0]] = {'ip_port': ip[1]} else: - LOG.warning(_LW("Invalid IP address format '%s'"), ip_addr) + LOG.warning("Invalid IP address format '%s'", ip_addr) # add the single value iscsi_ip_address option to the IP dictionary. # This way we can see if it's a valid iSCSI IP. If it's not valid, @@ -250,9 +250,9 @@ class HPE3PARISCSIDriver(driver.ManageableVD, # lets see if there are invalid iSCSI IPs left in the temp dict if len(temp_iscsi_ip) > 0: - LOG.warning(_LW("Found invalid iSCSI IP address(s) in " - "configuration option(s) hpe3par_iscsi_ips or " - "iscsi_ip_address '%s.'"), + LOG.warning("Found invalid iSCSI IP address(s) in " + "configuration option(s) hpe3par_iscsi_ips or " + "iscsi_ip_address '%s.'", (", ".join(temp_iscsi_ip))) if not len(iscsi_ip_list) > 0: @@ -408,9 +408,9 @@ class HPE3PARISCSIDriver(driver.ManageableVD, target_iqns.append(port['iSCSIName']) target_luns.append(vlun['lun']) else: - LOG.warning(_LW("iSCSI IP: '%s' was not found in " - "hpe3par_iscsi_ips list defined in " - "cinder.conf."), iscsi_ip) + LOG.warning("iSCSI IP: '%s' was not found in " + "hpe3par_iscsi_ips list defined in " + "cinder.conf.", iscsi_ip) info = {'driver_volume_type': 'iscsi', 'data': {'target_portals': target_portals, @@ -446,8 +446,8 @@ class HPE3PARISCSIDriver(driver.ManageableVD, vlun = existing_vlun if least_used_nsp is None: - LOG.warning(_LW("Least busy iSCSI port not found, " - "using first iSCSI port in list.")) + LOG.warning("Least busy iSCSI port not found, " + "using first iSCSI port in list.") iscsi_ip = list(iscsi_ips)[0] else: iscsi_ip = self._get_ip_using_nsp(least_used_nsp, common) @@ -535,7 +535,7 @@ class HPE3PARISCSIDriver(driver.ManageableVD, optional={'domain': domain, 'persona': persona_id}) except hpeexceptions.HTTPConflict as path_conflict: - msg = _LE("Create iSCSI host caught HTTP conflict code: %s") + msg = "Create iSCSI host caught HTTP conflict code: %s" with save_and_reraise_exception(reraise=False) as ctxt: if path_conflict.get_code() is EXISTENT_PATH: # Handle exception : EXISTENT_PATH - host WWN/iSCSI @@ -617,9 +617,9 @@ class HPE3PARISCSIDriver(driver.ManageableVD, host = common._get_3par_host(hostname) elif (not host['initiatorChapEnabled'] and common._client_conf['hpe3par_iscsi_chap_enabled']): - LOG.warning(_LW("Host exists without CHAP credentials set and " - "has iSCSI attachments but CHAP is enabled. " - "Updating host with new CHAP credentials.")) + LOG.warning("Host exists without CHAP credentials set and " + "has iSCSI attachments but CHAP is enabled. " + "Updating host with new CHAP credentials.") self._set_3par_chaps( common, hostname, @@ -649,12 +649,12 @@ class HPE3PARISCSIDriver(driver.ManageableVD, host_info = common.client.getHost(chap_username) if not host_info['initiatorChapEnabled']: - LOG.warning(_LW("Host has no CHAP key, but CHAP is enabled.")) + LOG.warning("Host has no CHAP key, but CHAP is enabled.") except hpeexceptions.HTTPNotFound: chap_password = volume_utils.generate_password(16) - LOG.warning(_LW("No host or VLUNs exist. Generating new " - "CHAP key.")) + LOG.warning("No host or VLUNs exist. Generating new " + "CHAP key.") else: # Get a list of all iSCSI VLUNs and see if there is already a CHAP # key assigned to one of them. 
Use that CHAP key if present, @@ -682,12 +682,12 @@ class HPE3PARISCSIDriver(driver.ManageableVD, "but CHAP is enabled. Skipping.", vlun['remoteName']) else: - LOG.warning(_LW("Non-iSCSI VLUN detected.")) + LOG.warning("Non-iSCSI VLUN detected.") if not chap_exists: chap_password = volume_utils.generate_password(16) - LOG.warning(_LW("No VLUN contained CHAP credentials. " - "Generating new CHAP key.")) + LOG.warning("No VLUN contained CHAP credentials. " + "Generating new CHAP key.") # Add CHAP credentials to the volume metadata vol_name = common._get_3par_vol_name(volume['id']) @@ -720,7 +720,7 @@ class HPE3PARISCSIDriver(driver.ManageableVD, vol_name = common._get_3par_vol_name(volume['id']) common.client.getVolume(vol_name) except hpeexceptions.HTTPNotFound: - LOG.error(_LE("Volume %s doesn't exist on array."), vol_name) + LOG.error("Volume %s doesn't exist on array.", vol_name) else: metadata = common.client.getAllVolumeMetaData(vol_name) diff --git a/cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py b/cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py index 9a9729f9918..3ce8514ec0e 100644 --- a/cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py +++ b/cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py @@ -43,7 +43,7 @@ from oslo_utils import units from cinder import context from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import interface from cinder.objects import fields from cinder import utils as cinder_utils @@ -331,9 +331,9 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): """Set up LeftHand client.""" if not hpelefthandclient: # Checks if client was successfully imported - ex_msg = (_("HPELeftHand client is not installed. Please" - " install using 'pip install " - "python-lefthandclient'.")) + ex_msg = _("HPELeftHand client is not installed. Please" + " install using 'pip install " + "python-lefthandclient'.") LOG.error(ex_msg) raise exception.VolumeDriverException(ex_msg) @@ -356,12 +356,12 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): try: self.api_version = client.getApiVersion() - LOG.info(_LI("HPELeftHand API version %s"), self.api_version) + LOG.info("HPELeftHand API version %s", self.api_version) if self.api_version < MIN_API_VERSION: - LOG.warning(_LW("HPELeftHand API is version %(current)s. " - "A minimum version of %(min)s is needed for " - "manage/unmanage support."), + LOG.warning("HPELeftHand API is version %(current)s. " + "A minimum version of %(min)s is needed for " + "manage/unmanage support.", {'current': self.api_version, 'min': MIN_API_VERSION}) finally: @@ -446,7 +446,7 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): volume_info = client.getVolumeByName(volume['name']) client.deleteVolume(volume_info['id']) except hpeexceptions.HTTPNotFound: - LOG.error(_LE("Volume did not exist. It will not be deleted")) + LOG.error("Volume did not exist. 
It will not be deleted") except Exception as ex: raise exception.VolumeBackendAPIException(ex) finally: @@ -493,10 +493,10 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): self.delete_volume(volume) volume_update['status'] = 'deleted' except Exception as ex: - LOG.error(_LE("There was an error deleting volume %(id)s: " - "%(error)s."), + LOG.error("There was an error deleting volume %(id)s: " + "%(error)s.", {'id': volume.id, - 'error': six.text_type(ex)}) + 'error': ex}) volume_update['status'] = 'error' volume_model_updates.append(volume_update) @@ -532,8 +532,8 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): volume_info = client.getVolumeByName(volume_name) except Exception as ex: error = six.text_type(ex) - LOG.error(_LE("Could not find volume with name %(name)s. " - "Error: %(error)s"), + LOG.error("Could not find volume with name %(name)s. " + "Error: %(error)s", {'name': volume_name, 'error': error}) raise exception.VolumeBackendAPIException(data=error) @@ -558,7 +558,7 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): client.createSnapshotSet(source_volume_id, snap_set, optional) except Exception as ex: error = six.text_type(ex) - LOG.error(_LE("Could not create snapshot set. Error: '%s'"), + LOG.error("Could not create snapshot set. Error: '%s'", error) raise exception.VolumeBackendAPIException( data=error) @@ -591,12 +591,12 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): in_use_msg = ('cannot be deleted because it is a clone ' 'point') if in_use_msg in ex.get_description(): - LOG.error(_LE("The snapshot cannot be deleted because " - "it is a clone point.")) + LOG.error("The snapshot cannot be deleted because " + "it is a clone point.") snapshot_update['status'] = fields.SnapshotStatus.ERROR except Exception as ex: - LOG.error(_LE("There was an error deleting snapshot %(id)s: " - "%(error)s."), + LOG.error("There was an error deleting snapshot %(id)s: " + "%(error)s.", {'id': snapshot['id'], 'error': six.text_type(ex)}) snapshot_update['status'] = fields.SnapshotStatus.ERROR @@ -632,7 +632,7 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): snap_info = client.getSnapshotByName(snapshot['name']) client.deleteSnapshot(snap_info['id']) except hpeexceptions.HTTPNotFound: - LOG.error(_LE("Snapshot did not exist. It will not be deleted")) + LOG.error("Snapshot did not exist. It will not be deleted") except hpeexceptions.HTTPServerError as ex: in_use_msg = 'cannot be deleted because it is a clone point' if in_use_msg in ex.get_description(): @@ -782,13 +782,13 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): # return out of the terminate connection in order for things # to be updated correctly. if self._active_backend_id: - LOG.warning(_LW("Because the host is currently in a " - "failed-over state, the volume will not " - "be properly detached from the primary " - "array. The detach will be considered a " - "success as far as Cinder is concerned. " - "The volume can now be attached to the " - "secondary target.")) + LOG.warning("Because the host is currently in a " + "failed-over state, the volume will not " + "be properly detached from the primary " + "array. The detach will be considered a " + "success as far as Cinder is concerned. " + "The volume can now be attached to the " + "secondary target.") return else: raise exception.VolumeBackendAPIException(ex) @@ -866,8 +866,8 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): if key in valid_keys: prefix = key.split(":") if prefix[0] == "hplh": - LOG.warning(_LW("The 'hplh' prefix is deprecated. 
Use " - "'hpelh' instead.")) + LOG.warning("The 'hplh' prefix is deprecated. Use " + "'hpelh' instead.") extra_specs_of_interest[key] = value return extra_specs_of_interest @@ -884,8 +884,8 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): client_value = value_map[value] client_options[client_key] = client_value except KeyError: - LOG.error(_LE("'%(value)s' is an invalid value " - "for extra spec '%(key)s'"), + LOG.error("'%(value)s' is an invalid value " + "for extra spec '%(key)s'", {'value': value, 'key': key}) return client_options @@ -906,11 +906,11 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): server_info = client.getServerByName(connector['host']) chap_secret = server_info['chapTargetSecret'] if not chap_enabled and chap_secret: - LOG.warning(_LW('CHAP secret exists for host %s but CHAP is ' - 'disabled'), connector['host']) + LOG.warning('CHAP secret exists for host %s but CHAP is ' + 'disabled', connector['host']) if chap_enabled and chap_secret is None: - LOG.warning(_LW('CHAP is enabled, but server secret not ' - 'configured on server %s'), connector['host']) + LOG.warning('CHAP is enabled, but server secret not ' + 'configured on server %s', connector['host']) return server_info except hpeexceptions.HTTPNotFound: # server does not exist, so create one @@ -984,7 +984,7 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): except hpeexceptions.HTTPNotFound: raise exception.VolumeNotFound(volume_id=volume['id']) except Exception as ex: - LOG.warning(_LW("%s"), ex) + LOG.warning("%s", ex) finally: self._logout(client) @@ -1031,20 +1031,20 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): virtual_ips = cluster_info['virtualIPAddresses'] if driver != self.__class__.__name__: - LOG.info(_LI("Cannot provide backend assisted migration for " - "volume: %s because volume is from a different " - "backend."), volume['name']) + LOG.info("Cannot provide backend assisted migration for " + "volume: %s because volume is from a different " + "backend.", volume['name']) return false_ret if vip != virtual_ips[0]['ipV4Address']: - LOG.info(_LI("Cannot provide backend assisted migration for " - "volume: %s because cluster exists in different " - "management group."), volume['name']) + LOG.info("Cannot provide backend assisted migration for " + "volume: %s because cluster exists in different " + "management group.", volume['name']) return false_ret except hpeexceptions.HTTPNotFound: - LOG.info(_LI("Cannot provide backend assisted migration for " - "volume: %s because cluster exists in different " - "management group."), volume['name']) + LOG.info("Cannot provide backend assisted migration for " + "volume: %s because cluster exists in different " + "management group.", volume['name']) return false_ret finally: self._logout(client) @@ -1056,9 +1056,9 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): # can't migrate if server is attached if volume_info['iscsiSessions'] is not None: - LOG.info(_LI("Cannot provide backend assisted migration " - "for volume: %s because the volume has been " - "exported."), volume['name']) + LOG.info("Cannot provide backend assisted migration " + "for volume: %s because the volume has been " + "exported.", volume['name']) return false_ret # can't migrate if volume has snapshots @@ -1067,20 +1067,20 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): 'fields=snapshots,snapshots[resource[members[name]]]') LOG.debug('Snapshot info: %s', snap_info) if snap_info['snapshots']['resource'] is not None: - LOG.info(_LI("Cannot provide backend assisted migration " - 
"for volume: %s because the volume has " - "snapshots."), volume['name']) + LOG.info("Cannot provide backend assisted migration " + "for volume: %s because the volume has " + "snapshots.", volume['name']) return false_ret options = {'clusterName': cluster} client.modifyVolume(volume_info['id'], options) except hpeexceptions.HTTPNotFound: - LOG.info(_LI("Cannot provide backend assisted migration for " - "volume: %s because volume does not exist in this " - "management group."), volume['name']) + LOG.info("Cannot provide backend assisted migration for " + "volume: %s because volume does not exist in this " + "management group.", volume['name']) return false_ret except hpeexceptions.HTTPServerError as ex: - LOG.error(_LE("Exception: %s"), ex) + LOG.error("Exception: %s", ex) return false_ret finally: self._logout(client) @@ -1109,11 +1109,11 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): volume_info = client.getVolumeByName(current_name) volumeMods = {'name': original_name} client.modifyVolume(volume_info['id'], volumeMods) - LOG.info(_LI("Volume name changed from %(tmp)s to %(orig)s."), + LOG.info("Volume name changed from %(tmp)s to %(orig)s.", {'tmp': current_name, 'orig': original_name}) except Exception as e: - LOG.error(_LE("Changing the volume name from %(tmp)s to " - "%(orig)s failed because %(reason)s."), + LOG.error("Changing the volume name from %(tmp)s to " + "%(orig)s failed because %(reason)s.", {'tmp': current_name, 'orig': original_name, 'reason': e}) name_id = new_volume['_name_id'] or new_volume['id'] @@ -1172,7 +1172,7 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): finally: self._logout(client) - LOG.info(_LI("Virtual volume '%(ref)s' renamed to '%(new)s'."), + LOG.info("Virtual volume '%(ref)s' renamed to '%(new)s'.", {'ref': existing_ref['source-name'], 'new': new_vol_name}) display_name = None @@ -1180,8 +1180,7 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): display_name = volume['display_name'] if volume_type: - LOG.info(_LI("Virtual volume %(disp)s '%(new)s' is " - "being retyped."), + LOG.info("Virtual volume %(disp)s '%(new)s' is being retyped.", {'disp': display_name, 'new': new_vol_name}) try: @@ -1190,14 +1189,14 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): volume_type, volume_type['extra_specs'], volume['host']) - LOG.info(_LI("Virtual volume %(disp)s successfully retyped to " - "%(new_type)s."), + LOG.info("Virtual volume %(disp)s successfully retyped to " + "%(new_type)s.", {'disp': display_name, 'new_type': volume_type.get('name')}) except Exception: with excutils.save_and_reraise_exception(): - LOG.warning(_LW("Failed to manage virtual volume %(disp)s " - "due to error during retype."), + LOG.warning("Failed to manage virtual volume %(disp)s " + "due to error during retype.", {'disp': display_name}) # Try to undo the rename and clear the new comment. 
client = self._login() @@ -1210,8 +1209,7 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): updates = {'display_name': display_name} - LOG.info(_LI("Virtual volume %(disp)s '%(new)s' is " - "now being managed."), + LOG.info("Virtual volume %(disp)s '%(new)s' is now being managed.", {'disp': display_name, 'new': new_vol_name}) # Return display name to update the name displayed in the GUI and @@ -1293,7 +1291,7 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): "Snapshot '%s'.") % snapshot_info['id']) LOG.error(err) - LOG.info(_LI("Snapshot '%(ref)s' renamed to '%(new)s'."), + LOG.info("Snapshot '%(ref)s' renamed to '%(new)s'.", {'ref': existing_ref['source-name'], 'new': new_snap_name}) display_name = None @@ -1302,8 +1300,7 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): updates = {'display_name': display_name} - LOG.info(_LI("Snapshot %(disp)s '%(new)s' is " - "now being managed."), + LOG.info("Snapshot %(disp)s '%(new)s' is now being managed.", {'disp': display_name, 'new': new_snap_name}) return updates @@ -1393,8 +1390,8 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): finally: self._logout(client) - LOG.info(_LI("Virtual volume %(disp)s '%(vol)s' is no longer managed. " - "Volume renamed to '%(new)s'."), + LOG.info("Virtual volume %(disp)s '%(vol)s' is no longer managed. " + "Volume renamed to '%(new)s'.", {'disp': volume['display_name'], 'vol': volume['name'], 'new': new_vol_name}) @@ -1424,8 +1421,8 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): new_snap_name = 'ums-' + six.text_type(snapshot['id']) options = {'name': new_snap_name} client.modifySnapshot(snapshot_info['id'], options) - LOG.info(_LI("Snapshot %(disp)s '%(vol)s' is no longer managed. " - "Snapshot renamed to '%(new)s'."), + LOG.info("Snapshot %(disp)s '%(vol)s' is no longer managed. " + "Snapshot renamed to '%(new)s'.", {'disp': snapshot['display_name'], 'vol': snapshot['name'], 'new': new_snap_name}) @@ -1493,9 +1490,9 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): "_Pri") client.stopRemoteSnapshotSchedule(name) except Exception: - LOG.warning(_LW("The primary array is currently " - "offline, remote copy has been " - "automatically paused.")) + LOG.warning("The primary array is currently " + "offline, remote copy has been " + "automatically paused.") finally: self._logout(client) @@ -1525,13 +1522,12 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): 'provider_location': prov_location['provider_location']}}) except Exception as ex: - msg = (_LE("There was a problem with the failover " - "(%(error)s) and it was unsuccessful. " - "Volume '%(volume)s will not be available " - "on the failed over target."), - {'error': six.text_type(ex), - 'volume': volume['id']}) - LOG.error(msg) + LOG.error("There was a problem with the failover " + "(%(error)s) and it was unsuccessful. " + "Volume '%(volume)s will not be available " + "on the failed over target.", + {'error': six.text_type(ex), + 'volume': volume['id']}) volume_update_list.append( {'volume_id': volume['id'], 'updates': {'replication_status': 'error'}}) @@ -1592,32 +1588,29 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): remote_array['cluster_vip'] = virtual_ips[0]['ipV4Address'] if api_version < MIN_API_VERSION: - msg = (_LW("The secondary array must have an API " - "version of %(min_ver)s or higher. 
" - "Array '%(target)s' is on %(target_ver)s, " - "therefore it will not be added as a valid " - "replication target.") % - {'min_ver': MIN_API_VERSION, - 'target': array_name, - 'target_ver': api_version}) - LOG.warning(msg) + LOG.warning("The secondary array must have an API " + "version of %(min_ver)s or higher. " + "Array '%(target)s' is on %(target_ver)s, " + "therefore it will not be added as a " + "valid replication target.", + {'min_ver': MIN_API_VERSION, + 'target': array_name, + 'target_ver': api_version}) elif not self._is_valid_replication_array(remote_array): - msg = (_LW("'%s' is not a valid replication array. " - "In order to be valid, backend_id, " - "hpelefthand_api_url, " - "hpelefthand_username, " - "hpelefthand_password, and " - "hpelefthand_clustername, " - "must be specified. If the target is " - "managed, managed_backend_name must be set " - "as well.") % array_name) - LOG.warning(msg) + LOG.warning("'%s' is not a valid replication array. " + "In order to be valid, backend_id, " + "hpelefthand_api_url, " + "hpelefthand_username, " + "hpelefthand_password, and " + "hpelefthand_clustername, " + "must be specified. If the target is " + "managed, managed_backend_name must be " + "set as well.", array_name) else: replication_targets.append(remote_array) except Exception: - msg = (_LE("Could not log in to LeftHand array (%s) with " - "the provided credentials.") % array_name) - LOG.error(msg) + LOG.error("Could not log in to LeftHand array (%s) with " + "the provided credentials.", array_name) finally: self._destroy_replication_client(cl) @@ -1667,13 +1660,12 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): # The secondary array was not able to execute the fail-back # properly. The replication status is now in an unknown # state, so we will treat it as an error. - msg = (_LE("There was a problem with the failover " - "(%(error)s) and it was unsuccessful. " - "Volume '%(volume)s will not be available " - "on the failed over target."), - {'error': six.text_type(ex), - 'volume': volume['id']}) - LOG.error(msg) + LOG.error("There was a problem with the failover " + "(%(error)s) and it was unsuccessful. " + "Volume '%(volume)s will not be available " + "on the failed over target.", + {'error': ex, + 'volume': volume['id']}) volume_update_list.append( {'volume_id': volume['id'], 'updates': {'replication_status': 'error'}}) @@ -1715,9 +1707,8 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): is_ready = False break except Exception as ex: - LOG.error(_LW("There was a problem when trying to determine if " - "the volume can be failed-back: %s") % - six.text_type(ex)) + LOG.error("There was a problem when trying to determine if " + "the volume can be failed-back: %s", ex) is_ready = False finally: self._destroy_replication_client(cl) @@ -1745,8 +1736,8 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): rep_flag = True # Make sure there is at least one replication target. if len(self._replication_targets) < 1: - LOG.error(_LE("There must be at least one valid replication " - "device configured.")) + LOG.error("There must be at least one valid replication " + "device configured.") rep_flag = False return rep_flag @@ -1820,10 +1811,10 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): # If there is no extra_spec value for replication sync period, we # will default it to the required minimum and log a warning. 
replication_sync_period = self.MIN_REP_SYNC_PERIOD - LOG.warning(_LW("There was no extra_spec value for %(spec_name)s, " - "so the default value of %(def_val)s will be " - "used. To overwrite this, set this value in the " - "volume type extra_specs."), + LOG.warning("There was no extra_spec value for %(spec_name)s, " + "so the default value of %(def_val)s will be " + "used. To overwrite this, set this value in the " + "volume type extra_specs.", {'spec_name': self.EXTRA_SPEC_REP_SYNC_PERIOD, 'def_val': self.MIN_REP_SYNC_PERIOD}) @@ -1841,10 +1832,10 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): # If there is no extra_spec value for retention count, we # will default it and log a warning. retention_count = self.DEFAULT_RETENTION_COUNT - LOG.warning(_LW("There was no extra_spec value for %(spec_name)s, " - "so the default value of %(def_val)s will be " - "used. To overwrite this, set this value in the " - "volume type extra_specs."), + LOG.warning("There was no extra_spec value for %(spec_name)s, " + "so the default value of %(def_val)s will be " + "used. To overwrite this, set this value in the " + "volume type extra_specs.", {'spec_name': self.EXTRA_SPEC_REP_RETENTION_COUNT, 'def_val': self.DEFAULT_RETENTION_COUNT}) @@ -1863,10 +1854,10 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver): # will default it and log a warning. remote_retention_count = self.DEFAULT_REMOTE_RETENTION_COUNT spec_name = self.EXTRA_SPEC_REP_REMOTE_RETENTION_COUNT - LOG.warning(_LW("There was no extra_spec value for %(spec_name)s, " - "so the default value of %(def_val)s will be " - "used. To overwrite this, set this value in the " - "volume type extra_specs."), + LOG.warning("There was no extra_spec value for %(spec_name)s, " + "so the default value of %(def_val)s will be " + "used. 
To overwrite this, set this value in the " + "volume type extra_specs.", {'spec_name': spec_name, 'def_val': self.DEFAULT_REMOTE_RETENTION_COUNT}) diff --git a/cinder/volume/drivers/huawei/huawei_driver.py b/cinder/volume/drivers/huawei/huawei_driver.py index 9853c932310..f1f43fa6f91 100644 --- a/cinder/volume/drivers/huawei/huawei_driver.py +++ b/cinder/volume/drivers/huawei/huawei_driver.py @@ -27,7 +27,7 @@ from oslo_utils import units from cinder import context from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume import driver @@ -153,7 +153,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): self.metro_flag = True else: self.metro_flag = False - LOG.warning(_LW("Remote device not configured in cinder.conf")) + LOG.warning("Remote device not configured in cinder.conf") # init replication manager if replica_client_conf: self.replica_client = rest_client.RestClient(self.configuration, @@ -295,12 +295,12 @@ class HuaweiBaseDriver(driver.VolumeDriver): if words and len(words) == 2 and words[0] in ('<is>', '<in>'): opts[key] = words[1].lower() elif key == 'replication_type': - LOG.error(_LE("Extra specs must be specified as " - "replication_type='<in> sync' or " - "'<in> async'.")) + LOG.error("Extra specs must be specified as " + "replication_type='<in> sync' or " + "'<in> async'.") else: - LOG.error(_LE("Extra specs must be specified as " - "capabilities:%s='<is> True'."), key) + LOG.error("Extra specs must be specified as " + "capabilities:%s='<is> True'.", key) if ((scope in opts_capability) and (key in opts_value) @@ -329,7 +329,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): 'READCACHEPOLICY': self.configuration.lun_read_cache_policy, 'WRITECACHEPOLICY': self.configuration.lun_write_cache_policy, } - LOG.info(_LI('volume: %(volume)s, lun params: %(params)s.'), + LOG.info('volume: %(volume)s, lun params: %(params)s.', {'volume': volume.id, 'params': params}) return params @@ -394,7 +394,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): metro_info = metro.create_hypermetro(lun_id, lun_params) model_update['metadata'].update(metro_info) except exception.VolumeBackendAPIException as err: - LOG.error(_LE('Create hypermetro error: %s.'), err) + LOG.error('Create hypermetro error: %s.', err) self._delete_lun_with_check(lun_id) raise @@ -405,7 +405,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): replica_model) model_update.update(replica_info) except Exception as err: - LOG.exception(_LE('Create replication volume error.')) + LOG.exception('Create replication volume error.') self._delete_lun_with_check(lun_id) raise @@ -467,7 +467,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): try: metro.delete_hypermetro(volume) except exception.VolumeBackendAPIException as err: - LOG.error(_LE('Delete hypermetro error: %s.'), err) + LOG.error('Delete hypermetro error: %s.', err) # We have checked the LUN WWN above, # no need to check again here.
self._delete_volume(volume) @@ -480,7 +480,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): self.replica.delete_replica(volume) except exception.VolumeBackendAPIException as err: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Delete replication error.")) + LOG.exception("Delete replication error.") self._delete_volume(volume) self._delete_volume(volume) @@ -525,7 +525,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): try: result = self.client.get_lun_migration_task() except Exception: - LOG.error(_LE("Get LUN migration error.")) + LOG.error("Get LUN migration error.") return False if 'data' in result: @@ -583,7 +583,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): try: self.client.rename_lun(lun_id, original_name) except exception.VolumeBackendAPIException: - LOG.error(_LE('Unable to rename lun %s on array.'), current_name) + LOG.error('Unable to rename lun %s on array.', current_name) return {'_name_id': new_volume.name_id} LOG.debug("Renamed lun from %(current_name)s to %(original_name)s " @@ -693,7 +693,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): lun_id = lun_info['ID'] if qos: - LOG.info(_LI('QoS: %s.'), qos) + LOG.info('QoS: %s.', qos) SmartQos = smartx.SmartQos(self.client) SmartQos.add(qos, lun_id) if opts: @@ -740,9 +740,9 @@ class HuaweiBaseDriver(driver.VolumeDriver): tgt_lun_id = model_update['provider_location'] luncopy_name = huawei_utils.encode_name(volume.id) - LOG.info(_LI( + LOG.info( 'create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, ' - 'tgt_lun_id: %(tgt_lun_id)s, copy_name: %(copy_name)s.'), + 'tgt_lun_id: %(tgt_lun_id)s, copy_name: %(copy_name)s.', {'src_lun_id': snapshot_id, 'tgt_lun_id': tgt_lun_id, 'copy_name': luncopy_name}) @@ -792,9 +792,9 @@ class HuaweiBaseDriver(driver.VolumeDriver): # Delete snapshot. self.delete_snapshot(snapshot) except exception.VolumeBackendAPIException: - LOG.warning(_LW( + LOG.warning( 'Failure deleting the snapshot %(snapshot_id)s ' - 'of volume %(volume_id)s.'), + 'of volume %(volume_id)s.', {'snapshot_id': snapshot.id, 'volume_id': src_vref.id},) @@ -855,9 +855,9 @@ class HuaweiBaseDriver(driver.VolumeDriver): new_size = int(new_size) * units.Gi / 512 if new_size == old_size: - LOG.info(_LI("New size is equal to the real size from backend" - " storage, no need to extend." - " realsize: %(oldsize)s, newsize: %(newsize)s."), + LOG.info("New size is equal to the real size from backend" + " storage, no need to extend." 
+ " realsize: %(oldsize)s, newsize: %(newsize)s.", {'oldsize': old_size, 'newsize': new_size}) return @@ -871,9 +871,9 @@ class HuaweiBaseDriver(driver.VolumeDriver): raise exception.VolumeBackendAPIException(data=msg) volume_name = huawei_utils.encode_name(volume.id) - LOG.info(_LI( + LOG.info( 'Extend volume: %(volumename)s, ' - 'oldsize: %(oldsize)s, newsize: %(newsize)s.'), + 'oldsize: %(oldsize)s, newsize: %(newsize)s.', {'volumename': volume_name, 'oldsize': old_size, 'newsize': new_size}) @@ -905,9 +905,9 @@ class HuaweiBaseDriver(driver.VolumeDriver): snapshotname = huawei_utils.encode_name(snapshot.id) volume_name = huawei_utils.encode_name(snapshot.volume_id) - LOG.info(_LI( + LOG.info( 'stop_snapshot: snapshot name: %(snapshot)s, ' - 'volume name: %(volume)s.'), + 'volume name: %(volume)s.', {'snapshot': snapshotname, 'volume': volume_name},) @@ -919,7 +919,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): self.client.stop_snapshot(snapshot_id) self.client.delete_snapshot(snapshot_id) else: - LOG.warning(_LW("Can't find snapshot on the array.")) + LOG.warning("Can't find snapshot on the array.") def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type.""" @@ -944,8 +944,8 @@ class HuaweiBaseDriver(driver.VolumeDriver): model_update.update({'replication_status': 'disabled', 'replication_driver_data': None}) except exception.VolumeBackendAPIException: - LOG.exception(_LE('Retype volume error. ' - 'Delete replication failed.')) + LOG.exception('Retype volume error. ' + 'Delete replication failed.') return False try: @@ -954,14 +954,14 @@ class HuaweiBaseDriver(driver.VolumeDriver): "change %(change_opts)s.", {"lun_id": lun_id, "change_opts": change_opts}) if not self._migrate_volume(volume, host, new_type): - LOG.warning(_LW("Storage-assisted migration failed during " - "retype.")) + LOG.warning("Storage-assisted migration failed during " + "retype.") return False else: # Modify lun to change policy self.modify_lun(lun_id, change_opts) except exception.VolumeBackendAPIException: - LOG.exception(_LE('Retype volume error.')) + LOG.exception('Retype volume error.') return False if replica_enabled_change and replica_enabled_change[1] == 'true': @@ -974,8 +974,8 @@ class HuaweiBaseDriver(driver.VolumeDriver): lun_info, replica_type_change[1]) model_update.update(replica_info) except exception.VolumeBackendAPIException: - LOG.exception(_LE('Retype volume error. ' - 'Create replication failed.')) + LOG.exception('Retype volume error. 
' + 'Create replication failed.') return False return (True, model_update) @@ -991,9 +991,9 @@ class HuaweiBaseDriver(driver.VolumeDriver): self.client.remove_lun_from_partition(lun_id, old_id) if new_id: self.client.add_lun_to_partition(lun_id, new_id) - LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartpartition from " - "(name: %(old_name)s, id: %(old_id)s) to " - "(name: %(new_name)s, id: %(new_id)s) success."), + LOG.info("Retype LUN(id: %(lun_id)s) smartpartition from " + "(name: %(old_name)s, id: %(old_id)s) to " + "(name: %(new_name)s, id: %(new_id)s) success.", {"lun_id": lun_id, "old_id": old_id, "old_name": old_name, "new_id": new_id, "new_name": new_name}) @@ -1008,9 +1008,9 @@ class HuaweiBaseDriver(driver.VolumeDriver): self.client.remove_lun_from_cache(lun_id, old_id) if new_id: self.client.add_lun_to_cache(lun_id, new_id) - LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartcache from " - "(name: %(old_name)s, id: %(old_id)s) to " - "(name: %(new_name)s, id: %(new_id)s) successfully."), + LOG.info("Retype LUN(id: %(lun_id)s) smartcache from " + "(name: %(old_name)s, id: %(old_id)s) to " + "(name: %(new_name)s, id: %(new_id)s) successfully.", {'lun_id': lun_id, 'old_id': old_id, "old_name": old_name, 'new_id': new_id, "new_name": new_name}) @@ -1018,8 +1018,8 @@ class HuaweiBaseDriver(driver.VolumeDriver): if change_opts.get('policy'): old_policy, new_policy = change_opts['policy'] self.client.change_lun_smarttier(lun_id, new_policy) - LOG.info(_LI("Retype LUN(id: %(lun_id)s) smarttier policy from " - "%(old_policy)s to %(new_policy)s success."), + LOG.info("Retype LUN(id: %(lun_id)s) smarttier policy from " + "%(old_policy)s to %(new_policy)s success.", {'lun_id': lun_id, 'old_policy': old_policy, 'new_policy': new_policy}) @@ -1034,8 +1034,8 @@ class HuaweiBaseDriver(driver.VolumeDriver): if new_qos: smart_qos = smartx.SmartQos(self.client) smart_qos.add(new_qos, lun_id) - LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartqos from " - "%(old_qos_value)s to %(new_qos)s success."), + LOG.info("Retype LUN(id: %(lun_id)s) smartqos from " + "%(old_qos_value)s to %(new_qos)s success.", {'lun_id': lun_id, 'old_qos_value': old_qos_value, 'new_qos': new_qos}) @@ -1349,7 +1349,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): # Can't check whether the LUN has SplitMirror with it, # just pass the check and log it. split_mirrors = [] - LOG.warning(_LW('No license for SplitMirror.')) + LOG.warning('No license for SplitMirror.') else: msg = _("Failed to get SplitMirror.") raise exception.VolumeBackendAPIException(data=msg) @@ -1376,7 +1376,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): # Can't check whether the LUN has migration task with it, # just pass the check and log it. 
migration_tasks = [] - LOG.warning(_LW('No license for migration.')) + LOG.warning('No license for migration.') else: msg = _("Failed to get migration task.") raise exception.VolumeBackendAPIException(data=msg) @@ -1482,7 +1482,7 @@ class HuaweiBaseDriver(driver.VolumeDriver): model_update.update(replica_info) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Manage exist volume failed.")) + LOG.exception("Manage exist volume failed.") return model_update @@ -1657,8 +1657,8 @@ class HuaweiBaseDriver(driver.VolumeDriver): def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Create cgsnapshot.""" - LOG.info(_LI('Create cgsnapshot for consistency group' - ': %(group_id)s'), + LOG.info('Create cgsnapshot for consistency group' + ': %(group_id)s', {'group_id': cgsnapshot.consistencygroup_id}) model_update = {} @@ -1689,16 +1689,16 @@ class HuaweiBaseDriver(driver.VolumeDriver): added_snapshots_info.append(info) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Create cgsnapshots failed. " - "Cgsnapshot id: %s."), cgsnapshot.id) + LOG.error("Create cgsnapshots failed. " + "Cgsnapshot id: %s.", cgsnapshot.id) snapshot_ids = [added_snapshot['ID'] for added_snapshot in added_snapshots_info] try: self.client.activate_snapshot(snapshot_ids) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Active cgsnapshots failed. " - "Cgsnapshot id: %s."), cgsnapshot.id) + LOG.error("Active cgsnapshots failed. " + "Cgsnapshot id: %s.", cgsnapshot.id) model_update['status'] = 'available' @@ -1706,8 +1706,8 @@ class HuaweiBaseDriver(driver.VolumeDriver): def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Delete consistency group snapshot.""" - LOG.info(_LI('Delete cgsnapshot %(snap_id)s for consistency group: ' - '%(group_id)s'), + LOG.info('Delete cgsnapshot %(snap_id)s for consistency group: ' + '%(group_id)s', {'snap_id': cgsnapshot.id, 'group_id': cgsnapshot.consistencygroup_id}) @@ -1722,8 +1722,8 @@ class HuaweiBaseDriver(driver.VolumeDriver): 'status': 'deleted'}) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Delete cg snapshots failed. " - "Cgsnapshot id: %s"), cgsnapshot.id) + LOG.error("Delete cg snapshots failed. 
" + "Cgsnapshot id: %s", cgsnapshot.id) return model_update, snapshots_model_update @@ -1913,18 +1913,18 @@ class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver): """Map a volume to a host and return target iSCSI information.""" lun_id, lun_type = self.get_lun_id_and_type(volume) initiator_name = connector['initiator'] - LOG.info(_LI( + LOG.info( 'initiator name: %(initiator_name)s, ' - 'LUN ID: %(lun_id)s.'), + 'LUN ID: %(lun_id)s.', {'initiator_name': initiator_name, 'lun_id': lun_id}) (iscsi_iqns, target_ips, portgroup_id) = self.client.get_iscsi_params(connector) - LOG.info(_LI('initialize_connection, iscsi_iqn: %(iscsi_iqn)s, ' - 'target_ip: %(target_ip)s, ' - 'portgroup_id: %(portgroup_id)s.'), + LOG.info('initialize_connection, iscsi_iqn: %(iscsi_iqn)s, ' + 'target_ip: %(target_ip)s, ' + 'portgroup_id: %(portgroup_id)s.', {'iscsi_iqn': iscsi_iqns, 'target_ip': target_ips, 'portgroup_id': portgroup_id},) @@ -1948,7 +1948,7 @@ class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver): hostlun_id = self.client.get_host_lun_id(host_id, lun_id, lun_type) - LOG.info(_LI("initialize_connection, host lun id is: %s."), + LOG.info("initialize_connection, host lun id is: %s.", hostlun_id) chapinfo = self.client.find_chap_info(self.client.iscsi_info, @@ -1977,7 +1977,7 @@ class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver): properties['auth_username'] = chap_username properties['auth_password'] = chap_password - LOG.info(_LI("initialize_connection success. Return data: %s."), + LOG.info("initialize_connection success. Return data: %s.", properties) return {'driver_volume_type': 'iscsi', 'data': properties} @@ -1989,9 +1989,9 @@ class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver): host_name = connector['host'] lungroup_id = None - LOG.info(_LI( + LOG.info( 'terminate_connection: initiator name: %(ini)s, ' - 'LUN ID: %(lunid)s.'), + 'LUN ID: %(lunid)s.', {'ini': initiator_name, 'lunid': lun_id},) @@ -2025,9 +2025,9 @@ class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver): lun_id, lun_type) else: - LOG.warning(_LW("LUN is not in lungroup. " - "LUN ID: %(lun_id)s. " - "Lungroup id: %(lungroup_id)s."), + LOG.warning("LUN is not in lungroup. " + "LUN ID: %(lun_id)s. " + "Lungroup id: %(lungroup_id)s.", {"lun_id": lun_id, "lungroup_id": lungroup_id}) @@ -2115,9 +2115,9 @@ class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): def initialize_connection(self, volume, connector): lun_id, lun_type = self.get_lun_id_and_type(volume) wwns = connector['wwpns'] - LOG.info(_LI( + LOG.info( 'initialize_connection, initiator: %(wwpns)s,' - ' LUN ID: %(lun_id)s.'), + ' LUN ID: %(lun_id)s.', {'wwpns': wwns, 'lun_id': lun_id},) @@ -2191,7 +2191,7 @@ class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): # Deal with hypermetro connection. 
metadata = huawei_utils.get_volume_metadata(volume) - LOG.info(_LI("initialize_connection, metadata is: %s."), metadata) + LOG.info("initialize_connection, metadata is: %s.", metadata) if 'hypermetro_id' in metadata: loc_tgt_wwn = fc_info['data']['target_wwn'] local_ini_tgt_map = fc_info['data']['initiator_target_map'] @@ -2227,7 +2227,7 @@ class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): fc_info['data']['target_lun'] = same_host_id hyperm.rmt_client.logout() - LOG.info(_LI("Return FC info is: %s."), fc_info) + LOG.info("Return FC info is: %s.", fc_info) return fc_info def _get_same_hostid(self, loc_fc_info, rmt_fc_info): @@ -2243,7 +2243,7 @@ class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): same_host_id = i break - LOG.info(_LI("The same hostid is: %s."), same_host_id) + LOG.info("The same hostid is: %s.", same_host_id) if not same_host_id: msg = _("Can't find the same host id from arrays.") LOG.error(msg) @@ -2262,8 +2262,8 @@ class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): left_lunnum = -1 lungroup_id = None view_id = None - LOG.info(_LI('terminate_connection: wwpns: %(wwns)s, ' - 'LUN ID: %(lun_id)s.'), + LOG.info('terminate_connection: wwpns: %(wwns)s, ' + 'LUN ID: %(lun_id)s.', {'wwns': wwns, 'lun_id': lun_id}) host_name = huawei_utils.encode_host_name(host_name) @@ -2282,14 +2282,14 @@ class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): lun_id, lun_type) else: - LOG.warning(_LW("LUN is not in lungroup. " - "LUN ID: %(lun_id)s. " - "Lungroup id: %(lungroup_id)s."), + LOG.warning("LUN is not in lungroup. " + "LUN ID: %(lun_id)s. " + "Lungroup id: %(lungroup_id)s.", {"lun_id": lun_id, "lungroup_id": lungroup_id}) else: - LOG.warning(_LW("Can't find lun on the array.")) + LOG.warning("Can't find lun on the array.") if lungroup_id: left_lunnum = self.client.get_obj_count_from_lungroup(lungroup_id) if int(left_lunnum) > 0: @@ -2332,7 +2332,7 @@ class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): # Deal with hypermetro connection. metadata = huawei_utils.get_volume_metadata(volume) - LOG.info(_LI("Detach Volume, metadata is: %s."), metadata) + LOG.info("Detach Volume, metadata is: %s.", metadata) if 'hypermetro_id' in metadata: hyperm = hypermetro.HuaweiHyperMetro(self.client, @@ -2340,7 +2340,7 @@ class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): self.configuration) hyperm.disconnect_volume_fc(volume, connector) - LOG.info(_LI("terminate_connection, return data is: %s."), + LOG.info("terminate_connection, return data is: %s.", fc_info) return fc_info diff --git a/cinder/volume/drivers/huawei/hypermetro.py b/cinder/volume/drivers/huawei/hypermetro.py index 535dd938bf9..9608ef53d94 100644 --- a/cinder/volume/drivers/huawei/hypermetro.py +++ b/cinder/volume/drivers/huawei/hypermetro.py @@ -17,7 +17,7 @@ from oslo_log import log as logging from cinder import exception -from cinder.i18n import _, _LI, _LW +from cinder.i18n import _ from cinder.volume.drivers.huawei import constants from cinder.volume.drivers.huawei import huawei_utils @@ -58,8 +58,8 @@ class HuaweiHyperMetro(object): local_lun_id, remote_lun_id) - LOG.info(_LI("Hypermetro id: %(metro_id)s. " - "Remote lun id: %(remote_lun_id)s."), + LOG.info("Hypermetro id: %(metro_id)s. 
" + "Remote lun id: %(remote_lun_id)s.", {'metro_id': hypermetro['ID'], 'remote_lun_id': remote_lun_id}) @@ -105,9 +105,9 @@ class HuaweiHyperMetro(object): wwns = connector['wwpns'] volume_name = huawei_utils.encode_name(volume.id) - LOG.info(_LI( + LOG.info( 'initialize_connection_fc, initiator: %(wwpns)s,' - ' volume name: %(volume)s.'), + ' volume name: %(volume)s.', {'wwpns': wwns, 'volume': volume_name}) @@ -177,7 +177,7 @@ class HuaweiHyperMetro(object): 'map_info': map_info}, } - LOG.info(_LI('Remote return FC info is: %s.'), fc_info) + LOG.info('Remote return FC info is: %s.', fc_info) return fc_info @@ -192,9 +192,9 @@ class HuaweiHyperMetro(object): lungroup_id = None view_id = None - LOG.info(_LI('terminate_connection_fc: volume name: %(volume)s, ' - 'wwpns: %(wwns)s, ' - 'lun_id: %(lunid)s.'), + LOG.info('terminate_connection_fc: volume name: %(volume)s, ' + 'wwpns: %(wwns)s, ' + 'lun_id: %(lunid)s.', {'volume': volume_name, 'wwns': wwns, 'lunid': lun_id},) @@ -217,9 +217,9 @@ class HuaweiHyperMetro(object): self.rmt_client.remove_lun_from_lungroup( lungroup_id, lun_id) else: - LOG.warning(_LW("Lun is not in lungroup. " - "Lun id: %(lun_id)s, " - "lungroup id: %(lungroup_id)s"), + LOG.warning("Lun is not in lungroup. " + "Lun id: %(lun_id)s, " + "lungroup id: %(lungroup_id)s", {"lun_id": lun_id, "lungroup_id": lungroup_id}) @@ -269,7 +269,7 @@ class HuaweiHyperMetro(object): pass def create_consistencygroup(self, group): - LOG.info(_LI("Create Consistency Group: %(group)s."), + LOG.info("Create Consistency Group: %(group)s.", {'group': group.id}) group_name = huawei_utils.encode_name(group.id) domain_name = self.configuration.metro_domain_name @@ -282,7 +282,7 @@ class HuaweiHyperMetro(object): self.client.create_metrogroup(group_name, group.id, domain_id) def delete_consistencygroup(self, context, group, volumes): - LOG.info(_LI("Delete Consistency Group: %(group)s."), + LOG.info("Delete Consistency Group: %(group)s.", {'group': group.id}) model_update = {} volumes_model_update = [] @@ -301,8 +301,8 @@ class HuaweiHyperMetro(object): def update_consistencygroup(self, context, group, add_volumes, remove_volumes): - LOG.info(_LI("Update Consistency Group: %(group)s. " - "This adds or removes volumes from a CG."), + LOG.info("Update Consistency Group: %(group)s. " + "This adds or removes volumes from a CG.", {'group': group.id}) model_update = {} model_update['status'] = group.status diff --git a/cinder/volume/drivers/huawei/replication.py b/cinder/volume/drivers/huawei/replication.py index 5c7eec2d106..ec14e5d42ef 100644 --- a/cinder/volume/drivers/huawei/replication.py +++ b/cinder/volume/drivers/huawei/replication.py @@ -20,7 +20,7 @@ from oslo_log import log as logging from oslo_utils import excutils from cinder import exception -from cinder.i18n import _, _LW, _LE +from cinder.i18n import _ from cinder.volume.drivers.huawei import constants from cinder.volume.drivers.huawei import huawei_utils @@ -195,7 +195,7 @@ class ReplicaCommonDriver(object): try: self.op.split(replica_id) except Exception as err: - LOG.warning(_LW('Split replication exception: %s.'), err) + LOG.warning('Split replication exception: %s.', err) try: self.wait_expect_state(replica_id, running_status) @@ -344,7 +344,7 @@ class ReplicaPairManager(object): info = self.rmt_client.get_array_info() return info.get('wwn') except Exception as err: - LOG.warning(_LW('Get remote array wwn failed. Error: %s.'), err) + LOG.warning('Get remote array wwn failed. 
Error: %s.', err) return None def get_remote_device_by_wwn(self, wwn): @@ -352,7 +352,7 @@ class ReplicaPairManager(object): try: devices = self.local_client.get_remote_devices() except Exception as err: - LOG.warning(_LW('Get remote devices failed. Error: %s.'), err) + LOG.warning('Get remote devices failed. Error: %s.', err) for device in devices: if device.get('WWN') == wwn: @@ -381,7 +381,7 @@ class ReplicaPairManager(object): def update_replica_capability(self, stats): is_rmt_dev_available = self.check_remote_available() if not is_rmt_dev_available: - LOG.warning(_LW('Remote device is unavailable.')) + LOG.warning('Remote device is unavailable.') return stats for pool in stats['pools']: @@ -490,7 +490,7 @@ class ReplicaPairManager(object): pair_id = pair_info['ID'] except Exception as err: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Create pair failed. Error: %s.'), err) + LOG.error('Create pair failed. Error: %s.', err) self._delete_rmt_lun(rmt_lun_id) # step4, start sync manually. If replication type is sync, @@ -500,7 +500,7 @@ class ReplicaPairManager(object): self.local_driver.sync(pair_id, wait_complete) except Exception as err: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Start synchronization failed. Error: %s.'), err) + LOG.error('Start synchronization failed. Error: %s.', err) self._delete_pair(pair_id) self._delete_rmt_lun(rmt_lun_id) @@ -559,14 +559,14 @@ class ReplicaPairManager(object): drv_data = get_replication_driver_data(v) pair_id = drv_data.get('pair_id') if not pair_id: - LOG.warning(_LW("No pair id in volume %s."), v.id) + LOG.warning("No pair id in volume %s.", v.id) v_update['updates'] = {'replication_status': 'error'} volumes_update.append(v_update) continue rmt_lun_id = drv_data.get('rmt_lun_id') if not rmt_lun_id: - LOG.warning(_LW("No remote lun id in volume %s."), v.id) + LOG.warning("No remote lun id in volume %s.", v.id) v_update['updates'] = {'replication_status': 'error'} volumes_update.append(v_update) continue @@ -609,14 +609,14 @@ class ReplicaPairManager(object): drv_data = get_replication_driver_data(v) pair_id = drv_data.get('pair_id') if not pair_id: - LOG.warning(_LW("No pair id in volume %s."), v.id) + LOG.warning("No pair id in volume %s.", v.id) v_update['updates'] = {'replication_status': 'error'} volumes_update.append(v_update) continue rmt_lun_id = drv_data.get('rmt_lun_id') if not rmt_lun_id: - LOG.warning(_LW("No remote lun id in volume %s."), v.id) + LOG.warning("No remote lun id in volume %s.", v.id) v_update['updates'] = {'replication_status': 'error'} volumes_update.append(v_update) continue diff --git a/cinder/volume/drivers/huawei/rest_client.py b/cinder/volume/drivers/huawei/rest_client.py index d3d7a87ec23..0a6b631abf1 100644 --- a/cinder/volume/drivers/huawei/rest_client.py +++ b/cinder/volume/drivers/huawei/rest_client.py @@ -25,7 +25,7 @@ from six.moves import http_cookiejar from six.moves import urllib from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import utils from cinder.volume.drivers.huawei import constants @@ -84,18 +84,18 @@ class RestClient(object): res = urllib.request.urlopen(req).read().decode("utf-8") # nosec if not log_filter_flag: - LOG.info(_LI('\n\n\n\nRequest URL: %(url)s\n\n' - 'Call Method: %(method)s\n\n' - 'Request Data: %(data)s\n\n' - 'Response Data:%(res)s\n\n'), + LOG.info('\n\n\n\nRequest URL: %(url)s\n\n' + 'Call Method: %(method)s\n\n' + 'Request Data: %(data)s\n\n' + 'Response Data:%(res)s\n\n', {'url': 
url, 'method': method, 'data': data, 'res': res}) except Exception as err: - LOG.error(_LE('Bad response from server: %(url)s.' - ' Error: %(err)s'), {'url': url, 'err': err}) + LOG.error('Bad response from server: %(url)s.' + ' Error: %(err)s', {'url': url, 'err': err}) json_msg = ('{"error":{"code": %s,"description": "Connect to ' 'server error."}}') % constants.ERROR_CONNECT_TO_SERVER res_json = json.loads(json_msg) @@ -104,7 +104,7 @@ class RestClient(object): try: res_json = json.loads(res) except Exception as err: - LOG.error(_LE('JSON transfer error: %s.'), err) + LOG.error('JSON transfer error: %s.', err) raise return res_json @@ -123,8 +123,8 @@ class RestClient(object): log_filter_flag=True) if (result['error']['code'] != 0) or ("data" not in result): - LOG.error(_LE("Login error. URL: %(url)s\n" - "Reason: %(reason)s."), + LOG.error("Login error. URL: %(url)s\n" + "Reason: %(reason)s.", {"url": item_url, "reason": result}) continue @@ -153,7 +153,7 @@ class RestClient(object): try: self.login() except Exception as err: - LOG.warning(_LW('Login failed. Error: %s.'), err) + LOG.warning('Login failed. Error: %s.', err) @utils.synchronized('huawei_cinder_call') def call(self, url, data=None, method=None, log_filter_flag=False): @@ -168,7 +168,7 @@ class RestClient(object): error_code = result['error']['code'] if (error_code == constants.ERROR_CONNECT_TO_SERVER or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER): - LOG.error(_LE("Can't open the recent url, relogin.")) + LOG.error("Can't open the recent url, relogin.") device_id = self.login() if device_id is not None: @@ -374,8 +374,8 @@ class RestClient(object): """Create a luncopy.""" url = "/luncopy" if copyspeed not in constants.LUN_COPY_SPEED_TYPES: - LOG.warning(_LW('The copy speed %(copyspeed)s is not valid, ' - 'using default value %(default)s instead.'), + LOG.warning('The copy speed %(copyspeed)s is not valid, ' + 'using default value %(default)s instead.', {'copyspeed': copyspeed, 'default': constants.LUN_COPY_SPEED_MEDIUM}) copyspeed = constants.LUN_COPY_SPEED_MEDIUM @@ -450,9 +450,9 @@ class RestClient(object): view_id = self.find_mapping_view(mapping_view_name) map_info = {} - LOG.info(_LI( + LOG.info( 'do_mapping, lun_group: %(lun_group)s, ' - 'view_id: %(view_id)s, lun_id: %(lun_id)s.'), + 'view_id: %(view_id)s, lun_id: %(lun_id)s.', {'lun_group': lungroup_id, 'view_id': view_id, 'lun_id': lun_id}) @@ -494,9 +494,9 @@ class RestClient(object): except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE( + LOG.error( 'Error occurred when adding hostgroup and lungroup to ' - 'view. Remove lun from lungroup now.')) + 'view. Remove lun from lungroup now.') self.remove_lun_from_lungroup(lungroup_id, lun_id, lun_type) return map_info @@ -548,10 +548,10 @@ class RestClient(object): """Check if host exists on the array, or create it.""" hostgroup_id = self.find_hostgroup(hostgroup_name) if hostgroup_id: - LOG.info(_LI( + LOG.info( 'create_hostgroup_with_check. ' 'hostgroup name: %(name)s, ' - 'hostgroup id: %(id)s'), + 'hostgroup id: %(id)s', {'name': hostgroup_name, 'id': hostgroup_id}) return hostgroup_id @@ -559,9 +559,9 @@ class RestClient(object): try: hostgroup_id = self._create_hostgroup(hostgroup_name) except Exception: - LOG.info(_LI( + LOG.info( 'Failed to create hostgroup: %(name)s. 
' - 'Please check if it exists on the array.'), + 'Please check if it exists on the array.', {'name': hostgroup_name}) hostgroup_id = self.find_hostgroup(hostgroup_name) if hostgroup_id is None: @@ -572,11 +572,11 @@ class RestClient(object): LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) - LOG.info(_LI( + LOG.info( 'create_hostgroup_with_check. ' 'Create hostgroup success. ' 'hostgroup name: %(name)s, ' - 'hostgroup id: %(id)s'), + 'hostgroup id: %(id)s', {'name': hostgroup_name, 'id': hostgroup_id}) return hostgroup_id @@ -648,7 +648,7 @@ class RestClient(object): host_lun_id = hostassoinfo['HostLUNID'] break except Exception as err: - LOG.error(_LE("JSON transfer data error. %s."), err) + LOG.error("JSON transfer data error. %s.", err) raise return host_lun_id @@ -663,10 +663,10 @@ class RestClient(object): def add_host_with_check(self, host_name, host_name_before_hash): host_id = self.get_host_id_by_name(host_name) if host_id: - LOG.info(_LI( + LOG.info( 'add_host_with_check. ' 'host name: %(name)s, ' - 'host id: %(id)s'), + 'host id: %(id)s', {'name': host_name, 'id': host_id}) return host_id @@ -674,9 +674,9 @@ class RestClient(object): try: host_id = self._add_host(host_name, host_name_before_hash) except Exception: - LOG.info(_LI( + LOG.info( 'Failed to create host: %(name)s. ' - 'Check if it exists on the array.'), + 'Check if it exists on the array.', {'name': host_name}) host_id = self.get_host_id_by_name(host_name) if not host_id: @@ -687,11 +687,11 @@ class RestClient(object): LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) - LOG.info(_LI( + LOG.info( 'add_host_with_check. ' 'create host success. ' 'host name: %(name)s, ' - 'host id: %(id)s'), + 'host id: %(id)s', {'name': host_name, 'id': host_id}) return host_id @@ -831,13 +831,13 @@ class RestClient(object): multipath_type = self._find_alua_info(self.iscsi_info, initiator_name) if chapinfo: - LOG.info(_LI('Use CHAP when adding initiator to host.')) + LOG.info('Use CHAP when adding initiator to host.') self._use_chap(chapinfo, initiator_name, host_id) else: self._add_initiator_to_host(initiator_name, host_id) if multipath_type: - LOG.info(_LI('Use ALUA when adding initiator to host.')) + LOG.info('Use ALUA when adding initiator to host.') self._use_alua(initiator_name, multipath_type) def find_chap_info(self, iscsi_info, initiator_name): @@ -1170,7 +1170,7 @@ class RestClient(object): LOG.debug('Request ip info is: %s.', ip_info) split_list = ip_info.split(".") newstr = split_list[1] + split_list[2] - LOG.info(_LI('New str info is: %s.'), newstr) + LOG.info('New str info is: %s.', newstr) if ip_info: if newstr[0] == 'A': @@ -1185,7 +1185,7 @@ class RestClient(object): iqn_suffix = iqn_suffix[i:] break iqn = iqn_prefix + ':' + iqn_suffix + ':' + iscsi_ip - LOG.info(_LI('_get_tgt_iqn: iSCSI target iqn is: %s.'), iqn) + LOG.info('_get_tgt_iqn: iSCSI target iqn is: %s.', iqn) return iqn def get_fc_target_wwpns(self, wwn): @@ -1271,7 +1271,7 @@ class RestClient(object): constants.STATUS_HEALTH and item['RUNNINGSTATUS'] == constants.STATUS_RUNNING): target_ip = item['IPV4ADDR'] - LOG.info(_LI('_get_tgt_ip_from_portgroup: Get ip: %s.'), + LOG.info('_get_tgt_ip_from_portgroup: Get ip: %s.', target_ip) target_ips.append(target_ip) @@ -1318,7 +1318,7 @@ class RestClient(object): # Deal with the remote tgt ip. 
if 'remote_target_ip' in connector: target_ips.append(connector['remote_target_ip']) - LOG.info(_LI('Get the default ip: %s.'), target_ips) + LOG.info('Get the default ip: %s.', target_ips) for ip in target_ips: target_iqn = self._get_tgt_iqn_from_rest(ip) @@ -1358,7 +1358,7 @@ class RestClient(object): info_list = [] target_ips = [] if result['error']['code'] != 0: - LOG.warning(_LW("Can't find target port info from rest.")) + LOG.warning("Can't find target port info from rest.") return target_ips elif not result['data']: @@ -1372,7 +1372,7 @@ class RestClient(object): info_list.append(item['ID']) if not info_list: - LOG.warning(_LW("Can't find target port info from rest.")) + LOG.warning("Can't find target port info from rest.") return target_ips for info in info_list: @@ -1391,7 +1391,7 @@ class RestClient(object): target_iqn = None if result['error']['code'] != 0: - LOG.warning(_LW("Can't find target iqn from rest.")) + LOG.warning("Can't find target iqn from rest.") return target_iqn ip_pattern = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}') if 'data' in result: @@ -1403,7 +1403,7 @@ class RestClient(object): break if not target_iqn: - LOG.warning(_LW("Can't find target iqn from rest.")) + LOG.warning("Can't find target iqn from rest.") return target_iqn split_list = target_iqn.split(",") @@ -1963,7 +1963,7 @@ class RestClient(object): if (error_code == constants.ERROR_CONNECT_TO_SERVER or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER): - LOG.error(_LE("Can not open the recent url, login again.")) + LOG.error("Can not open the recent url, login again.") self.login() result = self.call(url, None, "GET") diff --git a/cinder/volume/drivers/huawei/smartx.py b/cinder/volume/drivers/huawei/smartx.py index 2569e57073e..b7a968b8202 100644 --- a/cinder/volume/drivers/huawei/smartx.py +++ b/cinder/volume/drivers/huawei/smartx.py @@ -18,7 +18,7 @@ from oslo_utils import excutils from cinder import context from cinder import exception -from cinder.i18n import _, _LI +from cinder.i18n import _ from cinder.volume.drivers.huawei import constants from cinder.volume import qos_specs @@ -45,7 +45,7 @@ class SmartQos(object): io_type_flag = None ctxt = context.get_admin_context() kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] - LOG.info(_LI('The QoS sepcs is: %s.'), kvs) + LOG.info('The QoS sepcs is: %s.', kvs) for k, v in kvs.items(): if k not in constants.HUAWEI_VALID_KEYS: continue diff --git a/cinder/volume/drivers/ibm/flashsystem_common.py b/cinder/volume/drivers/ibm/flashsystem_common.py index 06434e47a2a..572953f137d 100644 --- a/cinder/volume/drivers/ibm/flashsystem_common.py +++ b/cinder/volume/drivers/ibm/flashsystem_common.py @@ -36,7 +36,7 @@ import six from cinder import context from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import utils from cinder.volume import driver from cinder.volume.drivers.san import san @@ -248,7 +248,7 @@ class FlashSystemDriver(san.SanDriver, self.configuration.volume_dd_blocksize) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to copy %(src)s to %(dest)s.'), + LOG.error('Failed to copy %(src)s to %(dest)s.', {'src': src_vdisk_name, 'dest': dest_vdisk_name}) finally: if not dest_map: @@ -337,8 +337,8 @@ class FlashSystemDriver(san.SanDriver, # Try to delete volume only if found on the storage vdisk_defined = self._is_vdisk_defined(name) if not vdisk_defined: - LOG.warning(_LW('warning: Tried to delete vdisk %s but ' - 'it does not exist.'), 
name) + LOG.warning('warning: Tried to delete vdisk %s but ' + 'it does not exist.', name) return ssh_cmd = ['svctask', 'rmvdisk', '-force', name] @@ -374,8 +374,7 @@ class FlashSystemDriver(san.SanDriver, try: out, err = self._ssh(ssh_cmd) except processutils.ProcessExecutionError: - LOG.warning(_LW('Failed to run command: ' - '%s.'), ssh_cmd) + LOG.warning('Failed to run command: %s.', ssh_cmd) # Does not raise exception when command encounters error. # Only return and the upper logic decides what to do. return None @@ -695,8 +694,8 @@ class FlashSystemDriver(san.SanDriver, return (map_flag, int(result_lun)) def _log_cli_output_error(self, function, cmd, out, err): - LOG.error(_LE('%(fun)s: Failed with unexpected CLI output.\n ' - 'Command: %(cmd)s\nstdout: %(out)s\nstderr: %(err)s\n'), + LOG.error('%(fun)s: Failed with unexpected CLI output.\n ' + 'Command: %(cmd)s\nstdout: %(out)s\nstderr: %(err)s\n', {'fun': function, 'cmd': cmd, 'out': six.text_type(out), @@ -765,7 +764,7 @@ class FlashSystemDriver(san.SanDriver, # try to map one volume to multiple hosts out, err = self._ssh(ssh_cmd) - LOG.info(_LI('Volume %s is mapping to multiple hosts.'), + LOG.info('Volume %s is mapping to multiple hosts.', vdisk_name) self._assert_ssh_return( 'successfully created' in out, @@ -808,7 +807,7 @@ class FlashSystemDriver(san.SanDriver, LOG.debug('enter: _remove_device') if not properties or not device: - LOG.warning(_LW('_remove_device: invalid properties or device.')) + LOG.warning('_remove_device: invalid properties or device.') return use_multipath = self.configuration.use_multipath_for_image_xfer @@ -829,8 +828,8 @@ class FlashSystemDriver(san.SanDriver, # Try to rename volume only if found on the storage vdisk_defined = self._is_vdisk_defined(vdisk_name) if not vdisk_defined: - LOG.warning(_LW('warning: Tried to rename vdisk %s but ' - 'it does not exist.'), vdisk_name) + LOG.warning('warning: Tried to rename vdisk %s but ' + 'it does not exist.', vdisk_name) return ssh_cmd = [ 'svctask', 'chvdisk', '-name', new_name, vdisk_name] @@ -841,7 +840,7 @@ class FlashSystemDriver(san.SanDriver, '_rename_vdisk %(name)s' % {'name': vdisk_name}, ssh_cmd, out, err) - LOG.info(_LI('Renamed %(vdisk)s to %(newname)s .'), + LOG.info('Renamed %(vdisk)s to %(newname)s .', {'vdisk': vdisk_name, 'newname': new_name}) def _scan_device(self, properties): @@ -881,23 +880,23 @@ class FlashSystemDriver(san.SanDriver, # name was given, but only one mapping exists, we can use that. 
mapping_data = self._get_vdiskhost_mappings(vdisk_name) if not mapping_data: - LOG.warning(_LW('_unmap_vdisk_from_host: No mapping of volume ' - '%(vol_name)s to any host found.'), + LOG.warning('_unmap_vdisk_from_host: No mapping of volume ' + '%(vol_name)s to any host found.', {'vol_name': vdisk_name}) return host_name if host_name is None: if len(mapping_data) > 1: - LOG.warning(_LW('_unmap_vdisk_from_host: Multiple mappings of ' - 'volume %(vdisk_name)s found, no host ' - 'specified.'), + LOG.warning('_unmap_vdisk_from_host: Multiple mappings of ' + 'volume %(vdisk_name)s found, no host ' + 'specified.', {'vdisk_name': vdisk_name}) return else: host_name = list(mapping_data.keys())[0] else: if host_name not in mapping_data: - LOG.error(_LE('_unmap_vdisk_from_host: No mapping of volume ' - '%(vol_name)s to host %(host_name)s found.'), + LOG.error('_unmap_vdisk_from_host: No mapping of volume ' + '%(vol_name)s to host %(host_name)s found.', {'vol_name': vdisk_name, 'host_name': host_name}) return host_name diff --git a/cinder/volume/drivers/ibm/flashsystem_fc.py b/cinder/volume/drivers/ibm/flashsystem_fc.py index b5ff82e3eb5..30f89629a80 100644 --- a/cinder/volume/drivers/ibm/flashsystem_fc.py +++ b/cinder/volume/drivers/ibm/flashsystem_fc.py @@ -32,7 +32,7 @@ from oslo_utils import excutils import six from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume.drivers.ibm import flashsystem_common as fscommon @@ -168,7 +168,7 @@ class FlashSystemFCDriver(fscommon.FlashSystemDriver): map(str.lower, map(str, connector['wwpns']))): return host else: - LOG.warning(_LW('Host %(host)s was not found on backend storage.'), + LOG.warning('Host %(host)s was not found on backend storage.', {'host': hname}) return None @@ -201,7 +201,7 @@ class FlashSystemFCDriver(fscommon.FlashSystemDriver): if 'unconfigured' != s: wwpns.add(i) node['WWPN'] = list(wwpns) - LOG.info(_LI('WWPN on node %(node)s: %(wwpn)s.'), + LOG.info('WWPN on node %(node)s: %(wwpn)s.', {'node': node['id'], 'wwpn': node['WWPN']}) def _get_vdisk_map_properties( @@ -303,9 +303,9 @@ class FlashSystemFCDriver(fscommon.FlashSystemDriver): except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): self.terminate_connection(volume, connector) - LOG.error(_LE('initialize_connection: Failed to collect ' - 'return properties for volume %(vol)s and ' - 'connector %(conn)s.'), + LOG.error('initialize_connection: Failed to collect ' + 'return properties for volume %(vol)s and ' + 'connector %(conn)s.', {'vol': volume, 'conn': connector}) LOG.debug( @@ -394,7 +394,6 @@ class FlashSystemFCDriver(fscommon.FlashSystemDriver): def validate_connector(self, connector): """Check connector.""" if 'FC' == self._protocol and 'wwpns' not in connector: - msg = _LE('The connector does not contain the ' + LOG.error('The connector does not contain the ' 'required information: wwpns is missing') - LOG.error(msg) raise exception.InvalidConnectorException(missing='wwpns') diff --git a/cinder/volume/drivers/ibm/flashsystem_iscsi.py b/cinder/volume/drivers/ibm/flashsystem_iscsi.py index 0e5ee3c4a54..edacb988891 100644 --- a/cinder/volume/drivers/ibm/flashsystem_iscsi.py +++ b/cinder/volume/drivers/ibm/flashsystem_iscsi.py @@ -32,7 +32,7 @@ from oslo_utils import excutils import six from cinder import exception -from cinder.i18n import _, _LE, _LW +from cinder.i18n import _ from cinder import interface from cinder import utils 
from cinder.volume.drivers.ibm import flashsystem_common as fscommon @@ -173,7 +173,7 @@ class FlashSystemISCSIDriver(fscommon.FlashSystemDriver): connector) return host else: - LOG.warning(_LW('Host %(host)s was not found on backend storage.'), + LOG.warning('Host %(host)s was not found on backend storage.', {'host': hname}) return None @@ -208,8 +208,8 @@ class FlashSystemISCSIDriver(fscommon.FlashSystemDriver): if not preferred_node_entry: # Get 1st node in I/O group preferred_node_entry = io_group_nodes[0] - LOG.warning(_LW('_get_vdisk_map_properties: Did not find a ' - 'preferred node for vdisk %s.'), vdisk_name) + LOG.warning('_get_vdisk_map_properties: Did not find a ' + 'preferred node for vdisk %s.', vdisk_name) properties = { 'target_discovered': False, 'target_lun': lun_id, @@ -269,8 +269,8 @@ class FlashSystemISCSIDriver(fscommon.FlashSystemDriver): except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): self.terminate_connection(volume, connector) - LOG.error(_LE('Failed to collect return properties for ' - 'volume %(vol)s and connector %(conn)s.'), + LOG.error('Failed to collect return properties for ' + 'volume %(vol)s and connector %(conn)s.', {'vol': volume, 'conn': connector}) LOG.debug( @@ -408,8 +408,7 @@ class FlashSystemISCSIDriver(fscommon.FlashSystemDriver): if 'iSCSI' == self._protocol and 'initiator' in connector: valid = True if not valid: - msg = _LE('The connector does not contain the ' + LOG.error('The connector does not contain the ' 'required information: initiator is missing') - LOG.error(msg) raise exception.InvalidConnectorException(missing=( 'initiator')) diff --git a/cinder/volume/drivers/ibm/gpfs.py b/cinder/volume/drivers/ibm/gpfs.py index 106f99366cd..964d4c01d33 100644 --- a/cinder/volume/drivers/ibm/gpfs.py +++ b/cinder/volume/drivers/ibm/gpfs.py @@ -29,7 +29,7 @@ import six from cinder import context from cinder import exception -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder.objects import fields @@ -143,7 +143,7 @@ class GPFSDriver(driver.CloneableImageVD, (out, err) = self.gpfs_execute('mmgetstate', '-Y') return out except processutils.ProcessExecutionError as exc: - LOG.error(_LE('Failed to issue mmgetstate command, error: %s.'), + LOG.error('Failed to issue mmgetstate command, error: %s.', exc.stderr) raise exception.VolumeBackendAPIException(data=exc.stderr) @@ -154,7 +154,7 @@ class GPFSDriver(driver.CloneableImageVD, state_token = lines[0].split(':').index('state') gpfs_state = lines[1].split(':')[state_token] if gpfs_state != 'active': - LOG.error(_LE('GPFS is not active. Detailed output: %s.'), out) + LOG.error('GPFS is not active. 
Detailed output: %s.', out) raise exception.VolumeBackendAPIException( data=_('GPFS is not running, state: %s.') % gpfs_state) @@ -166,8 +166,8 @@ class GPFSDriver(driver.CloneableImageVD, filesystem = lines[1].split()[0] return filesystem except processutils.ProcessExecutionError as exc: - LOG.error(_LE('Failed to issue df command for path %(path)s, ' - 'error: %(error)s.'), + LOG.error('Failed to issue df command for path %(path)s, ' + 'error: %(error)s.', {'path': path, 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) @@ -181,7 +181,7 @@ class GPFSDriver(driver.CloneableImageVD, cluster_id = lines[1].split(':')[value_token] return cluster_id except processutils.ProcessExecutionError as exc: - LOG.error(_LE('Failed to issue mmlsconfig command, error: %s.'), + LOG.error('Failed to issue mmlsconfig command, error: %s.', exc.stderr) raise exception.VolumeBackendAPIException(data=exc.stderr) @@ -191,8 +191,8 @@ class GPFSDriver(driver.CloneableImageVD, try: (out, err) = self.gpfs_execute('mmlsattr', '-L', path) except processutils.ProcessExecutionError as exc: - LOG.error(_LE('Failed to issue mmlsattr command on path %(path)s, ' - 'error: %(error)s'), + LOG.error('Failed to issue mmlsattr command on path %(path)s, ' + 'error: %(error)s', {'path': path, 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) @@ -231,8 +231,8 @@ class GPFSDriver(driver.CloneableImageVD, LOG.debug('Updated storage pool with mmchattr to %s.', new_pool) return True except processutils.ProcessExecutionError as exc: - LOG.info(_LI('Could not update storage pool with mmchattr to ' - '%(pool)s, error: %(error)s'), + LOG.info('Could not update storage pool with mmchattr to ' + '%(pool)s, error: %(error)s', {'pool': new_pool, 'error': exc.stderr}) return False @@ -246,8 +246,8 @@ class GPFSDriver(driver.CloneableImageVD, try: (out, err) = self.gpfs_execute('mmlsfs', filesystem, '-V', '-Y') except processutils.ProcessExecutionError as exc: - LOG.error(_LE('Failed to issue mmlsfs command for path %(path)s, ' - 'error: %(error)s.'), + LOG.error('Failed to issue mmlsfs command for path %(path)s, ' + 'error: %(error)s.', {'path': path, 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) @@ -267,7 +267,7 @@ class GPFSDriver(driver.CloneableImageVD, 'minreleaseLeveldaemon', '-Y') except processutils.ProcessExecutionError as exc: - LOG.error(_LE('Failed to issue mmlsconfig command, error: %s.'), + LOG.error('Failed to issue mmlsconfig command, error: %s.', exc.stderr) raise exception.VolumeBackendAPIException(data=exc.stderr) @@ -284,9 +284,9 @@ class GPFSDriver(driver.CloneableImageVD, try: self.gpfs_execute('mmlsattr', directory) except processutils.ProcessExecutionError as exc: - LOG.error(_LE('Failed to issue mmlsattr command ' - 'for path %(path)s, ' - 'error: %(error)s.'), + LOG.error('Failed to issue mmlsattr command ' + 'for path %(path)s, ' + 'error: %(error)s.', {'path': directory, 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) @@ -361,10 +361,10 @@ class GPFSDriver(driver.CloneableImageVD, if _gpfs_cluster_release_level >= GPFS_ENC_MIN_RELEASE: self._encryption_state = self._get_gpfs_encryption_status() else: - LOG.info(_LI('Downlevel GPFS Cluster Detected. GPFS ' - 'encryption-at-rest feature not enabled in cluster ' - 'daemon level %(cur)s - must be at least at ' - 'level %(min)s.'), + LOG.info('Downlevel GPFS Cluster Detected. 
GPFS ' + 'encryption-at-rest feature not enabled in cluster ' + 'daemon level %(cur)s - must be at least at ' + 'level %(min)s.', {'cur': _gpfs_cluster_release_level, 'min': GPFS_ENC_MIN_RELEASE}) @@ -755,7 +755,7 @@ class GPFSDriver(driver.CloneableImageVD, encryption_status = lines[1].split(':')[value_token] return encryption_status except processutils.ProcessExecutionError as exc: - LOG.error(_LE('Failed to issue mmlsfs command, error: %s.'), + LOG.error('Failed to issue mmlsfs command, error: %s.', exc.stderr) raise exception.VolumeBackendAPIException(data=exc.stderr) @@ -920,8 +920,8 @@ class GPFSDriver(driver.CloneableImageVD, try: image_utils.resize_image(vol_path, new_size, run_as_root=True) except processutils.ProcessExecutionError as exc: - LOG.error(_LE("Failed to resize volume " - "%(volume_id)s, error: %(error)s."), + LOG.error("Failed to resize volume " + "%(volume_id)s, error: %(error)s.", {'volume_id': volume['id'], 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) @@ -1001,9 +1001,9 @@ class GPFSDriver(driver.CloneableImageVD, self.gpfs_execute('mv', local_path, new_path) return (True, None) except processutils.ProcessExecutionError as exc: - LOG.error(_LE('Driver-based migration of volume %(vol)s failed. ' - 'Move from %(src)s to %(dst)s failed with error: ' - '%(error)s.'), + LOG.error('Driver-based migration of volume %(vol)s failed. ' + 'Move from %(src)s to %(dst)s failed with error: ' + '%(error)s.', {'vol': volume['name'], 'src': local_path, 'dst': new_path, @@ -1202,8 +1202,8 @@ class GPFSDriver(driver.CloneableImageVD, except exception.VolumeBackendAPIException as err: model_update['status'] = ( fields.ConsistencyGroupStatus.ERROR) - LOG.error(_LE("Failed to create the snapshot %(snap)s of " - "CGSnapshot. Exception: %(exception)s."), + LOG.error("Failed to create the snapshot %(snap)s of " + "CGSnapshot. Exception: %(exception)s.", {'snap': snapshot.name, 'exception': err}) for snapshot in snapshots: @@ -1224,8 +1224,8 @@ class GPFSDriver(driver.CloneableImageVD, except exception.VolumeBackendAPIException as err: model_update['status'] = ( fields.ConsistencyGroupStatus.ERROR_DELETING) - LOG.error(_LE("Failed to delete the snapshot %(snap)s of " - "CGSnapshot. Exception: %(exception)s."), + LOG.error("Failed to delete the snapshot %(snap)s of " + "CGSnapshot. 
Exception: %(exception)s.", {'snap': snapshot.name, 'exception': err}) for snapshot in snapshots: diff --git a/cinder/volume/drivers/ibm/ibm_storage/certificate.py b/cinder/volume/drivers/ibm/ibm_storage/certificate.py index 60f606e3fd1..cc9accc704c 100644 --- a/cinder/volume/drivers/ibm/ibm_storage/certificate.py +++ b/cinder/volume/drivers/ibm/ibm_storage/certificate.py @@ -18,8 +18,6 @@ import tempfile from oslo_log import log as logging -from cinder.i18n import _LE - LOG = logging.getLogger(__name__) @@ -49,7 +47,7 @@ class CertificateCollector(object): os.write(self.tmp_fd, cert_file.read()) cert_file.close() except Exception: - LOG.exception(_LE("Failed to process certificate")) + LOG.exception("Failed to process certificate") os.close(self.tmp_fd) fsize = os.path.getsize(self.tmp_path) if fsize > 0: diff --git a/cinder/volume/drivers/ibm/ibm_storage/ds8k_connection.py b/cinder/volume/drivers/ibm/ibm_storage/ds8k_connection.py index 9a0d7c07bb5..acbe02d6f7b 100644 --- a/cinder/volume/drivers/ibm/ibm_storage/ds8k_connection.py +++ b/cinder/volume/drivers/ibm/ibm_storage/ds8k_connection.py @@ -23,7 +23,7 @@ from requests.packages.urllib3 import connection from requests.packages.urllib3 import connectionpool from requests.packages.urllib3 import poolmanager -from cinder.i18n import _LW, _ +from cinder.i18n import _ LOG = logging.getLogger(__name__) @@ -108,9 +108,9 @@ class DS8KHTTPSConnection(connection.VerifiedHTTPSConnection): RECENT_DATE = datetime.date(2014, 1, 1) is_time_off = datetime.date.today() < RECENT_DATE if is_time_off: - msg = _LW('System time is way off (before %s). This will probably ' - 'lead to SSL verification errors.') - LOG.warning(msg, RECENT_DATE) + LOG.warning('System time is way off (before %s). This will ' + 'probably lead to SSL verification errors.', + RECENT_DATE) # Wrap socket using verification with the root certs in # trusted_root_certs diff --git a/cinder/volume/drivers/ibm/ibm_storage/ds8k_helper.py b/cinder/volume/drivers/ibm/ibm_storage/ds8k_helper.py index 8e7c2068684..4a2a865fd35 100644 --- a/cinder/volume/drivers/ibm/ibm_storage/ds8k_helper.py +++ b/cinder/volume/drivers/ibm/ibm_storage/ds8k_helper.py @@ -24,7 +24,7 @@ import string from oslo_log import log as logging from cinder import exception -from cinder.i18n import _, _LI, _LW, _LE +from cinder.i18n import _ from cinder.objects import fields import cinder.volume.drivers.ibm.ibm_storage as storage from cinder.volume.drivers.ibm.ibm_storage import cryptish @@ -137,12 +137,10 @@ class DS8KCommonHelper(object): {'host': self._get_value('san_ip')}) raise restclient.APIException(data=msg) self.backend['rest_version'] = self._get_version()['bundle_version'] - msg = _LI("Connection to DS8K storage system %(host)s has been " - "established successfully, the version of REST is %(rest)s.") - LOG.info(msg, { - 'host': self._get_value('san_ip'), - 'rest': self.backend['rest_version'] - }) + LOG.info("Connection to DS8K storage system %(host)s has been " + "established successfully, the version of REST is %(rest)s.", + {'host': self._get_value('san_ip'), + 'rest': self.backend['rest_version']}) def _get_storage_information(self): storage_info = self.get_systems() @@ -200,8 +198,8 @@ class DS8KCommonHelper(object): self._storage_pools = self.get_pools() for pid, p in self._storage_pools.items(): if p['stgtype'] != ptype: - msg = _LE('The stgtype of pool %(pool)s is %(ptype)s.') - LOG.error(msg, {'pool': pid, 'ptype': p['stgtype']}) + LOG.error('The stgtype of pool %(pool)s is %(ptype)s.', + {'pool': pid, 
'ptype': p['stgtype']}) err = _('Param [san_clustername] is invalid.') raise exception.InvalidParameterValue(err=err) @@ -259,8 +257,8 @@ class DS8KCommonHelper(object): def _find_lss(self, node, excluded_lss): fileds = ['id', 'type', 'addrgrp', 'group', 'configvols'] existing_lss = self.get_all_lss(fileds) - msg = _LI("existing LSS IDs are: %s.") - LOG.info(msg, ','.join([lss['id'] for lss in existing_lss])) + LOG.info("existing LSS IDs are: %s.", + ','.join([lss['id'] for lss in existing_lss])) if excluded_lss: existing_lss = [lss for lss in existing_lss @@ -283,9 +281,9 @@ class DS8KCommonHelper(object): lss = sorted(existing_lss, key=lambda k: int(k['configvols']))[0] if int(lss['configvols']) < LSS_VOL_SLOTS: lss_id = lss['id'] - msg = _LI('_find_from_existing_lss: choose %(lss)s. ' - 'now it has %(num)s volumes.') - LOG.info(msg, {'lss': lss_id, 'num': lss['configvols']}) + LOG.info('_find_from_existing_lss: choose %(lss)s. ' + 'now it has %(num)s volumes.', + {'lss': lss_id, 'num': lss['configvols']}) return lss_id def _find_from_unexisting_lss(self, node, existing_lss): @@ -302,8 +300,7 @@ class DS8KCommonHelper(object): if addrgrp not in addrgrps and lss not in fulllss: lss_id = ("%02x" % lss).upper() break - msg = _LI('_find_from_unexisting_lss: choose %s.') - LOG.info(msg, lss_id) + LOG.info('_find_from_unexisting_lss: choose %s.', lss_id) return lss_id def create_lun(self, lun): @@ -327,11 +324,10 @@ for lun in luns: if lun.ds_id is None: # create_lun must have failed and not returned the id - LOG.error(_LE("delete_lun: volume id is None.")) + LOG.error("delete_lun: volume id is None.") continue if not self.lun_exists(lun.ds_id): - msg = _LE("delete_lun: volume %s not found.") - LOG.error(msg, lun.ds_id) + LOG.error("delete_lun: volume %s not found.", lun.ds_id) continue lun_ids.append(lun.ds_id) @@ -343,8 +339,7 @@ else: lun_ids_str = ','.join(lun_ids) lun_ids = [] - msg = _LI("Deleting volumes: %s.") - LOG.info(msg, lun_ids_str) + LOG.info("Deleting volumes: %s.", lun_ids_str) self._delete_lun(lun_ids_str) def get_lss_in_pprc_paths(self): @@ -354,10 +349,9 @@ paths = self.get_pprc_paths() except restclient.APIException: paths = [] - LOG.exception(_LE("Can not get the LSS")) + LOG.exception("Can not get the LSS") lss_ids = set(p['source_lss_id'] for p in paths) - msg = _LI('LSS in PPRC paths are: %s.') - LOG.info(msg, ','.join(lss_ids)) + LOG.info('LSS in PPRC paths are: %s.', ','.join(lss_ids)) return lss_ids def _find_host(self, vol_id): @@ -367,8 +361,7 @@ vol_ids = [vol['volume_id'] for vol in host['mappings_briefs']] if vol_id in vol_ids: host_ids.append(host['id']) - msg = _LI('_find_host: host IDs are: %s.') - LOG.info(msg, host_ids) + LOG.info('_find_host: host IDs are: %s.', host_ids) return host_ids def wait_flashcopy_finished(self, src_luns, tgt_luns): @@ -398,8 +391,7 @@ return finished def wait_pprc_copy_finished(self, vol_ids, state, delete=True): - msg = _LI("Wait for PPRC pair to enter into state %s") - LOG.info(msg, state) + LOG.info("Wait for PPRC pair to enter into state %s", state) vol_ids = sorted(vol_ids) min_vol_id = min(vol_ids) max_vol_id = max(vol_ids) @@ -483,7 +475,7 @@ else: msg = _('More than one host defined for requested ports.') raise restclient.APIException(message=msg) - LOG.info(_LI('Volume will be attached to host %s.'), host_id) + LOG.info('Volume 
will be attached to host %s.', host_id) # Create missing host ports if unknown_ports or unconfigured_ports: @@ -528,7 +520,7 @@ class DS8KCommonHelper(object): }) if not defined_hosts: - LOG.info(_LI('Could not find host.')) + LOG.info('Could not find host.') return None elif len(defined_hosts) > 1: raise restclient.APIException(_('More than one host found.')) @@ -537,14 +529,14 @@ class DS8KCommonHelper(object): mappings = self._get_mappings(host_id) lun_ids = [ m['lunid'] for m in mappings if m['volume']['id'] == vol_id] - msg = _LI('Volumes attached to host %(host)s are %(vols)s.') - LOG.info(msg, {'host': host_id, 'vols': ','.join(lun_ids)}) + LOG.info('Volumes attached to host %(host)s are %(vols)s.', + {'host': host_id, 'vols': ','.join(lun_ids)}) for lun_id in lun_ids: self._delete_mappings(host_id, lun_id) if not lun_ids: - msg = _LW("Volume %(vol)s is already not mapped to " - "host %(host)s.") - LOG.warning(msg, {'vol': vol_id, 'host': host.name}) + LOG.warning("Volume %(vol)s is already not mapped to " + "host %(host)s.", + {'vol': vol_id, 'host': host.name}) # if this host only has volumes that have been detached, # remove the host and its ports ret_info = { @@ -574,8 +566,9 @@ class DS8KCommonHelper(object): self.delete_lun(luns) except restclient.APIException: model_update['status'] = fields.GroupStatus.ERROR_DELETING - msg = _LE("Failed to delete the volumes in group %(group)s") - LOG.exception(msg, {'group': group.id}) + LOG.exception( + "Failed to delete the volumes in group %(group)s", + {'group': group.id}) for lun in luns: volumes_model_update.append({ @@ -783,7 +776,7 @@ class DS8KReplicationSourceHelper(DS8KCommonHelper): # prefer to choose the non-existing one firstly fileds = ['id', 'type', 'addrgrp', 'group', 'configvols'] existing_lss = self.get_all_lss(fileds) - LOG.info(_LI("existing LSS IDs are %s"), + LOG.info("existing LSS IDs are %s", ','.join([lss['id'] for lss in existing_lss])) lss_id = self._find_from_unexisting_lss(node, existing_lss) if not lss_id: @@ -922,8 +915,8 @@ class DS8KECKDHelper(DS8KCommonHelper): ckd_lss = set(lss['id'] for lss in all_lss if lss['type'] == 'ckd') unexisting_lcu = set(dev_mapping.keys()) - ckd_lss if unexisting_lcu: - msg = _LI('LCUs %s do not exist in DS8K, they will be created.') - LOG.info(msg, ','.join(unexisting_lcu)) + LOG.info('LCUs %s do not exist in DS8K, they will be created.', + ','.join(unexisting_lcu)) for lcu in unexisting_lcu: try: self._create_lcu(self.backend['ssid_prefix'], lcu) diff --git a/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py b/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py index a496c53996f..f9e11c30e7f 100644 --- a/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py +++ b/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py @@ -64,7 +64,7 @@ from oslo_config import cfg from oslo_log import log as logging from cinder import exception -from cinder.i18n import _, _LI, _LW, _LE +from cinder.i18n import _ from cinder.objects import fields from cinder.utils import synchronized import cinder.volume.drivers.ibm.ibm_storage as storage @@ -310,7 +310,7 @@ class DS8KProxy(proxy.IBMStorageProxy): @proxy._trace_time def setup(self, ctxt): - LOG.info(_LI("Initiating connection to IBM DS8K storage system.")) + LOG.info("Initiating connection to IBM DS8K storage system.") connection_type = self.configuration.safe_get('connection_type') replication_devices = self.configuration.safe_get('replication_device') if connection_type == storage.XIV_CONNECTION_TYPE_FC: @@ -426,8 +426,8 @@ class 
DS8KProxy(proxy.IBMStorageProxy): pool, find_new_pid, excluded_lss) return self._helper.create_lun(lun) except restclient.LssFullException: - msg = _LW("LSS %s is full, find another one.") - LOG.warning(msg, lun.lss_pair['source'][1]) + LOG.warning("LSS %s is full, find another one.", + lun.lss_pair['source'][1]) excluded_lss.append(lun.lss_pair['source'][1]) @proxy.logger @@ -683,7 +683,7 @@ class DS8KProxy(proxy.IBMStorageProxy): def initialize_connection(self, volume, connector, **kwargs): """Attach a volume to the host.""" vol_id = Lun(volume).ds_id - LOG.info(_LI('Attach the volume %s.'), vol_id) + LOG.info('Attach the volume %s.', vol_id) return self._helper.initialize_connection(vol_id, connector, **kwargs) @synchronized('OpenStackCinderIBMDS8KMutexConnect-', external=True) @@ -692,7 +692,7 @@ class DS8KProxy(proxy.IBMStorageProxy): def terminate_connection(self, volume, connector, force=False, **kwargs): """Detach a volume from a host.""" vol_id = Lun(volume).ds_id - LOG.info(_LI('Detach the volume %s.'), vol_id) + LOG.info('Detach the volume %s.', vol_id) return self._helper.terminate_connection(vol_id, connector, force, **kwargs) @@ -725,7 +725,7 @@ class DS8KProxy(proxy.IBMStorageProxy): self._clone_group(src_luns, tgt_luns, cg_enabled) except restclient.APIException: model_update['status'] = fields.GroupStatus.ERROR - LOG.exception(_LE('Failed to create group snapshot.')) + LOG.exception('Failed to create group snapshot.') for tgt_lun in tgt_luns: snapshot_model_update = tgt_lun.get_volume_update() @@ -753,8 +753,8 @@ class DS8KProxy(proxy.IBMStorageProxy): self._helper.delete_lun(snapshots) except restclient.APIException as e: model_update['status'] = fields.GroupStatus.ERROR_DELETING - LOG.error(_LE("Failed to delete group snapshot. " - "Error: %(err)s"), + LOG.error("Failed to delete group snapshot. " + "Error: %(err)s", {'err': e}) for snapshot in snapshots: @@ -816,8 +816,7 @@ class DS8KProxy(proxy.IBMStorageProxy): self._clone_group(src_luns, tgt_luns, cg_enabled) except restclient.APIException: model_update['status'] = fields.GroupStatus.ERROR - msg = _LE("Failed to create group from group snapshot.") - LOG.exception(msg) + LOG.exception("Failed to create group from group snapshot.") for tgt_lun in tgt_luns: volume_model_update = tgt_lun.get_volume_update() @@ -923,14 +922,13 @@ class DS8KProxy(proxy.IBMStorageProxy): volume_update_list = [] if secondary_id == strings.PRIMARY_BACKEND_ID: if not self._active_backend_id: - msg = _LI("Host has been failed back. doesn't need " - "to fail back again.") - LOG.info(msg) + LOG.info("Host has been failed back. 
doesn't need " + "to fail back again.") return self._active_backend_id, volume_update_list else: if self._active_backend_id: - msg = _LI("Host has been failed over to %s.") - LOG.info(msg, self._active_backend_id) + LOG.info("Host has been failed over to %s.", + self._active_backend_id) return self._active_backend_id, volume_update_list backend_id = self._replication._target_helper.backend['id'] @@ -980,13 +978,13 @@ class DS8KProxy(proxy.IBMStorageProxy): 'updates': volume_update} volume_update_list.append(model_update) else: - LOG.info(_LI("No volume has replication capability.")) + LOG.info("No volume has replication capability.") if secondary_id != strings.PRIMARY_BACKEND_ID: - LOG.info(_LI("Switch to the target %s"), secondary_id) + LOG.info("Switch to the target %s", secondary_id) self._switch_backend_connection(secondary_id) self._active_backend_id = secondary_id else: - LOG.info(_LI("Switch to the primary %s"), secondary_id) + LOG.info("Switch to the primary %s", secondary_id) self._switch_backend_connection(self._active_backend_id) self._active_backend_id = "" diff --git a/cinder/volume/drivers/ibm/ibm_storage/ds8k_replication.py b/cinder/volume/drivers/ibm/ibm_storage/ds8k_replication.py index dd48e05e713..f29f436d8a2 100644 --- a/cinder/volume/drivers/ibm/ibm_storage/ds8k_replication.py +++ b/cinder/volume/drivers/ibm/ibm_storage/ds8k_replication.py @@ -21,7 +21,7 @@ from oslo_log import log as logging from oslo_utils import excutils from cinder import exception -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder.utils import synchronized import cinder.volume.drivers.ibm.ibm_storage as storage from cinder.volume.drivers.ibm.ibm_storage import ds8k_helper as helper @@ -90,9 +90,8 @@ class MetroMirrorManager(object): try: self._target.get_systems() except restclient.TimeoutException as e: - msg = _LI("REST request time out, backend may be not available " - "any more. Exception: %s") - LOG.info(msg, e) + LOG.info("REST request time out, backend may be not available " + "any more. Exception: %s", e) return False return True @@ -157,8 +156,7 @@ class MetroMirrorManager(object): paths = [p for p in paths if p['target_system_wwnn'] in self._target.backend['storage_wwnn']] else: - msg = _LI("No PPRC paths found in primary DS8K.") - LOG.info(msg) + LOG.info("No PPRC paths found in primary DS8K.") return PPRC_PATH_NOT_EXIST, None # get the paths whose port pairs have been set in configuration file. 
@@ -170,9 +168,8 @@ class MetroMirrorManager(object): if not (set(port_pairs) & set(expected_port_pairs)): paths.remove(path) if not paths: - msg = _LI("Existing PPRC paths do not use port pairs that " - "are set.") - LOG.info(msg) + LOG.info("Existing PPRC paths do not use port pairs that " + "are set.") return PPRC_PATH_NOT_EXIST, None # abandon PPRC paths according to volume type(fb/ckd) @@ -187,8 +184,7 @@ class MetroMirrorManager(object): source_lss_set = source_lss_set & fb_lss paths = [p for p in paths if p['source_lss_id'] in source_lss_set] if not paths: - msg = _LI("No source LSS in PPRC paths has correct volume type.") - LOG.info(msg) + LOG.info("No source LSS in PPRC paths has correct volume type.") return PPRC_PATH_NOT_EXIST, None # if the group property of lss doesn't match pool node, @@ -213,8 +209,7 @@ class MetroMirrorManager(object): paths = [p for p in paths if p['target_lss_id'] not in discarded_tgt_lss] if not paths: - msg = _LI("No PPRC paths can be re-used.") - LOG.info(msg) + LOG.info("No PPRC paths can be re-used.") return PPRC_PATH_NOT_EXIST, None # abandon unhealthy PPRC paths. @@ -224,9 +219,8 @@ class MetroMirrorManager(object): if len(failed_port_pairs) == len(path['port_pairs']): paths.remove(path) if not paths: - msg = _LI("PPRC paths between primary and target DS8K " - "are unhealthy.") - LOG.info(msg) + LOG.info("PPRC paths between primary and target DS8K " + "are unhealthy.") return PPRC_PATH_UNHEALTHY, None return PPRC_PATH_HEALTHY, paths @@ -238,8 +232,8 @@ class MetroMirrorManager(object): pid = (self._source.backend['storage_wwnn'] + '_' + src_lss + ':' + self._target.backend['storage_wwnn'] + '_' + tgt_lss) state = self._is_pprc_paths_healthy(pid) - msg = _LI("The state of PPRC path %(path)s is %(state)s.") - LOG.info(msg, {'path': pid, 'state': state}) + LOG.info("The state of PPRC path %(path)s is %(state)s.", + {'path': pid, 'state': state}) if state == PPRC_PATH_HEALTHY: return @@ -250,8 +244,8 @@ class MetroMirrorManager(object): 'target_lss_id': tgt_lss, 'port_pairs': self._target.backend['port_pairs'] } - msg = _LI("PPRC path %(src)s:%(tgt)s will be created.") - LOG.info(msg, {'src': src_lss, 'tgt': tgt_lss}) + LOG.info("PPRC path %(src)s:%(tgt)s will be created.", + {'src': src_lss, 'tgt': tgt_lss}) self._source.create_pprc_path(pathData) # check the state of the pprc path @@ -300,7 +294,7 @@ class MetroMirrorManager(object): LOG.debug("Creating pprc pair, pairData is %s.", pairData) self._source.create_pprc_pair(pairData) self._source.wait_pprc_copy_finished([lun.ds_id], 'full_duplex') - LOG.info(_LI("The state of PPRC pair has become full_duplex.")) + LOG.info("The state of PPRC pair has become full_duplex.") def delete_pprc_pairs(self, lun): self._source.delete_pprc_pair(lun.ds_id) @@ -316,12 +310,10 @@ class MetroMirrorManager(object): target_vol_id = ( lun.replication_driver_data[backend_id]['vol_hex_id']) if not self._target.lun_exists(target_vol_id): - msg = _LI("Target volume %(volid)s doesn't exist in " - "DS8K %(storage)s.") - LOG.info(msg, { - 'volid': target_vol_id, - 'storage': self._target.backend['storage_unit'] - }) + LOG.info("Target volume %(volid)s doesn't exist in " + "DS8K %(storage)s.", + {'volid': target_vol_id, + 'storage': self._target.backend['storage_unit']}) continue vol_pairs.append({ @@ -340,12 +332,12 @@ class MetroMirrorManager(object): "options": ["failover"] } - LOG.info(_LI("Begin to fail over to %s"), + LOG.info("Begin to fail over to %s", self._target.backend['storage_unit']) 
self._target.create_pprc_pair(pairData) self._target.wait_pprc_copy_finished(target_vol_ids, 'suspended', False) - LOG.info(_LI("Failover from %(src)s to %(tgt)s is finished."), { + LOG.info("Failover from %(src)s to %(tgt)s is finished.", { 'src': self._source.backend['storage_unit'], 'tgt': self._target.backend['storage_unit'] }) @@ -357,12 +349,10 @@ class MetroMirrorManager(object): target_vol_id = ( lun.replication_driver_data[backend_id]['vol_hex_id']) if not self._target.lun_exists(target_vol_id): - msg = _LE("Target volume %(volume)s doesn't exist in " - "DS8K %(storage)s.") - LOG.info(msg, { - 'volume': lun.ds_id, - 'storage': self._target.backend['storage_unit'] - }) + LOG.info("Target volume %(volume)s doesn't exist in " + "DS8K %(storage)s.", + {'volume': lun.ds_id, + 'storage': self._target.backend['storage_unit']}) continue pprc_id = (self._source.backend['storage_unit'] + '_' + @@ -376,11 +366,11 @@ class MetroMirrorManager(object): "type": "metro_mirror", "options": ["failback"]} - LOG.info(_LI("Begin to run failback in %s."), + LOG.info("Begin to run failback in %s.", self._source.backend['storage_unit']) self._source.do_failback(pairData) self._source.wait_pprc_copy_finished(vol_ids, 'full_duplex', False) - LOG.info(_LI("Run failback in %s is finished."), + LOG.info("Run failback in %s is finished.", self._source.backend['storage_unit']) @@ -510,9 +500,8 @@ class Replication(object): @proxy.logger def _delete_replica(self, lun): if not lun.replication_driver_data: - msg = _LE("No replica ID for lun %s, maybe there is something " - "wrong when creating the replica for lun.") - LOG.error(msg, lun.ds_id) + LOG.error("No replica ID for lun %s, maybe there is something " + "wrong when creating the replica for lun.", lun.ds_id) return None for backend_id, backend in lun.replication_driver_data.items(): diff --git a/cinder/volume/drivers/ibm/ibm_storage/proxy.py b/cinder/volume/drivers/ibm/ibm_storage/proxy.py index 694d6cabf46..7b53aab0b31 100644 --- a/cinder/volume/drivers/ibm/ibm_storage/proxy.py +++ b/cinder/volume/drivers/ibm/ibm_storage/proxy.py @@ -21,7 +21,7 @@ import platform from oslo_log import log as logging from oslo_utils import timeutils -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder import version import cinder.volume.drivers.ibm.ibm_storage as storage from cinder.volume.drivers.ibm.ibm_storage import strings @@ -192,7 +192,7 @@ class IBMStorageProxy(object): Handled by ISCSiDriver """ - LOG.info(_LI("The copy_volume_to_image feature is not implemented.")) + LOG.info("The copy_volume_to_image feature is not implemented.") raise NotImplementedError() @_trace_time @@ -396,10 +396,10 @@ class IBMStorageProxy(object): LOG.debug('Replication device found: %(dev)s', {'dev': dev}) backend_id = dev.get('backend_id', None) if backend_id is None: - LOG.error(_LE("Replication is missing backend_id: %(dev)s"), + LOG.error("Replication is missing backend_id: %(dev)s", {'dev': dev}) elif self.targets.get(backend_id, None): - LOG.error(_LE("Multiple entries for replication %(dev)s"), + LOG.error("Multiple entries for replication %(dev)s", {'dev': dev}) else: self.targets[backend_id] = {} diff --git a/cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py b/cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py index 5435285a75a..b92fb934e15 100644 --- a/cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py +++ b/cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py @@ -29,10 +29,8 @@ if pyxcli: from pyxcli.mirroring import volume_recovery_manager from pyxcli 
import transports -from cinder.volume import qos_specs -from cinder.volume import volume_types -from cinder.i18n import _, _LE, _LW, _LI from cinder import context +from cinder.i18n import _ from cinder import objects from cinder import volume as c_volume import cinder.volume.drivers.ibm.ibm_storage as storage @@ -40,6 +38,8 @@ from cinder.volume.drivers.ibm.ibm_storage import certificate from cinder.volume.drivers.ibm.ibm_storage import cryptish from cinder.volume.drivers.ibm.ibm_storage import proxy from cinder.volume.drivers.ibm.ibm_storage import strings +from cinder.volume import qos_specs +from cinder.volume import volume_types OPENSTACK_PRODUCT_NAME = "OpenStack" PERF_CLASS_NAME_PREFIX = "cinder-qos" @@ -76,15 +76,15 @@ CONNECTIVITY_FC_NO_TARGETS = _("Unable to detect FC connection between the " "that zoning is set up correctly.") # terminate connection strings - used in logging -TERMINATE_CONNECTION_BASE_ERROR = _LE("Unable to terminate the connection " - "for volume '%(volume)s': %(error)s.") -TERMINATE_CONNECTION_HOST_ERROR = _LE("Terminate connection for volume " - "'%(volume)s': for volume '%(volume)s': " - "%(host)s %(error)s.") +TERMINATE_CONNECTION_BASE_ERROR = ("Unable to terminate the connection " + "for volume '%(volume)s': %(error)s.") +TERMINATE_CONNECTION_HOST_ERROR = ("Terminate connection for volume " + "'%(volume)s': for volume '%(volume)s': " + "%(host)s %(error)s.") # delete volume strings - used in logging -DELETE_VOLUME_BASE_ERROR = _LE("Unable to delete volume '%(volume)s': " - "%(error)s.") +DELETE_VOLUME_BASE_ERROR = ("Unable to delete volume '%(volume)s': " + "%(error)s.") # manage volume strings - used in exceptions MANAGE_VOLUME_BASE_ERROR = _("Unable to manage the volume '%(volume)s': " @@ -128,10 +128,10 @@ class XIVProxy(proxy.IBMStorageProxy): active_backend_id = strings.PRIMARY_BACKEND_ID proxy.IBMStorageProxy.__init__( self, storage_info, logger, exception, driver, active_backend_id) - LOG.info(_LI("__init__: storage_info: %(keys)s"), + LOG.info("__init__: storage_info: %(keys)s", {'keys': self.storage_info}) if active_backend_id: - LOG.info(_LI("__init__: active_backend_id: %(id)s"), + LOG.info("__init__: active_backend_id: %(id)s", {'id': active_backend_id}) self.ibm_storage_cli = None self.meta['ibm_storage_portal'] = None @@ -144,8 +144,8 @@ class XIVProxy(proxy.IBMStorageProxy): @proxy._trace_time def setup(self, context): """Connect ssl client.""" - LOG.info(_LI("Setting up connection to %(title)s...\n" - "Active backend_id: '%(id)s'."), + LOG.info("Setting up connection to %(title)s...\n" + "Active backend_id: '%(id)s'.", {'title': strings.TITLE, 'id': self.active_backend_id}) @@ -178,8 +178,8 @@ class XIVProxy(proxy.IBMStorageProxy): if remote_id: self._update_active_schedule_objects() self._update_remote_schedule_objects() - LOG.info(_LI("Connection to the IBM storage " - "system established successfully.")) + LOG.info("Connection to the IBM storage " + "system established successfully.") def _get_schedule_from_rpo(self, rpo): return [rate for rate in self.async_rates @@ -258,8 +258,8 @@ class XIVProxy(proxy.IBMStorageProxy): return local_ibm_storage_cli = self._init_xcli(strings.PRIMARY_BACKEND_ID) if not local_ibm_storage_cli: - LOG.error(_LE('Failed to connect to main backend. ' - 'Cannot retrieve main backend system_id')) + LOG.error('Failed to connect to main backend. 
' + 'Cannot retrieve main backend system_id') return system_id = local_ibm_storage_cli.cmd.config_get().as_dict( 'name')['system_id'].value @@ -509,8 +509,8 @@ class XIVProxy(proxy.IBMStorageProxy): self._replication_create(volume, replication_info) except Exception as e: details = self._get_code_and_status_or_message(e) - msg = (_LE('Failed _replication_create for ' - 'volume %(vol)s: %(err)s'), + msg = ('Failed _replication_create for ' + 'volume %(vol)s: %(err)s' % {'vol': volume['name'], 'err': details}) LOG.error(msg) if cg: @@ -553,7 +553,7 @@ class XIVProxy(proxy.IBMStorageProxy): {'rep': replication_info}) target, params = self._get_replication_target_params() - LOG.info(_LI('Target %(target)s: %(params)s'), + LOG.info('Target %(target)s: %(params)s', {'target': target, 'params': six.text_type(params)}) try: @@ -580,7 +580,7 @@ class XIVProxy(proxy.IBMStorageProxy): LOG.debug('schedule %(sched)s: for rpo %(rpo)s', {'sched': schedule, 'rpo': replication_info['rpo']}) else: - LOG.error(_LE('Failed to find schedule for rpo %(rpo)s'), + LOG.error('Failed to find schedule for rpo %(rpo)s', {'rpo': replication_info['rpo']}) # will fail in the next step try: @@ -641,7 +641,7 @@ class XIVProxy(proxy.IBMStorageProxy): # Don't throw error here, allow the cinder volume manager # to set the volume as deleted if it's not available # on the XIV box - LOG.info(_LI("Volume '%(volume)s' not found on storage"), + LOG.info("Volume '%(volume)s' not found on storage", {'volume': vol_name}) def _silent_delete_volume(self, volume): @@ -667,8 +667,8 @@ class XIVProxy(proxy.IBMStorageProxy): self._call_xiv_xcli( "cg_remove_vol", vol=volume['name']) except errors.XCLIError as e: - LOG.error(_LE("Failed removing volume %(vol)s from " - "consistency group %(cg)s: %(err)s"), + LOG.error("Failed removing volume %(vol)s from " + "consistency group %(cg)s: %(err)s", {'vol': volume['name'], 'cg': cgname, 'err': self._get_code_and_status_or_message(e)}) @@ -695,11 +695,11 @@ class XIVProxy(proxy.IBMStorageProxy): target = None try: target, params = self._get_replication_target_params() - LOG.info(_LI('Target %(target)s: %(params)s'), - {'target': target, 'params': six.text_type(params)}) + LOG.info('Target %(target)s: %(params)s', + {'target': target, 'params': params}) except Exception as e: - LOG.error(_LE("Unable to delete replicated volume " - "'%(volume)s': %(error)s."), + LOG.error("Unable to delete replicated volume " + "'%(volume)s': %(error)s.", {'error': self._get_code_and_status_or_message(e), 'volume': volume['name']}) if target: @@ -708,8 +708,8 @@ class XIVProxy(proxy.IBMStorageProxy): "vol_delete", vol=volume['name']) except errors.XCLIError as e: LOG.error( - _LE("Unable to delete replicated volume " - "'%(volume)s': %(error)s."), + "Unable to delete replicated volume " + "'%(volume)s': %(error)s.", {'error': self._get_code_and_status_or_message(e), 'volume': volume['name']}) @@ -829,8 +829,8 @@ class XIVProxy(proxy.IBMStorageProxy): "mapping_list", host=host.get('name')).as_list if len(host_mappings) == 0: - LOG.info(_LI("Terminate connection for volume '%(volume)s': " - "%(host)s %(info)s."), + LOG.info("Terminate connection for volume '%(volume)s': " + "%(host)s %(info)s.", {'volume': volume['name'], 'host': host.get('name'), 'info': "will be deleted"}) @@ -993,8 +993,8 @@ class XIVProxy(proxy.IBMStorageProxy): return false_ret if volume.attach_status == 'attached': - LOG.info(_LI("Storage-assisted volume migration: Volume " - "%(volume)s is attached"), + LOG.info("Storage-assisted volume 
migration: Volume " + "%(volume)s is attached", {'volume': volume.id}) try: @@ -1206,8 +1206,8 @@ class XIVProxy(proxy.IBMStorageProxy): # In case of failback, mirroring must be active # In case of failover we attempt to move in any condition if failback and not active: - msg = (_LE("Volume %(vol)s: no active mirroring and can not " - "failback"), + msg = ("Volume %(vol)s: no active mirroring and can not " + "failback" % {'vol': volume['name']}) LOG.error(msg) return False, msg @@ -1218,26 +1218,26 @@ class XIVProxy(proxy.IBMStorageProxy): except Exception as e: # failed attempt to switch_roles from the master details = self._get_code_and_status_or_message(e) - LOG.warning(_LW('Failed to perform switch_roles on' - ' %(vol)s: %(err)s. ' - 'Continue to change_role'), + LOG.warning('Failed to perform switch_roles on' + ' %(vol)s: %(err)s. ' + 'Continue to change_role', {'vol': volume['name'], 'err': details}) try: # this is the ugly stage we come to brute force - LOG.warning(_LW('Attempt to change_role to master')) + LOG.warning('Attempt to change_role to master') failover_volume_replication_mgr.failover_by_id( resource_id=volume['name']) return True, None except m_errors.NoMirrorDefinedError as e: details = self._get_code_and_status_or_message(e) - msg = (_LW("Volume %(vol)s no replication defined: %(err)s"), + msg = ("Volume %(vol)s no replication defined: %(err)s" % {'vol': volume['name'], 'err': details}) LOG.error(msg) return False, msg except Exception as e: details = self._get_code_and_status_or_message(e) - msg = (_LE('Volume %(vol)s change_role failed: %(err)s'), + msg = ('Volume %(vol)s change_role failed: %(err)s' % {'vol': volume['name'], 'err': details}) LOG.error(msg) return False, msg @@ -1255,15 +1255,14 @@ class XIVProxy(proxy.IBMStorageProxy): """ volume_update_list = [] - LOG.info(_LI("failover_host: from %(active)s to %(id)s"), + LOG.info("failover_host: from %(active)s to %(id)s", {'active': self.active_backend_id, 'id': secondary_id}) # special cases to handle if secondary_id == strings.PRIMARY_BACKEND_ID: # case: already failed back if self._using_default_backend(): - msg = _LI("Host has been failed back. No need " - "to fail back again.") - LOG.info(msg) + LOG.info("Host has been failed back. No need " + "to fail back again.") return self.active_backend_id, volume_update_list pool_slave = self.storage_info[storage.FLAG_KEYS['storage_pool']] pool_master = self._get_target_params( @@ -1271,8 +1270,7 @@ class XIVProxy(proxy.IBMStorageProxy): goal_status = 'available' else: if not self._using_default_backend(): - msg = _LI("Already failed over. No need to failover again.") - LOG.info(msg) + LOG.info("Already failed over. 
No need to failover again.") return self.active_backend_id, volume_update_list # case: need to select a target if secondary_id is None: @@ -1474,7 +1472,7 @@ class XIVProxy(proxy.IBMStorageProxy): pool=self.storage_info[storage.FLAG_KEYS['storage_pool']]).as_list if len(pools) != 1: LOG.error( - _LE("_update_stats: Pool %(pool)s not available on storage"), + "_update_stats: Pool %(pool)s not available on storage", {'pool': self.storage_info[storage.FLAG_KEYS['storage_pool']]}) return pool = pools[0] @@ -1638,7 +1636,7 @@ class XIVProxy(proxy.IBMStorageProxy): """Creates a consistency group.""" cgname = self._cg_name_from_group(group) - LOG.info(_LI("Creating consistency group %(name)s."), + LOG.info("Creating consistency group %(name)s.", {'name': cgname}) if isinstance(group, objects.Group): volume_type_ids = group.volume_type_ids @@ -1699,7 +1697,7 @@ class XIVProxy(proxy.IBMStorageProxy): self.delete_consistencygroup(context, group, []) except Exception as e: details = self._get_code_and_status_or_message(e) - LOG.error(_LE('Failed to cleanup CG %(details)s'), + LOG.error('Failed to cleanup CG %(details)s', {'details': details}) @proxy._trace_time @@ -1712,7 +1710,7 @@ class XIVProxy(proxy.IBMStorageProxy): or another CG with its list of volumes. """ cgname = self._cg_name_from_group(group) - LOG.info(_LI("Creating consistency group %(cg)s from src."), + LOG.info("Creating consistency group %(cg)s from src.", {'cg': cgname}) volumes_model_update = [] @@ -1723,7 +1721,7 @@ class XIVProxy(proxy.IBMStorageProxy): self.create_consistencygroup(context, group) except Exception as e: LOG.error( - _LE("Creating CG from cgsnapshot failed: %(details)s"), + "Creating CG from cgsnapshot failed: %(details)s", {'details': self._get_code_and_status_or_message(e)}) raise created_volumes = [] @@ -1766,7 +1764,7 @@ class XIVProxy(proxy.IBMStorageProxy): try: self.create_consistencygroup(context, group) except Exception as e: - LOG.error(_LE("Creating CG from CG failed: %(details)s"), + LOG.error("Creating CG from CG failed: %(details)s", {'details': self._get_code_and_status_or_message(e)}) raise created_volumes = [] @@ -1802,7 +1800,7 @@ class XIVProxy(proxy.IBMStorageProxy): """Deletes a consistency group.""" cgname = self._cg_name_from_group(group) - LOG.info(_LI("Deleting consistency group %(name)s."), + LOG.info("Deleting consistency group %(name)s.", {'name': cgname}) model_update = {} model_update['status'] = group.get('status', 'deleting') @@ -1814,8 +1812,8 @@ class XIVProxy(proxy.IBMStorageProxy): self._call_xiv_xcli( "cg_remove_vol", vol=volume['name']) except errors.XCLIError as e: - LOG.error(_LE("Failed removing volume %(vol)s from " - "consistency group %(cg)s: %(err)s"), + LOG.error("Failed removing volume %(vol)s from " + "consistency group %(cg)s: %(err)s", {'vol': volume['name'], 'cg': cgname, 'err': self._get_code_and_status_or_message(e)}) @@ -1850,10 +1848,9 @@ class XIVProxy(proxy.IBMStorageProxy): "cg_delete", cg=cgname).as_list model_update['status'] = 'deleted' except (errors.CgDoesNotExistError, errors.CgBadNameError): - error = (_LW("consistency group %(cgname)s does not " - "exist on backend") % - {'cgname': cgname}) - LOG.warning(error) + LOG.warning("consistency group %(cgname)s does not " + "exist on backend", + {'cgname': cgname}) # if the object was already deleted on the backend, we can # continue and delete the openstack object model_update['status'] = 'deleted' @@ -1879,7 +1876,7 @@ class XIVProxy(proxy.IBMStorageProxy): """Updates a consistency group.""" cgname = 
self._cg_name_from_group(group) - LOG.info(_LI("Updating consistency group %(name)s."), {'name': cgname}) + LOG.info("Updating consistency group %(name)s.", {'name': cgname}) model_update = {'status': 'available'} add_volumes_update = [] @@ -1948,7 +1945,7 @@ class XIVProxy(proxy.IBMStorageProxy): cgname = self._cg_name_from_cgsnapshot(cgsnapshot) groupname = self._group_name_from_cgsnapshot(cgsnapshot) - LOG.info(_LI("Creating snapshot %(group)s for CG %(cg)s."), + LOG.info("Creating snapshot %(group)s for CG %(cg)s.", {'group': groupname, 'cg': cgname}) # call XCLI @@ -2013,7 +2010,7 @@ class XIVProxy(proxy.IBMStorageProxy): cgname = self._cg_name_from_cgsnapshot(cgsnapshot) groupname = self._group_name_from_cgsnapshot(cgsnapshot) - LOG.info(_LI("Deleting snapshot %(group)s for CG %(cg)s."), + LOG.info("Deleting snapshot %(group)s for CG %(cg)s.", {'group': groupname, 'cg': cgname}) # call XCLI @@ -2081,7 +2078,7 @@ class XIVProxy(proxy.IBMStorageProxy): else: chap_name = host['name'] else: - LOG.info(_LI("_create_chap: host missing!!!")) + LOG.info("_create_chap: host missing!!!") chap_name = "12345678901234" chap_secret = self._generate_chap_secret(chap_name) LOG.debug("_create_chap (new): %(chap_name)s ", @@ -2369,15 +2366,13 @@ class XIVProxy(proxy.IBMStorageProxy): "vol_mapping_list", vol=vol_name).as_dict('host') if host['name'] in mapped_vols: - LOG.info(_LI("Volume '%(volume)s' was already attached to " - "the host '%(host)s'."), + LOG.info("Volume '%(volume)s' was already attached to " + "the host '%(host)s'.", {'host': host['name'], 'volume': volume['name']}) return int(mapped_vols[host['name']].lun) except errors.VolumeBadNameError: - LOG.error(_LE("%(error)s '%(volume)s"), - {'error': "Volume not found.", - 'volume': volume['name']}) + LOG.error("Volume not found. 
'%s'", volume['name']) raise self.meta['exception'].VolumeNotFound(volume_id=volume['id']) used_luns = [int(mapped.get('lun')) for mapped in self._call_xiv_xcli( @@ -2436,7 +2431,7 @@ class XIVProxy(proxy.IBMStorageProxy): self.ibm_storage_cli, self.active_backend_id) if self.ibm_storage_cli: - LOG.info(_LI("_call_xiv_xcli #1: %s"), method) + LOG.info("_call_xiv_xcli #1: %s", method) else: LOG.debug("_call_xiv_xcli #2: %s", method) return getattr(self.ibm_storage_cli.cmd, method)(*args, **kwargs) diff --git a/cinder/volume/drivers/ibm/storwize_svc/replication.py b/cinder/volume/drivers/ibm/storwize_svc/replication.py index b0858113031..8a00ff49e8e 100644 --- a/cinder/volume/drivers/ibm/storwize_svc/replication.py +++ b/cinder/volume/drivers/ibm/storwize_svc/replication.py @@ -23,7 +23,7 @@ from oslo_utils import excutils import six from cinder import exception -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder import ssh_utils from cinder import utils from cinder.volume.drivers.ibm.storwize_svc import storwize_const @@ -104,8 +104,8 @@ class StorwizeSVCReplicationStretchedCluster(StorwizeSVCReplication): self.driver._helpers.rm_vdisk_copy(volume['name'], secondary['copy_id']) else: - LOG.info(_LI('Could not find replica to delete of' - ' volume %(vol)s.'), {'vol': vdisk}) + LOG.info('Could not find replica to delete of' + ' volume %(vol)s.', {'vol': vdisk}) def test_replica(self, tgt_volume, src_volume): vdisk = src_volume['name'] @@ -267,9 +267,9 @@ class StorwizeSVCReplicationGlobalMirror( self.target_helpers.switch_relationship(rel_info['name']) return {'replication_status': 'failed-over'} except Exception as e: - LOG.exception(_LE('Unable to fail-over the volume %(id)s to the ' - 'secondary back-end by switchrcrelationship ' - 'command, error: %(error)s'), + LOG.exception('Unable to fail-over the volume %(id)s to the ' + 'secondary back-end by switchrcrelationship ' + 'command, error: %(error)s', {"id": vref['id'], "error": e}) # If the switch command fail, try to make the aux volume # writeable again. 
@@ -372,7 +372,7 @@ class StorwizeSVCReplicationManager(object): cmd=command) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error running SSH command: %s"), command) + LOG.error("Error running SSH command: %s", command) def get_target_helpers(self): return self.target_helpers diff --git a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py index e3af7b57e09..9573d1944ac 100644 --- a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py +++ b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py @@ -34,11 +34,11 @@ import six from cinder import context from cinder import exception -from cinder import ssh_utils -from cinder import utils as cinder_utils -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import objects from cinder.objects import fields +from cinder import ssh_utils +from cinder import utils as cinder_utils from cinder.volume import driver from cinder.volume.drivers.ibm.storwize_svc import ( replication as storwize_rep) @@ -282,14 +282,14 @@ class StorwizeSSH(object): except Exception as ex: if (not multihostmap and hasattr(ex, 'message') and 'CMMVC6071E' in ex.message): - LOG.error(_LE('storwize_svc_multihostmap_enabled is set ' - 'to False, not allowing multi host mapping.')) + LOG.error('storwize_svc_multihostmap_enabled is set ' + 'to False, not allowing multi host mapping.') raise exception.VolumeDriverException( message=_('CMMVC6071E The VDisk-to-host mapping was not ' 'created because the VDisk is already mapped ' 'to a host.\n"')) with excutils.save_and_reraise_exception(): - LOG.error(_LE('Error mapping VDisk-to-host')) + LOG.error('Error mapping VDisk-to-host') def mkrcrelationship(self, master, aux, system, asyncmirror): ssh_cmd = ['svctask', 'mkrcrelationship', '-master', master, @@ -396,13 +396,13 @@ class StorwizeSSH(object): if hasattr(ex, 'msg') and 'CMMVC6372W' in ex.msg: vdisk = self.lsvdisk(name) if vdisk: - LOG.warning(_LW('CMMVC6372W The virtualized storage ' - 'capacity that the cluster is using is ' - 'approaching the virtualized storage ' - 'capacity that is licensed.')) + LOG.warning('CMMVC6372W The virtualized storage ' + 'capacity that the cluster is using is ' + 'approaching the virtualized storage ' + 'capacity that is licensed.') return vdisk['id'] with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to create vdisk %(vol)s.'), + LOG.exception('Failed to create vdisk %(vol)s.', {'vol': name}) def rmvdisk(self, vdisk, force=True): @@ -620,7 +620,7 @@ class StorwizeHelpers(object): if resp.get('license_scheme', '0') == '9846': return True except exception.VolumeBackendAPIException: - LOG.exception(_LE("Failed to fetch licensing scheme.")) + LOG.exception("Failed to fetch licensing scheme.") return False def replication_licensed(self): @@ -633,8 +633,7 @@ class StorwizeHelpers(object): if product_key in storwize_const.REP_CAP_DEVS: return True except exception.VolumeBackendAPIException as war: - LOG.warning(_LW("Failed to run lsguicapability. " - "Exception: %s."), war) + LOG.warning("Failed to run lsguicapability. 
Exception: %s.", war) return False def get_system_info(self): @@ -767,7 +766,7 @@ class StorwizeHelpers(object): port_info['status'] == 'active'): wwpns.add(port_info['WWPN']) node['WWPN'] = list(wwpns) - LOG.info(_LI('WWPN on node %(node)s: %(wwpn)s.'), + LOG.info('WWPN on node %(node)s: %(wwpn)s.', {'node': node['id'], 'wwpn': node['WWPN']}) def add_chap_secret_to_host(self, host_name): @@ -983,15 +982,15 @@ class StorwizeHelpers(object): # Check if the mapping exists resp = self.ssh.lsvdiskhostmap(volume_name) if not len(resp): - LOG.warning(_LW('unmap_vol_from_host: No mapping of volume ' - '%(vol_name)s to any host found.'), + LOG.warning('unmap_vol_from_host: No mapping of volume ' + '%(vol_name)s to any host found.', {'vol_name': volume_name}) return host_name if host_name is None: if len(resp) > 1: - LOG.warning(_LW('unmap_vol_from_host: Multiple mappings of ' - 'volume %(vol_name)s found, no host ' - 'specified.'), {'vol_name': volume_name}) + LOG.warning('unmap_vol_from_host: Multiple mappings of ' + 'volume %(vol_name)s found, no host ' + 'specified.', {'vol_name': volume_name}) return else: host_name = resp[0]['host_name'] @@ -1001,8 +1000,8 @@ class StorwizeHelpers(object): if h == host_name: found = True if not found: - LOG.warning(_LW('unmap_vol_from_host: No mapping of volume ' - '%(vol_name)s to host %(host)s found.'), + LOG.warning('unmap_vol_from_host: No mapping of volume ' + '%(vol_name)s to host %(host)s found.', {'vol_name': volume_name, 'host': host_name}) return host_name # We now know that the mapping exists @@ -1103,8 +1102,8 @@ class StorwizeHelpers(object): key = 'replication' words = value.split() if not (words and len(words) == 2 and words[0] == ''): - LOG.error(_LE('Replication must be specified as ' - '\' True\' or \' False\'.')) + LOG.error('Replication must be specified as ' + '\' True\' or \' False\'.') del words[0] value = words[0] @@ -1169,8 +1168,8 @@ class StorwizeHelpers(object): testValue = testmethod() except Exception as ex: if raise_exception: - LOG.exception(_LE("_wait_for_a_condition: %s" - " execution failed."), + LOG.exception("_wait_for_a_condition: %s" + " execution failed.", testmethod.__name__) raise exception.VolumeBackendAPIException(data=ex) else: @@ -1366,8 +1365,8 @@ class StorwizeHelpers(object): model_update['status'] = fields.ConsistencyGroupStatus.ERROR # Release cg self.delete_fc_consistgrp(fc_consistgrp) - LOG.error(_LE("Failed to create CGSnapshot. " - "Exception: %s."), err) + LOG.error("Failed to create CGSnapshot. " + "Exception: %s.", err) for snapshot in snapshots: snapshots_model_update.append( @@ -1387,8 +1386,8 @@ class StorwizeHelpers(object): except exception.VolumeBackendAPIException as err: model_update['status'] = ( fields.ConsistencyGroupStatus.ERROR_DELETING) - LOG.error(_LE("Failed to delete the snapshot %(snap)s of " - "CGSnapshot. Exception: %(exception)s."), + LOG.error("Failed to delete the snapshot %(snap)s of " + "CGSnapshot. Exception: %(exception)s.", {'snap': snapshot['name'], 'exception': err}) for snapshot in snapshots: @@ -1454,8 +1453,8 @@ class StorwizeHelpers(object): with excutils.save_and_reraise_exception(): # Release cg self.delete_fc_consistgrp(fc_consistgrp) - LOG.error(_LE("Failed to create CG from CGsnapshot. " - "Exception: %s"), err) + LOG.error("Failed to create CG from CGsnapshot. 
" + "Exception: %s", err) return model_update, volumes_model_update LOG.debug('Leave: create_cg_from_source.') @@ -1465,15 +1464,14 @@ class StorwizeHelpers(object): status='available'): """Update the volume model's status and return it.""" volume_model_updates = [] - LOG.info(_LI( - "Updating status for CG: %(id)s."), - {'id': cgId}) + LOG.info("Updating status for CG: %(id)s.", + {'id': cgId}) if volumes: for volume in volumes: volume_model_updates.append({'id': volume['id'], 'status': status}) else: - LOG.info(_LI("No volume found for CG: %(cg)s."), + LOG.info("No volume found for CG: %(cg)s.", {'cg': cgId}) return volume_model_updates @@ -1652,8 +1650,8 @@ class StorwizeHelpers(object): def get_relationship_info(self, volume_name): vol_attrs = self.get_vdisk_attributes(volume_name) if not vol_attrs or not vol_attrs['RC_name']: - LOG.info(_LI("Unable to get remote copy information for " - "volume %s"), volume_name) + LOG.info("Unable to get remote copy information for " + "volume %s", volume_name) return relationship = self.ssh.lsrcrelationship(vol_attrs['RC_name']) @@ -1703,7 +1701,7 @@ class StorwizeHelpers(object): """Ensures that vdisk is not part of FC mapping and deletes it.""" LOG.debug('Enter: delete_vdisk: vdisk %s.', vdisk) if not self.is_vdisk_defined(vdisk): - LOG.info(_LI('Tried to delete non-existent vdisk %s.'), vdisk) + LOG.info('Tried to delete non-existent vdisk %s.', vdisk) return self.ensure_vdisk_no_fc_mappings(vdisk, allow_snaps=True, allow_fctgt=True) @@ -2209,16 +2207,16 @@ class StorwizeSVCCommonDriver(san.SanDriver, try: self.sshpool = self._set_up_sshpool(self.active_ip) except paramiko.SSHException: - LOG.warning(_LW('Unable to use san_ip to create SSHPool. Now ' - 'attempting to use storwize_san_secondary_ip ' - 'to create SSHPool.')) + LOG.warning('Unable to use san_ip to create SSHPool. Now ' + 'attempting to use storwize_san_secondary_ip ' + 'to create SSHPool.') if self._toggle_ip(): self.sshpool = self._set_up_sshpool(self.active_ip) else: - LOG.warning(_LW('Unable to create SSHPool using san_ip ' - 'and not able to use ' - 'storwize_san_secondary_ip since it is ' - 'not configured.')) + LOG.warning('Unable to create SSHPool using san_ip ' + 'and not able to use ' + 'storwize_san_secondary_ip since it is ' + 'not configured.') raise try: return self._ssh_execute(self.sshpool, command, @@ -2229,22 +2227,22 @@ class StorwizeSVCCommonDriver(san.SanDriver, # before raising an error. try: if self._toggle_ip(): - LOG.warning(_LW("Unable to execute SSH command with " - "%(inactive)s. Attempting to execute SSH " - "command with %(active)s."), + LOG.warning("Unable to execute SSH command with " + "%(inactive)s. 
Attempting to execute SSH " + "command with %(active)s.", {'inactive': self.inactive_ip, 'active': self.active_ip}) self.sshpool = self._set_up_sshpool(self.active_ip) return self._ssh_execute(self.sshpool, command, check_exit_code, attempts) else: - LOG.warning(_LW('Not able to use ' - 'storwize_san_secondary_ip since it is ' - 'not configured.')) + LOG.warning('Not able to use ' + 'storwize_san_secondary_ip since it is ' + 'not configured.') raise except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error running SSH command: %s"), + LOG.error("Error running SSH command: %s", command) def _set_up_sshpool(self, ip): @@ -2276,7 +2274,7 @@ class StorwizeSVCCommonDriver(san.SanDriver, command, check_exit_code=check_exit_code) except Exception as e: - LOG.error(_LE('Error has occurred: %s'), e) + LOG.error('Error has occurred: %s', e) last_exception = e greenthread.sleep(self.DEFAULT_GR_SLEEP) try: @@ -2294,7 +2292,7 @@ class StorwizeSVCCommonDriver(san.SanDriver, except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error running SSH command: %s"), command) + LOG.error("Error running SSH command: %s", command) def _toggle_ip(self): # Change active_ip if storwize_san_secondary_ip is set. @@ -2302,8 +2300,7 @@ class StorwizeSVCCommonDriver(san.SanDriver, return False self.inactive_ip, self.active_ip = self.active_ip, self.inactive_ip - LOG.info(_LI('Toggle active_ip from %(old)s to ' - '%(new)s.'), + LOG.info('Toggle active_ip from %(old)s to %(new)s.', {'old': self.inactive_ip, 'new': self.active_ip}) return True @@ -2318,7 +2315,7 @@ class StorwizeSVCCommonDriver(san.SanDriver, volume_defined = self._helpers.is_vdisk_defined(vol_name) if not volume_defined: - LOG.error(_LE('ensure_export: Volume %s not found on storage.'), + LOG.error('ensure_export: Volume %s not found on storage.', volume['name']) def create_export(self, ctxt, volume, connector): @@ -2384,8 +2381,8 @@ class StorwizeSVCCommonDriver(san.SanDriver, self._master_backend_helpers.delete_rc_volume( volume['name']) except Exception as ex: - LOG.error(_LE('Failed to get delete volume %(volume)s in ' - 'master backend. Exception: %(err)s.'), + LOG.error('Failed to get delete volume %(volume)s in ' + 'master backend. 
Exception: %(err)s.', {'volume': volume['name'], 'err': ex}) else: @@ -2542,8 +2539,8 @@ class StorwizeSVCCommonDriver(san.SanDriver, rel_info = self._helpers.get_relationship_info(volume_name) if rel_info: - LOG.warning(_LW('_extend_volume_op: Extending a volume with ' - 'remote copy is not recommended.')) + LOG.warning('_extend_volume_op: Extending a volume with ' + 'remote copy is not recommended.') try: tgt_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']) @@ -2607,13 +2604,13 @@ class StorwizeSVCCommonDriver(san.SanDriver, self._vdiskcopyops_loop.stop() self._vdiskcopyops_loop = None except KeyError: - LOG.error(_LE('_rm_vdisk_copy_op: Volume %s does not have any ' - 'registered vdisk copy operations.'), volume['id']) + LOG.error('_rm_vdisk_copy_op: Volume %s does not have any ' + 'registered vdisk copy operations.', volume['id']) return except ValueError: - LOG.error(_LE('_rm_vdisk_copy_op: Volume %(vol)s does not have ' - 'the specified vdisk copy operation: orig=%(orig)s ' - 'new=%(new)s.'), + LOG.error('_rm_vdisk_copy_op: Volume %(vol)s does not have ' + 'the specified vdisk copy operation: orig=%(orig)s ' + 'new=%(new)s.', {'vol': volume['id'], 'orig': orig_copy_id, 'new': new_copy_id}) return @@ -2622,17 +2619,17 @@ class StorwizeSVCCommonDriver(san.SanDriver, volume['id']) curr_ops = metadata.get('vdiskcopyops', None) if not curr_ops: - LOG.error(_LE('_rm_vdisk_copy_op: Volume metadata %s does not ' - 'have any registered vdisk copy operations.'), + LOG.error('_rm_vdisk_copy_op: Volume metadata %s does not ' + 'have any registered vdisk copy operations.', volume['id']) return curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')] try: curr_ops_list.remove((orig_copy_id, new_copy_id)) except ValueError: - LOG.error(_LE('_rm_vdisk_copy_op: Volume %(vol)s metadata does ' - 'not have the specified vdisk copy operation: ' - 'orig=%(orig)s new=%(new)s.'), + LOG.error('_rm_vdisk_copy_op: Volume %(vol)s metadata does ' + 'not have the specified vdisk copy operation: ' + 'orig=%(orig)s new=%(new)s.', {'vol': volume['id'], 'orig': orig_copy_id, 'new': new_copy_id}) return @@ -2676,7 +2673,7 @@ class StorwizeSVCCommonDriver(san.SanDriver, try: volume = self.db.volume_get(ctxt, vol_id) except Exception: - LOG.warning(_LW('Volume %s does not exist.'), vol_id) + LOG.warning('Volume %s does not exist.', vol_id) del self._vdiskcopyops[vol_id] if not len(self._vdiskcopyops): self._vdiskcopyops_loop.stop() @@ -2688,9 +2685,9 @@ class StorwizeSVCCommonDriver(san.SanDriver, synced = self._helpers.is_vdisk_copy_synced(volume['name'], copy_op[1]) except Exception: - LOG.info(_LI('_check_volume_copy_ops: Volume %(vol)s does ' - 'not have the specified vdisk copy ' - 'operation: orig=%(orig)s new=%(new)s.'), + LOG.info('_check_volume_copy_ops: Volume %(vol)s does ' + 'not have the specified vdisk copy ' + 'operation: orig=%(orig)s new=%(new)s.', {'vol': volume['id'], 'orig': copy_op[0], 'new': copy_op[1]}) else: @@ -2731,8 +2728,8 @@ class StorwizeSVCCommonDriver(san.SanDriver, """Fail back all the volume on the secondary backend.""" volumes_update = [] if not self._active_backend_id: - LOG.info(_LI("Host has been failed back. doesn't need " - "to fail back again")) + LOG.info("Host has been failed back. 
doesn't need " + "to fail back again") return None, volumes_update try: @@ -2777,11 +2774,11 @@ class StorwizeSVCCommonDriver(san.SanDriver, {'volume_id': volume['id'], 'updates': {'replication_status': 'error', 'status': 'error'}}) - LOG.error(_LE('_failback_replica_volumes:no rc-releationship ' - 'is established between master: %(master)s and ' - 'aux %(aux)s. Please re-establish the ' - 'relationship and synchronize the volumes on ' - 'backend storage.'), + LOG.error('_failback_replica_volumes:no rc-releationship ' + 'is established between master: %(master)s and ' + 'aux %(aux)s. Please re-establish the ' + 'relationship and synchronize the volumes on ' + 'backend storage.', {'master': volume['name'], 'aux': tgt_volume}) continue LOG.debug('_failover_replica_volumes: vol=%(vol)s, master_vol=' @@ -2798,7 +2795,7 @@ class StorwizeSVCCommonDriver(san.SanDriver, {'volume_id': volume['id'], 'updates': model_updates}) except exception.VolumeDriverException: - LOG.error(_LE('Unable to fail back volume %(volume_id)s'), + LOG.error('Unable to fail back volume %(volume_id)s', {'volume_id': volume.id}) volumes_update.append( {'volume_id': volume['id'], @@ -2829,18 +2826,18 @@ class StorwizeSVCCommonDriver(san.SanDriver, rep_mgr = self._get_replica_mgr() rep_mgr.establish_target_partnership() except Exception as ex: - LOG.warning(_LW('Fail to establish partnership in backend. ' - 'error=%(ex)s'), {'error': ex}) + LOG.warning('Fail to establish partnership in backend. ' + 'error=%(ex)s', {'error': ex}) for volume in volumes: tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'] rep_info = self._helpers.get_relationship_info(tgt_volume) if not rep_info: - LOG.error(_LE('_sync_with_aux: no rc-releationship is ' - 'established between master: %(master)s and aux ' - '%(aux)s. Please re-establish the relationship ' - 'and synchronize the volumes on backend ' - 'storage.'), {'master': volume['name'], - 'aux': tgt_volume}) + LOG.error('_sync_with_aux: no rc-releationship is ' + 'established between master: %(master)s and aux ' + '%(aux)s. Please re-establish the relationship ' + 'and synchronize the volumes on backend ' + 'storage.', {'master': volume['name'], + 'aux': tgt_volume}) continue LOG.debug('_sync_with_aux: volume: %(volume)s rep_info:master_vol=' '%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s, ' @@ -2858,13 +2855,13 @@ class StorwizeSVCCommonDriver(san.SanDriver, self._helpers.start_relationship(tgt_volume, primary='aux') except Exception as ex: - LOG.warning(_LW('Fail to copy data from aux to master. master:' - ' %(master)s and aux %(aux)s. Please ' - 're-establish the relationship and synchronize' - ' the volumes on backend storage. error=' - '%(ex)s'), {'master': volume['name'], - 'aux': tgt_volume, - 'error': ex}) + LOG.warning('Fail to copy data from aux to master. master:' + ' %(master)s and aux %(aux)s. Please ' + 're-establish the relationship and synchronize' + ' the volumes on backend storage. 
error=' + '%(ex)s', {'master': volume['name'], + 'aux': tgt_volume, + 'error': ex}) LOG.debug('leave: _sync_with_aux.') def _wait_replica_ready(self, ctxt, volumes): @@ -2873,10 +2870,10 @@ class StorwizeSVCCommonDriver(san.SanDriver, try: self._wait_replica_vol_ready(ctxt, tgt_volume) except Exception as ex: - LOG.error(_LE('_wait_replica_ready: wait for volume:%(volume)s' - ' remote copy synchronization failed due to ' - 'error:%(err)s.'), {'volume': tgt_volume, - 'err': ex}) + LOG.error('_wait_replica_ready: wait for volume:%(volume)s' + ' remote copy synchronization failed due to ' + 'error:%(err)s.', {'volume': tgt_volume, + 'err': ex}) def _wait_replica_vol_ready(self, ctxt, volume): LOG.debug('enter: _wait_replica_vol_ready: volume=%(volume)s', @@ -2919,7 +2916,7 @@ class StorwizeSVCCommonDriver(san.SanDriver, def _replication_failover(self, ctxt, volumes): volumes_update = [] if self._active_backend_id: - LOG.info(_LI("Host has been failed over to %s"), + LOG.info("Host has been failed over to %s", self._active_backend_id) return self._active_backend_id, volumes_update @@ -2965,11 +2962,11 @@ class StorwizeSVCCommonDriver(san.SanDriver, 'updates': {'replication_status': 'error_failing-over', 'status': 'error'}}) - LOG.error(_LE('_failover_replica_volumes: no rc-' - 'releationship is established for master:' - '%(master)s. Please re-establish the rc-' - 'relationship and synchronize the volumes on' - ' backend storage.'), + LOG.error('_failover_replica_volumes: no rc-' + 'releationship is established for master:' + '%(master)s. Please re-establish the rc-' + 'relationship and synchronize the volumes on' + ' backend storage.', {'master': volume['name']}) continue LOG.debug('_failover_replica_volumes: vol=%(vol)s, ' @@ -2985,8 +2982,8 @@ class StorwizeSVCCommonDriver(san.SanDriver, {'volume_id': volume['id'], 'updates': model_updates}) except exception.VolumeDriverException: - LOG.error(_LE('Unable to failover to aux volume. Please make ' - 'sure that the aux volume is ready.')) + LOG.error('Unable to failover to aux volume. Please make ' + 'sure that the aux volume is ready.') volumes_update.append( {'volume_id': volume['id'], 'updates': {'status': 'error', @@ -3129,9 +3126,9 @@ class StorwizeSVCCommonDriver(san.SanDriver, try: rep_manager.establish_target_partnership() except exception.VolumeDriverException: - LOG.error(_LE('The replication src %(src)s has not ' - 'successfully established partnership with the ' - 'replica target %(tgt)s.'), + LOG.error('The replication src %(src)s has not ' + 'successfully established partnership with the ' + 'replica target %(tgt)s.', {'src': self.configuration.san_ip, 'tgt': target['backend_id']}) @@ -3326,8 +3323,8 @@ class StorwizeSVCCommonDriver(san.SanDriver, try: self._helpers.rename_vdisk(current_name, original_volume_name) except exception.VolumeBackendAPIException: - LOG.error(_LE('Unable to rename the logical volume ' - 'for volume: %s'), volume['id']) + LOG.error('Unable to rename the logical volume ' + 'for volume: %s', volume['id']) return {'_name_id': new_volume['_name_id'] or new_volume['id']} # If the back-end name(id) for the volume has been renamed, # it is OK for the volume to keep the original name(id) and there is @@ -3509,8 +3506,8 @@ class StorwizeSVCCommonDriver(san.SanDriver, except exception.VolumeBackendAPIException as err: model_update['status'] = ( fields.ConsistencyGroupStatus.ERROR_DELETING) - LOG.error(_LE("Failed to delete the volume %(vol)s of CG. 
" - "Exception: %(exception)s."), + LOG.error("Failed to delete the volume %(vol)s of CG. " + "Exception: %(exception)s.", {'vol': volume['name'], 'exception': err}) volumes_model_update.append( {'id': volume['id'], 'status': 'error_deleting'}) diff --git a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py index d2ec6a52933..b0f683aba0a 100644 --- a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py +++ b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py @@ -39,7 +39,7 @@ from oslo_log import log as logging from oslo_utils import excutils from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume.drivers.ibm.storwize_svc import ( @@ -104,8 +104,8 @@ class StorwizeSVCFCDriver(storwize_common.StorwizeSVCCommonDriver): def validate_connector(self, connector): """Check connector for at least one enabled FC protocol.""" if 'wwpns' not in connector: - LOG.error(_LE('The connector does not contain the required ' - 'information.')) + LOG.error('The connector does not contain the required ' + 'information.') raise exception.InvalidConnectorException( missing='wwpns') @@ -154,8 +154,8 @@ class StorwizeSVCFCDriver(storwize_common.StorwizeSVCCommonDriver): preferred_node = volume_attributes['preferred_node_id'] IO_group = volume_attributes['IO_group_id'] except KeyError as e: - LOG.error(_LE('Did not find expected column name in ' - 'lsvdisk: %s.'), e) + LOG.error('Did not find expected column name in ' + 'lsvdisk: %s.', e) raise exception.VolumeBackendAPIException( data=_('initialize_connection: Missing volume attribute for ' 'volume %s.') % volume_name) @@ -180,8 +180,8 @@ class StorwizeSVCFCDriver(storwize_common.StorwizeSVCCommonDriver): if not preferred_node_entry: # Get 1st node in I/O group preferred_node_entry = io_group_nodes[0] - LOG.warning(_LW('initialize_connection: Did not find a ' - 'preferred node for volume %s.'), volume_name) + LOG.warning('initialize_connection: Did not find a ' + 'preferred node for volume %s.', volume_name) properties = {} properties['target_discovered'] = False @@ -209,11 +209,11 @@ class StorwizeSVCFCDriver(storwize_common.StorwizeSVCCommonDriver): except Exception: with excutils.save_and_reraise_exception(): self._do_terminate_connection(volume, connector) - LOG.error(_LE('initialize_connection: Failed ' - 'to collect return ' - 'properties for volume %(vol)s and connector ' - '%(conn)s.\n'), {'vol': volume, - 'conn': connector}) + LOG.error('initialize_connection: Failed ' + 'to collect return ' + 'properties for volume %(vol)s and connector ' + '%(conn)s.\n', {'vol': volume, + 'conn': connector}) LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n ' 'connector %(conn)s\n properties: %(prop)s', @@ -288,8 +288,8 @@ class StorwizeSVCFCDriver(storwize_common.StorwizeSVCCommonDriver): if host_name: resp = self._helpers.check_host_mapped_vols(host_name) if not len(resp): - LOG.info(_LI("Need to remove FC Zone, building initiator " - "target map.")) + LOG.info("Need to remove FC Zone, building initiator " + "target map.") # Build info data structure for zone removing if 'wwpns' in connector and host_name: target_wwpns = [] diff --git a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py index ec015f1ba17..d70ccd5c6ee 100644 --- a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py +++ 
b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py @@ -39,7 +39,7 @@ from oslo_log import log as logging from oslo_utils import excutils from cinder import exception -from cinder.i18n import _, _LE, _LW +from cinder.i18n import _ from cinder import interface from cinder import utils @@ -104,8 +104,8 @@ class StorwizeSVCISCSIDriver(storwize_common.StorwizeSVCCommonDriver): def validate_connector(self, connector): """Check connector for at least one enabled iSCSI protocol.""" if 'initiator' not in connector: - LOG.error(_LE('The connector does not contain the required ' - 'information.')) + LOG.error('The connector does not contain the required ' + 'information.') raise exception.InvalidConnectorException( missing='initiator') @@ -143,8 +143,7 @@ class StorwizeSVCISCSIDriver(storwize_common.StorwizeSVCCommonDriver): if chap_enabled and chap_secret is None: chap_secret = self._helpers.add_chap_secret_to_host(host_name) elif not chap_enabled and chap_secret: - LOG.warning(_LW('CHAP secret exists for host but CHAP is ' - 'disabled.')) + LOG.warning('CHAP secret exists for host but CHAP is disabled.') multihostmap = self.configuration.storwize_svc_multihostmap_enabled lun_id = self._helpers.map_vol_to_host(volume_name, host_name, @@ -160,11 +159,11 @@ class StorwizeSVCISCSIDriver(storwize_common.StorwizeSVCCommonDriver): except Exception: with excutils.save_and_reraise_exception(): self._do_terminate_connection(volume, connector) - LOG.error(_LE('initialize_connection: Failed ' - 'to collect return ' - 'properties for volume %(vol)s and connector ' - '%(conn)s.\n'), {'vol': volume, - 'conn': connector}) + LOG.error('initialize_connection: Failed ' + 'to collect return ' + 'properties for volume %(vol)s and connector ' + '%(conn)s.\n', {'vol': volume, + 'conn': connector}) LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n ' 'connector: %(conn)s\n properties: %(prop)s', @@ -221,8 +220,8 @@ class StorwizeSVCISCSIDriver(storwize_common.StorwizeSVCCommonDriver): if not preferred_node_entry: # Get 1st node in I/O group preferred_node_entry = io_group_nodes[0] - LOG.warning(_LW('_get_single_iscsi_data: Did not find a ' - 'preferred node for volume %s.'), volume_name) + LOG.warning('_get_single_iscsi_data: Did not find a ' + 'preferred node for volume %s.', volume_name) properties = { 'target_discovered': False, diff --git a/cinder/volume/drivers/infortrend/raidcmd_cli/cli_factory.py b/cinder/volume/drivers/infortrend/raidcmd_cli/cli_factory.py index b032d3504dc..0ff4674b03d 100644 --- a/cinder/volume/drivers/infortrend/raidcmd_cli/cli_factory.py +++ b/cinder/volume/drivers/infortrend/raidcmd_cli/cli_factory.py @@ -22,7 +22,6 @@ from oslo_concurrency import processutils from oslo_log import log as logging import six -from cinder.i18n import _LE from cinder import utils LOG = logging.getLogger(__name__) @@ -45,9 +44,9 @@ def retry_cli(func): if rc == 0: break - LOG.error(_LE( + LOG.error( 'Retry %(retry)s times: %(method)s Failed ' - '%(rc)s: %(reason)s'), { + '%(rc)s: %(reason)s', { 'retry': retry_time, 'method': self.__class__.__name__, 'rc': rc, @@ -144,9 +143,9 @@ class ExecuteCommand(BaseCommand): rc = pe.exit_code result = pe.stdout result = result.replace('\n', '\\n') - LOG.error(_LE( + LOG.error( 'Error on execute command. 
' - 'Error code: %(exit_code)d Error msg: %(result)s'), { + 'Error code: %(exit_code)d Error msg: %(result)s', { 'exit_code': pe.exit_code, 'result': result}) return rc, result @@ -219,9 +218,9 @@ class CLIBaseCommand(BaseCommand): rc = -2 # prevent confusing with cli real rc result = pe.stdout result = result.replace('\n', '\\n') - LOG.error(_LE( + LOG.error( 'Error on execute %(command)s. ' - 'Error code: %(exit_code)d Error msg: %(result)s'), { + 'Error code: %(exit_code)d Error msg: %(result)s', { 'command': command_line, 'exit_code': pe.exit_code, 'result': result}) diff --git a/cinder/volume/drivers/infortrend/raidcmd_cli/common_cli.py b/cinder/volume/drivers/infortrend/raidcmd_cli/common_cli.py index 9894fa4ab2b..99443481d1e 100644 --- a/cinder/volume/drivers/infortrend/raidcmd_cli/common_cli.py +++ b/cinder/volume/drivers/infortrend/raidcmd_cli/common_cli.py @@ -26,7 +26,7 @@ from oslo_utils import timeutils from oslo_utils import units from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder.volume.drivers.infortrend.raidcmd_cli import cli_factory as cli from cinder.volume.drivers.san import san from cinder.volume import volume_types @@ -87,11 +87,11 @@ CLI_RC_FILTER = { 'DeletePartition': {'error': _('Failed to delete partition.')}, 'SetPartition': {'error': _('Failed to set partition.')}, 'CreateMap': { - 'warning': {20: _LW('The MCS Channel is grouped.')}, + 'warning': {20: 'The MCS Channel is grouped.'}, 'error': _('Failed to create map.'), }, 'DeleteMap': { - 'warning': {11: _LW('No mapping.')}, + 'warning': {11: 'No mapping.'}, 'error': _('Failed to delete map.'), }, 'CreateSnapshot': {'error': _('Failed to create snapshot.')}, @@ -99,13 +99,13 @@ CLI_RC_FILTER = { 'CreateReplica': {'error': _('Failed to create replica.')}, 'DeleteReplica': {'error': _('Failed to delete replica.')}, 'CreateIQN': { - 'warning': {20: _LW('IQN already existed.')}, + 'warning': {20: 'IQN already existed.'}, 'error': _('Failed to create iqn.'), }, 'DeleteIQN': { 'warning': { - 20: _LW('IQN has been used to create map.'), - 11: _LW('No such host alias name.'), + 20: 'IQN has been used to create map.', + 11: 'No such host alias name.', }, 'error': _('Failed to delete iqn.'), }, @@ -483,7 +483,7 @@ class InfortrendCommon(object): model_update = { "provider_location": self._concat_provider_location(model_dict), } - LOG.info(_LI('Create Volume %(volume_id)s completed.'), { + LOG.info('Create Volume %(volume_id)s completed.', { 'volume_id': volume_id}) return model_update @@ -845,7 +845,7 @@ class InfortrendCommon(object): ) if not check_exist: - LOG.warning(_LW('Volume %(volume_id)s already deleted.'), { + LOG.warning('Volume %(volume_id)s already deleted.', { 'volume_id': volume_id}) return @@ -856,9 +856,9 @@ class InfortrendCommon(object): part_id == entry['Source']): if not self._check_replica_completed(entry): has_pair = True - LOG.warning(_LW('Volume still %(status)s ' - 'Cannot delete volume.'), { - 'status': entry['Status']}) + LOG.warning('Volume still %(status)s ' + 'Cannot delete volume.', + {'status': entry['Status']}) else: have_map = entry['Source-Mapped'] == 'Yes' self._execute('DeleteReplica', entry['Pair-ID'], '-y') @@ -894,7 +894,7 @@ class InfortrendCommon(object): self._execute('DeletePartition', part_id, '-y') - LOG.info(_LI('Delete Volume %(volume_id)s completed.'), { + LOG.info('Delete Volume %(volume_id)s completed.', { 'volume_id': volume_id}) else: msg = _('Failed to delete volume ' @@ -946,7 +946,7 @@ class 
InfortrendCommon(object): model_update = self._create_volume_from_volume(volume, src_part_id) - LOG.info(_LI('Create Cloned Volume %(volume_id)s completed.'), { + LOG.info('Create Cloned Volume %(volume_id)s completed.', { 'volume_id': volume['id']}) return model_update @@ -998,7 +998,7 @@ class InfortrendCommon(object): def create_export(self, context, volume): model_update = volume['provider_location'] - LOG.info(_LI('Create export done from Volume %(volume_id)s.'), { + LOG.info('Create export done from Volume %(volume_id)s.', { 'volume_id': volume['id']}) return {'provider_location': model_update} @@ -1011,12 +1011,12 @@ class InfortrendCommon(object): if self._volume_stats is None or refresh: self._update_volume_stats() - LOG.info(_LI( + LOG.info( 'Successfully update volume stats. ' 'backend: %(volume_backend_name)s, ' 'vendor: %(vendor_name)s, ' 'driver version: %(driver_version)s, ' - 'storage protocol: %(storage_protocol)s.'), self._volume_stats) + 'storage protocol: %(storage_protocol)s.', self._volume_stats) return self._volume_stats @@ -1111,11 +1111,11 @@ class InfortrendCommon(object): snapshot_list = do_create_snapshot() - LOG.info(_LI( + LOG.info( 'Create success. ' 'Snapshot: %(snapshot)s, ' 'Snapshot ID in raid: %(raid_snapshot_id)s, ' - 'volume: %(volume)s.'), { + 'volume: %(volume)s.', { 'snapshot': snapshot_id, 'raid_snapshot_id': snapshot_list[-1]['SI-ID'], 'volume': volume_id}) @@ -1143,7 +1143,7 @@ class InfortrendCommon(object): if not has_pair: self._execute('DeleteSnapshot', raid_snapshot_id, '-y') - LOG.info(_LI('Delete Snapshot %(snapshot_id)s completed.'), { + LOG.info('Delete Snapshot %(snapshot_id)s completed.', { 'snapshot_id': snapshot_id}) else: msg = _('Failed to delete snapshot ' @@ -1161,9 +1161,9 @@ class InfortrendCommon(object): def _get_raid_snapshot_id(self, snapshot): if 'provider_location' not in snapshot: - LOG.warning(_LW( + LOG.warning( 'Failed to get Raid Snapshot ID and ' - 'did not store in snapshot.')) + 'did not store in snapshot.') return return snapshot['provider_location'] @@ -1174,9 +1174,9 @@ class InfortrendCommon(object): if not self._check_replica_completed(entry): has_pair = True - LOG.warning(_LW( - 'Snapshot still %(status)s Cannot delete snapshot.'), { - 'status': entry['Status']}) + LOG.warning( + 'Snapshot still %(status)s Cannot delete snapshot.', + {'status': entry['Status']}) else: self._execute('DeleteReplica', entry['Pair-ID'], '-y') return has_pair @@ -1208,9 +1208,9 @@ class InfortrendCommon(object): model_update = self._create_volume_from_snapshot_id( volume, raid_snapshot_id, src_part_id) - LOG.info(_LI( + LOG.info( 'Create Volume %(volume_id)s from ' - 'snapshot %(snapshot_id)s completed.'), { + 'snapshot %(snapshot_id)s completed.', { 'volume_id': volume['id'], 'snapshot_id': snapshot['id']}) @@ -1285,10 +1285,10 @@ class InfortrendCommon(object): properties = self._generate_fc_connection_properties( map_lun, target_wwpns, initiator_target_map) - LOG.info(_LI('Successfully initialized connection. ' - 'target_wwn: %(target_wwn)s, ' - 'initiator_target_map: %(initiator_target_map)s, ' - 'lun: %(target_lun)s.'), properties['data']) + LOG.info('Successfully initialized connection. 
' + 'target_wwn: %(target_wwn)s, ' + 'initiator_target_map: %(initiator_target_map)s, ' + 'lun: %(target_lun)s.', properties['data']) return properties def _do_fc_connection(self, volume, connector): @@ -1411,8 +1411,8 @@ class InfortrendCommon(object): properties = self._generate_iscsi_connection_properties( property_value, volume) - LOG.info(_LI('Successfully initialized connection ' - 'with volume: %(volume_id)s.'), properties['data']) + LOG.info('Successfully initialized connection ' + 'with volume: %(volume_id)s.', properties['data']) return properties @log_func @@ -1542,9 +1542,9 @@ class InfortrendCommon(object): run_as_root=True) if rc != 0: - LOG.error(_LE( - 'Can not discovery in %(target_ip)s with %(target_iqn)s.'), { - 'target_ip': target_ip, 'target_iqn': target_iqn}) + LOG.error( + 'Can not discovery in %(target_ip)s with %(target_iqn)s.', + {'target_ip': target_ip, 'target_iqn': target_iqn}) return False else: for target in out.splitlines(): @@ -1571,8 +1571,8 @@ class InfortrendCommon(object): self._execute('SetPartition', 'expand', part_id, expand_command) - LOG.info(_LI( - 'Successfully extended volume %(volume_id)s to size %(size)s.'), { + LOG.info( + 'Successfully extended volume %(volume_id)s to size %(size)s.', { 'volume_id': volume['id'], 'size': new_size}) @lockutils.synchronized('connection', 'infortrend-', True) @@ -1612,9 +1612,9 @@ class InfortrendCommon(object): ) conn_info['data']['initiator_target_map'] = init_target_map - LOG.info(_LI( - 'Successfully terminated connection for volume: %(volume_id)s.'), { - 'volume_id': volume['id']}) + LOG.info( + 'Successfully terminated connection for volume: %(volume_id)s.', + {'volume_id': volume['id']}) return conn_info @@ -1632,14 +1632,14 @@ class InfortrendCommon(object): "provider_location": self._concat_provider_location(model_dict), } - LOG.info(_LI('Migrate Volume %(volume_id)s completed.'), { + LOG.info('Migrate Volume %(volume_id)s completed.', { 'volume_id': volume['id']}) return (True, model_update) def _is_valid_for_storage_assisted_migration(self, host): if 'pool_id' not in host['capabilities']: - LOG.warning(_LW('Failed to get target pool id.')) + LOG.warning('Failed to get target pool id.') return (False, None) dst_pool_id = host['capabilities']['pool_id'] @@ -1706,7 +1706,7 @@ class InfortrendCommon(object): self._execute('DeleteReplica', entry['Pair-ID'], '-y') except Exception: check_done = False - LOG.exception(_LE('Cannot detect replica status.')) + LOG.exception('Cannot detect replica status.') if check_done: raise loopingcall.LoopingCallDone() @@ -1781,7 +1781,7 @@ class InfortrendCommon(object): "provider_location": self._concat_provider_location(model_dict), } - LOG.info(_LI('Rename Volume %(volume_id)s completed.'), { + LOG.info('Rename Volume %(volume_id)s completed.', { 'volume_id': volume['id']}) return model_update @@ -1812,7 +1812,7 @@ class InfortrendCommon(object): new_vol_name = self._get_unmanaged_volume_name(volume_id) self._execute('SetPartition', part_id, 'name=%s' % new_vol_name) - LOG.info(_LI('Unmanage volume %(volume_id)s completed.'), { + LOG.info('Unmanage volume %(volume_id)s completed.', { 'volume_id': volume_id}) def _get_unmanaged_volume_name(self, volume_id): @@ -1876,16 +1876,16 @@ class InfortrendCommon(object): if volume['host'] != host['host']: if self._check_volume_attachment(volume): - LOG.warning(_LW( + LOG.warning( 'Volume %(volume_id)s cannot be retyped ' - 'during attachment.'), { + 'during attachment.', { 'volume_id': volume['id']}) return False if 
self._check_volume_has_snapshot(volume): - LOG.warning(_LW( + LOG.warning( 'Volume %(volume_id)s cannot be retyped ' - 'because it has snapshot.'), { + 'because it has snapshot.', { 'volume_id': volume['id']}) return False @@ -1894,9 +1894,9 @@ class InfortrendCommon(object): volume, host, new_extraspecs) if rc: - LOG.info(_LI( + LOG.info( 'Retype Volume %(volume_id)s is done ' - 'and migrated to pool %(pool_id)s.'), { + 'and migrated to pool %(pool_id)s.', { 'volume_id': volume['id'], 'pool_id': host['capabilities']['pool_id']}) @@ -1906,14 +1906,13 @@ class InfortrendCommon(object): (diff['extra_specs']['infortrend_provisioning'][0] != diff['extra_specs']['infortrend_provisioning'][1])): - LOG.warning(_LW( - 'The provisioning: %(provisioning)s ' - 'is not valid.'), { - 'provisioning': - diff['extra_specs']['infortrend_provisioning'][1]}) + LOG.warning( + 'The provisioning: %(provisioning)s is not valid.', + {'provisioning': + diff['extra_specs']['infortrend_provisioning'][1]}) return False - LOG.info(_LI('Retype Volume %(volume_id)s is completed.'), { + LOG.info('Retype Volume %(volume_id)s is completed.', { 'volume_id': volume['id']}) return True @@ -1936,12 +1935,12 @@ class InfortrendCommon(object): try: self._execute('SetPartition', part_id, 'name=%s' % src_volume_id) except exception.InfortrendCliException: - LOG.exception(_LE('Failed to rename %(new_volume)s into ' - '%(volume)s.'), {'new_volume': new_volume['id'], - 'volume': volume['id']}) + LOG.exception('Failed to rename %(new_volume)s into ' + '%(volume)s.', {'new_volume': new_volume['id'], + 'volume': volume['id']}) return {'_name_id': new_volume['_name_id'] or new_volume['id']} - LOG.info(_LI('Update migrated volume %(new_volume)s completed.'), { + LOG.info('Update migrated volume %(new_volume)s completed.', { 'new_volume': new_volume['id']}) model_update = { diff --git a/cinder/volume/drivers/kaminario/kaminario_common.py b/cinder/volume/drivers/kaminario/kaminario_common.py index 33c60c0d965..62952ed01d3 100644 --- a/cinder/volume/drivers/kaminario/kaminario_common.py +++ b/cinder/volume/drivers/kaminario/kaminario_common.py @@ -30,7 +30,7 @@ import six import cinder from cinder import exception -from cinder.i18n import _, _LE, _LW +from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder import utils @@ -216,7 +216,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): if vg_rs.total != 0: LOG.debug("Deleting vg: %s for failed volume in K2.", vg_name) vg_rs.hits[0].delete() - LOG.exception(_LE("Creation of volume %s failed."), vol_name) + LOG.exception("Creation of volume %s failed.", vol_name) raise exception.KaminarioCinderDriverException( reason=six.text_type(ex.message)) @@ -267,8 +267,8 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): src_ssn.state = "in_sync" src_ssn.save() except Exception as ex: - LOG.exception(_LE("Replication for the volume %s has " - "failed."), vol.name) + LOG.exception("Replication for the volume %s has " + "failed.", vol.name) self._delete_by_ref(self.client, "replication/sessions", session_name, 'session') self._delete_by_ref(self.target, "replication/sessions", @@ -327,8 +327,8 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): tgt_ssn.state = "in_sync" tgt_ssn.save() except Exception as ex: - LOG.exception(_LE("Replication for the volume %s has " - "failed."), rvol_name) + LOG.exception("Replication for the volume %s has " + "failed.", rvol_name) self._delete_by_ref(self.target, "replication/sessions", 
rsession_name, 'session') self._delete_by_ref(self.client, "replication/sessions", @@ -368,8 +368,8 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): 'cinder-volume') if secondary_id and secondary_id != self.replica.backend_id: - LOG.error(_LE("Kaminario driver received failover_host " - "request, But backend is non replicated device")) + LOG.error("Kaminario driver received failover_host " + "request, But backend is non replicated device") raise exception.UnableToFailOver(reason=_("Failover requested " "on non replicated " "backend.")) @@ -569,9 +569,9 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): source=snap, retention_policy=rpolicy, is_exposable=True).save() except Exception as ex: - LOG.exception(_LE("Creating a view: %(view)s from snapshot: " - "%(snap)s failed"), {"view": view_name, - "snap": snap_name}) + LOG.exception("Creating a view: %(view)s from snapshot: " + "%(snap)s failed", {"view": view_name, + "snap": snap_name}) raise exception.KaminarioCinderDriverException( reason=six.text_type(ex.message)) @@ -603,8 +603,8 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): self.terminate_connection(volume, properties) cview.delete() self.delete_volume(volume) - LOG.exception(_LE("Copy to volume: %(vol)s from view: %(view)s " - "failed"), {"vol": vol_name, "view": view_name}) + LOG.exception("Copy to volume: %(vol)s from view: %(view)s " + "failed", {"vol": vol_name, "view": view_name}) raise exception.KaminarioCinderDriverException( reason=six.text_type(ex.message)) @@ -650,7 +650,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): self.terminate_connection(src_vref, properties) self.terminate_connection(volume, properties) self.delete_volume(volume) - LOG.exception(_LE("Create a clone: %s failed."), clone_name) + LOG.exception("Create a clone: %s failed.", clone_name) raise exception.KaminarioCinderDriverException( reason=six.text_type(ex.message)) @@ -676,7 +676,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): if vg_rs.total != 0: vg_rs.hits[0].delete() except Exception as ex: - LOG.exception(_LE("Deletion of volume %s failed."), vol_name) + LOG.exception("Deletion of volume %s failed.", vol_name) raise exception.KaminarioCinderDriverException( reason=six.text_type(ex.message)) @@ -776,7 +776,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): source=vg, retention_policy=rpolicy, is_auto_deleteable=False).save() except Exception as ex: - LOG.exception(_LE("Creation of snapshot: %s failed."), snap_name) + LOG.exception("Creation of snapshot: %s failed.", snap_name) raise exception.KaminarioCinderDriverException( reason=six.text_type(ex.message)) @@ -790,7 +790,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): if snap_rs.total != 0: snap_rs.hits[0].delete() except Exception as ex: - LOG.exception(_LE("Deletion of snapshot: %s failed."), snap_name) + LOG.exception("Deletion of snapshot: %s failed.", snap_name) raise exception.KaminarioCinderDriverException( reason=six.text_type(ex.message)) @@ -805,7 +805,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): LOG.debug("Extending volume: %s in K2.", vol_name) vol.save() except Exception as ex: - LOG.exception(_LE("Extending volume: %s failed."), vol_name) + LOG.exception("Extending volume: %s failed.", vol_name) raise exception.KaminarioCinderDriverException( reason=six.text_type(ex.message)) @@ -882,7 +882,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): return 
self.client.search("retention_policies", name="Best_Effort_Retention").hits[0] except Exception as ex: - LOG.exception(_LE("Retention policy search failed in K2.")) + LOG.exception("Retention policy search failed in K2.") raise exception.KaminarioCinderDriverException( reason=six.text_type(ex.message)) @@ -941,7 +941,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): LOG.debug("Deleting initiator hostname: %s in K2.", host_name) host.delete() else: - LOG.warning(_LW("Host: %s not found on K2."), host_name) + LOG.warning("Host: %s not found on K2.", host_name) @kaminario_logger def k2_initialize_connection(self, volume, connector): @@ -960,9 +960,9 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): except Exception as ex: if host_rs.total == 0: self._delete_host_by_name(host_name) - LOG.exception(_LE("Unable to map volume: %(vol)s to host: " - "%(host)s"), {'host': host_name, - 'vol': vol.name}) + LOG.exception("Unable to map volume: %(vol)s to host: " + "%(host)s", {'host': host_name, + 'vol': vol.name}) raise exception.KaminarioCinderDriverException( reason=six.text_type(ex.message)) # Get lun number. @@ -1041,10 +1041,10 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): LOG.debug("Manage volume: %s in K2.", vol_name) vol.save() except exception.ManageExistingInvalidReference: - LOG.exception(_LE("manage volume: %s failed."), vol_name) + LOG.exception("manage volume: %s failed.", vol_name) raise except Exception: - LOG.exception(_LE("manage volume: %s failed."), vol_name) + LOG.exception("manage volume: %s failed.", vol_name) vg_rs = self.client.search("volume_groups", name=vg_new_name) if hasattr(vg_rs, 'hits') and vg_rs.total != 0: vg = vg_rs.hits[0] @@ -1111,8 +1111,8 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): LOG.debug(msg) return False else: - LOG.error(_LE('Change from type1: %(type1)s to type2: %(type2)s ' - 'is not supported directly in K2.'), + LOG.error('Change from type1: %(type1)s to type2: %(type2)s ' + 'is not supported directly in K2.', {'type1': old_type, 'type2': new_type}) return False diff --git a/cinder/volume/drivers/kaminario/kaminario_fc.py b/cinder/volume/drivers/kaminario/kaminario_fc.py index 5665362d5b9..021fceed612 100644 --- a/cinder/volume/drivers/kaminario/kaminario_fc.py +++ b/cinder/volume/drivers/kaminario/kaminario_fc.py @@ -19,7 +19,7 @@ from oslo_log import log as logging from cinder import coordination from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _ from cinder.objects import fields from cinder.volume.drivers.kaminario import kaminario_common as common from cinder.zonemanager import utils as fczm_utils @@ -138,7 +138,7 @@ class KaminarioFCDriver(common.KaminarioCinderDriver): host = self.client.new("hosts", name=host_name, type="Linux").save() except Exception as ex: - LOG.exception(_LE("Unable to create host : %s in K2."), + LOG.exception("Unable to create host : %s in K2.", host_name) raise exception.KaminarioCinderDriverException( reason=six.text_type(ex.message)) @@ -160,8 +160,8 @@ class KaminarioFCDriver(common.KaminarioCinderDriver): except Exception as ex: if host_rs.total == 0: self._delete_host_by_name(host_name) - LOG.exception(_LE("Unable to add wwpn : %(wwpn)s to " - "host: %(host)s in K2."), + LOG.exception("Unable to add wwpn : %(wwpn)s to " + "host: %(host)s in K2.", {'wwpn': wwpn, 'host': host_name}) raise exception.KaminarioCinderDriverException( reason=six.text_type(ex.message)) diff --git 
a/cinder/volume/drivers/kaminario/kaminario_iscsi.py b/cinder/volume/drivers/kaminario/kaminario_iscsi.py index 302d455afa1..6771991e5f6 100644 --- a/cinder/volume/drivers/kaminario/kaminario_iscsi.py +++ b/cinder/volume/drivers/kaminario/kaminario_iscsi.py @@ -19,7 +19,7 @@ from oslo_log import log as logging from cinder import coordination from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _ from cinder import interface from cinder.objects import fields from cinder.volume.drivers.kaminario import kaminario_common as common @@ -135,7 +135,7 @@ class KaminarioISCSIDriver(common.KaminarioCinderDriver): iqn.save() except Exception as ex: self._delete_host_by_name(host_name) - LOG.exception(_LE("Unable to create host: %s in K2."), + LOG.exception("Unable to create host: %s in K2.", host_name) raise exception.KaminarioCinderDriverException( reason=six.text_type(ex.message)) diff --git a/cinder/volume/drivers/lvm.py b/cinder/volume/drivers/lvm.py index 2271468db58..f0af9f39f21 100644 --- a/cinder/volume/drivers/lvm.py +++ b/cinder/volume/drivers/lvm.py @@ -29,7 +29,7 @@ import six from cinder.brick.local_dev import lvm as lvm from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder import objects @@ -203,8 +203,8 @@ class LVMVolumeDriver(driver.VolumeDriver): LOG.debug("Updating volume stats") if self.vg is None: - LOG.warning(_LW('Unable to update stats on non-initialized ' - 'Volume Group: %s'), + LOG.warning('Unable to update stats on non-initialized ' + 'Volume Group: %s', self.configuration.volume_group) return @@ -326,12 +326,12 @@ class LVMVolumeDriver(driver.VolumeDriver): if volutils.supports_thin_provisioning(): if self.vg.get_volume(pool_name) is not None: - LOG.info(_LI('Enabling LVM thin provisioning by default ' - 'because a thin pool exists.')) + LOG.info('Enabling LVM thin provisioning by default ' + 'because a thin pool exists.') self.configuration.lvm_type = 'thin' elif len(self.vg.get_volumes()) == 0: - LOG.info(_LI('Enabling LVM thin provisioning by default ' - 'because no LVs exist.')) + LOG.info('Enabling LVM thin provisioning by default ' + 'because no LVs exist.') self.configuration.lvm_type = 'thin' if self.configuration.lvm_type == 'thin': @@ -387,8 +387,8 @@ class LVMVolumeDriver(driver.VolumeDriver): try: self.vg.rename_volume(current_name, original_volume_name) except processutils.ProcessExecutionError: - LOG.error(_LE('Unable to rename the logical volume ' - 'for volume: %s'), volume['id']) + LOG.error('Unable to rename the logical volume ' + 'for volume: %s', volume['id']) # If the rename fails, _name_id should be set to the new # volume id and provider_location should be set to the # one from the new volume as well. 
@@ -432,12 +432,12 @@ class LVMVolumeDriver(driver.VolumeDriver): return True if self.vg.lv_has_snapshot(volume['name']): - LOG.error(_LE('Unable to delete due to existing snapshot ' - 'for volume: %s'), volume['name']) + LOG.error('Unable to delete due to existing snapshot ' + 'for volume: %s', volume['name']) raise exception.VolumeIsBusy(volume_name=volume['name']) self._delete_volume(volume) - LOG.info(_LI('Successfully deleted volume: %s'), volume['id']) + LOG.info('Successfully deleted volume: %s', volume['id']) def create_snapshot(self, snapshot): """Creates a snapshot.""" @@ -450,9 +450,9 @@ class LVMVolumeDriver(driver.VolumeDriver): """Deletes a snapshot.""" if self._volume_not_present(self._escape_snapshot(snapshot['name'])): # If the snapshot isn't present, then don't attempt to delete - LOG.warning(_LW("snapshot: %s not found, " - "skipping delete operations"), snapshot['name']) - LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id']) + LOG.warning("snapshot: %s not found, " + "skipping delete operations", snapshot['name']) + LOG.info('Successfully deleted snapshot: %s', snapshot['id']) return True # TODO(yamahata): zeroing out the whole snapshot triggers COW. @@ -499,7 +499,7 @@ class LVMVolumeDriver(driver.VolumeDriver): mirror_count = 0 if self.configuration.lvm_mirrors: mirror_count = self.configuration.lvm_mirrors - LOG.info(_LI('Creating clone of volume: %s'), src_vref['id']) + LOG.info('Creating clone of volume: %s', src_vref['id']) volume_name = src_vref['name'] temp_id = 'tmp-snap-%s' % volume['id'] temp_snapshot = {'volume_name': volume_name, @@ -769,7 +769,7 @@ class LVMVolumeDriver(driver.VolumeDriver): try: next(vg for vg in vg_list if vg['name'] == dest_vg) except StopIteration: - LOG.error(_LE("Destination Volume Group %s does not exist"), + LOG.error("Destination Volume Group %s does not exist", dest_vg) return false_ret @@ -801,8 +801,8 @@ class LVMVolumeDriver(driver.VolumeDriver): sparse=self._sparse_copy_volume) except Exception as e: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Volume migration failed due to " - "exception: %(reason)s."), + LOG.error("Volume migration failed due to " + "exception: %(reason)s.", {'reason': six.text_type(e)}, resource=volume) dest_vg_ref.delete(volume) self._delete_volume(volume) diff --git a/cinder/volume/drivers/nec/volume_helper.py b/cinder/volume/drivers/nec/volume_helper.py index 2ed74d08a36..dfa78eed90e 100644 --- a/cinder/volume/drivers/nec/volume_helper.py +++ b/cinder/volume/drivers/nec/volume_helper.py @@ -24,8 +24,8 @@ from oslo_utils import units from cinder import coordination from cinder import exception +from cinder.i18n import _ from cinder import volume -from cinder.i18n import _, _LE, _LI, _LW from cinder.volume.drivers.nec import cli from cinder.volume.drivers.nec import volume_common @@ -246,11 +246,11 @@ class MStorageDriver(object): % {'id': volume['id'], 'size': volume['size']}) try: self._create_volume(volume) - LOG.info(_LI('Created Volume (%s)'), msgparm) + LOG.info('Created Volume (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): - LOG.warning(_LW('Failed to Create Volume (%(msgparm)s) ' - '(%(exception)s)'), + LOG.warning('Failed to Create Volume (%(msgparm)s) ' + '(%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _create_volume(self, volume): @@ -331,11 +331,11 @@ class MStorageDriver(object): 'oldsize': volume['size']}) try: self._extend_volume(volume, new_size) - LOG.info(_LI('Extended Volume (%s)'), msgparm) 
+ LOG.info('Extended Volume (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): - LOG.warning(_LW('Failed to Extend Volume (%(msgparm)s) ' - '(%(exception)s)'), + LOG.warning('Failed to Extend Volume (%(msgparm)s) ' + '(%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _extend_volume(self, volume, new_size): @@ -388,11 +388,11 @@ class MStorageDriver(object): 'src_id': src_vref['id']}) try: self._create_cloned_volume(volume, src_vref) - LOG.info(_LI('Created Cloned Volume (%s)'), msgparm) + LOG.info('Created Cloned Volume (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): - LOG.warning(_LW('Failed to Create Cloned Volume ' - '(%(msgparm)s) (%(exception)s)'), + LOG.warning('Failed to Create Cloned Volume ' + '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _create_cloned_volume(self, volume, src_vref): @@ -511,12 +511,12 @@ class MStorageDriver(object): 'dsthost': host}) try: ret = self._migrate_volume(context, volume, host) - LOG.info(_LI('Migrated Volume (%s)'), msgparm) + LOG.info('Migrated Volume (%s)', msgparm) return ret except exception.CinderException as e: with excutils.save_and_reraise_exception(): - LOG.warning(_LW('Failed to Migrate Volume ' - '(%(msgparm)s) (%(exception)s)'), + LOG.warning('Failed to Migrate Volume ' + '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _migrate_volume(self, context, volume, host): @@ -608,12 +608,12 @@ class MStorageDriver(object): 'initiator': connector['initiator']}) try: ret = self._iscsi_do_export(_ctx, volume, connector, ensure) - LOG.info(_LI('Created iSCSI Export (%s)'), msgparm) + LOG.info('Created iSCSI Export (%s)', msgparm) return ret except exception.CinderException as e: with excutils.save_and_reraise_exception(): - LOG.warning(_LW('Failed to Create iSCSI Export ' - '(%(msgparm)s) (%(exception)s)'), + LOG.warning('Failed to Create iSCSI Export ' + '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _iscsi_do_export(self, _ctx, volume, connector, ensure): @@ -743,12 +743,12 @@ class MStorageDriver(object): 'wwpns': connector['wwpns']}) try: ret = self._fc_do_export(_ctx, volume, connector, ensure) - LOG.info(_LI('Created FC Export (%s)'), msgparm) + LOG.info('Created FC Export (%s)', msgparm) return ret except exception.CinderException as e: with excutils.save_and_reraise_exception(): - LOG.warning(_LW('Failed to Create FC Export ' - '(%(msgparm)s) (%(exception)s)'), + LOG.warning('Failed to Create FC Export ' + '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _fc_do_export(self, _ctx, volume, connector, ensure): @@ -859,11 +859,11 @@ class MStorageDriver(object): msgparm = 'Volume ID = %s' % volume['id'] try: self._remove_export(context, volume) - LOG.info(_LI('Removed Export (%s)'), msgparm) + LOG.info('Removed Export (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): - LOG.warning(_LW('Failed to Remove Export ' - '(%(msgparm)s) (%(exception)s)'), + LOG.warning('Failed to Remove Export ' + '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _remove_export(self, context, volume): @@ -961,12 +961,12 @@ class MStorageDriver(object): try: ret = self._iscsi_initialize_connection(volume, connector) - LOG.info(_LI('Initialized iSCSI Connection (%s)'), msgparm) + LOG.info('Initialized iSCSI Connection (%s)', msgparm) return ret except exception.CinderException as e: 
with excutils.save_and_reraise_exception(): - LOG.warning(_LW('Failed to Initialize iSCSI Connection ' - '(%(msgparm)s) (%(exception)s)'), + LOG.warning('Failed to Initialize iSCSI Connection ' + '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _iscsi_initialize_connection(self, volume, connector): @@ -1024,12 +1024,12 @@ class MStorageDriver(object): try: ret = self._iscsi_terminate_connection(volume, connector) - LOG.info(_LI('Terminated iSCSI Connection (%s)'), msgparm) + LOG.info('Terminated iSCSI Connection (%s)', msgparm) return ret except exception.CinderException as e: with excutils.save_and_reraise_exception(): - LOG.warning(_LW('Failed to Terminate iSCSI Connection ' - '(%(msgparm)s) (%(exception)s)'), + LOG.warning('Failed to Terminate iSCSI Connection ' + '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _iscsi_terminate_connection(self, volume, connector): @@ -1043,12 +1043,12 @@ class MStorageDriver(object): try: ret = self._fc_initialize_connection(volume, connector) - LOG.info(_LI('Initialized FC Connection (%s)'), msgparm) + LOG.info('Initialized FC Connection (%s)', msgparm) return ret except exception.CinderException as e: with excutils.save_and_reraise_exception(): - LOG.warning(_LW('Failed to Initialize FC Connection ' - '(%(msgparm)s) (%(exception)s)'), + LOG.warning('Failed to Initialize FC Connection ' + '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _fc_initialize_connection(self, volume, connector): @@ -1149,12 +1149,12 @@ class MStorageDriver(object): try: ret = self._fc_terminate_connection(volume, connector) - LOG.info(_LI('Terminated FC Connection (%s)'), msgparm) + LOG.info('Terminated FC Connection (%s)', msgparm) return ret except exception.CinderException as e: with excutils.save_and_reraise_exception(): - LOG.warning(_LW('Failed to Terminate FC Connection ' - '(%(msgparm)s) (%(exception)s)'), + LOG.warning('Failed to Terminate FC Connection ' + '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _fc_terminate_connection(self, volume, connector): @@ -1267,11 +1267,11 @@ class MStorageDriver(object): msgparm = 'Volume ID = %s' % volume['id'] try: self._delete_volume(volume) - LOG.info(_LI('Deleted Volume (%s)'), msgparm) + LOG.info('Deleted Volume (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): - LOG.warning(_LW('Failed to Delete Volume ' - '(%(msgparm)s) (%(exception)s)'), + LOG.warning('Failed to Delete Volume ' + '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _delete_volume(self, volume): @@ -1341,11 +1341,11 @@ class MStorageDriver(object): % {'id': snapshot['id'], 'vol_id': snapshot['volume_id']}) try: self._create_snapshot(snapshot) - LOG.info(_LI('Created Snapshot (%s)'), msgparm) + LOG.info('Created Snapshot (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): - LOG.warning(_LW('Failed to Create Snapshot ' - '(%(msgparm)s) (%(exception)s)'), + LOG.warning('Failed to Create Snapshot ' + '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _create_snapshot(self, snapshot): @@ -1354,7 +1354,7 @@ class MStorageDriver(object): {'id': snapshot['volume_id'], 'snap_id': snapshot['id']}) if len(self._properties['pool_backup_pools']) == 0: - LOG.error(_LE('backup_pools is not set.')) + LOG.error('backup_pools is not set.') raise exception.ParameterNotFound(param='backup_pools') xml = 
self._cli.view_all(self._properties['ismview_path']) @@ -1405,11 +1405,11 @@ class MStorageDriver(object): 'snapvol_id': snapshot['volume_id']}) try: self._create_volume_from_snapshot(volume, snapshot) - LOG.info(_LI('Created Volume from Snapshot (%s)'), msgparm) + LOG.info('Created Volume from Snapshot (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): - LOG.warning(_LW('Failed to Create Volume from Snapshot ' - '(%(msgparm)s) (%(exception)s)'), + LOG.warning('Failed to Create Volume from Snapshot ' + '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _create_volume_from_snapshot(self, volume, snapshot): @@ -1479,11 +1479,11 @@ class MStorageDriver(object): 'vol_id': snapshot['volume_id']}) try: self._delete_snapshot(snapshot) - LOG.info(_LI('Deleted Snapshot (%s)'), msgparm) + LOG.info('Deleted Snapshot (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): - LOG.warning(_LW('Failed to Delete Snapshot ' - '(%(msgparm)s) (%(exception)s)'), + LOG.warning('Failed to Delete Snapshot ' + '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _delete_snapshot(self, snapshot): @@ -1518,11 +1518,11 @@ class MStorageDSVDriver(MStorageDriver): try: self._create_snapshot(snapshot, self._properties['diskarray_name']) - LOG.info(_LI('Created Snapshot (%s)'), msgparm) + LOG.info('Created Snapshot (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): - LOG.warning(_LW('Failed to Create Snapshot ' - '(%(msgparm)s) (%(exception)s)'), + LOG.warning('Failed to Create Snapshot ' + '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) @coordination.synchronized('mstorage_bind_execute_{diskarray_name}') @@ -1537,7 +1537,7 @@ class MStorageDSVDriver(MStorageDriver): self._common.configs(xml)) if len(self._properties['pool_backup_pools']) == 0: - LOG.error(_LE('backup_pools is not set.')) + LOG.error('backup_pools is not set.') raise exception.ParameterNotFound(param='backup_pools') # get BV name. 
@@ -1564,11 +1564,11 @@ class MStorageDSVDriver(MStorageDriver): 'snapvol_id': snapshot['volume_id']}) try: self._delete_snapshot(snapshot) - LOG.info(_LI('Deleted Snapshot (%s)'), msgparm) + LOG.info('Deleted Snapshot (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): - LOG.warning(_LW('Failed to Delete Snapshot ' - '(%(msgparm)s) (%(exception)s)'), + LOG.warning('Failed to Delete Snapshot ' + '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _delete_snapshot(self, snapshot): @@ -1606,11 +1606,11 @@ class MStorageDSVDriver(MStorageDriver): 'snapvol_id': snapshot['volume_id']}) try: self._create_volume_from_snapshot(volume, snapshot) - LOG.info(_LI('Created Volume from Snapshot (%s)'), msgparm) + LOG.info('Created Volume from Snapshot (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): - LOG.warning(_LW('Failed to Create Volume from Snapshot ' - '(%(msgparm)s) (%(exception)s)'), + LOG.warning('Failed to Create Volume from Snapshot ' + '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _create_volume_from_snapshot(self, volume, snapshot): diff --git a/cinder/volume/drivers/netapp/common.py b/cinder/volume/drivers/netapp/common.py index 8414096ad8e..8d7422e5d50 100644 --- a/cinder/volume/drivers/netapp/common.py +++ b/cinder/volume/drivers/netapp/common.py @@ -23,7 +23,7 @@ from oslo_log import log as logging from oslo_utils import importutils from cinder import exception -from cinder.i18n import _, _LI +from cinder.i18n import _ from cinder.volume import driver from cinder.volume.drivers.netapp import options from cinder.volume.drivers.netapp import utils as na_utils @@ -75,7 +75,7 @@ class NetAppDriver(driver.ProxyVD): na_utils.check_flags(NetAppDriver.REQUIRED_FLAGS, config) app_version = na_utils.OpenStackInfo().info() - LOG.info(_LI('OpenStack OS Version Info: %(info)s'), + LOG.info('OpenStack OS Version Info: %(info)s', {'info': app_version}) kwargs['app_version'] = app_version @@ -92,8 +92,8 @@ class NetAppDriver(driver.ProxyVD): fmt = {'storage_family': storage_family, 'storage_protocol': storage_protocol} - LOG.info(_LI('Requested unified config: %(storage_family)s and ' - '%(storage_protocol)s.'), fmt) + LOG.info('Requested unified config: %(storage_family)s and ' + '%(storage_protocol)s.', fmt) family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family) if family_meta is None: @@ -110,6 +110,6 @@ class NetAppDriver(driver.ProxyVD): kwargs = kwargs or {} kwargs['netapp_mode'] = 'proxy' driver = importutils.import_object(driver_loc, *args, **kwargs) - LOG.info(_LI('NetApp driver of family %(storage_family)s and protocol ' - '%(storage_protocol)s loaded.'), fmt) + LOG.info('NetApp driver of family %(storage_family)s and protocol ' + '%(storage_protocol)s loaded.', fmt) return driver diff --git a/cinder/volume/drivers/netapp/dataontap/block_7mode.py b/cinder/volume/drivers/netapp/dataontap/block_7mode.py index d73206bd378..006a7327fbf 100644 --- a/cinder/volume/drivers/netapp/dataontap/block_7mode.py +++ b/cinder/volume/drivers/netapp/dataontap/block_7mode.py @@ -31,7 +31,7 @@ from oslo_utils import units import six from cinder import exception -from cinder.i18n import _, _LW +from cinder.i18n import _ from cinder import utils from cinder.volume import configuration from cinder.volume.drivers.netapp.dataontap import block_base @@ -165,8 +165,8 @@ class NetAppBlockStorage7modeLibrary(block_base.NetAppBlockStorageLibrary): volume_name = 
vol.get_child_content('name') if self._get_vol_option(volume_name, 'root') == 'true': return volume_name - LOG.warning(_LW('Could not determine root volume name ' - 'on %s.'), self._get_owner()) + LOG.warning('Could not determine root volume name on %s.', + self._get_owner()) return None def _get_owner(self): @@ -363,9 +363,9 @@ class NetAppBlockStorage7modeLibrary(block_base.NetAppBlockStorageLibrary): # Inform deprecation of legacy option. if self.configuration.safe_get('netapp_volume_list'): - msg = _LW("The option 'netapp_volume_list' is deprecated and " - "will be removed in the future releases. Please use " - "the option 'netapp_pool_name_search_pattern' instead.") + msg = ("The option 'netapp_volume_list' is deprecated and " + "will be removed in the future releases. Please use " + "the option 'netapp_pool_name_search_pattern' instead.") versionutils.report_deprecated_feature(LOG, msg) pool_regex = na_utils.get_pool_name_filter_regex(self.configuration) @@ -409,15 +409,15 @@ class NetAppBlockStorage7modeLibrary(block_base.NetAppBlockStorageLibrary): job_set = na_utils.set_safe_attr(self, 'vol_refresh_running', True) if not job_set: - LOG.warning(_LW("Volume refresh job already running. " - "Returning...")) + LOG.warning("Volume refresh job already running. " + "Returning...") return self.vol_refresh_voluntary = False self.vols = self.zapi_client.get_filer_volumes() self.volume_list = self._get_filtered_pools() self.vol_refresh_time = timeutils.utcnow() except Exception as e: - LOG.warning(_LW("Error refreshing volume info. Message: %s"), + LOG.warning("Error refreshing volume info. Message: %s", e) finally: na_utils.set_safe_attr(self, 'vol_refresh_running', False) diff --git a/cinder/volume/drivers/netapp/dataontap/block_base.py b/cinder/volume/drivers/netapp/dataontap/block_base.py index a10914f59fa..3321fc0fff0 100644 --- a/cinder/volume/drivers/netapp/dataontap/block_base.py +++ b/cinder/volume/drivers/netapp/dataontap/block_base.py @@ -37,7 +37,7 @@ from oslo_utils import units import six from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls @@ -134,10 +134,10 @@ class NetAppBlockStorageLibrary(object): divisor = self.configuration.netapp_size_multiplier reserved_ratio = round(1 - (1 / divisor), 2) reserved_percentage = 100 * int(reserved_ratio) - msg = _LW('The "netapp_size_multiplier" configuration option is ' - 'deprecated and will be removed in the Mitaka release. ' - 'Please set "reserved_percentage = %d" instead.') % ( - reserved_percentage) + msg = ('The "netapp_size_multiplier" configuration option is ' + 'deprecated and will be removed in the Mitaka release. 
' + 'Please set "reserved_percentage = %d" instead.') % ( + reserved_percentage) versionutils.report_deprecated_feature(LOG, msg) return reserved_percentage @@ -242,8 +242,7 @@ class NetAppBlockStorageLibrary(object): self._create_lun(pool_name, lun_name, size, metadata, qos_policy_group_name) except Exception: - LOG.exception(_LE("Exception creating LUN %(name)s in pool " - "%(pool)s."), + LOG.exception("Exception creating LUN %(name)s in pool %(pool)s.", {'name': lun_name, 'pool': pool_name}) self._mark_qos_policy_group_for_deletion(qos_policy_group_info) msg = _("Volume %s could not be created.") @@ -285,8 +284,8 @@ class NetAppBlockStorageLibrary(object): self.zapi_client.destroy_lun(metadata['Path']) self.lun_table.pop(lun_name) else: - LOG.warning(_LW("No entry in LUN table for volume/snapshot" - " %(name)s."), {'name': lun_name}) + LOG.warning("No entry in LUN table for volume/snapshot" + " %(name)s.", {'name': lun_name}) def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" @@ -362,16 +361,15 @@ class NetAppBlockStorageLibrary(object): qos_policy_group_name) except Exception: with excutils.save_and_reraise_exception(): - LOG.error( - _LE("Resizing %s failed. Cleaning volume."), - destination_volume['id']) + LOG.error("Resizing %s failed. Cleaning volume.", + destination_volume['id']) self.delete_volume(destination_volume) return self._get_volume_model_update(destination_volume) except Exception: - LOG.exception(_LE("Exception cloning volume %(name)s from source " - "volume %(source)s."), + LOG.exception("Exception cloning volume %(name)s from source " + "volume %(source)s.", {'name': destination_name, 'source': source_name}) self._mark_qos_policy_group_for_deletion(qos_policy_group_info) @@ -413,11 +411,11 @@ class NetAppBlockStorageLibrary(object): igroup_name, ig_host_os, ig_type = self._get_or_create_igroup( initiator_list, initiator_type, self.host_type) if ig_host_os != self.host_type: - LOG.warning(_LW("LUN misalignment may occur for current" - " initiator group %(ig_nm)s) with host OS type" - " %(ig_os)s. Please configure initiator group" - " manually according to the type of the" - " host OS."), + LOG.warning("LUN misalignment may occur for current" + " initiator group %(ig_nm)s) with host OS type" + " %(ig_os)s. Please configure initiator group" + " manually according to the type of the" + " host OS.", {'ig_nm': igroup_name, 'ig_os': ig_host_os}) try: return self.zapi_client.map_lun(path, igroup_name, lun_id=lun_id) @@ -508,9 +506,9 @@ class NetAppBlockStorageLibrary(object): attr = getattr(self._get_lun_from_table(name), attr) return attr except exception.VolumeNotFound as e: - LOG.error(_LE("Message: %s"), e.msg) + LOG.error("Message: %s", e.msg) except Exception as e: - LOG.error(_LE("Error getting LUN attribute. Exception: %s"), e) + LOG.error("Error getting LUN attribute. Exception: %s", e) return None def _create_lun_meta(self, lun): @@ -586,8 +584,8 @@ class NetAppBlockStorageLibrary(object): qos_policy_group_name=qos_policy_group_name) self.lun_table[name].size = new_size_bytes else: - LOG.info(_LI("No need to extend volume %s" - " as it is already the requested new size."), name) + LOG.info("No need to extend volume %s" + " as it is already the requested new size.", name) def _get_vol_option(self, volume_name, option_name): """Get the value for the volume option.""" @@ -606,7 +604,7 @@ class NetAppBlockStorageLibrary(object): Clones the block ranges, swaps the LUNs, and deletes the source LUN. 
""" seg = lun_path.split("/") - LOG.info(_LI("Resizing LUN %s using clone operation."), seg[-1]) + LOG.info("Resizing LUN %s using clone operation.", seg[-1]) lun_name = seg[-1] vol_name = seg[2] lun = self._get_lun_from_table(lun_name) @@ -640,7 +638,7 @@ class NetAppBlockStorageLibrary(object): """Try post sub clone resize in a transactional manner.""" st_tm_mv, st_nw_mv, st_del_old = None, None, None seg = path.split("/") - LOG.info(_LI("Post clone resize LUN %s"), seg[-1]) + LOG.info("Post clone resize LUN %s", seg[-1]) new_lun = 'new-%s' % (seg[-1]) tmp_lun = 'tmp-%s' % (seg[-1]) tmp_path = "/vol/%s/%s" % (seg[2], tmp_lun) @@ -660,12 +658,12 @@ class NetAppBlockStorageLibrary(object): raise exception.VolumeBackendAPIException( data=msg % (seg[-1])) elif st_del_old is None: - LOG.error(_LE("Failure deleting staged tmp LUN %s."), + LOG.error("Failure deleting staged tmp LUN %s.", tmp_lun) else: - LOG.error(_LE("Unknown exception in" - " post clone resize LUN %s."), seg[-1]) - LOG.error(_LE("Exception details: %s"), e) + LOG.error("Unknown exception in" + " post clone resize LUN %s.", seg[-1]) + LOG.error("Exception details: %s", e) def _get_lun_block_count(self, path): """Gets block counts for the LUN.""" @@ -706,8 +704,8 @@ class NetAppBlockStorageLibrary(object): path = lun.get_metadata_property('Path') if lun.name == volume['name']: new_path = path - LOG.info(_LI("LUN with given ref %s need not be renamed " - "during manage operation."), existing_ref) + LOG.info("LUN with given ref %s need not be renamed " + "during manage operation.", existing_ref) else: (rest, splitter, name) = path.rpartition('/') new_path = '%s/%s' % (rest, volume['name']) @@ -719,8 +717,8 @@ class NetAppBlockStorageLibrary(object): self.zapi_client.set_lun_qos_policy_group(new_path, qos_policy_group_name) self._add_lun_to_table(lun) - LOG.info(_LI("Manage operation completed for LUN with new path" - " %(path)s and uuid %(uuid)s."), + LOG.info("Manage operation completed for LUN with new path" + " %(path)s and uuid %(uuid)s.", {'path': lun.get_metadata_property('Path'), 'uuid': lun.get_metadata_property('UUID')}) @@ -777,8 +775,8 @@ class NetAppBlockStorageLibrary(object): Does not delete the underlying backend storage object. """ managed_lun = self._get_lun_from_table(volume['name']) - LOG.info(_LI("Unmanaged LUN with current path %(path)s and uuid " - "%(uuid)s."), + LOG.info("Unmanaged LUN with current path %(path)s and uuid " + "%(uuid)s.", {'path': managed_lun.get_metadata_property('Path'), 'uuid': managed_lun.get_metadata_property('UUID') or 'unknown'}) @@ -986,8 +984,7 @@ class NetAppBlockStorageLibrary(object): if not self._has_luns_mapped_to_initiators(initiators): # No more exports for this host, so tear down zone. 
- LOG.info(_LI("Need to remove FC Zone, building initiator " - "target map")) + LOG.info("Need to remove FC Zone, building initiator target map") target_wwpns, initiator_target_map, num_paths = ( self._build_initiator_target_map(connector)) @@ -1064,8 +1061,8 @@ class NetAppBlockStorageLibrary(object): except Exception: volumes_model_update.append( {'id': volume['id'], 'status': 'error_deleting'}) - LOG.exception(_LE("Volume %(vol)s in the consistency group " - "could not be deleted."), {'vol': volume}) + LOG.exception("Volume %(vol)s in the consistency group " + "could not be deleted.", {'vol': volume}) return model_update, volumes_model_update def update_consistencygroup(self, group, add_volumes=None, diff --git a/cinder/volume/drivers/netapp/dataontap/client/api.py b/cinder/volume/drivers/netapp/dataontap/client/api.py index a4d2c2d6b22..aae2c3e007e 100644 --- a/cinder/volume/drivers/netapp/dataontap/client/api.py +++ b/cinder/volume/drivers/netapp/dataontap/client/api.py @@ -31,7 +31,7 @@ import six from six.moves import urllib from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _ from cinder import ssh_utils from cinder import utils @@ -191,7 +191,7 @@ class NaServer(object): except urllib.error.HTTPError as e: raise NaApiError(e.code, e.msg) except Exception: - LOG.exception(_LE("Error communicating with NetApp filer.")) + LOG.exception("Error communicating with NetApp filer.") raise NaApiError('Unexpected error') response_xml = response.read() diff --git a/cinder/volume/drivers/netapp/dataontap/client/client_7mode.py b/cinder/volume/drivers/netapp/dataontap/client/client_7mode.py index 7dcb352eff6..9671fa9d007 100644 --- a/cinder/volume/drivers/netapp/dataontap/client/client_7mode.py +++ b/cinder/volume/drivers/netapp/dataontap/client/client_7mode.py @@ -22,7 +22,7 @@ from oslo_log import log as logging import six from cinder import exception -from cinder.i18n import _, _LW +from cinder.i18n import _ from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_base @@ -67,7 +67,7 @@ class Client(client_base.Client): node_client.send_request('ems-autosupport-log', message_dict) LOG.debug('EMS executed successfully.') except netapp_api.NaApiError as e: - LOG.warning(_LW('Failed to invoke EMS. %s'), e) + LOG.warning('Failed to invoke EMS. %s', e) def get_iscsi_target_details(self): """Gets the iSCSI target portal details.""" @@ -151,8 +151,8 @@ class Client(client_base.Client): if luns: lun_list.extend(luns) except netapp_api.NaApiError: - LOG.warning(_LW("Error finding LUNs for volume %s." - " Verify volume exists."), vol) + LOG.warning("Error finding LUNs for volume %s." 
+ " Verify volume exists.", vol) else: luns = self._get_vol_luns(None) lun_list.extend(luns) diff --git a/cinder/volume/drivers/netapp/dataontap/client/client_base.py b/cinder/volume/drivers/netapp/dataontap/client/client_base.py index 6468d146068..a6421408fee 100644 --- a/cinder/volume/drivers/netapp/dataontap/client/client_base.py +++ b/cinder/volume/drivers/netapp/dataontap/client/client_base.py @@ -23,7 +23,7 @@ from oslo_utils import excutils import six from cinder import exception -from cinder.i18n import _, _LE, _LW, _LI +from cinder.i18n import _ from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp import utils as na_utils @@ -106,8 +106,8 @@ class Client(object): self.connection.invoke_successfully(lun_create, True) except netapp_api.NaApiError as ex: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error provisioning volume %(lun_name)s on " - "%(volume_name)s. Details: %(ex)s"), + LOG.error("Error provisioning volume %(lun_name)s on " + "%(volume_name)s. Details: %(ex)s", {'lun_name': lun_name, 'volume_name': volume_name, 'ex': ex}) @@ -136,8 +136,8 @@ class Client(object): except netapp_api.NaApiError as e: code = e.code message = e.message - LOG.warning(_LW('Error mapping LUN. Code :%(code)s, Message: ' - '%(message)s'), {'code': code, 'message': message}) + LOG.warning('Error mapping LUN. Code :%(code)s, Message: ' + '%(message)s', {'code': code, 'message': message}) raise def unmap_lun(self, path, igroup_name): @@ -149,9 +149,9 @@ class Client(object): self.connection.invoke_successfully(lun_unmap, True) except netapp_api.NaApiError as e: exc_info = sys.exc_info() - LOG.warning(_LW("Error unmapping LUN. Code :%(code)s, Message: " - "%(message)s"), {'code': e.code, - 'message': e.message}) + LOG.warning("Error unmapping LUN. Code :%(code)s, Message: " + "%(message)s", {'code': e.code, + 'message': e.message}) # if the LUN is already unmapped if e.code == '13115' or e.code == '9016': pass @@ -178,7 +178,7 @@ class Client(object): def do_direct_resize(self, path, new_size_bytes, force=True): """Resize the LUN.""" seg = path.split("/") - LOG.info(_LI("Resizing LUN %s directly to new size."), seg[-1]) + LOG.info("Resizing LUN %s directly to new size.", seg[-1]) lun_resize = netapp_api.NaElement.create_node_with_children( 'lun-resize', **{'path': path, @@ -206,7 +206,7 @@ class Client(object): geometry['max_resize'] =\ result.get_child_content("max-resize-size") except Exception as e: - LOG.error(_LE("LUN %(path)s geometry failed. Message - %(msg)s"), + LOG.error("LUN %(path)s geometry failed. 
Message - %(msg)s", {'path': path, 'msg': e.message}) return geometry diff --git a/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py b/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py index edfe5a05e83..506d89e71e1 100644 --- a/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py +++ b/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py @@ -26,7 +26,7 @@ from oslo_utils import units import six from cinder import exception -from cinder.i18n import _, _LW, _LE +from cinder.i18n import _ from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_base @@ -205,7 +205,7 @@ class Client(client_base.Client): node_client.send_request('ems-autosupport-log', message_dict) LOG.debug('EMS executed successfully.') except netapp_api.NaApiError as e: - LOG.warning(_LW('Failed to invoke EMS. %s') % e) + LOG.warning('Failed to invoke EMS. %s', e) def get_iscsi_target_details(self): """Gets the iSCSI target portal details.""" @@ -617,9 +617,9 @@ class Client(client_base.Client): try: self.qos_policy_group_rename(current_name, new_name) except netapp_api.NaApiError as ex: - msg = _LW('Rename failure in cleanup of cDOT QOS policy group ' - '%(name)s: %(ex)s') - LOG.warning(msg, {'name': current_name, 'ex': ex}) + LOG.warning('Rename failure in cleanup of cDOT QOS policy ' + 'group %(name)s: %(ex)s', + {'name': current_name, 'ex': ex}) # Attempt to delete any QoS policies named "delete-openstack-*". self.remove_unused_qos_policy_groups() @@ -848,8 +848,7 @@ class Client(client_base.Client): LOG.debug('Not connected to cluster management LIF.') else: with excutils.save_and_reraise_exception(): - msg = _LE('Failed to get the list of nodes.') - LOG.exception(msg) + LOG.exception('Failed to get the list of nodes.') return False def get_operational_lif_addresses(self): @@ -1105,8 +1104,8 @@ class Client(client_base.Client): 'collected. 
This API requires cluster-scoped ' 'credentials.', {'name': flexvol_name}) else: - msg = _LE('Failed to get dedupe info for volume %s.') - LOG.exception(msg, flexvol_name) + LOG.exception('Failed to get dedupe info for volume %s.', + flexvol_name) return no_dedupe_response if self._get_record_count(result) != 1: @@ -1159,8 +1158,8 @@ class Client(client_base.Client): result = self.send_request('clone-split-status', {'volume-name': flexvol_name}) except netapp_api.NaApiError: - msg = _LE('Failed to get clone split info for volume %s.') - LOG.exception(msg, flexvol_name) + LOG.exception('Failed to get clone split info for volume %s.', + flexvol_name) return {'unsplit-size': 0, 'unsplit-clone-count': 0} clone_split_info = result.get_child_by_name( @@ -1195,8 +1194,8 @@ class Client(client_base.Client): try: result = self.send_iter_request('snapmirror-get-iter', api_args) except netapp_api.NaApiError: - msg = _LE('Failed to get SnapMirror info for volume %s.') - LOG.exception(msg, flexvol_name) + LOG.exception('Failed to get SnapMirror info for volume %s.', + flexvol_name) return False if not self._has_records(result): @@ -1230,8 +1229,8 @@ class Client(client_base.Client): try: result = self.send_iter_request('volume-get-iter', api_args) except netapp_api.NaApiError: - msg = _LE('Failed to get Encryption info for volume %s.') - LOG.exception(msg, flexvol_name) + LOG.exception('Failed to get Encryption info for volume %s.', + flexvol_name) return False if not self._has_records(result): @@ -1432,8 +1431,8 @@ class Client(client_base.Client): LOG.debug('Aggregate info can only be collected with ' 'cluster-scoped credentials.') else: - msg = _LE('Failed to get info for aggregate %s.') - LOG.exception(msg, aggregate_name) + LOG.exception('Failed to get info for aggregate %s.', + aggregate_name) return {} if len(aggrs) < 1: @@ -1508,8 +1507,8 @@ class Client(client_base.Client): LOG.debug('Disk types can only be collected with ' 'cluster scoped credentials.') else: - msg = _LE('Failed to get disk info for aggregate %s.') - LOG.exception(msg, aggregate_name) + LOG.exception('Failed to get disk info for aggregate %s.', + aggregate_name) return disk_types attributes_list = result.get_child_by_name( @@ -1560,8 +1559,8 @@ class Client(client_base.Client): LOG.debug('Aggregate capacity can only be collected with ' 'cluster scoped credentials.') else: - msg = _LE('Failed to get info for aggregate %s.') - LOG.exception(msg, aggregate_name) + LOG.exception('Failed to get info for aggregate %s.', + aggregate_name) return {} if len(aggrs) < 1: diff --git a/cinder/volume/drivers/netapp/dataontap/nfs_base.py b/cinder/volume/drivers/netapp/dataontap/nfs_base.py index 4a5a298a319..d1308ec06f9 100644 --- a/cinder/volume/drivers/netapp/dataontap/nfs_base.py +++ b/cinder/volume/drivers/netapp/dataontap/nfs_base.py @@ -37,7 +37,7 @@ import six from six.moves import urllib from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder.image import image_utils from cinder import utils from cinder.volume import driver @@ -157,8 +157,8 @@ class NetAppNfsDriver(driver.ManageableVD, model_update['provider_location'] = volume['provider_location'] return model_update except Exception: - LOG.exception(_LE("Exception creating vol %(name)s on " - "pool %(pool)s."), + LOG.exception("Exception creating vol %(name)s on " + "pool %(pool)s.", {'name': volume['name'], 'pool': volume['provider_location']}) # We need to set this for the model update in order for the @@ -204,8 +204,8 @@ 
class NetAppNfsDriver(driver.ManageableVD, return model_update except Exception: - LOG.exception(_LE("Exception creating volume %(name)s from source " - "%(source)s on share %(share)s."), + LOG.exception("Exception creating volume %(name)s from source " + "%(source)s on share %(share)s.", {'name': destination_volume['id'], 'source': source['name'], 'share': destination_volume['provider_location']}) @@ -229,8 +229,8 @@ class NetAppNfsDriver(driver.ManageableVD, self.extend_volume(destination_volume, destination_volume_size) except Exception: - LOG.error(_LE("Resizing %s failed. Cleaning " - "volume."), destination_volume['name']) + LOG.error("Resizing %s failed. Cleaning " + "volume.", destination_volume['name']) self._cleanup_volume_on_failure(destination_volume) raise exception.CinderException( _("Resizing clone %s failed.") @@ -336,8 +336,8 @@ class NetAppNfsDriver(driver.ManageableVD, tries += 1 if tries >= self.configuration.num_shell_tries: raise - LOG.exception(_LE("Recovering from a failed execute. " - "Try number %s"), tries) + LOG.exception("Recovering from a failed execute. " + "Try number %s", tries) time.sleep(tries ** 2) def _get_volume_path(self, nfs_share, volume_name): @@ -369,21 +369,21 @@ class NetAppNfsDriver(driver.ManageableVD, """Fetch the image from image_service and write it to the volume.""" super(NetAppNfsDriver, self).copy_image_to_volume( context, volume, image_service, image_id) - LOG.info(_LI('Copied image to volume %s using regular download.'), + LOG.info('Copied image to volume %s using regular download.', volume['id']) self._register_image_in_cache(volume, image_id) def _register_image_in_cache(self, volume, image_id): """Stores image in the cache.""" file_name = 'img-cache-%s' % image_id - LOG.info(_LI("Registering image in cache %s"), file_name) + LOG.info("Registering image in cache %s", file_name) try: self._do_clone_rel_img_cache( volume['name'], file_name, volume['provider_location'], file_name) except Exception as e: - LOG.warning(_LW('Exception while registering image %(image_id)s' - ' in cache. Exception: %(exc)s'), + LOG.warning('Exception while registering image %(image_id)s' + ' in cache. Exception: %(exc)s', {'image_id': image_id, 'exc': e}) def _find_image_in_cache(self, image_id): @@ -408,7 +408,7 @@ class NetAppNfsDriver(driver.ManageableVD, dir = self._get_mount_point_for_share(share) file_path = '%s/%s' % (dir, dst) if not os.path.exists(file_path): - LOG.info(_LI('Cloning from cache to destination %s'), dst) + LOG.info('Cloning from cache to destination %s', dst) self._clone_backing_file_for_volume(src, dst, volume_id=None, share=share) src_path = '%s/%s' % (dir, src) @@ -441,7 +441,7 @@ class NetAppNfsDriver(driver.ManageableVD, self._get_capacity_info(share) avl_percent = int((float(total_avl) / total_size) * 100) if avl_percent <= thres_size_perc_start: - LOG.info(_LI('Cleaning cache for share %s.'), share) + LOG.info('Cleaning cache for share %s.', share) eligible_files = self._find_old_cache_files(share) threshold_size = int( (thres_size_perc_stop * total_size) / 100) @@ -453,8 +453,8 @@ class NetAppNfsDriver(driver.ManageableVD, else: continue except Exception as e: - LOG.warning(_LW('Exception during cache cleaning' - ' %(share)s. Message - %(ex)s'), + LOG.warning('Exception during cache cleaning' + ' %(share)s. 
Message - %(ex)s', {'share': share, 'ex': e}) continue finally: @@ -511,7 +511,7 @@ class NetAppNfsDriver(driver.ManageableVD, self._execute(*cmd, run_as_root=self._execute_as_root) return True except Exception as ex: - LOG.warning(_LW('Exception during deleting %s'), ex) + LOG.warning('Exception during deleting %s', ex) return False def clone_image(self, context, volume, @@ -544,8 +544,8 @@ class NetAppNfsDriver(driver.ManageableVD, post_clone = self._post_clone_image(volume) except Exception as e: msg = e.msg if getattr(e, 'msg', None) else e - LOG.info(_LI('Image cloning unsuccessful for image' - ' %(image_id)s. Message: %(msg)s'), + LOG.info('Image cloning unsuccessful for image' + ' %(image_id)s. Message: %(msg)s', {'image_id': image_id, 'msg': msg}) finally: cloned = cloned and post_clone @@ -556,7 +556,7 @@ class NetAppNfsDriver(driver.ManageableVD, def _clone_from_cache(self, volume, image_id, cache_result): """Clones a copy from image cache.""" cloned = False - LOG.info(_LI('Cloning image %s from cache'), image_id) + LOG.info('Cloning image %s from cache', image_id) for res in cache_result: # Repeat tries in other shares if failed in some (share, file_name) = res @@ -570,13 +570,13 @@ class NetAppNfsDriver(driver.ManageableVD, volume['provider_location'] = share break except Exception: - LOG.warning(_LW('Unexpected exception during' - ' image cloning in share %s'), share) + LOG.warning('Unexpected exception during' + ' image cloning in share %s', share) return cloned def _direct_nfs_clone(self, volume, image_location, image_id): """Clone directly in nfs share.""" - LOG.info(_LI('Checking image clone %s from glance share.'), image_id) + LOG.info('Checking image clone %s from glance share.', image_id) cloned = False image_locations = self._construct_image_nfs_url(image_location) run_as_root = self._execute_as_root @@ -599,7 +599,7 @@ class NetAppNfsDriver(driver.ManageableVD, break else: LOG.info( - _LI('Image will locally be converted to raw %s'), + 'Image will locally be converted to raw %s', image_id) dst = '%s/%s' % (dir_path, volume['name']) image_utils.convert_image(img_path, dst, 'raw', @@ -619,7 +619,7 @@ class NetAppNfsDriver(driver.ManageableVD, def _post_clone_image(self, volume): """Do operations post image cloning.""" - LOG.info(_LI('Performing post clone for %s'), volume['name']) + LOG.info('Performing post clone for %s', volume['name']) vol_path = self.local_path(volume) if self._discover_file_till_timeout(vol_path): self._set_rw_permissions(vol_path) @@ -634,7 +634,7 @@ class NetAppNfsDriver(driver.ManageableVD, if self._is_file_size_equal(path, new_size): return else: - LOG.info(_LI('Resizing file to %sG'), new_size) + LOG.info('Resizing file to %sG', new_size) image_utils.resize_image(path, new_size, run_as_root=self._execute_as_root) if self._is_file_size_equal(path, new_size): @@ -669,7 +669,7 @@ class NetAppNfsDriver(driver.ManageableVD, return True else: if retry_seconds <= 0: - LOG.warning(_LW('Discover file retries exhausted.')) + LOG.warning('Discover file retries exhausted.') return False else: time.sleep(sleep_interval) @@ -727,8 +727,8 @@ class NetAppNfsDriver(driver.ManageableVD, share_candidates) return self._share_match_for_ip(ip, share_candidates) except Exception: - LOG.warning(_LW("Unexpected exception while " - "short listing used share.")) + LOG.warning("Unexpected exception while " + "short listing used share.") return None def _construct_image_nfs_url(self, image_location): @@ -770,7 +770,7 @@ class NetAppNfsDriver(driver.ManageableVD, def 
extend_volume(self, volume, new_size): """Extend an existing volume to the new size.""" - LOG.info(_LI('Extending volume %s.'), volume['name']) + LOG.info('Extending volume %s.', volume['name']) try: path = self.local_path(volume) @@ -829,7 +829,7 @@ class NetAppNfsDriver(driver.ManageableVD, @utils.synchronized(dest_path, external=True) def _move_file(src, dst): if os.path.exists(dst): - LOG.warning(_LW("Destination %s already exists."), dst) + LOG.warning("Destination %s already exists.", dst) return False self._execute('mv', src, dst, run_as_root=self._execute_as_root) return True @@ -837,7 +837,7 @@ class NetAppNfsDriver(driver.ManageableVD, try: return _move_file(source_path, dest_path) except Exception as e: - LOG.warning(_LW('Exception moving file %(src)s. Message - %(e)s'), + LOG.warning('Exception moving file %(src)s. Message - %(e)s', {'src': source_path, 'e': e}) return False @@ -1055,8 +1055,8 @@ class NetAppNfsDriver(driver.ManageableVD, """ vol_str = CONF.volume_name_template % volume['id'] vol_path = os.path.join(volume['provider_location'], vol_str) - LOG.info(_LI("Cinder NFS volume with current path \"%(cr)s\" is " - "no longer being managed."), {'cr': vol_path}) + LOG.info('Cinder NFS volume with current path "%(cr)s" is ' + 'no longer being managed.', {'cr': vol_path}) @utils.trace_method def create_consistencygroup(self, context, group): @@ -1088,8 +1088,8 @@ class NetAppNfsDriver(driver.ManageableVD, except Exception: volumes_model_update.append( {'id': volume['id'], 'status': 'error_deleting'}) - LOG.exception(_LE("Volume %(vol)s in the consistency group " - "could not be deleted."), {'vol': volume}) + LOG.exception("Volume %(vol)s in the consistency group " + "could not be deleted.", {'vol': volume}) return model_update, volumes_model_update @utils.trace_method @@ -1203,8 +1203,8 @@ class NetAppNfsDriver(driver.ManageableVD, flexvol_name, snapshot_name) self.zapi_client.delete_snapshot(flexvol_name, snapshot_name) else: - LOG.error(_LE("Unexpected set of parameters received when " - "creating consistency group from source.")) + LOG.error("Unexpected set of parameters received when " + "creating consistency group from source.") model_update = {} model_update['status'] = 'error' diff --git a/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py b/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py index 8cef3db0688..50734bd9b60 100644 --- a/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py +++ b/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py @@ -29,7 +29,7 @@ from oslo_utils import excutils import six from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder.objects import fields @@ -172,7 +172,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver, self._set_qos_policy_group_on_volume(volume, qos_policy_group_info) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Setting QoS for %s failed"), volume['id']) + LOG.error("Setting QoS for %s failed", volume['id']) if cleanup: LOG.debug("Cleaning volume %s", volume['id']) self._cleanup_volume_on_failure(volume) @@ -324,8 +324,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver, address = na_utils.resolve_hostname(host) if address not in vserver_addresses: - msg = _LW('Address not found for NFS share %s.') - LOG.warning(msg, share) + LOG.warning('Address not found for NFS share %s.', share) continue try: @@ -333,8 +332,7 @@ class 
NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver, flexvol_path=junction_path) pools[flexvol['name']] = {'pool_name': share} except exception.VolumeBackendAPIException: - msg = _LE('Flexvol not found for NFS share %s.') - LOG.exception(msg, share) + LOG.exception('Flexvol not found for NFS share %s.', share) return pools @@ -428,14 +426,14 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver, LOG.debug('Deleting backing file for volume %s.', volume['id']) self._delete_file(volume['id'], volume['name']) except Exception: - LOG.exception(_LE('Could not delete volume %s on backend, ' - 'falling back to exec of "rm" command.'), + LOG.exception('Could not delete volume %s on backend, ' + 'falling back to exec of "rm" command.', volume['id']) try: super(NetAppCmodeNfsDriver, self).delete_volume(volume) except Exception: - LOG.exception(_LE('Exec of "rm" command on backing file for ' - '%s was unsuccessful.'), volume['id']) + LOG.exception('Exec of "rm" command on backing file for ' + '%s was unsuccessful.', volume['id']) def _delete_file(self, file_id, file_name): (_vserver, flexvol) = self._get_export_ip_path(volume_id=file_id) @@ -455,15 +453,15 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver, LOG.debug('Deleting backing file for snapshot %s.', snapshot['id']) self._delete_file(snapshot['volume_id'], snapshot['name']) except Exception: - LOG.exception(_LE('Could not delete snapshot %s on backend, ' - 'falling back to exec of "rm" command.'), + LOG.exception('Could not delete snapshot %s on backend, ' + 'falling back to exec of "rm" command.', snapshot['id']) try: # delete_file_from_share super(NetAppCmodeNfsDriver, self).delete_snapshot(snapshot) except Exception: - LOG.exception(_LE('Exec of "rm" command on backing file for' - ' %s was unsuccessful.'), snapshot['id']) + LOG.exception('Exec of "rm" command on backing file for' + ' %s was unsuccessful.', snapshot['id']) @utils.trace_method def copy_image_to_volume(self, context, volume, image_service, image_id): @@ -478,8 +476,8 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver, copy_success = self._copy_from_cache(volume, image_id, cache_result) if copy_success: - LOG.info(_LI('Copied image %(img)s to volume %(vol)s ' - 'using local image cache.'), + LOG.info('Copied image %(img)s to volume %(vol)s ' + 'using local image cache.', {'img': image_id, 'vol': volume['id']}) # Image cache was not present, attempt copy offload workflow if (not copy_success and col_path and @@ -487,12 +485,12 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver, LOG.debug('No result found in image cache') self._copy_from_img_service(context, volume, image_service, image_id) - LOG.info(_LI('Copied image %(img)s to volume %(vol)s using' - ' copy offload workflow.'), + LOG.info('Copied image %(img)s to volume %(vol)s using' + ' copy offload workflow.', {'img': image_id, 'vol': volume['id']}) copy_success = True - except Exception as e: - LOG.exception(_LE('Copy offload workflow unsuccessful. %s'), e) + except Exception: + LOG.exception('Copy offload workflow unsuccessful.') finally: if not copy_success: super(NetAppCmodeNfsDriver, self).copy_image_to_volume( @@ -530,8 +528,8 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver, if copied: self._post_clone_image(volume) - except Exception as e: - LOG.exception(_LE('Error in workflow copy from cache. 
%s.'), e) + except Exception: + LOG.exception('Error in workflow copy from cache.') return copied def _find_image_location(self, cache_result, volume_id): diff --git a/cinder/volume/drivers/netapp/dataontap/performance/perf_7mode.py b/cinder/volume/drivers/netapp/dataontap/performance/perf_7mode.py index d1b2d2801a1..00f299c988d 100644 --- a/cinder/volume/drivers/netapp/dataontap/performance/perf_7mode.py +++ b/cinder/volume/drivers/netapp/dataontap/performance/perf_7mode.py @@ -18,7 +18,6 @@ Performance metrics functions and cache for NetApp 7-mode Data ONTAP systems. from oslo_log import log as logging -from cinder.i18n import _LE from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.performance import perf_base @@ -48,9 +47,9 @@ class Performance7modeLibrary(perf_base.PerformanceLibrary): 'avg_processor_busy')) except netapp_api.NaApiError: self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1' - LOG.exception(_LE('Could not get performance base counter ' - 'name. Performance-based scheduler ' - 'functions may not be available.')) + LOG.exception('Could not get performance base counter ' + 'name. Performance-based scheduler ' + 'functions may not be available.') def update_performance_cache(self): """Called periodically to update node utilization metrics.""" @@ -88,8 +87,8 @@ class Performance7modeLibrary(perf_base.PerformanceLibrary): self._get_node_utilization_wafl_counters() + self._get_node_utilization_processor_counters()) except netapp_api.NaApiError: - LOG.exception(_LE('Could not get utilization counters from node ' - '%s'), self.node_name) + LOG.exception('Could not get utilization counters from node ' + '%s', self.node_name) return None def _get_node_utilization_system_counters(self): diff --git a/cinder/volume/drivers/netapp/dataontap/performance/perf_base.py b/cinder/volume/drivers/netapp/dataontap/performance/perf_base.py index b037ff1d1a3..f88a9843a4a 100644 --- a/cinder/volume/drivers/netapp/dataontap/performance/perf_base.py +++ b/cinder/volume/drivers/netapp/dataontap/performance/perf_base.py @@ -19,7 +19,7 @@ Performance metrics functions and cache for NetApp systems. from oslo_log import log as logging from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _ LOG = logging.getLogger(__name__) @@ -83,8 +83,8 @@ class PerformanceLibrary(object): return max(min(100.0, node_utilization), 0) except Exception: - LOG.exception(_LE('Could not calculate node utilization for ' - 'node %s.'), node_name) + LOG.exception('Could not calculate node utilization for ' + 'node %s.', node_name) return DEFAULT_UTILIZATION def _get_kahuna_utilization(self, counters_t1, counters_t2): diff --git a/cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py b/cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py index d086019f025..2695602e7ee 100644 --- a/cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py +++ b/cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py @@ -18,7 +18,6 @@ Performance metrics functions and cache for NetApp cDOT systems. 
from oslo_log import log as logging -from cinder.i18n import _LE from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.performance import perf_base @@ -55,9 +54,9 @@ class PerformanceCmodeLibrary(perf_base.PerformanceLibrary): self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time' else: self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1' - LOG.exception(_LE('Could not get performance base counter ' - 'name. Performance-based scheduler ' - 'functions may not be available.')) + LOG.exception('Could not get performance base counter ' + 'name. Performance-based scheduler ' + 'functions may not be available.') def update_performance_cache(self, ssc_pools): """Called periodically to update per-pool node utilization metrics.""" @@ -147,8 +146,8 @@ class PerformanceCmodeLibrary(perf_base.PerformanceLibrary): self._get_node_utilization_wafl_counters(node_name) + self._get_node_utilization_processor_counters(node_name)) except netapp_api.NaApiError: - LOG.exception(_LE('Could not get utilization counters from node ' - '%s'), node_name) + LOG.exception('Could not get utilization counters from node %s', + node_name) return None def _get_node_utilization_system_counters(self, node_name): diff --git a/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py b/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py index 14353232d14..c13f05cd9ba 100644 --- a/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py +++ b/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py @@ -23,7 +23,7 @@ from oslo_log import log as logging import six from cinder import exception -from cinder.i18n import _, _LI, _LW +from cinder.i18n import _ LOG = logging.getLogger(__name__) @@ -81,10 +81,10 @@ class CapabilitiesLibrary(object): msg = _('User not permitted to query Data ONTAP volumes.') raise exception.VolumeBackendAPIException(data=msg) else: - LOG.warning(_LW('The configured user account does not have ' - 'sufficient privileges to use all needed ' - 'APIs. The following extra specs will fail ' - 'or be ignored: %s.'), invalid_extra_specs) + LOG.warning('The configured user account does not have ' + 'sufficient privileges to use all needed ' + 'APIs. The following extra specs will fail ' + 'or be ignored: %s.', invalid_extra_specs) def get_ssc(self): """Get a copy of the Storage Service Catalog.""" @@ -116,8 +116,8 @@ class CapabilitiesLibrary(object): The self.ssc attribute is updated with the following format. 
{ : {: }} """ - LOG.info(_LI("Updating storage service catalog information for " - "backend '%s'"), self.backend_name) + LOG.info("Updating storage service catalog information for " + "backend '%s'", self.backend_name) ssc = {} diff --git a/cinder/volume/drivers/netapp/dataontap/utils/data_motion.py b/cinder/volume/drivers/netapp/dataontap/utils/data_motion.py index 3ca99142f6d..daa94fe4b5e 100644 --- a/cinder/volume/drivers/netapp/dataontap/utils/data_motion.py +++ b/cinder/volume/drivers/netapp/dataontap/utils/data_motion.py @@ -24,9 +24,9 @@ from oslo_log import log from oslo_utils import excutils from cinder import exception -from cinder import utils -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder.objects import fields +from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.utils import utils as config_utils from cinder.volume import utils as volume_utils @@ -199,7 +199,7 @@ class DataMotionMixin(object): dest_vserver, dest_flexvol_name) except netapp_api.NaApiError: - LOG.exception(_LE("Could not re-sync SnapMirror.")) + LOG.exception("Could not re-sync SnapMirror.") def delete_snapmirror(self, src_backend_name, dest_backend_name, src_flexvol_name, dest_flexvol_name, release=True): @@ -241,7 +241,7 @@ class DataMotionMixin(object): if (e.code == netapp_api.EOBJECTNOTFOUND or e.code == netapp_api.ESOURCE_IS_DIFFERENT or ENTRY_DOES_NOT_EXIST in e.message): - LOG.info(_LI('No SnapMirror relationship to delete.')) + LOG.info('No SnapMirror relationship to delete.') exc_context.reraise = False if release: diff --git a/cinder/volume/drivers/netapp/eseries/client.py b/cinder/volume/drivers/netapp/eseries/client.py index 8b79791c616..974787eac39 100644 --- a/cinder/volume/drivers/netapp/eseries/client.py +++ b/cinder/volume/drivers/netapp/eseries/client.py @@ -34,7 +34,6 @@ from six.moves import urllib from cinder import exception from cinder.i18n import _ -from cinder.i18n import _LE import cinder.utils as cinder_utils from cinder.volume.drivers.netapp.eseries import exception as es_exception from cinder.volume.drivers.netapp.eseries import utils @@ -86,8 +85,8 @@ class WebserviceClient(object): # Catching error conditions other than the perceived ones. # Helps propagating only known exceptions back to the caller. except Exception as e: - LOG.exception(_LE("Unexpected error while invoking web service." - " Error - %s."), e) + LOG.exception("Unexpected error while invoking web service." 
+ " Error - %s.", e) raise exception.NetAppDriverException( _("Invoking web service failed.")) return response diff --git a/cinder/volume/drivers/netapp/eseries/library.py b/cinder/volume/drivers/netapp/eseries/library.py index 7718d55e1bf..2d111994c21 100644 --- a/cinder/volume/drivers/netapp/eseries/library.py +++ b/cinder/volume/drivers/netapp/eseries/library.py @@ -34,7 +34,7 @@ from oslo_utils import units import six from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import utils as cinder_utils from cinder.volume.drivers.netapp.eseries import client from cinder.volume.drivers.netapp.eseries import exception as eseries_exc @@ -222,10 +222,10 @@ class NetAppESeriesLibrary(object): def _check_multipath(self): if not self.configuration.use_multipath_for_image_xfer: - LOG.warning(_LW('Production use of "%(backend)s" backend requires ' - 'the Cinder controller to have multipathing ' - 'properly set up and the configuration option ' - '"%(mpflag)s" to be set to "True".'), + LOG.warning('Production use of "%(backend)s" backend requires ' + 'the Cinder controller to have multipathing ' + 'properly set up and the configuration option ' + '"%(mpflag)s" to be set to "True".', {'backend': self._backend_name, 'mpflag': 'use_multipath_for_image_xfer'}) @@ -241,14 +241,14 @@ class NetAppESeriesLibrary(object): try: host_group = self._client.get_host_group_by_name( utils.MULTI_ATTACH_HOST_GROUP_NAME) - LOG.info(_LI("The multi-attach E-Series host group '%(label)s' " - "already exists with clusterRef %(clusterRef)s"), + LOG.info("The multi-attach E-Series host group '%(label)s' " + "already exists with clusterRef %(clusterRef)s", host_group) except exception.NotFound: host_group = self._client.create_host_group( utils.MULTI_ATTACH_HOST_GROUP_NAME) - LOG.info(_LI("Created multi-attach E-Series host group %(label)s " - "with clusterRef %(clusterRef)s"), host_group) + LOG.info("Created multi-attach E-Series host group %(label)s " + "with clusterRef %(clusterRef)s", host_group) def _check_mode_get_or_register_storage_system(self): """Does validity checks for storage system registry and health.""" @@ -257,7 +257,7 @@ class NetAppESeriesLibrary(object): ip = na_utils.resolve_hostname(host) return ip except socket.gaierror as e: - LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.'), + LOG.error('Error resolving host %(host)s. 
Error - %(e)s.', {'host': host, 'e': e}) raise exception.NoValidBackend( _("Controller IP '%(host)s' could not be resolved: %(e)s.") @@ -269,10 +269,10 @@ class NetAppESeriesLibrary(object): host = na_utils.resolve_hostname( self.configuration.netapp_server_hostname) if host in ips: - LOG.info(_LI('Embedded mode detected.')) + LOG.info('Embedded mode detected.') system = self._client.list_storage_systems()[0] else: - LOG.info(_LI('Proxy mode detected.')) + LOG.info('Proxy mode detected.') system = self._client.register_storage_system( ips, password=self.configuration.netapp_sa_password) self._client.set_system_id(system.get('id')) @@ -330,8 +330,8 @@ class NetAppESeriesLibrary(object): self._client.list_storage_system() except exception.NetAppDriverException: with excutils.save_and_reraise_exception(): - LOG.info(_LI("System with controller addresses [%s] is not " - "registered with web service."), + LOG.info("System with controller addresses [%s] is not " + "registered with web service.", self.configuration.netapp_controller_ips) # Update the stored password @@ -352,11 +352,10 @@ class NetAppESeriesLibrary(object): # password was not in sync previously. if not (pass_status_valid and status_valid): if not pass_status_valid: - LOG.info(_LI('Waiting for web service to validate the ' - 'configured password.')) + LOG.info('Waiting for web service to validate the ' + 'configured password.') else: - LOG.info(_LI('Waiting for web service array ' - 'communication.')) + LOG.info('Waiting for web service array communication.') if int(time.time() - start_time) >= self.SA_COMM_TIMEOUT: if not status_valid: raise exception.NetAppDriverException( @@ -369,7 +368,7 @@ class NetAppESeriesLibrary(object): # The system was found to have a good status else: - LOG.info(_LI("System %(id)s has %(status)s status."), msg_dict) + LOG.info("System %(id)s has %(status)s status.", msg_dict) raise loopingcall.LoopingCallDone() checker = loopingcall.FixedIntervalLoopingCall(f=check_system_status) @@ -591,11 +590,10 @@ class NetAppESeriesLibrary(object): flash_cache=flash_cache, data_assurance=data_assurance, thin_provision=thin_provision) - LOG.info(_LI("Created volume with " - "label %s."), eseries_volume_label) + LOG.info("Created volume with label %s.", eseries_volume_label) except exception.NetAppDriverException as e: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error creating volume. Msg - %s."), e) + LOG.error("Error creating volume. Msg - %s.", e) # There was some kind failure creating the volume, make sure no # partial flawed work exists try: @@ -613,9 +611,9 @@ class NetAppESeriesLibrary(object): try: self._client.delete_volume(bad_vol["id"]) except exception.NetAppDriverException as e2: - LOG.error(_LE( + LOG.error( "Error cleaning up failed volume creation. " - "Msg - %s."), e2) + "Msg - %s.", e2) return vol @@ -630,10 +628,10 @@ class NetAppESeriesLibrary(object): try: vol = self._client.create_volume(pool['volumeGroupRef'], label, size_gb) - LOG.info(_LI("Created volume with label %s."), label) + LOG.info("Created volume with label %s.", label) return vol except exception.NetAppDriverException as e: - LOG.error(_LE("Error creating volume. Msg - %s."), e) + LOG.error("Error creating volume. 
Msg - %s.", e) msg = _("Failure creating volume %s.") raise exception.NetAppDriverException(msg % label) @@ -654,7 +652,7 @@ class NetAppESeriesLibrary(object): try: src_vol = self._create_snapshot_volume(image) self._copy_volume_high_priority_readonly(src_vol, dst_vol) - LOG.info(_LI("Created volume with label %s."), label) + LOG.info("Created volume with label %s.", label) except exception.NetAppDriverException: with excutils.save_and_reraise_exception(): self._client.delete_volume(dst_vol['volumeRef']) @@ -663,11 +661,10 @@ class NetAppESeriesLibrary(object): try: self._client.delete_snapshot_volume(src_vol['id']) except exception.NetAppDriverException as e: - LOG.error(_LE("Failure restarting snap vol. Error: %s."), - e) + LOG.error("Failure restarting snap vol. Error: %s.", e) else: - LOG.warning(_LW("Snapshot volume creation failed for " - "snapshot %s."), image['id']) + LOG.warning("Snapshot volume creation failed for " + "snapshot %s.", image['id']) return dst_vol @@ -681,7 +678,7 @@ class NetAppESeriesLibrary(object): def _copy_volume_high_priority_readonly(self, src_vol, dst_vol): """Copies src volume to dest volume.""" - LOG.info(_LI("Copying src vol %(src)s to dest vol %(dst)s."), + LOG.info("Copying src vol %(src)s to dest vol %(dst)s.", {'src': src_vol['label'], 'dst': dst_vol['label']}) job = None try: @@ -693,11 +690,11 @@ class NetAppESeriesLibrary(object): if (j_st['status'] in ['inProgress', 'pending', 'unknown']): return if j_st['status'] == 'failed' or j_st['status'] == 'halted': - LOG.error(_LE("Vol copy job status %s."), j_st['status']) + LOG.error("Vol copy job status %s.", j_st['status']) raise exception.NetAppDriverException( _("Vol copy job for dest %s failed.") % dst_vol['label']) - LOG.info(_LI("Vol copy job completed for dest %s."), + LOG.info("Vol copy job completed for dest %s.", dst_vol['label']) raise loopingcall.LoopingCallDone() @@ -710,12 +707,11 @@ class NetAppESeriesLibrary(object): try: self._client.delete_vol_copy_job(job['volcopyRef']) except exception.NetAppDriverException: - LOG.warning(_LW("Failure deleting " - "job %s."), job['volcopyRef']) + LOG.warning("Failure deleting job %s.", job['volcopyRef']) else: - LOG.warning(_LW('Volume copy job for src vol %s not found.'), + LOG.warning('Volume copy job for src vol %s not found.', src_vol['id']) - LOG.info(_LI('Copy job to dest vol %s completed.'), dst_vol['label']) + LOG.info('Copy job to dest vol %s completed.', dst_vol['label']) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" @@ -730,7 +726,7 @@ class NetAppESeriesLibrary(object): try: self._client.delete_snapshot_group(es_snapshot['pitGroupRef']) except exception.NetAppDriverException: - LOG.warning(_LW("Failure deleting temp snapshot %s."), + LOG.warning("Failure deleting temp snapshot %s.", es_snapshot['id']) def delete_volume(self, volume): @@ -739,7 +735,7 @@ class NetAppESeriesLibrary(object): vol = self._get_volume(volume['name_id']) self._client.delete_volume(vol['volumeRef']) except (exception.NetAppDriverException, exception.VolumeNotFound): - LOG.warning(_LW("Volume %s already deleted."), volume['id']) + LOG.warning("Volume %s already deleted.", volume['id']) return def _is_cgsnapshot(self, snapshot_image): @@ -902,7 +898,7 @@ class NetAppESeriesLibrary(object): utils.convert_uuid_to_es_fmt(uuid.uuid4())) snap_grp = self._create_snapshot_group(label, vol) - LOG.info(_LI("Created snap grp with label %s."), label) + LOG.info("Created snap grp with label %s.", label) # We couldn't retrieve 
or create a snapshot group if snap_grp is None: @@ -973,7 +969,7 @@ class NetAppESeriesLibrary(object): try: es_snapshot = self._get_snapshot(snapshot) except exception.NotFound: - LOG.warning(_LW("Snapshot %s already deleted."), snapshot['id']) + LOG.warning("Snapshot %s already deleted.", snapshot['id']) else: os_vol = snapshot['volume'] vol = self._get_volume(os_vol['name_id']) @@ -1034,8 +1030,7 @@ class NetAppESeriesLibrary(object): try: self._delete_snapshot_group(snap_grp_ref) except exception.NetAppDriverException as e: - LOG.warning(_LW("Unable to remove snapshot group - " - "%s."), e.msg) + LOG.warning("Unable to remove snapshot group - %s.", e.msg) return None, [snap_grp_ref] else: # Order by their sequence number, from oldest to newest @@ -1063,8 +1058,8 @@ class NetAppESeriesLibrary(object): try: self._delete_snapshot_group(snap_grp_ref) except exception.NetAppDriverException as e: - LOG.warning(_LW("Unable to remove snapshot group - " - "%s."), e.msg) + LOG.warning("Unable to remove snapshot group - %s.", + e.msg) return None, [snap_grp_ref] return {snap_grp_ref: repr(bitset)}, None @@ -1229,8 +1224,7 @@ class NetAppESeriesLibrary(object): if len(self._client.get_volume_mappings_for_host( host['hostRef'])) == 0: # No more exports for this host, so tear down zone. - LOG.info(_LI("Need to remove FC Zone, building initiator " - "target map.")) + LOG.info("Need to remove FC Zone, building initiator target map.") initiator_info = self._build_initiator_target_map_fc(connector) target_wwpns, initiator_target_map, num_paths = initiator_info @@ -1380,12 +1374,12 @@ class NetAppESeriesLibrary(object): host = self._client.update_host_type( host['hostRef'], ht_def) except exception.NetAppDriverException as e: - LOG.warning(_LW("Unable to update host type for host with " - "label %(l)s. %(e)s"), + LOG.warning("Unable to update host type for host with " + "label %(l)s. %(e)s", {'l': host['label'], 'e': e.msg}) return host except exception.NotFound as e: - LOG.warning(_LW("Message - %s."), e.msg) + LOG.warning("Message - %s.", e.msg) return self._create_host(port_ids, host_type) def _get_host_with_matching_port(self, port_ids): @@ -1407,7 +1401,7 @@ class NetAppESeriesLibrary(object): def _create_host(self, port_ids, host_type, host_group=None): """Creates host on system with given initiator as port_id.""" - LOG.info(_LI("Creating host with ports %s."), port_ids) + LOG.info("Creating host with ports %s.", port_ids) host_label = utils.convert_uuid_to_es_fmt(uuid.uuid4()) host_type = self._get_host_type_definition(host_type) port_type = self.driver_protocol.lower() @@ -1493,9 +1487,8 @@ class NetAppESeriesLibrary(object): def _create_asup(self, cinder_host): if not self._client.features.AUTOSUPPORT: - msg = _LI("E-series proxy API version %s does not support " - "autosupport logging.") - LOG.info(msg % self._client.api_version) + LOG.info("E-series proxy API version %s does not support " + "autosupport logging.", self._client.api_version) return event_source = ("Cinder driver %s" % self.DRIVER_NAME) @@ -1537,8 +1530,8 @@ class NetAppESeriesLibrary(object): The self._ssc_stats attribute is updated with the following format. 
{ : {: }} """ - LOG.info(_LI("Updating storage service catalog information for " - "backend '%s'"), self._backend_name) + LOG.info("Updating storage service catalog information for " + "backend '%s'", self._backend_name) relevant_pools = self._get_storage_pools() @@ -1552,12 +1545,12 @@ class NetAppESeriesLibrary(object): :param relevant_pools: The pools that this driver cares about """ - msg = _LI("E-series proxy API version %(version)s does not " - "support full set of SSC extra specs. The proxy version" - " must be at at least %(min_version)s.") - LOG.info(msg, {'version': self._client.api_version, - 'min_version': - self._client.features.SSC_API_V2.minimum_version}) + LOG.info("E-series proxy API version %(version)s does not " + "support full set of SSC extra specs. The proxy version" + " must be at at least %(min_version)s.", + {'version': self._client.api_version, + 'min_version': + self._client.features.SSC_API_V2.minimum_version}) self._ssc_stats = ( self._update_ssc_disk_encryption(relevant_pools)) @@ -1683,9 +1676,9 @@ class NetAppESeriesLibrary(object): # Inform deprecation of legacy option. if self.configuration.safe_get('netapp_storage_pools'): - msg = _LW("The option 'netapp_storage_pools' is deprecated and " - "will be removed in the future releases. Please use " - "the option 'netapp_pool_name_search_pattern' instead.") + msg = ("The option 'netapp_storage_pools' is deprecated and " + "will be removed in the future releases. Please use " + "the option 'netapp_pool_name_search_pattern' instead.") versionutils.report_deprecated_feature(LOG, msg) pool_regex = na_utils.get_pool_name_filter_regex(self.configuration) @@ -1721,8 +1714,8 @@ class NetAppESeriesLibrary(object): sorted_pools) if not avl_pools: - LOG.warning(_LW("No storage pool found with available capacity " - "%s."), size_gb) + LOG.warning("No storage pool found with available capacity %s.", + size_gb) return avl_pools def _is_thin_provisioned(self, volume): @@ -1779,11 +1772,11 @@ class NetAppESeriesLibrary(object): if complete: raise loopingcall.LoopingCallDone() else: - msg = _LI("Waiting for volume expansion of %(vol)s to " - "complete, current remaining actions are " - "%(action)s. ETA: %(eta)s mins.") - LOG.info(msg, {'vol': volume['name_id'], - 'action': ', '.join(actions), 'eta': eta}) + LOG.info("Waiting for volume expansion of %(vol)s to " + "complete, current remaining actions are " + "%(action)s. 
ETA: %(eta)s mins.", + {'vol': volume['name_id'], + 'action': ', '.join(actions), 'eta': eta}) checker = loopingcall.FixedIntervalLoopingCall( check_progress) @@ -1973,7 +1966,7 @@ class NetAppESeriesLibrary(object): volume_update = list() for volume in volumes: - LOG.info(_LI('Deleting volume %s.'), volume['id']) + LOG.info('Deleting volume %s.', volume['id']) volume_update.append({ 'status': 'deleted', 'id': volume['id'], }) @@ -1982,14 +1975,14 @@ class NetAppESeriesLibrary(object): try: cg = self._get_consistencygroup(group) except exception.ConsistencyGroupNotFound: - LOG.warning(_LW('Consistency group already deleted.')) + LOG.warning('Consistency group already deleted.') else: self._client.delete_consistency_group(cg['id']) try: self._merge_soft_delete_changes(None, [cg['id']]) except (exception.NetAppDriverException, eseries_exc.WebServiceException): - LOG.warning(_LW('Unable to remove CG from the deletion map.')) + LOG.warning('Unable to remove CG from the deletion map.') model_update = {'status': 'deleted'} @@ -2009,14 +2002,14 @@ class NetAppESeriesLibrary(object): for volume in remove_volumes: es_vol = self._get_volume(volume['id']) LOG.info( - _LI('Removing volume %(v)s from consistency group %(''cg)s.'), + 'Removing volume %(v)s from consistency group %(''cg)s.', {'v': es_vol['label'], 'cg': es_cg['label']}) self._client.remove_consistency_group_member(es_vol['id'], es_cg['id']) for volume in add_volumes: es_vol = self._get_volume(volume['id']) - LOG.info(_LI('Adding volume %(v)s to consistency group %(cg)s.'), + LOG.info('Adding volume %(v)s to consistency group %(cg)s.', {'v': es_vol['label'], 'cg': es_cg['label']}) self._client.add_consistency_group_member( es_vol['id'], es_cg['id']) @@ -2070,8 +2063,7 @@ class NetAppESeriesLibrary(object): """Removes tmp vols with no snapshots.""" try: if not na_utils.set_safe_attr(self, 'clean_job_running', True): - LOG.warning(_LW('Returning as clean tmp ' - 'vol job already running.')) + LOG.warning('Returning as clean tmp vol job already running.') return for vol in self._client.list_volumes(): @@ -2092,13 +2084,13 @@ class NetAppESeriesLibrary(object): vol = self._get_existing_vol_with_manage_ref(existing_ref) label = utils.convert_uuid_to_es_fmt(volume['id']) if label == vol['label']: - LOG.info(_LI("Volume with given ref %s need not be renamed during" - " manage operation."), existing_ref) + LOG.info("Volume with given ref %s need not be renamed during" + " manage operation.", existing_ref) managed_vol = vol else: managed_vol = self._client.update_volume(vol['id'], label) - LOG.info(_LI("Manage operation completed for volume with new label" - " %(label)s and wwn %(wwn)s."), + LOG.info("Manage operation completed for volume with new label" + " %(label)s and wwn %(wwn)s.", {'label': label, 'wwn': managed_vol[self.WORLDWIDENAME]}) def manage_existing_get_size(self, volume, existing_ref): @@ -2135,6 +2127,6 @@ class NetAppESeriesLibrary(object): message to indicate the volume is no longer under Cinder's control. 
""" managed_vol = self._get_volume(volume['id']) - LOG.info(_LI("Unmanaged volume with current label %(label)s and wwn " - "%(wwn)s."), {'label': managed_vol['label'], - 'wwn': managed_vol[self.WORLDWIDENAME]}) + LOG.info("Unmanaged volume with current label %(label)s and wwn " + "%(wwn)s.", {'label': managed_vol['label'], + 'wwn': managed_vol[self.WORLDWIDENAME]}) diff --git a/cinder/volume/drivers/netapp/utils.py b/cinder/volume/drivers/netapp/utils.py index 2c275d9edf6..38be5df238b 100644 --- a/cinder/volume/drivers/netapp/utils.py +++ b/cinder/volume/drivers/netapp/utils.py @@ -34,7 +34,7 @@ import six from cinder import context from cinder import exception -from cinder.i18n import _, _LE, _LW, _LI +from cinder.i18n import _ from cinder import utils from cinder import version from cinder.volume import qos_specs @@ -67,8 +67,8 @@ def validate_instantiation(**kwargs): """ if kwargs and kwargs.get('netapp_mode') == 'proxy': return - LOG.warning(_LW("It is not the recommended way to use drivers by NetApp. " - "Please use NetAppDriver to achieve the functionality.")) + LOG.warning("It is not the recommended way to use drivers by NetApp. " + "Please use NetAppDriver to achieve the functionality.") def check_flags(required_flags, configuration): @@ -142,14 +142,14 @@ def round_down(value, precision='0.00'): def log_extra_spec_warnings(extra_specs): for spec in (set(extra_specs.keys() if extra_specs else []) & set(OBSOLETE_SSC_SPECS.keys())): - LOG.warning(_LW('Extra spec %(old)s is obsolete. Use %(new)s ' - 'instead.'), {'old': spec, - 'new': OBSOLETE_SSC_SPECS[spec]}) + LOG.warning('Extra spec %(old)s is obsolete. Use %(new)s ' + 'instead.', {'old': spec, + 'new': OBSOLETE_SSC_SPECS[spec]}) for spec in (set(extra_specs.keys() if extra_specs else []) & set(DEPRECATED_SSC_SPECS.keys())): - LOG.warning(_LW('Extra spec %(old)s is deprecated. Use %(new)s ' - 'instead.'), {'old': spec, - 'new': DEPRECATED_SSC_SPECS[spec]}) + LOG.warning('Extra spec %(old)s is deprecated. 
Use %(new)s ' + 'instead.', {'old': spec, + 'new': DEPRECATED_SSC_SPECS[spec]}) def get_iscsi_connection_properties(lun_id, volume, iqn, @@ -283,7 +283,7 @@ def get_valid_qos_policy_group_info(volume, extra_specs=None): try: volume_type = get_volume_type_from_volume(volume) except KeyError: - LOG.exception(_LE('Cannot get QoS spec for volume %s.'), volume['id']) + LOG.exception('Cannot get QoS spec for volume %s.', volume['id']) return info if volume_type is None: return info @@ -409,7 +409,7 @@ class OpenStackInfo(object): "'%{version}\t%{release}\t%{vendor}'", self.PACKAGE_NAME) if not out: - LOG.info(_LI('No rpm info found for %(pkg)s package.'), { + LOG.info('No rpm info found for %(pkg)s package.', { 'pkg': self.PACKAGE_NAME}) return False parts = out.split() @@ -418,7 +418,7 @@ class OpenStackInfo(object): self._vendor = ' '.join(parts[2::]) return True except Exception as e: - LOG.info(_LI('Could not run rpm command: %(msg)s.'), {'msg': e}) + LOG.info('Could not run rpm command: %(msg)s.', {'msg': e}) return False # ubuntu, mirantis on ubuntu @@ -429,7 +429,7 @@ class OpenStackInfo(object): out, err = putils.execute("dpkg-query", "-W", "-f='${Version}'", self.PACKAGE_NAME) if not out: - LOG.info(_LI('No dpkg-query info found for %(pkg)s package.'), + LOG.info('No dpkg-query info found for %(pkg)s package.', {'pkg': self.PACKAGE_NAME}) return False # debian format: [epoch:]upstream_version[-debian_revision] @@ -447,7 +447,7 @@ class OpenStackInfo(object): self._vendor = _vendor return True except Exception as e: - LOG.info(_LI('Could not run dpkg-query command: %(msg)s.'), { + LOG.info('Could not run dpkg-query command: %(msg)s.', { 'msg': e}) return False diff --git a/cinder/volume/drivers/nexenta/iscsi.py b/cinder/volume/drivers/nexenta/iscsi.py index 3546b62ecfd..11f268677dc 100644 --- a/cinder/volume/drivers/nexenta/iscsi.py +++ b/cinder/volume/drivers/nexenta/iscsi.py @@ -18,7 +18,7 @@ from oslo_log import log as logging from oslo_utils import excutils from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.nexenta import jsonrpc @@ -136,8 +136,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver): 'target_name': target_name}) except exception.NexentaException as exc: if 'already' in exc.args[0]: - LOG.info(_LI('Ignored target creation error "%s" while ' - 'ensuring export.'), + LOG.info('Ignored target creation error "%s" while ' + 'ensuring export.', exc) else: raise @@ -146,8 +146,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver): self.nms.stmf.create_targetgroup(target_group_name) except exception.NexentaException as exc: if ('already' in exc.args[0]): - LOG.info(_LI('Ignored target group creation error "%s" ' - 'while ensuring export.'), + LOG.info('Ignored target group creation error "%s" ' + 'while ensuring export.', exc) else: raise @@ -158,8 +158,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver): target_name) except exception.NexentaException as exc: if ('already' in exc.args[0]): - LOG.info(_LI('Ignored target group member addition error ' - '"%s" while ensuring export.'), + LOG.info('Ignored target group member addition error ' + '"%s" while ensuring export.', exc) else: raise @@ -229,7 +229,7 @@ class NexentaISCSIDriver(driver.ISCSIDriver): :param volume: volume reference :param new_size: volume new size in GB """ - LOG.info(_LI('Extending volume: %(id)s New size: %(size)s GB'), + LOG.info('Extending volume: %(id)s New size: %(size)s GB', 
{'id': volume['id'], 'size': new_size}) self.nms.zvol.set_child_prop(self._get_zvol_name(volume['name']), 'volsize', '%sG' % new_size) @@ -245,12 +245,12 @@ class NexentaISCSIDriver(driver.ISCSIDriver): self.nms.zvol.destroy(volume_name, '') except exception.NexentaException as exc: if 'does not exist' in exc.args[0]: - LOG.info(_LI('Volume %s does not exist, it ' - 'seems it was already deleted.'), volume_name) + LOG.info('Volume %s does not exist, it ' + 'seems it was already deleted.', volume_name) return if 'zvol has children' in exc.args[0]: self._mark_as_garbage(volume_name) - LOG.info(_LI('Volume %s will be deleted later.'), volume_name) + LOG.info('Volume %s will be deleted later.', volume_name) return raise origin = props.get('origin') @@ -278,14 +278,14 @@ class NexentaISCSIDriver(driver.ISCSIDriver): (self._get_zvol_name(src_vref['name']), snapshot['name']))) except exception.NexentaException: with excutils.save_and_reraise_exception(): - LOG.exception(_LE( + LOG.exception( 'Volume creation failed, deleting created snapshot ' - '%(volume_name)s@%(name)s'), snapshot) + '%(volume_name)s@%(name)s', snapshot) try: self.delete_snapshot(snapshot) except (exception.NexentaException, exception.SnapshotIsBusy): - LOG.warning(_LW('Failed to delete zfs snapshot ' - '%(volume_name)s@%(name)s'), snapshot) + LOG.warning('Failed to delete zfs snapshot ' + '%(volume_name)s@%(name)s', snapshot) raise def _get_zfs_send_recv_cmd(self, src, dst): @@ -344,8 +344,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver): ssh_bound = True break if not ssh_bound: - LOG.warning(_LW("Remote NexentaStor appliance at %s should be " - "SSH-bound."), dst_host) + LOG.warning("Remote NexentaStor appliance at %s should be " + "SSH-bound.", dst_host) # Create temporary snapshot of volume on NexentaStor Appliance. snapshot = { @@ -364,22 +364,22 @@ class NexentaISCSIDriver(driver.ISCSIDriver): try: self.nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst)) except exception.NexentaException as exc: - LOG.warning(_LW("Cannot send source snapshot %(src)s to " - "destination %(dst)s. Reason: %(exc)s"), + LOG.warning("Cannot send source snapshot %(src)s to " + "destination %(dst)s. 
Reason: %(exc)s", {'src': src, 'dst': dst, 'exc': exc}) return false_ret finally: try: self.delete_snapshot(snapshot) except exception.NexentaException as exc: - LOG.warning(_LW("Cannot delete temporary source snapshot " - "%(src)s on NexentaStor Appliance: %(exc)s"), + LOG.warning("Cannot delete temporary source snapshot " + "%(src)s on NexentaStor Appliance: %(exc)s", {'src': src, 'exc': exc}) try: self.delete_volume(volume) except exception.NexentaException as exc: - LOG.warning(_LW("Cannot delete source volume %(volume)s on " - "NexentaStor Appliance: %(exc)s"), + LOG.warning("Cannot delete source volume %(volume)s on " + "NexentaStor Appliance: %(exc)s", {'volume': volume['name'], 'exc': exc}) dst_nms = self.get_nms_for_url(nms_url) @@ -388,8 +388,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver): try: dst_nms.snapshot.destroy(dst_snapshot, '') except exception.NexentaException as exc: - LOG.warning(_LW("Cannot delete temporary destination snapshot " - "%(dst)s on NexentaStor Appliance: %(exc)s"), + LOG.warning("Cannot delete temporary destination snapshot " + "%(dst)s on NexentaStor Appliance: %(exc)s", {'dst': dst_snapshot, 'exc': exc}) return True, None @@ -424,12 +424,10 @@ class NexentaISCSIDriver(driver.ISCSIDriver): src_backend = self.__class__.__name__ dst_backend = capabilities['location_info'].split(':')[0] if src_backend != dst_backend: - LOG.warning(_LW('Cannot retype from %(src_backend)s to ' - '%(dst_backend)s.'), - { - 'src_backend': src_backend, - 'dst_backend': dst_backend, - }) + LOG.warning('Cannot retype from %(src_backend)s to ' + '%(dst_backend)s.', + {'src_backend': src_backend, + 'dst_backend': dst_backend}) return False hosts = (volume['host'], host['host']) @@ -457,8 +455,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver): zvol, options[opt], new) retyped = True except exception.NexentaException: - LOG.error(_LE('Error trying to change %(opt)s' - ' from %(old)s to %(new)s'), + LOG.error('Error trying to change %(opt)s' + ' from %(old)s to %(new)s', {'opt': opt, 'old': old, 'new': new}) return False, None return retyped or migrated, None @@ -497,13 +495,13 @@ class NexentaISCSIDriver(driver.ISCSIDriver): self.nms.snapshot.destroy(snapshot_name, '') except exception.NexentaException as exc: if "does not exist" in exc.args[0]: - LOG.info(_LI('Snapshot %s does not exist, it seems it was ' - 'already deleted.'), snapshot_name) + LOG.info('Snapshot %s does not exist, it seems it was ' + 'already deleted.', snapshot_name) return elif "snapshot has dependent clones" in exc.args[0]: self._mark_as_garbage(snapshot_name) - LOG.info(_LI('Snapshot %s has dependent clones, will be ' - 'deleted later.'), snapshot_name) + LOG.info('Snapshot %s has dependent clones, will be ' + 'deleted later.', snapshot_name) return raise self._collect_garbage(volume_name) @@ -609,8 +607,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver): except exception.NexentaException as exc: if 'in use' not in exc.args[0]: raise - LOG.info(_LI('Ignored LU creation error "%s" while ensuring ' - 'export.'), exc) + LOG.info('Ignored LU creation error "%s" while ensuring ' + 'export.', exc) if not self._is_lu_shared(zvol_name): try: entry = self.nms.scsidisk.add_lun_mapping_entry(zvol_name, { @@ -618,8 +616,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver): except exception.NexentaException as exc: if 'view entry exists' not in exc.args[0]: raise - LOG.info(_LI('Ignored LUN mapping entry addition error "%s" ' - 'while ensuring export.'), exc) + LOG.info('Ignored LUN mapping entry addition error "%s" ' + 
'while ensuring export.', exc) model_update = {} if entry: provider_location = '%(host)s:%(port)s,1 %(name)s %(lun)s' % { diff --git a/cinder/volume/drivers/nexenta/nfs.py b/cinder/volume/drivers/nexenta/nfs.py index 367ca7bcf3a..5a98419198e 100644 --- a/cinder/volume/drivers/nexenta/nfs.py +++ b/cinder/volume/drivers/nexenta/nfs.py @@ -25,7 +25,7 @@ from oslo_utils import units from cinder import context from cinder import db from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import interface from cinder.volume.drivers.nexenta import jsonrpc from cinder.volume.drivers.nexenta import options @@ -144,12 +144,12 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 false_ret = (False, None) if volume['status'] not in ('available', 'retyping'): - LOG.warning(_LW("Volume status must be 'available' or 'retyping'." - " Current volume status: %s"), volume['status']) + LOG.warning("Volume status must be 'available' or 'retyping'." + " Current volume status: %s", volume['status']) return false_ret if 'capabilities' not in host: - LOG.warning(_LW("Unsupported host. No capabilities found")) + LOG.warning("Unsupported host. No capabilities found") return false_ret capabilities = host['capabilities'] @@ -171,8 +171,8 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 ns_shares[share] >= volume['size']): shares.append(share) if len(shares) == 0: - LOG.warning(_LW("Remote NexentaStor appliance at %s should be " - "SSH-bound."), share) + LOG.warning("Remote NexentaStor appliance at %s should be " + "SSH-bound.", share) return false_ret share = sorted(shares, key=ns_shares.get, reverse=True)[0] snapshot = { @@ -191,22 +191,22 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 try: nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst)) except exception.NexentaException as exc: - LOG.warning(_LW("Cannot send source snapshot %(src)s to " - "destination %(dst)s. Reason: %(exc)s"), + LOG.warning("Cannot send source snapshot %(src)s to " + "destination %(dst)s. 
Reason: %(exc)s", {'src': src, 'dst': dst, 'exc': exc}) return false_ret finally: try: self.delete_snapshot(snapshot) except exception.NexentaException as exc: - LOG.warning(_LW("Cannot delete temporary source snapshot " - "%(src)s on NexentaStor Appliance: %(exc)s"), + LOG.warning("Cannot delete temporary source snapshot " + "%(src)s on NexentaStor Appliance: %(exc)s", {'src': src, 'exc': exc}) try: self.delete_volume(volume) except exception.NexentaException as exc: - LOG.warning(_LW("Cannot delete source volume %(volume)s on " - "NexentaStor Appliance: %(exc)s"), + LOG.warning("Cannot delete source volume %(volume)s on " + "NexentaStor Appliance: %(exc)s", {'volume': volume['name'], 'exc': exc}) dst_nms = self._get_nms_for_url(capabilities['nms_url']) @@ -215,8 +215,8 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 try: dst_nms.snapshot.destroy(dst_snapshot, '') except exception.NexentaException as exc: - LOG.warning(_LW("Cannot delete temporary destination snapshot " - "%(dst)s on NexentaStor Appliance: %(exc)s"), + LOG.warning("Cannot delete temporary destination snapshot " + "%(dst)s on NexentaStor Appliance: %(exc)s", {'dst': dst_snapshot, 'exc': exc}) return True, {'provider_location': share} @@ -273,12 +273,10 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 src_backend = self.__class__.__name__ dst_backend = host['capabilities']['location_info'].split(':')[0] if src_backend != dst_backend: - LOG.warning(_LW('Cannot retype from %(src_backend)s to ' - '%(dst_backend)s.'), - { - 'src_backend': src_backend, - 'dst_backend': dst_backend - }) + LOG.warning('Cannot retype from %(src_backend)s to ' + '%(dst_backend)s.', + {'src_backend': src_backend, + 'dst_backend': dst_backend}) return False hosts = (volume['host'], host['host']) @@ -312,8 +310,8 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 folder, options[opt], new) retyped = True except exception.NexentaException: - LOG.error(_LE('Error trying to change %(opt)s' - ' from %(old)s to %(new)s'), + LOG.error('Error trying to change %(opt)s' + ' from %(old)s to %(new)s', {'opt': opt, 'old': old, 'new': new}) return False, None return retyped or migrated, model_update @@ -366,8 +364,8 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 try: nms.folder.destroy('%s/%s' % (vol, folder)) except exception.NexentaException: - LOG.warning(_LW("Cannot destroy created folder: " - "%(vol)s/%(folder)s"), + LOG.warning("Cannot destroy created folder: " + "%(vol)s/%(folder)s", {'vol': vol, 'folder': folder}) raise @@ -396,8 +394,8 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 try: nms.folder.destroy('%s/%s' % (vol, folder), '') except exception.NexentaException: - LOG.warning(_LW("Cannot destroy cloned folder: " - "%(vol)s/%(folder)s"), + LOG.warning("Cannot destroy cloned folder: " + "%(vol)s/%(folder)s", {'vol': vol, 'folder': folder}) raise @@ -418,7 +416,7 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 :param volume: new volume reference :param src_vref: source volume reference """ - LOG.info(_LI('Creating clone of volume: %s'), src_vref['id']) + LOG.info('Creating clone of volume: %s', src_vref['id']) snapshot = {'volume_name': src_vref['name'], 'volume_id': src_vref['id'], 'volume_size': src_vref['size'], @@ -430,13 +428,13 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 try: return self.create_volume_from_snapshot(volume, snapshot) except exception.NexentaException: - LOG.error(_LE('Volume creation failed, deleting 
created snapshot ' - '%(volume_name)s@%(name)s'), snapshot) + LOG.error('Volume creation failed, deleting created snapshot ' + '%(volume_name)s@%(name)s', snapshot) try: self.delete_snapshot(snapshot) except (exception.NexentaException, exception.SnapshotIsBusy): - LOG.warning(_LW('Failed to delete zfs snapshot ' - '%(volume_name)s@%(name)s'), snapshot) + LOG.warning('Failed to delete zfs snapshot ' + '%(volume_name)s@%(name)s', snapshot) raise def delete_volume(self, volume): @@ -458,8 +456,8 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 nms.folder.destroy(folder, '-r') except exception.NexentaException as exc: if 'does not exist' in exc.args[0]: - LOG.info(_LI('Folder %s does not exist, it was ' - 'already deleted.'), folder) + LOG.info('Folder %s does not exist, it was ' + 'already deleted.', folder) return raise self._get_capacity_info(nfs_share) @@ -469,8 +467,8 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 nms.snapshot.destroy(origin, '') except exception.NexentaException as exc: if 'does not exist' in exc.args[0]: - LOG.info(_LI('Snapshot %s does not exist, it was ' - 'already deleted.'), origin) + LOG.info('Snapshot %s does not exist, it was ' + 'already deleted.', origin) return raise @@ -480,7 +478,7 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 :param volume: volume reference :param new_size: volume new size in GB """ - LOG.info(_LI('Extending volume: %(id)s New size: %(size)s GB'), + LOG.info('Extending volume: %(id)s New size: %(size)s GB', {'id': volume['id'], 'size': new_size}) nfs_share = volume['provider_location'] nms = self.share2nms[nfs_share] @@ -529,20 +527,16 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 nms.snapshot.destroy('%s@%s' % (folder, snapshot['name']), '') except exception.NexentaException as exc: if 'does not exist' in exc.args[0]: - LOG.info(_LI('Snapshot %(folder)s@%(snapshot)s does not ' - 'exist, it was already deleted.'), - { - 'folder': folder, - 'snapshot': snapshot, - }) + LOG.info('Snapshot %(folder)s@%(snapshot)s does not ' + 'exist, it was already deleted.', + {'folder': folder, + 'snapshot': snapshot}) return elif 'has dependent clones' in exc.args[0]: - LOG.info(_LI('Snapshot %(folder)s@%(snapshot)s has dependent ' - 'clones, it will be deleted later.'), - { - 'folder': folder, - 'snapshot': snapshot, - }) + LOG.info('Snapshot %(folder)s@%(snapshot)s has dependent ' + 'clones, it will be deleted later.', + {'folder': folder, + 'snapshot': snapshot}) return def _create_sparsed_file(self, nms, path, size): @@ -571,8 +565,8 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 block_size_mb = 1 block_count = size * units.Gi / (block_size_mb * units.Mi) - LOG.info(_LI('Creating regular file: %s.' - 'This may take some time.'), path) + LOG.info('Creating regular file: %s.' + 'This may take some time.', path) nms.appliance.execute( 'dd if=/dev/zero of=%(path)s bs=%(bs)dM count=%(count)d' % { @@ -582,7 +576,7 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 } ) - LOG.info(_LI('Regular file: %s created.'), path) + LOG.info('Regular file: %s created.', path) def _set_rw_permissions_for_all(self, nms, path): """Sets 666 permissions for the path. @@ -661,8 +655,8 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 share_opts = share_info[2].strip() if len(share_info) > 2 else None if not re.match(r'.+:/.+', share_address): - LOG.warning(_LW("Share %s ignored due to invalid format. 
" - "Must be of form address:/export."), + LOG.warning("Share %s ignored due to invalid format. " + "Must be of form address:/export.", share_address) continue @@ -696,7 +690,7 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 self._remotefsclient.mount(nfs_share, mnt_flags) else: if mount_path in self._remotefsclient._read_mounts(): - LOG.info(_LI('Already mounted: %s'), mount_path) + LOG.info('Already mounted: %s', mount_path) return self._execute('mkdir', '-p', mount_path, @@ -706,16 +700,15 @@ class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 return except Exception as e: if attempt == (num_attempts - 1): - LOG.error(_LE('Mount failure for %(share)s after ' - '%(count)d attempts.'), { - 'share': nfs_share, - 'count': num_attempts}) + LOG.error('Mount failure for %(share)s after ' + '%(count)d attempts.', + {'share': nfs_share, + 'count': num_attempts}) raise exception.NfsException(six.text_type(e)) LOG.warning( - _LW('Mount attempt %(attempt)d failed: %(error)s. ' - 'Retrying mount ...'), { - 'attempt': attempt, - 'error': e}) + 'Mount attempt %(attempt)d failed: %(error)s. ' + 'Retrying mount ...', + {'attempt': attempt, 'error': e}) greenthread.sleep(1) def _mount_subfolders(self): diff --git a/cinder/volume/drivers/nexenta/ns5/iscsi.py b/cinder/volume/drivers/nexenta/ns5/iscsi.py index dc9dab1cf7d..dc8ebfa9476 100644 --- a/cinder/volume/drivers/nexenta/ns5/iscsi.py +++ b/cinder/volume/drivers/nexenta/ns5/iscsi.py @@ -21,7 +21,7 @@ from oslo_utils import units from cinder import context from cinder import db from cinder import exception -from cinder.i18n import _, _LI, _LE, _LW +from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.nexenta.ns5 import jsonrpc @@ -184,8 +184,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver): self.nef.delete(url) except exception.NexentaException as exc: # We assume that volume is gone - LOG.warning(_LW('Got error trying to delete volume %(volume)s,' - ' assuming it is already gone: %(exc)s'), + LOG.warning('Got error trying to delete volume %(volume)s,' + ' assuming it is already gone: %(exc)s', {'volume': volume, 'exc': exc}) def extend_volume(self, volume, new_size): @@ -194,7 +194,7 @@ class NexentaISCSIDriver(driver.ISCSIDriver): :param volume: volume reference :param new_size: volume new size in GB """ - LOG.info(_LI('Extending volume: %(id)s New size: %(size)s GB'), + LOG.info('Extending volume: %(id)s New size: %(size)s GB', {'id': volume['id'], 'size': new_size}) pool, group, name = self._get_volume_path(volume).split('/') url = ('storage/pools/%(pool)s/volumeGroups/%(group)s/' @@ -211,7 +211,7 @@ class NexentaISCSIDriver(driver.ISCSIDriver): :param snapshot: snapshot reference """ snapshot_vol = self._get_snapshot_volume(snapshot) - LOG.info(_LI('Creating snapshot %(snap)s of volume %(vol)s'), { + LOG.info('Creating snapshot %(snap)s of volume %(vol)s', { 'snap': snapshot['name'], 'vol': snapshot_vol['name'] }) @@ -230,7 +230,7 @@ class NexentaISCSIDriver(driver.ISCSIDriver): :param snapshot: snapshot reference """ - LOG.info(_LI('Deleting snapshot: %s'), snapshot['name']) + LOG.info('Deleting snapshot: %s', snapshot['name']) snapshot_vol = self._get_snapshot_volume(snapshot) volume_path = self._get_volume_path(snapshot_vol) pool, group, volume = volume_path.split('/') @@ -245,8 +245,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver): self.nef.delete(url) except exception.NexentaException as exc: if 'EBUSY' in exc.args[0]: - LOG.warning(_LW( - 'Could not 
delete snapshot %s - it has dependencies'), + LOG.warning( + 'Could not delete snapshot %s - it has dependencies', snapshot['name']) else: LOG.warning(exc) @@ -257,7 +257,7 @@ class NexentaISCSIDriver(driver.ISCSIDriver): :param volume: reference of volume to be created :param snapshot: reference of source snapshot """ - LOG.info(_LI('Creating volume from snapshot: %s'), snapshot['name']) + LOG.info('Creating volume from snapshot: %s', snapshot['name']) snapshot_vol = self._get_snapshot_volume(snapshot) volume_path = self._get_volume_path(snapshot_vol) pool, group, snapshot_vol = volume_path.split('/') @@ -289,15 +289,14 @@ class NexentaISCSIDriver(driver.ISCSIDriver): try: self.create_volume_from_snapshot(volume, snapshot) except exception.NexentaException: - LOG.error(_LE('Volume creation failed, deleting created snapshot ' - '%s'), '@'.join( - [snapshot['volume_name'], snapshot['name']])) + LOG.error('Volume creation failed, deleting created snapshot %s', + '@'.join([snapshot['volume_name'], snapshot['name']])) try: self.delete_snapshot(snapshot) except (exception.NexentaException, exception.SnapshotIsBusy): - LOG.warning(_LW('Failed to delete zfs snapshot ' - '%s'), '@'.join( - [snapshot['volume_name'], snapshot['name']])) + LOG.warning('Failed to delete zfs snapshot %s', + '@'.join([snapshot['volume_name'], + snapshot['name']])) raise def _get_snapshot_volume(self, snapshot): diff --git a/cinder/volume/drivers/nexenta/ns5/nfs.py b/cinder/volume/drivers/nexenta/ns5/nfs.py index f053a921fd0..d5d40f3efce 100644 --- a/cinder/volume/drivers/nexenta/ns5/nfs.py +++ b/cinder/volume/drivers/nexenta/ns5/nfs.py @@ -22,7 +22,7 @@ from oslo_utils import units from cinder import context from cinder import db from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import interface from cinder.volume.drivers.nexenta.ns5 import jsonrpc from cinder.volume.drivers.nexenta import options @@ -182,8 +182,8 @@ class NexentaNfsDriver(nfs.NfsDriver): pool, '%2F'.join([self._escape_path(fs), volume['name']])) self.nef.delete(url) except exception.NexentaException: - LOG.warning(_LW("Cannot destroy created folder: " - "%(vol)s/%(folder)s"), + LOG.warning("Cannot destroy created folder: " + "%(vol)s/%(folder)s", {'vol': pool, 'folder': '/'.join( [fs, volume['name']])}) raise @@ -235,7 +235,7 @@ class NexentaNfsDriver(nfs.NfsDriver): :param volume: volume reference :param new_size: volume new size in GB """ - LOG.info(_LI('Extending volume: %(id)s New size: %(size)s GB'), + LOG.info('Extending volume: %(id)s New size: %(size)s GB', {'id': volume['id'], 'size': new_size}) if self.sparsed_volumes: self._execute('truncate', '-s', '%sG' % new_size, @@ -284,8 +284,8 @@ class NexentaNfsDriver(nfs.NfsDriver): self.nef.delete(url) except exception.NexentaException as exc: if 'EBUSY' is exc: - LOG.warning(_LW( - 'Could not delete snapshot %s - it has dependencies'), + LOG.warning( + 'Could not delete snapshot %s - it has dependencies', snapshot['name']) def create_volume_from_snapshot(self, volume, snapshot): @@ -320,10 +320,10 @@ class NexentaNfsDriver(nfs.NfsDriver): } self.nef.delete(url) except exception.NexentaException: - LOG.warning(_LW("Cannot destroy cloned filesystem: " - "%(vol)s/%(filesystem)s"), + LOG.warning("Cannot destroy cloned filesystem: " + "%(vol)s/%(filesystem)s", {'vol': dataset_path, - 'filesystem': volume['name']}) + 'filesystem': volume['name']}) raise if volume['size'] > snapshot['volume_size']: new_size = volume['size'] @@ -338,7 +338,7 @@ 
class NexentaNfsDriver(nfs.NfsDriver): :param volume: new volume reference :param src_vref: source volume reference """ - LOG.info(_LI('Creating clone of volume: %s'), src_vref['id']) + LOG.info('Creating clone of volume: %s', src_vref['id']) snapshot = {'volume_name': src_vref['name'], 'volume_id': src_vref['id'], 'volume_size': src_vref['size'], @@ -347,13 +347,13 @@ class NexentaNfsDriver(nfs.NfsDriver): try: return self.create_volume_from_snapshot(volume, snapshot) except exception.NexentaException: - LOG.error(_LE('Volume creation failed, deleting created snapshot ' - '%(volume_name)s@%(name)s'), snapshot) + LOG.error('Volume creation failed, deleting created snapshot ' + '%(volume_name)s@%(name)s', snapshot) try: self.delete_snapshot(snapshot) except (exception.NexentaException, exception.SnapshotIsBusy): - LOG.warning(_LW('Failed to delete zfs snapshot ' - '%(volume_name)s@%(name)s'), snapshot) + LOG.warning('Failed to delete zfs snapshot ' + '%(volume_name)s@%(name)s', snapshot) raise def local_path(self, volume): diff --git a/cinder/volume/drivers/nfs.py b/cinder/volume/drivers/nfs.py index 4b85aafdaa7..1ec00d00bef 100644 --- a/cinder/volume/drivers/nfs.py +++ b/cinder/volume/drivers/nfs.py @@ -26,7 +26,7 @@ from oslo_utils import units import six from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder import utils @@ -209,10 +209,10 @@ class NfsDriver(remotefs.RemoteFSSnapDriver, driver.ExtendVD): return except Exception as e: if attempt == (num_attempts - 1): - LOG.error(_LE('Mount failure for %(share)s after ' - '%(count)d attempts.'), { - 'share': nfs_share, - 'count': num_attempts}) + LOG.error('Mount failure for %(share)s after ' + '%(count)d attempts.', + {'share': nfs_share, + 'count': num_attempts}) raise exception.NfsException(six.text_type(e)) LOG.debug('Mount attempt %(attempt)d failed: %(exc)s.\n' 'Retrying mount ...', @@ -355,7 +355,7 @@ class NfsDriver(remotefs.RemoteFSSnapDriver, driver.ExtendVD): def extend_volume(self, volume, new_size): """Extend an existing volume to the new size.""" - LOG.info(_LI('Extending volume %s.'), volume.id) + LOG.info('Extending volume %s.', volume.id) extend_by = int(new_size) - volume.size if not self._is_share_eligible(volume.provider_location, extend_by): @@ -363,7 +363,7 @@ class NfsDriver(remotefs.RemoteFSSnapDriver, driver.ExtendVD): ' extend volume %s to %sG' % (volume.id, new_size)) path = self.local_path(volume) - LOG.info(_LI('Resizing file to %sG...'), new_size) + LOG.info('Resizing file to %sG...', new_size) image_utils.resize_image(path, new_size, run_as_root=self._execute_as_root) if not self._is_file_size_equal(path, new_size): @@ -405,11 +405,11 @@ class NfsDriver(remotefs.RemoteFSSnapDriver, driver.ExtendVD): self.configuration.nas_secure_file_permissions) if self.configuration.nas_secure_file_permissions == 'false': - LOG.warning(_LW("The NAS file permissions mode will be 666 " - "(allowing other/world read & write access). " - "This is considered an insecure NAS environment. " - "Please see %s for information on a secure " - "NFS configuration."), + LOG.warning("The NAS file permissions mode will be 666 " + "(allowing other/world read & write access). " + "This is considered an insecure NAS environment. 
" + "Please see %s for information on a secure " + "NFS configuration.", doc_html) self.configuration.nas_secure_file_operations = \ @@ -431,11 +431,11 @@ class NfsDriver(remotefs.RemoteFSSnapDriver, driver.ExtendVD): self.configuration.nas_secure_file_operations) if self.configuration.nas_secure_file_operations == 'false': - LOG.warning(_LW("The NAS file operations will be run as " - "root: allowing root level access at the storage " - "backend. This is considered an insecure NAS " - "environment. Please see %s " - "for information on a secure NAS configuration."), + LOG.warning("The NAS file operations will be run as " + "root: allowing root level access at the storage " + "backend. This is considered an insecure NAS " + "environment. Please see %s " + "for information on a secure NAS configuration.", doc_html) def update_migrated_volume(self, ctxt, volume, new_volume, @@ -463,8 +463,8 @@ class NfsDriver(remotefs.RemoteFSSnapDriver, driver.ExtendVD): try: os.rename(current_path, original_path) except OSError: - LOG.error(_LE('Unable to rename the logical volume ' - 'for volume: %s'), volume.id) + LOG.error('Unable to rename the logical volume ' + 'for volume: %s', volume.id) # If the rename fails, _name_id should be set to the new # volume id and provider_location should be set to the # one from the new volume as well. @@ -513,8 +513,8 @@ class NfsDriver(remotefs.RemoteFSSnapDriver, driver.ExtendVD): {'vol': volume.id, 'loc': volume.provider_location}) if not volume.provider_location: - LOG.warning(_LW('Volume %s does not have provider_location ' - 'specified, skipping'), volume.name) + LOG.warning('Volume %s does not have provider_location ' + 'specified, skipping', volume.name) return info_path = self._local_path_volume_info(volume) diff --git a/cinder/volume/drivers/nimble.py b/cinder/volume/drivers/nimble.py index 9c54b3fffcf..a51ca5940fe 100644 --- a/cinder/volume/drivers/nimble.py +++ b/cinder/volume/drivers/nimble.py @@ -34,10 +34,10 @@ from oslo_log import log as logging from oslo_utils import units from cinder import exception -from cinder import utils -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import interface from cinder.objects import volume +from cinder import utils from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.volume import volume_types @@ -195,8 +195,8 @@ class NimbleBaseVolumeDriver(san.SanDriver): volume_name_prefix, "") if BACKUP_VOL_PREFIX + parent_vol_id in snap_info[ 'description']: - LOG.info(_LI('Nimble backup-snapshot exists name=%(' - 'name)s'), {'name': snap_info['name']}) + LOG.info('Nimble backup-snapshot exists name=%(' + 'name)s', {'name': snap_info['name']}) snap_vol_name = self.APIExecutor.get_volume_name( snap_info['vol_id']) LOG.debug("snap_vol_name %(snap)s", @@ -318,14 +318,14 @@ class NimbleBaseVolumeDriver(san.SanDriver): raise NimbleAPIException(_("Unable to enable" " GST")) self._group_target_enabled = True - LOG.info(_LI("Group Scoped Target enabled for " - "group %(group)s: %(ip)s"), + LOG.info("Group Scoped Target enabled for " + "group %(group)s: %(ip)s", {'group': group_info['name'], 'ip': self.configuration.san_ip}) elif 'group_target_enabled' not in group_info: - LOG.info(_LI("Group Scoped Target NOT " - "present for group %(group)s: " - "%(ip)s"), + LOG.info("Group Scoped Target NOT " + "present for group %(group)s: " + "%(ip)s", {'group': group_info['name'], 'ip': self.configuration.san_ip}) else: @@ -374,8 +374,8 @@ class NimbleBaseVolumeDriver(san.SanDriver): 
def extend_volume(self, volume, new_size): """Extend an existing volume.""" volume_name = volume['name'] - LOG.info(_LI('Entering extend_volume volume=%(vol)s ' - 'new_size=%(size)s'), + LOG.info('Entering extend_volume volume=%(vol)s ' + 'new_size=%(size)s', {'vol': volume_name, 'size': new_size}) vol_size = int(new_size) * units.Ki reserve = not self.configuration.san_thin_provision @@ -424,7 +424,7 @@ class NimbleBaseVolumeDriver(san.SanDriver): raise exception.InvalidVolume(reason=msg) new_vol_name = volume['name'] - LOG.info(_LI("Volume status before managing it : %(status)s"), + LOG.info("Volume status before managing it : %(status)s", {'status': vol_info['online']}) if vol_info['online'] is True: msg = (_('Volume %s is online. Set volume to offline for ' @@ -502,9 +502,9 @@ class NimbleBaseVolumeDriver(san.SanDriver): group_info = self.APIExecutor.get_group_info() self._enable_group_scoped_target(group_info) except Exception: - LOG.error(_LE('Failed to create REST client. ' - 'Check san_ip, username, password' - ' and make sure the array version is compatible')) + LOG.error('Failed to create REST client. ' + 'Check san_ip, username, password' + ' and make sure the array version is compatible') raise self._update_existing_vols_agent_type(context) @@ -560,8 +560,8 @@ class NimbleBaseVolumeDriver(san.SanDriver): AGENT_TYPE_OPENSTACK}} self.APIExecutor.edit_vol(vol.name, data) except NimbleAPIException: - LOG.warning(_LW('Error updating agent-type for ' - 'volume %s.'), vol.name) + LOG.warning('Error updating agent-type for ' + 'volume %s.', vol.name) raise def _get_model_info(self, volume_name): @@ -579,8 +579,8 @@ class NimbleBaseVolumeDriver(san.SanDriver): def _create_igroup_for_initiator(self, initiator_name, wwpns): """Creates igroup for an initiator and returns the igroup name.""" igrp_name = 'openstack-' + self._generate_random_string(12) - LOG.info(_LI('Creating initiator group %(grp)s ' - 'with initiator %(iname)s'), + LOG.info('Creating initiator group %(grp)s ' + 'with initiator %(iname)s', {'grp': igrp_name, 'iname': initiator_name}) if self._storage_protocol == "iSCSI": self.APIExecutor.create_initiator_group(igrp_name) @@ -605,12 +605,12 @@ class NimbleBaseVolumeDriver(san.SanDriver): {'initiator': initiator_wwpns, 'wwpns': wwpns_list}) if set(initiator_wwpns) == set(wwpns_list): - LOG.info(_LI('igroup %(grp)s found for ' - 'initiator %(wwpns_list)s'), + LOG.info('igroup %(grp)s found for ' + 'initiator %(wwpns_list)s', {'grp': initiator_group['name'], 'wwpns_list': wwpns_list}) return initiator_group['name'] - LOG.info(_LI('No igroup found for initiators %s'), initiator_wwpns) + LOG.info('No igroup found for initiators %s', initiator_wwpns) return '' def _get_igroupname_for_initiator(self, initiator_name): @@ -620,22 +620,22 @@ class NimbleBaseVolumeDriver(san.SanDriver): if (len(initiator_group['iscsi_initiators']) == 1 and initiator_group['iscsi_initiators'][0]['iqn'] == initiator_name): - LOG.info(_LI('igroup %(grp)s found for ' - 'initiator %(iname)s'), + LOG.info('igroup %(grp)s found for ' + 'initiator %(iname)s', {'grp': initiator_group['name'], 'iname': initiator_name}) return initiator_group['name'] - LOG.info(_LI('No igroup found for initiator %s'), initiator_name) + LOG.info('No igroup found for initiator %s', initiator_name) return '' def get_lun_number(self, volume, initiator_group_name): vol_info = self.APIExecutor.get_vol_info(volume['name']) for acl in vol_info['access_control_records']: if (initiator_group_name == acl['initiator_group_name']): - 
LOG.info(_LI("access_control_record =%(acl)s"), + LOG.info("access_control_record =%(acl)s", {'acl': acl}) lun = acl['lun'] - LOG.info(_LI("LUN : %(lun)s"), {"lun": lun}) + LOG.info("LUN : %(lun)s", {"lun": lun}) return lun raise NimbleAPIException(_("Lun number not found for volume %(vol)s " "with initiator_group: %(igroup)s") % @@ -663,8 +663,8 @@ class NimbleISCSIDriver(NimbleBaseVolumeDriver, san.SanISCSIDriver): def initialize_connection(self, volume, connector): """Driver entry point to attach a volume to an instance.""" - LOG.info(_LI('Entering initialize_connection volume=%(vol)s' - ' connector=%(conn)s location=%(loc)s'), + LOG.info('Entering initialize_connection volume=%(vol)s' + ' connector=%(conn)s location=%(loc)s', {'vol': volume, 'conn': connector, 'loc': volume['provider_location']}) @@ -674,8 +674,8 @@ class NimbleISCSIDriver(NimbleBaseVolumeDriver, san.SanISCSIDriver): if not initiator_group_name: initiator_group_name = self._create_igroup_for_initiator( initiator_name, None) - LOG.info(_LI('Initiator group name is %(grp)s for initiator ' - '%(iname)s'), + LOG.info('Initiator group name is %(grp)s for initiator ' + '%(iname)s', {'grp': initiator_group_name, 'iname': initiator_name}) self.APIExecutor.add_acl(volume, initiator_group_name) (iscsi_portal, iqn) = volume['provider_location'].split() @@ -697,8 +697,8 @@ class NimbleISCSIDriver(NimbleBaseVolumeDriver, san.SanISCSIDriver): def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to unattach a volume from an instance.""" - LOG.info(_LI('Entering terminate_connection volume=%(vol)s' - ' connector=%(conn)s location=%(loc)s.'), + LOG.info('Entering terminate_connection volume=%(vol)s' + ' connector=%(conn)s location=%(loc)s.', {'vol': volume['name'], 'conn': connector, 'loc': volume['provider_location']}) @@ -726,7 +726,7 @@ class NimbleISCSIDriver(NimbleBaseVolumeDriver, san.SanISCSIDriver): target_ipaddr = self._get_discovery_ip(netconfig) iscsi_portal = target_ipaddr + ':3260' provider_location = '%s %s' % (iscsi_portal, iqn) - LOG.info(_LI('vol_name=%(name)s provider_location=%(loc)s'), + LOG.info('vol_name=%(name)s provider_location=%(loc)s', {'name': volume_name, 'loc': provider_location}) return provider_location @@ -737,24 +737,24 @@ class NimbleISCSIDriver(NimbleBaseVolumeDriver, san.SanISCSIDriver): {'netlabel': subnet_label, 'netconf': netconfig}) ret_data_ip = '' for subnet in netconfig['array_list'][0]['nic_list']: - LOG.info(_LI('Exploring array subnet label %s'), subnet[ + LOG.info('Exploring array subnet label %s', subnet[ 'subnet_label']) if subnet['data_ip']: if subnet_label == '*': # Use the first data subnet, save mgmt+data for later - LOG.info(_LI('Data ip %(data_ip)s is used ' - 'on data subnet %(net_label)s'), + LOG.info('Data ip %(data_ip)s is used ' + 'on data subnet %(net_label)s', {'data_ip': subnet['data_ip'], 'net_label': subnet['subnet_label']}) return subnet['data_ip'] elif subnet_label == subnet['subnet_label']: - LOG.info(_LI('Data ip %(data_ip)s is used' - ' on subnet %(net_label)s'), + LOG.info('Data ip %(data_ip)s is used' + ' on subnet %(net_label)s', {'data_ip': subnet['data_ip'], 'net_label': subnet['subnet_label']}) return subnet['data_ip'] if ret_data_ip: - LOG.info(_LI('Data ip %s is used on mgmt+data subnet'), + LOG.info('Data ip %s is used on mgmt+data subnet', ret_data_ip) return ret_data_ip else: @@ -767,30 +767,30 @@ class NimbleISCSIDriver(NimbleBaseVolumeDriver, san.SanISCSIDriver): {'netlabel': subnet_label, 'netconf': netconfig}) 
ret_discovery_ip = '' for subnet in netconfig['subnet_list']: - LOG.info(_LI('Exploring array subnet label %s'), subnet['label']) + LOG.info('Exploring array subnet label %s', subnet['label']) if subnet_label == '*': # Use the first data subnet, save mgmt+data for later if subnet['type'] == SM_SUBNET_DATA: - LOG.info(_LI('Discovery ip %(disc_ip)s is used ' - 'on data subnet %(net_label)s'), + LOG.info('Discovery ip %(disc_ip)s is used ' + 'on data subnet %(net_label)s', {'disc_ip': subnet['discovery_ip'], 'net_label': subnet['label']}) return subnet['discovery_ip'] elif (subnet['type'] == SM_SUBNET_MGMT_PLUS_DATA): - LOG.info(_LI('Discovery ip %(disc_ip)s is found' - ' on mgmt+data subnet %(net_label)s'), + LOG.info('Discovery ip %(disc_ip)s is found' + ' on mgmt+data subnet %(net_label)s', {'disc_ip': subnet['discovery_ip'], 'net_label': subnet['label']}) ret_discovery_ip = subnet['discovery_ip'] # If subnet is specified and found, use the subnet elif subnet_label == subnet['label']: - LOG.info(_LI('Discovery ip %(disc_ip)s is used' - ' on subnet %(net_label)s'), + LOG.info('Discovery ip %(disc_ip)s is used' + ' on subnet %(net_label)s', {'disc_ip': subnet['discovery_ip'], 'net_label': subnet['label']}) return subnet['discovery_ip'] if ret_discovery_ip: - LOG.info(_LI('Discovery ip %s is used on mgmt+data subnet'), + LOG.info('Discovery ip %s is used on mgmt+data subnet', ret_discovery_ip) return ret_discovery_ip else: @@ -811,7 +811,7 @@ class NimbleFCDriver(NimbleBaseVolumeDriver, driver.FibreChannelDriver): netconfig = self.APIExecutor.get_netconfig('active') array_name = netconfig['group_leader_array'] provider_location = '%s' % (array_name) - LOG.info(_LI('vol_name=%(name)s provider_location=%(loc)s'), + LOG.info('vol_name=%(name)s provider_location=%(loc)s', {'name': volume_name, 'loc': provider_location}) return provider_location @@ -828,7 +828,7 @@ class NimbleFCDriver(NimbleBaseVolumeDriver, driver.FibreChannelDriver): connector['wwpns'], target_wwns) map_fabric = dev_map - LOG.info(_LI("dev_map =%(fabric)s"), {'fabric': map_fabric}) + LOG.info("dev_map =%(fabric)s", {'fabric': map_fabric}) for fabric_name in dev_map: fabric = dev_map[fabric_name] @@ -846,8 +846,8 @@ class NimbleFCDriver(NimbleBaseVolumeDriver, driver.FibreChannelDriver): @fczm_utils.add_fc_zone def initialize_connection(self, volume, connector): """Driver entry point to attach a volume to an instance.""" - LOG.info(_LI('Entering initialize_connection volume=%(vol)s' - ' connector=%(conn)s location=%(loc)s'), + LOG.info('Entering initialize_connection volume=%(vol)s' + ' connector=%(conn)s location=%(loc)s', {'vol': volume, 'conn': connector, 'loc': volume['provider_location']}) @@ -861,8 +861,8 @@ class NimbleFCDriver(NimbleBaseVolumeDriver, driver.FibreChannelDriver): initiator_group_name = self._create_igroup_for_initiator( initiator_name, wwpns) - LOG.info(_LI('Initiator group name is %(grp)s for initiator ' - '%(iname)s'), + LOG.info('Initiator group name is %(grp)s for initiator ' + '%(iname)s', {'grp': initiator_group_name, 'iname': initiator_name}) self.APIExecutor.add_acl(volume, initiator_group_name) lun = self.get_lun_number(volume, initiator_group_name) @@ -880,7 +880,7 @@ class NimbleFCDriver(NimbleBaseVolumeDriver, driver.FibreChannelDriver): 'target_wwn': target_wwns, 'initiator_target_map': init_targ_map}} - LOG.info(_LI("Return FC data for zone addition: %(data)s."), + LOG.info("Return FC data for zone addition: %(data)s.", {'data': data}) return data @@ -888,8 +888,8 @@ class 
NimbleFCDriver(NimbleBaseVolumeDriver, driver.FibreChannelDriver): @fczm_utils.remove_fc_zone def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to unattach a volume from an instance.""" - LOG.info(_LI('Entering terminate_connection volume=%(vol)s' - ' connector=%(conn)s location=%(loc)s.'), + LOG.info('Entering terminate_connection volume=%(vol)s' + ' connector=%(conn)s location=%(loc)s.', {'vol': volume, 'conn': connector, 'loc': volume['provider_location']}) @@ -924,7 +924,7 @@ class NimbleFCDriver(NimbleBaseVolumeDriver, driver.FibreChannelDriver): LOG.debug("get_wwpns_from_array %s" % array_name) target_wwpns = [] interface_info = self.APIExecutor.get_fc_interface_list(array_name) - LOG.info(_LI("interface_info %(interface_info)s"), + LOG.info("interface_info %(interface_info)s", {"interface_info": interface_info}) for wwpn_list in interface_info: wwpn = wwpn_list['wwpn'] @@ -947,12 +947,12 @@ def _connection_checker(func): except Exception as e: if attempts < 1 and (re.search("Failed to execute", six.text_type(e))): - LOG.info(_LI('Session might have expired.' - ' Trying to relogin')) + LOG.info('Session might have expired.' + ' Trying to relogin') self.login() continue else: - LOG.error(_LE('Re-throwing Exception %s'), e) + LOG.error('Re-throwing Exception %s', e) raise return inner_connection_checker @@ -1077,7 +1077,7 @@ class NimbleRestAPIExecutor(object): def create_vol(self, volume, pool_name, reserve, protocol, is_gst_enabled): response = self._execute_create_vol(volume, pool_name, reserve, protocol, is_gst_enabled) - LOG.info(_LI('Successfully created volume %(name)s'), + LOG.info('Successfully created volume %(name)s', {'name': response['name']}) return response['name'] @@ -1278,7 +1278,7 @@ class NimbleRestAPIExecutor(object): if 'data' not in r.json(): raise NimbleAPIException(_("Unable to retrieve initiator group " "list")) - LOG.info(_LI('Successfully retrieved InitiatorGrpList')) + LOG.info('Successfully retrieved InitiatorGrpList') return r.json()['data'] def get_initiator_grp_id_by_name(self, initiator_group_name): @@ -1322,7 +1322,7 @@ class NimbleRestAPIExecutor(object): except NimbleAPIException as ex: LOG.debug("add_acl_exception: %s", ex) if SM_OBJ_EXIST_MSG in six.text_type(ex): - LOG.warning(_LW('Volume %(vol)s : %(state)s'), + LOG.warning('Volume %(vol)s : %(state)s', {'vol': volume['name'], 'state': SM_OBJ_EXIST_MSG}) else: @@ -1343,8 +1343,8 @@ class NimbleRestAPIExecutor(object): return r.json()['data'][0] def remove_acl(self, volume, initiator_group_name): - LOG.info(_LI("removing ACL from volume=%(vol)s" - "and %(igroup)s"), + LOG.info("removing ACL from volume=%(vol)s" + "and %(igroup)s", {"vol": volume['name'], "igroup": initiator_group_name}) initiator_group_id = self.get_initiator_grp_id_by_name( @@ -1360,7 +1360,7 @@ class NimbleRestAPIExecutor(object): except NimbleAPIException as ex: LOG.debug("remove_acl_exception: %s", ex) if SM_OBJ_ENOENT_MSG in six.text_type(ex): - LOG.warning(_LW('Volume %(vol)s : %(state)s'), + LOG.warning('Volume %(vol)s : %(state)s', {'vol': volume['name'], 'state': SM_OBJ_ENOENT_MSG}) else: @@ -1416,7 +1416,7 @@ class NimbleRestAPIExecutor(object): msg = (_("Error %s") % ex) LOG.debug("online_vol_exception: %s" % msg) if msg.__contains__("Object is %s" % SM_STATE_MSG): - LOG.warning(_LW('Volume %(vol)s : %(state)s'), + LOG.warning('Volume %(vol)s : %(state)s', {'vol': volume_name, 'state': SM_STATE_MSG}) # TODO(rkumar): Check if we need to ignore the connected @@ -1439,7 +1439,7 @@ class 
NimbleRestAPIExecutor(object): except Exception as ex: LOG.debug("online_snap_exception: %s" % ex) if six.text_type(ex).__contains__("Object %s" % SM_STATE_MSG): - LOG.warning(_LW('Snapshot %(snap)s :%(state)s'), + LOG.warning('Snapshot %(snap)s :%(state)s', {'snap': snap_name, 'state': SM_STATE_MSG}) else: @@ -1508,12 +1508,12 @@ class NimbleRestAPIExecutor(object): else: agent_type = AGENT_TYPE_OPENSTACK - LOG.info(_LI('Cloning volume from snapshot volume=%(vol)s ' - 'snapshot=%(snap)s clone=%(clone)s snap_size=%(size)s ' - 'reserve=%(reserve)s' 'agent-type=%(agent-type)s ' - 'perfpol-name=%(perfpol-name)s ' - 'encryption=%(encryption)s cipher=%(cipher)s ' - 'multi-initiator=%(multi-initiator)s'), + LOG.info('Cloning volume from snapshot volume=%(vol)s ' + 'snapshot=%(snap)s clone=%(clone)s snap_size=%(size)s ' + 'reserve=%(reserve)s' 'agent-type=%(agent-type)s ' + 'perfpol-name=%(perfpol-name)s ' + 'encryption=%(encryption)s cipher=%(cipher)s ' + 'multi-initiator=%(multi-initiator)s', {'vol': volume_name, 'snap': snap_name, 'clone': clone_name, diff --git a/cinder/volume/drivers/prophetstor/dpl_fc.py b/cinder/volume/drivers/prophetstor/dpl_fc.py index 742f0a443a6..ca9450be341 100644 --- a/cinder/volume/drivers/prophetstor/dpl_fc.py +++ b/cinder/volume/drivers/prophetstor/dpl_fc.py @@ -18,7 +18,7 @@ import errno from oslo_log import log as logging from cinder import exception -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.prophetstor import dplcommon @@ -58,8 +58,8 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver, if fcInfo['type'] == 'fc': fcInfos[fcUuid] = fcInfo except Exception as e: - LOG.error(_LE("Failed to get fiber channel info from storage " - "due to %(stat)s"), {'stat': e}) + LOG.error("Failed to get fiber channel info from storage " + "due to %(stat)s", {'stat': e}) return fcInfos def _get_targets(self): @@ -83,8 +83,8 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver, targetInfos[str(targetInfo[0])] = targetI except Exception as e: targetInfos = {} - LOG.error(_LE("Failed to get fiber channel target from " - "storage server due to %(stat)s"), + LOG.error("Failed to get fiber channel target from " + "storage server due to %(stat)s", {'stat': e}) return targetInfos @@ -101,8 +101,8 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver, targetWwpns = fc_info.get('target_identifier', '') lstargetWwpns.append(targetWwpns) except Exception as e: - LOG.error(_LE("Failed to get target wwpns from storage due " - "to %(stat)s"), {'stat': e}) + LOG.error("Failed to get target wwpns from storage due " + "to %(stat)s", {'stat': e}) lstargetWwpns = [] return lstargetWwpns @@ -119,7 +119,7 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver, fActive = True break except Exception: - LOG.error(_LE('Failed to get sns table')) + LOG.error('Failed to get sns table') return fActive def _convertHex2String(self, wwpns): @@ -147,8 +147,8 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver, self._conver_uuid2hex(volumeid), targetwwpns, initiatorwwpns, volumename) except Exception: - LOG.error(_LE('Volume %(volumeid)s failed to send assign command, ' - 'ret: %(status)s output: %(output)s'), + LOG.error('Volume %(volumeid)s failed to send assign command, ' + 'ret: %(status)s output: %(output)s', {'volumeid': volumeid, 'status': ret, 'output': output}) ret = errno.EFAULT @@ -204,7 +204,7 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver, '%(status)s.') % {'id': volumeid, 'status': ret} raise 
exception.VolumeBackendAPIException(data=msg) else: - LOG.info(_LI('Flexvisor succeeded to unassign volume %(id)s.'), + LOG.info('Flexvisor succeeded to unassign volume %(id)s.', {'id': volumeid}) return ret @@ -238,8 +238,8 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver, ret = 0 targetIdentifier = [] szwwpns = [] - LOG.info(_LI('initialize_connection volume: %(volume)s, connector:' - ' %(connector)s'), + LOG.info('initialize_connection volume: %(volume)s, connector:' + ' %(connector)s', {"volume": volume, "connector": connector}) # Get Storage Fiber channel controller dc_fc = self._get_fc_channel() @@ -274,7 +274,7 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver, {}).get('targetAddr', '') lsTargetWwpn.append(targetWwpn) # Use wwpns to assign volume. - LOG.info(_LI('Prefer use target wwpn %(wwpn)s'), + LOG.info('Prefer use target wwpn %(wwpn)s', {'wwpn': lsTargetWwpn}) # Start to create export in all FC target node. assignedTarget = [] @@ -287,8 +287,8 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver, else: assignedTarget.append(pTarget) except Exception as e: - LOG.error(_LE('Failed to export fiber channel target ' - 'due to %s'), e) + LOG.error('Failed to export fiber channel target ' + 'due to %s', e) ret = errno.EFAULT break if ret == 0: @@ -326,16 +326,16 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver, properties['target_lun'] = int(nLun) properties['volume_id'] = volume['id'] properties['initiator_target_map'] = init_targ_map - LOG.info(_LI('%(volume)s assign type fibre_channel, properties ' - '%(properties)s'), + LOG.info('%(volume)s assign type fibre_channel, properties ' + '%(properties)s', {'volume': volume['id'], 'properties': properties}) else: msg = _('Invalid connection initialization response of ' 'volume %(name)s') % {'name': volume['name']} raise exception.VolumeBackendAPIException(data=msg) - LOG.info(_LI('Connect initialization info: ' - '{driver_volume_type: fibre_channel, ' - 'data: %(properties)s'), {'properties': properties}) + LOG.info('Connect initialization info: ' + '{driver_volume_type: fibre_channel, ' + 'data: %(properties)s', {'properties': properties}) return {'driver_volume_type': 'fibre_channel', 'data': properties} @@ -354,8 +354,8 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver, szwwpns = [] ret = 0 info = {'driver_volume_type': 'fibre_channel', 'data': {}} - LOG.info(_LI('terminate_connection volume: %(volume)s, ' - 'connector: %(con)s'), + LOG.info('terminate_connection volume: %(volume)s, ' + 'connector: %(con)s', {'volume': volume, 'con': connector}) # Query targetwwpns. # Get all target list of volume. 
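
The hunks above and below all apply the same pattern: the _LE/_LI/_LW wrappers are dropped from LOG.* calls, deferred %-style interpolation is kept (the format string and its arguments are still passed to the logger separately rather than pre-formatted), and _() remains only on user-facing exception messages. A minimal standalone sketch of that before/after convention follows; the names (nef, delete_snapshot) are illustrative only, not real Cinder APIs, and stdlib logging stands in for oslo_log, which interpolates arguments the same way.

    # Sketch of the post-change logging convention, under the assumptions above.
    import logging

    LOG = logging.getLogger(__name__)

    # Stand-in for "from cinder.i18n import _", which the patch keeps for
    # user-facing exception messages; aliased to str so the sketch runs alone.
    _ = str


    def delete_snapshot(nef, snapshot_name):
        try:
            nef.delete(snapshot_name)
        except RuntimeError as exc:
            if 'EBUSY' in str(exc):
                # Before: LOG.warning(_LW('Could not delete snapshot %s ...'), ...)
                # After:  plain string; interpolation is still deferred to the
                # logger, so the message is only formatted if it is emitted.
                LOG.warning('Could not delete snapshot %s - it has dependencies',
                            snapshot_name)
                return
            # Exception text shown to users keeps the _() translation marker.
            raise RuntimeError(_('Failed to delete snapshot %s') % snapshot_name)

The dict-argument form used in the NexentaStor, Nimble, and ProphetStor hunks (LOG.warning('... %(vol)s ...', {'vol': name})) is the same mechanism with named placeholders; removing the translation markers does not change when or how the string is rendered.

diff --git a/cinder/volume/drivers/prophetstor/dpl_iscsi.py b/cinder/volume/drivers/prophetstor/dpl_iscsi.py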
diff --git a/cinder/volume/drivers/prophetstor/dpl_iscsi.py b/cinder/volume/drivers/prophetstor/dpl_iscsi.py index a726f3b581d..cf86b2abf74 100644 --- a/cinder/volume/drivers/prophetstor/dpl_iscsi.py +++ b/cinder/volume/drivers/prophetstor/dpl_iscsi.py @@ -18,7 +18,7 @@ import errno from oslo_log import log as logging from cinder import exception -from cinder.i18n import _, _LI, _LW +from cinder.i18n import _ from cinder import interface import cinder.volume.driver from cinder.volume.drivers.prophetstor import dplcommon @@ -133,8 +133,8 @@ class DPLISCSIDriver(dplcommon.DPLCOMMONDriver, '%(id)s.') % {'id': volume['id']} raise exception.VolumeBackendAPIException(data=msg) elif ret == errno.ENODATA: - LOG.info(_LI('Flexvisor already unassigned volume ' - '%(id)s.'), {'id': volume['id']}) + LOG.info('Flexvisor already unassigned volume %(id)s.', + {'id': volume['id']}) elif ret != 0: msg = _('Flexvisor failed to unassign volume:%(id)s:' '%(status)s.') % {'id': volume['id'], 'status': ret} @@ -152,6 +152,5 @@ class DPLISCSIDriver(dplcommon.DPLCOMMONDriver, (backend_name or 'DPLISCSIDriver') self._stats = data except Exception as exc: - LOG.warning(_LW('Cannot get volume status ' - '%(exc)s.'), {'exc': exc}) + LOG.warning('Cannot get volume status %(exc)s.', {'exc': exc}) return self._stats diff --git a/cinder/volume/drivers/prophetstor/dplcommon.py b/cinder/volume/drivers/prophetstor/dplcommon.py index 6bd14d202ef..379a2db5e56 100644 --- a/cinder/volume/drivers/prophetstor/dplcommon.py +++ b/cinder/volume/drivers/prophetstor/dplcommon.py @@ -33,7 +33,7 @@ import six from six.moves import http_client from cinder import exception -from cinder.i18n import _, _LI, _LW, _LE +from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder.volume import driver @@ -94,8 +94,8 @@ class DPLCommand(object): payload = json.dumps(params, ensure_ascii=False) payload.encode('utf-8') except Exception as e: - LOG.error(_LE('JSON encode params %(param)s error:' - ' %(status)s.'), {'param': params, 'status': e}) + LOG.error('JSON encode params %(param)s error:' + ' %(status)s.', {'param': params, 'status': e}) retcode = errno.EINVAL for i in range(CONNECTION_RETRY): try: @@ -106,11 +106,11 @@ class DPLCommand(object): retcode = 0 break except IOError as ioerr: - LOG.error(_LE('Connect to Flexvisor error: %s.'), + LOG.error('Connect to Flexvisor error: %s.', ioerr) retcode = errno.ENOTCONN except Exception as e: - LOG.error(_LE('Connect to Flexvisor failed: %s.'), + LOG.error('Connect to Flexvisor failed: %s.', e) retcode = errno.EFAULT @@ -134,7 +134,7 @@ class DPLCommand(object): retcode = errno.ENOTCONN continue except Exception as e: - LOG.error(_LE('Failed to send request: %s.'), + LOG.error('Failed to send request: %s.', e) retcode = errno.EFAULT break @@ -143,7 +143,7 @@ class DPLCommand(object): try: response = connection.getresponse() if response.status == http_client.SERVICE_UNAVAILABLE: - LOG.error(_LE('The Flexvisor service is unavailable.')) + LOG.error('The Flexvisor service is unavailable.') time.sleep(1) retry -= 1 retcode = errno.ENOPROTOOPT @@ -157,7 +157,7 @@ class DPLCommand(object): retcode = errno.EFAULT continue except Exception as e: - LOG.error(_LE('Failed to get response: %s.'), + LOG.error('Failed to get response: %s.', e) retcode = errno.EFAULT break @@ -166,8 +166,8 @@ class DPLCommand(object): and response.status == http_client.NOT_FOUND): retcode = errno.ENODATA elif retcode == 0 and response.status not in expected_status: - 
LOG.error(_LE('%(method)s %(url)s unexpected response status: ' - '%(response)s (expects: %(expects)s).'), + LOG.error('%(method)s %(url)s unexpected response status: ' + '%(response)s (expects: %(expects)s).', {'method': method, 'url': url, 'response': http_client.responses[response.status], @@ -184,11 +184,11 @@ class DPLCommand(object): data = response.read() data = json.loads(data) except (TypeError, ValueError) as e: - LOG.error(_LE('Call to json.loads() raised an exception: %s.'), + LOG.error('Call to json.loads() raised an exception: %s.', e) retcode = errno.ENOEXEC except Exception as e: - LOG.error(_LE('Read response raised an exception: %s.'), + LOG.error('Read response raised an exception: %s.', e) retcode = errno.ENOEXEC elif (retcode == 0 and @@ -198,11 +198,11 @@ class DPLCommand(object): data = response.read() data = json.loads(data) except (TypeError, ValueError) as e: - LOG.error(_LE('Call to json.loads() raised an exception: %s.'), + LOG.error('Call to json.loads() raised an exception: %s.', e) retcode = errno.ENOEXEC except Exception as e: - LOG.error(_LE('Read response raised an exception: %s.'), + LOG.error('Read response raised an exception: %s.', e) retcode = errno.ENOEXEC @@ -782,8 +782,8 @@ class DPLCOMMONDriver(driver.CloneableImageVD, continue except Exception as e: - LOG.error(_LE('Flexvisor failed to get event %(volume)s ' - '(%(status)s).'), + LOG.error('Flexvisor failed to get event %(volume)s ' + '(%(status)s).', {'volume': eventid, 'status': e}) raise loopingcall.LoopingCallDone(retvalue=False) @@ -810,8 +810,8 @@ class DPLCOMMONDriver(driver.CloneableImageVD, 'cgid': cgId} raise exception.VolumeBackendAPIException(data=msg) else: - LOG.info(_LI('Flexvisor succeeded to add volume %(id)s to ' - 'group %(cgid)s.'), + LOG.info('Flexvisor succeeded to add volume %(id)s to ' + 'group %(cgid)s.', {'id': volume['id'], 'cgid': cgId}) def _leave_volume_group(self, volume, cgId): @@ -833,8 +833,8 @@ class DPLCOMMONDriver(driver.CloneableImageVD, 'cgid': cgId} raise exception.VolumeBackendAPIException(data=msg) else: - LOG.info(_LI('Flexvisor succeeded to remove volume %(id)s from ' - 'group %(cgid)s.'), + LOG.info('Flexvisor succeeded to remove volume %(id)s from ' + 'group %(cgid)s.', {'id': volume['id'], 'cgid': cgId}) def _get_snapshotid_of_vgsnapshot(self, vgID, vgsnapshotID, volumeID): @@ -868,8 +868,8 @@ class DPLCOMMONDriver(driver.CloneableImageVD, def create_consistencygroup(self, context, group): """Creates a consistencygroup.""" - LOG.info(_LI('Start to create consistency group: %(group_name)s ' - 'id: %(id)s'), + LOG.info('Start to create consistency group: %(group_name)s ' + 'id: %(id)s', {'group_name': group['name'], 'id': group['id']}) model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} try: @@ -897,7 +897,7 @@ class DPLCOMMONDriver(driver.CloneableImageVD, context, group['id']) model_update = {} model_update['status'] = group['status'] - LOG.info(_LI('Start to delete consistency group: %(cg_name)s'), + LOG.info('Start to delete consistency group: %(cg_name)s', {'cg_name': group['id']}) try: self.dpl.delete_vg(self._conver_uuid2hex(group['id'])) @@ -925,8 +925,8 @@ class DPLCOMMONDriver(driver.CloneableImageVD, snapshots = objects.SnapshotList().get_all_for_cgsnapshot( context, cgsnapshot['id']) model_update = {} - LOG.info(_LI('Start to create cgsnapshot for consistency group' - ': %(group_name)s'), + LOG.info('Start to create cgsnapshot for consistency group' + ': %(group_name)s', {'group_name': cgsnapshot['consistencygroup_id']}) try: 
self.dpl.create_vdev_snapshot( @@ -953,8 +953,8 @@ class DPLCOMMONDriver(driver.CloneableImageVD, context, cgsnapshot['id']) model_update = {} model_update['status'] = cgsnapshot['status'] - LOG.info(_LI('Delete cgsnapshot %(snap_name)s for consistency group: ' - '%(group_name)s'), + LOG.info('Delete cgsnapshot %(snap_name)s for consistency group: ' + '%(group_name)s', {'snap_name': cgsnapshot['id'], 'group_name': cgsnapshot['consistencygroup_id']}) try: @@ -1058,7 +1058,7 @@ class DPLCOMMONDriver(driver.CloneableImageVD, raise exception.VolumeBackendAPIException( data=msg) else: - LOG.info(_LI('Flexvisor succeeded to create volume %(id)s.'), + LOG.info('Flexvisor succeeded to create volume %(id)s.', {'id': volume['id']}) if volume.get('consistencygroup_id', None): @@ -1141,8 +1141,8 @@ class DPLCOMMONDriver(driver.CloneableImageVD, raise exception.VolumeBackendAPIException( data=msg) else: - LOG.info(_LI('Flexvisor succeeded to create volume %(id)s ' - 'from snapshot.'), {'id': volume['id']}) + LOG.info('Flexvisor succeeded to create volume %(id)s ' + 'from snapshot.', {'id': volume['id']}) if volume['size'] > snapshot['size']: self.extend_volume(volume, volume['size']) @@ -1188,8 +1188,8 @@ class DPLCOMMONDriver(driver.CloneableImageVD, raise exception.VolumeBackendAPIException( data=msg) else: - LOG.info(_LI('Flexvisor succeeded to create volume %(id)s ' - 'from snapshot.'), {'id': volume['id']}) + LOG.info('Flexvisor succeeded to create volume %(id)s ' + 'from snapshot.', {'id': volume['id']}) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" @@ -1232,7 +1232,7 @@ class DPLCOMMONDriver(driver.CloneableImageVD, raise exception.VolumeBackendAPIException( data=msg) else: - LOG.info(_LI('Flexvisor succeeded to clone volume %(id)s.'), + LOG.info('Flexvisor succeeded to clone volume %(id)s.', {'id': volume['id']}) if volume.get('consistencygroup_id', None): @@ -1256,13 +1256,13 @@ class DPLCOMMONDriver(driver.CloneableImageVD, self._conver_uuid2hex(volume['id']), self._conver_uuid2hex(volume['consistencygroup_id'])) if ret: - LOG.warning(_LW('Flexvisor failed to delete volume ' - '%(id)s from the group %(vgid)s.'), + LOG.warning('Flexvisor failed to delete volume ' + '%(id)s from the group %(vgid)s.', {'id': volume['id'], 'vgid': volume['consistencygroup_id']}) except Exception as e: - LOG.warning(_LW('Flexvisor failed to delete volume %(id)s ' - 'from group %(vgid)s due to %(status)s.'), + LOG.warning('Flexvisor failed to delete volume %(id)s ' + 'from group %(vgid)s due to %(status)s.', {'id': volume['id'], 'vgid': volume['consistencygroup_id'], 'status': e}) @@ -1279,8 +1279,8 @@ class DPLCOMMONDriver(driver.CloneableImageVD, raise exception.VolumeBackendAPIException(data=msg) elif ret == errno.ENODATA: ret = 0 - LOG.info(_LI('Flexvisor volume %(id)s does not ' - 'exist.'), {'id': volume['id']}) + LOG.info('Flexvisor volume %(id)s does not ' + 'exist.', {'id': volume['id']}) elif ret != 0: msg = _('Flexvisor failed to delete volume %(id)s: ' '%(status)s.') % {'id': volume['id'], 'status': ret} @@ -1317,8 +1317,8 @@ class DPLCOMMONDriver(driver.CloneableImageVD, raise exception.VolumeBackendAPIException( data=msg) else: - LOG.info(_LI('Flexvisor succeeded to extend volume' - ' %(id)s.'), {'id': volume['id']}) + LOG.info('Flexvisor succeeded to extend volume' + ' %(id)s.', {'id': volume['id']}) def create_snapshot(self, snapshot): """Creates a snapshot.""" @@ -1371,14 +1371,14 @@ class DPLCOMMONDriver(driver.CloneableImageVD, 'get event) 
%(id)s.') % {'id': snapshot['id']} raise exception.VolumeBackendAPIException(data=msg) elif ret == errno.ENODATA: - LOG.info(_LI('Flexvisor snapshot %(id)s not existed.'), + LOG.info('Flexvisor snapshot %(id)s not existed.', {'id': snapshot['id']}) elif ret != 0: msg = _('Flexvisor failed to delete snapshot %(id)s: ' '%(status)s.') % {'id': snapshot['id'], 'status': ret} raise exception.VolumeBackendAPIException(data=msg) else: - LOG.info(_LI('Flexvisor succeeded to delete snapshot %(id)s.'), + LOG.info('Flexvisor succeeded to delete snapshot %(id)s.', {'id': snapshot['id']}) def get_volume_stats(self, refresh=False): @@ -1405,11 +1405,11 @@ class DPLCOMMONDriver(driver.CloneableImageVD, for poolUuid, poolName in output.get('children', []): qpools.append(poolUuid) else: - LOG.error(_LE("Flexvisor failed to get pool list." - "(Error: %d)"), ret) + LOG.error("Flexvisor failed to get pool list." + " (Error: %d)", ret) except Exception as e: - LOG.error(_LE("Flexvisor failed to get pool list due to " - "%s."), e) + LOG.error("Flexvisor failed to get pool list due to " + "%s.", e) # Query pool detail information for poolid in qpools: @@ -1430,8 +1430,8 @@ class DPLCOMMONDriver(driver.CloneableImageVD, pool['reserved_percentage'] = 0 pools.append(pool) else: - LOG.warning(_LW("Failed to query pool %(id)s status " - "%(ret)d."), {'id': poolid, 'ret': ret}) + LOG.warning("Failed to query pool %(id)s status " + "%(ret)d.", {'id': poolid, 'ret': ret}) continue return pools @@ -1460,14 +1460,14 @@ class DPLCOMMONDriver(driver.CloneableImageVD, data['pools'] = pools self._stats = data except Exception as e: - LOG.error(_LE('Failed to get server info due to ' - '%(state)s.'), {'state': e}) + LOG.error('Failed to get server info due to ' + '%(state)s.', {'state': e}) return self._stats def do_setup(self, context): """Any initialization the volume driver does while starting.""" self.context = context - LOG.info(_LI('Activate Flexvisor cinder volume driver.')) + LOG.info('Activate Flexvisor cinder volume driver.') def check_for_setup_error(self): """Check DPL can connect properly.""" @@ -1489,7 +1489,7 @@ class DPLCOMMONDriver(driver.CloneableImageVD, ret = 0 output = status.get('output', {}) else: - LOG.error(_LE('Flexvisor failed to get pool %(id)s info.'), + LOG.error('Flexvisor failed to get pool %(id)s info.', {'id': poolid}) raise exception.VolumeBackendAPIException( data="failed to get event") diff --git a/cinder/volume/drivers/pure.py b/cinder/volume/drivers/pure.py index 505da982a27..09d38588578 100644 --- a/cinder/volume/drivers/pure.py +++ b/cinder/volume/drivers/pure.py @@ -32,7 +32,7 @@ import six from cinder import context from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import interface from cinder.objects import fields from cinder import utils @@ -325,8 +325,8 @@ class PureBaseVolumeDriver(san.SanDriver): ERR_MSG_ALREADY_BELONGS in err.text): # Happens if the volume already added to PG. ctxt.reraise = False - LOG.warning(_LW("Adding Volume to Protection Group " - "failed with message: %s"), err.text) + LOG.warning("Adding Volume to Protection Group " + "failed with message: %s", err.text) @pure_driver_debug_trace def create_cloned_volume(self, volume, src_vref): @@ -372,7 +372,7 @@ class PureBaseVolumeDriver(san.SanDriver): ERR_MSG_NOT_EXIST in err.text): # Happens if the volume does not exist. 
ctxt.reraise = False - LOG.warning(_LW("Volume deletion failed with message: %s"), + LOG.warning("Volume deletion failed with message: %s", err.text) @pure_driver_debug_trace @@ -404,8 +404,8 @@ class PureBaseVolumeDriver(san.SanDriver): ERR_MSG_PENDING_ERADICATION in err.text): # Happens if the snapshot does not exist. ctxt.reraise = False - LOG.warning(_LW("Unable to delete snapshot, assuming " - "already deleted. Error: %s"), err.text) + LOG.warning("Unable to delete snapshot, assuming " + "already deleted. Error: %s", err.text) def ensure_export(self, context, volume): pass @@ -434,8 +434,8 @@ class PureBaseVolumeDriver(san.SanDriver): host_name = host["name"] result = self._disconnect_host(array, host_name, vol_name) else: - LOG.error(_LE("Unable to disconnect host from volume, could not " - "determine Purity host")) + LOG.error("Unable to disconnect host from volume, could not " + "determine Purity host") result = False return result @@ -457,8 +457,8 @@ class PureBaseVolumeDriver(san.SanDriver): if err.code == 400 and ERR_MSG_NOT_CONNECTED in err.text: # Happens if the host and volume are not connected. ctxt.reraise = False - LOG.error(_LE("Disconnection failed with message: " - "%(msg)s."), {"msg": err.text}) + LOG.error("Disconnection failed with message: " + "%(msg)s.", {"msg": err.text}) connections = None try: connections = array.list_host_connections(host_name, private=True) @@ -471,7 +471,7 @@ class PureBaseVolumeDriver(san.SanDriver): host_still_used = bool(connections) if GENERATED_NAME.match(host_name) and not host_still_used: - LOG.info(_LI("Attempting to delete unneeded host %(host_name)r."), + LOG.info("Attempting to delete unneeded host %(host_name)r.", {"host_name": host_name}) try: array.delete_host(host_name) @@ -708,7 +708,7 @@ class PureBaseVolumeDriver(san.SanDriver): # Treat these as a "success" case since we are trying # to delete them anyway. ctxt.reraise = False - LOG.warning(_LW("Unable to delete Protection Group: %s"), + LOG.warning("Unable to delete Protection Group: %s", err.text) for volume in volumes: @@ -765,8 +765,8 @@ class PureBaseVolumeDriver(san.SanDriver): # Treat these as a "success" case since we are trying # to delete them anyway. 
ctxt.reraise = False - LOG.warning(_LW("Unable to delete Protection Group " - "Snapshot: %s"), err.text) + LOG.warning("Unable to delete Protection Group " + "Snapshot: %s", err.text) @pure_driver_debug_trace def delete_cgsnapshot(self, context, cgsnapshot, snapshots): @@ -964,7 +964,7 @@ class PureBaseVolumeDriver(san.SanDriver): "from existing hosts before importing" ) % {'driver': self.__class__.__name__}) new_vol_name = self._get_vol_name(volume) - LOG.info(_LI("Renaming existing volume %(ref_name)s to %(new_name)s"), + LOG.info("Renaming existing volume %(ref_name)s to %(new_name)s", {"ref_name": ref_vol_name, "new_name": new_vol_name}) self._rename_volume_object(ref_vol_name, new_vol_name, @@ -996,8 +996,8 @@ class PureBaseVolumeDriver(san.SanDriver): if (err.code == 400 and ERR_MSG_NOT_EXIST in err.text): ctxt.reraise = raise_not_exist - LOG.warning(_LW("Unable to rename %(old_name)s, error " - "message: %(error)s"), + LOG.warning("Unable to rename %(old_name)s, error " + "message: %(error)s", {"old_name": old_name, "error": err.text}) return new_name @@ -1012,7 +1012,7 @@ class PureBaseVolumeDriver(san.SanDriver): vol_name = self._get_vol_name(volume) unmanaged_vol_name = vol_name + UNMANAGED_SUFFIX - LOG.info(_LI("Renaming existing volume %(ref_name)s to %(new_name)s"), + LOG.info("Renaming existing volume %(ref_name)s to %(new_name)s", {"ref_name": vol_name, "new_name": unmanaged_vol_name}) self._rename_volume_object(vol_name, unmanaged_vol_name) @@ -1038,9 +1038,9 @@ class PureBaseVolumeDriver(san.SanDriver): self._validate_manage_existing_ref(existing_ref, is_snap=True) ref_snap_name = existing_ref['name'] new_snap_name = self._get_snap_name(snapshot) - LOG.info(_LI("Renaming existing snapshot %(ref_name)s to " - "%(new_name)s"), {"ref_name": ref_snap_name, - "new_name": new_snap_name}) + LOG.info("Renaming existing snapshot %(ref_name)s to " + "%(new_name)s", {"ref_name": ref_snap_name, + "new_name": new_snap_name}) self._rename_volume_object(ref_snap_name, new_snap_name, raise_not_exist=True) @@ -1069,9 +1069,9 @@ class PureBaseVolumeDriver(san.SanDriver): self._verify_manage_snap_api_requirements() snap_name = self._get_snap_name(snapshot) unmanaged_snap_name = snap_name + UNMANAGED_SUFFIX - LOG.info(_LI("Renaming existing snapshot %(ref_name)s to " - "%(new_name)s"), {"ref_name": snap_name, - "new_name": unmanaged_snap_name}) + LOG.info("Renaming existing snapshot %(ref_name)s to " + "%(new_name)s", {"ref_name": snap_name, + "new_name": unmanaged_snap_name}) self._rename_volume_object(snap_name, unmanaged_snap_name) def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, @@ -1340,11 +1340,11 @@ class PureBaseVolumeDriver(san.SanDriver): if (err.code == 400 and ERR_MSG_COULD_NOT_BE_FOUND in err.text): ctxt.reraise = False - LOG.warning(_LW("Disable replication on volume failed: " - "already disabled: %s"), err.text) + LOG.warning("Disable replication on volume failed: " + "already disabled: %s", err.text) else: - LOG.error(_LE("Disable replication on volume failed with " - "message: %s"), err.text) + LOG.error("Disable replication on volume failed with " + "message: %s", err.text) @pure_driver_debug_trace def failover_host(self, context, volumes, secondary_id=None): @@ -1508,9 +1508,9 @@ class PureBaseVolumeDriver(san.SanDriver): ERR_MSG_ALREADY_INCLUDES in err.text): ctxt.reraise = False - LOG.info(_LI("Skipping add target %(target_array)s" - " to protection group %(pgname)s" - " since it's already added."), + LOG.info("Skipping add target %(target_array)s" 
+ " to protection group %(pgname)s" + " since it's already added.", {"target_array": target_array.array_name, "pgname": pg_name}) @@ -1532,9 +1532,9 @@ class PureBaseVolumeDriver(san.SanDriver): if (err.code == 400 and ERR_MSG_ALREADY_ALLOWED in err.text): ctxt.reraise = False - LOG.info(_LI("Skipping allow pgroup %(pgname)s on " - "target array %(target_array)s since " - "it is already allowed."), + LOG.info("Skipping allow pgroup %(pgname)s on " + "target array %(target_array)s since " + "it is already allowed.", {"pgname": pg_name, "target_array": target_array.array_name}) @@ -1604,16 +1604,16 @@ class PureBaseVolumeDriver(san.SanDriver): if err.code == 400 and ERR_MSG_ALREADY_EXISTS in err.text: # Happens if the PG already exists ctxt.reraise = False - LOG.warning(_LW("Skipping creation of PG %s since it " - "already exists."), pgname) + LOG.warning("Skipping creation of PG %s since it " + "already exists.", pgname) # We assume PG has already been setup with correct # replication settings. return if err.code == 400 and ( ERR_MSG_PENDING_ERADICATION in err.text): ctxt.reraise = False - LOG.warning(_LW("Protection group %s is deleted but not" - " eradicated - will recreate."), pgname) + LOG.warning("Protection group %s is deleted but not" + " eradicated - will recreate.", pgname) source_array.eradicate_pgroup(pgname) source_array.create_pgroup(pgname) @@ -1668,8 +1668,8 @@ class PureBaseVolumeDriver(san.SanDriver): if pg_snap: break except Exception: - LOG.exception(_LE('Error finding replicated pg snapshot ' - 'on %(secondary)s.'), + LOG.exception('Error finding replicated pg snapshot ' + 'on %(secondary)s.', {'secondary': array._backend_id}) if not secondary_array: @@ -1822,31 +1822,31 @@ class PureISCSIDriver(PureBaseVolumeDriver, san.SanISCSIDriver): if host: host_name = host["name"] - LOG.info(_LI("Re-using existing purity host %(host_name)r"), + LOG.info("Re-using existing purity host %(host_name)r", {"host_name": host_name}) if self.configuration.use_chap_auth: if not GENERATED_NAME.match(host_name): - LOG.error(_LE("Purity host %(host_name)s is not managed " - "by Cinder and can't have CHAP credentials " - "modified. Remove IQN %(iqn)s from the host " - "to resolve this issue."), + LOG.error("Purity host %(host_name)s is not managed " + "by Cinder and can't have CHAP credentials " + "modified. 
Remove IQN %(iqn)s from the host " + "to resolve this issue.", {"host_name": host_name, "iqn": connector["initiator"]}) raise exception.PureDriverException( reason=_("Unable to re-use a host that is not " "managed by Cinder with use_chap_auth=True,")) elif chap_username is None or chap_password is None: - LOG.error(_LE("Purity host %(host_name)s is managed by " - "Cinder but CHAP credentials could not be " - "retrieved from the Cinder database."), + LOG.error("Purity host %(host_name)s is managed by " + "Cinder but CHAP credentials could not be " + "retrieved from the Cinder database.", {"host_name": host_name}) raise exception.PureDriverException( reason=_("Unable to re-use host with unknown CHAP " "credentials configured.")) else: host_name = self._generate_purity_host_name(connector["host"]) - LOG.info(_LI("Creating host object %(host_name)r with IQN:" - " %(iqn)s."), {"host_name": host_name, "iqn": iqn}) + LOG.info("Creating host object %(host_name)r with IQN:" + " %(iqn)s.", {"host_name": host_name, "iqn": iqn}) try: current_array.create_host(host_name, iqnlist=[iqn]) except purestorage.PureHTTPError as err: @@ -1947,12 +1947,12 @@ class PureFCDriver(PureBaseVolumeDriver, driver.FibreChannelDriver): if host: host_name = host["name"] - LOG.info(_LI("Re-using existing purity host %(host_name)r"), + LOG.info("Re-using existing purity host %(host_name)r", {"host_name": host_name}) else: host_name = self._generate_purity_host_name(connector["host"]) - LOG.info(_LI("Creating host object %(host_name)r with WWN:" - " %(wwn)s."), {"host_name": host_name, "wwn": wwns}) + LOG.info("Creating host object %(host_name)r with WWN:" + " %(wwn)s.", {"host_name": host_name, "wwn": wwns}) try: current_array.create_host(host_name, wwnlist=wwns) except purestorage.PureHTTPError as err: diff --git a/cinder/volume/drivers/qnap.py b/cinder/volume/drivers/qnap.py index 19ff4632904..7f500cc7893 100644 --- a/cinder/volume/drivers/qnap.py +++ b/cinder/volume/drivers/qnap.py @@ -36,7 +36,7 @@ from six.moves import http_client from six.moves import urllib from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _ from cinder import interface from cinder.volume.drivers.san import san @@ -99,9 +99,9 @@ class QnapISCSIDriver(san.SanISCSIDriver): try: self.api_executor = self.creat_api_executor() except Exception: - LOG.error(_LE('Failed to create HTTP client. ' - 'Check ip, port, username, password' - ' and make sure the array version is compatible')) + LOG.error('Failed to create HTTP client. 
' + 'Check ip, port, username, password' + ' and make sure the array version is compatible') msg = _('Failed to create HTTP client.') raise exception.VolumeDriverException(message=msg) @@ -774,7 +774,7 @@ def _connection_checker(func): self._login() continue - LOG.error(_LE('Re-throwing Exception %s'), e) + LOG.error('Re-throwing Exception %s', e) raise return inner_connection_checker diff --git a/cinder/volume/drivers/quobyte.py b/cinder/volume/drivers/quobyte.py index c671709987e..4b76dfdd558 100644 --- a/cinder/volume/drivers/quobyte.py +++ b/cinder/volume/drivers/quobyte.py @@ -25,7 +25,7 @@ from oslo_utils import fileutils from cinder import compute from cinder import exception -from cinder.i18n import _, _LI, _LW +from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder import utils @@ -153,20 +153,20 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriverDistributed): self.configuration.nas_secure_file_permissions = 'true' if self.configuration.nas_secure_file_operations == 'false': - LOG.warning(_LW("The NAS file operations will be run as " - "root, allowing root level access at the storage " - "backend.")) + LOG.warning("The NAS file operations will be run as " + "root, allowing root level access at the storage " + "backend.") self._execute_as_root = True else: - LOG.info(_LI("The NAS file operations will be run as" - " non privileged user in secure mode. Please" - " ensure your libvirtd settings have been configured" - " accordingly (see section 'OpenStack' in the Quobyte" - " Manual.")) + LOG.info("The NAS file operations will be run as" + " non privileged user in secure mode. Please" + " ensure your libvirtd settings have been configured" + " accordingly (see section 'OpenStack' in the Quobyte" + " Manual.") if self.configuration.nas_secure_file_permissions == 'false': - LOG.warning(_LW("The NAS file permissions mode will be 666 " - "(allowing other/world read & write access).")) + LOG.warning("The NAS file permissions mode will be 666 " + "(allowing other/world read & write access).") def _qemu_img_info(self, path, volume_name): return super(QuobyteDriver, self)._qemu_img_info_base( @@ -230,8 +230,8 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriverDistributed): """Deletes a logical volume.""" if not volume.provider_location: - LOG.warning(_LW('Volume %s does not have provider_location ' - 'specified, skipping'), volume.name) + LOG.warning('Volume %s does not have provider_location ' + 'specified, skipping', volume.name) return self._ensure_share_mounted(volume.provider_location) @@ -377,7 +377,7 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriverDistributed): self._ensure_share_mounted(share) self._mounted_shares.append(share) except Exception as exc: - LOG.warning(_LW('Exception during mounting %s'), exc) + LOG.warning('Exception during mounting %s', exc) LOG.debug('Available shares %s', self._mounted_shares) @@ -432,17 +432,17 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriverDistributed): if exc.errno == errno.ENOTCONN: mounted = False try: - LOG.info(_LI('Fixing previous mount %s which was not' - ' unmounted correctly.'), mount_path) + LOG.info('Fixing previous mount %s which was not' + ' unmounted correctly.', mount_path) self._execute('umount.quobyte', mount_path, run_as_root=self._execute_as_root) except processutils.ProcessExecutionError as exc: - LOG.warning(_LW("Failed to unmount previous mount: " - "%s"), exc) + LOG.warning("Failed to unmount previous mount: " + "%s", exc) else: # TODO(quobyte): Extend exc 
analysis in here? - LOG.warning(_LW("Unknown error occurred while checking " - "mount point: %s Trying to continue."), + LOG.warning("Unknown error occurred while checking " + "mount point: %s Trying to continue.", exc) if not mounted: @@ -454,13 +454,13 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriverDistributed): command.extend(['-c', self.configuration.quobyte_client_cfg]) try: - LOG.info(_LI('Mounting volume: %s ...'), quobyte_volume) + LOG.info('Mounting volume: %s ...', quobyte_volume) self._execute(*command, run_as_root=self._execute_as_root) - LOG.info(_LI('Mounting volume: %s succeeded'), quobyte_volume) + LOG.info('Mounting volume: %s succeeded', quobyte_volume) mounted = True except processutils.ProcessExecutionError as exc: if ensure and 'already mounted' in exc.stderr: - LOG.warning(_LW("%s is already mounted"), quobyte_volume) + LOG.warning("%s is already mounted", quobyte_volume) else: raise @@ -479,10 +479,10 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriverDistributed): # client looks healthy if not os.access(mount_path, os.W_OK | os.X_OK): - LOG.warning(_LW("Volume is not writable. " - "Please broaden the file" - " permissions." - " Mount: %s"), + LOG.warning("Volume is not writable. " + "Please broaden the file" + " permissions." + " Mount: %s", mount_path) return # we're happy here else: diff --git a/cinder/volume/drivers/rbd.py b/cinder/volume/drivers/rbd.py index fe0438fbea2..92f6b192bfc 100644 --- a/cinder/volume/drivers/rbd.py +++ b/cinder/volume/drivers/rbd.py @@ -29,7 +29,7 @@ import six from six.moves import urllib from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder.objects import fields @@ -118,7 +118,7 @@ class RBDVolumeProxy(object): snapshot=snapshot, read_only=read_only) except driver.rbd.Error: - LOG.exception(_LE("error opening rbd image %s"), name) + LOG.exception("error opening rbd image %s", name) driver._disconnect_from_rados(client, ioctx) raise self.driver = driver @@ -237,7 +237,7 @@ class RBDDriver(driver.CloneableImageVD, replication_target = {'name': name, 'conf': utils.convert_str(conf), 'user': utils.convert_str(user)} - LOG.info(_LI('Adding replication target: %s.'), name) + LOG.info('Adding replication target: %s.', name) self._replication_targets.append(replication_target) self._target_names.append(name) @@ -392,7 +392,7 @@ class RBDDriver(driver.CloneableImageVD, ret, outbuf, _outs = client.cluster.mon_command( '{"prefix":"df", "format":"json"}', '') if ret != 0: - LOG.warning(_LW('Unable to get rados pool stats.')) + LOG.warning('Unable to get rados pool stats.') else: outbuf = json.loads(outbuf) pool_stats = [pool for pool in outbuf['pools'] if @@ -411,7 +411,7 @@ class RBDDriver(driver.CloneableImageVD, stats['provisioned_capacity_gb'] = total_usage_gb except self.rados.Error: # just log and return unknown capacities - LOG.exception(_LE('error refreshing volume stats')) + LOG.exception('error refreshing volume stats') self._stats = stats def get_volume_stats(self, refresh=False): @@ -740,7 +740,7 @@ class RBDDriver(driver.CloneableImageVD, try: rbd_image = self.rbd.Image(client.ioctx, volume_name) except self.rbd.ImageNotFound: - LOG.info(_LI("volume %s no longer exists in backend"), + LOG.info("volume %s no longer exists in backend", volume_name) return @@ -792,8 +792,8 @@ class RBDDriver(driver.CloneableImageVD, # delete can be retried. 
raise exception.VolumeIsBusy(msg, volume_name=volume_name) except self.rbd.ImageNotFound: - LOG.info(_LI("RBD volume %s not found, allowing delete " - "operation to proceed."), volume_name) + LOG.info("RBD volume %s not found, allowing delete " + "operation to proceed.", volume_name) return # If it is a clone, walk back up the parent chain deleting @@ -827,19 +827,19 @@ class RBDDriver(driver.CloneableImageVD, volume.unprotect_snap(snap_name) except self.rbd.InvalidArgument: LOG.info( - _LI("InvalidArgument: Unable to unprotect snapshot %s."), + "InvalidArgument: Unable to unprotect snapshot %s.", snap_name) except self.rbd.ImageNotFound: LOG.info( - _LI("ImageNotFound: Unable to unprotect snapshot %s."), + "ImageNotFound: Unable to unprotect snapshot %s.", snap_name) except self.rbd.ImageBusy: children_list = self._get_children_info(volume, snap_name) if children_list: for (pool, image) in children_list: - LOG.info(_LI('Image %(pool)s/%(image)s is dependent ' - 'on the snapshot %(snap)s.'), + LOG.info('Image %(pool)s/%(image)s is dependent ' + 'on the snapshot %(snap)s.', {'pool': pool, 'image': image, 'snap': snap_name}) @@ -848,7 +848,7 @@ class RBDDriver(driver.CloneableImageVD, try: volume.remove_snap(snap_name) except self.rbd.ImageNotFound: - LOG.info(_LI("Snapshot %s does not exist in backend."), + LOG.info("Snapshot %s does not exist in backend.", snap_name) def _disable_replication(self, volume): @@ -931,8 +931,8 @@ class RBDDriver(driver.CloneableImageVD, 'updates': {'replication_status': replication_status}} except Exception as e: replication_status = fields.ReplicationStatus.FAILOVER_ERROR - LOG.error(_LE('Failed to failover volume %(volume)s with ' - 'error: %(error)s.'), + LOG.error('Failed to failover volume %(volume)s with ' + 'error: %(error)s.', {'volume': volume.name, 'error': e}) else: replication_status = fields.ReplicationStatus.NOT_CAPABLE @@ -985,7 +985,7 @@ class RBDDriver(driver.CloneableImageVD, def failover_host(self, context, volumes, secondary_id=None): """Failover to replication target.""" - LOG.info(_LI('RBD driver failover started.')) + LOG.info('RBD driver failover started.') if not self._is_replication_enabled: raise exception.UnableToFailOver( reason=_('RBD: Replication is not enabled.')) @@ -1005,7 +1005,7 @@ class RBDDriver(driver.CloneableImageVD, for volume, is_demoted in zip(volumes, demotion_results)] self._active_backend_id = secondary_id self._active_config = remote - LOG.info(_LI('RBD driver failover completed.')) + LOG.info('RBD driver failover completed.') return secondary_id, updates def ensure_export(self, context, volume): @@ -1311,8 +1311,8 @@ class RBDDriver(driver.CloneableImageVD, utils.convert_str(existing_name), utils.convert_str(wanted_name)) except self.rbd.ImageNotFound: - LOG.error(_LE('Unable to rename the logical volume ' - 'for volume %s.'), volume.id) + LOG.error('Unable to rename the logical volume ' + 'for volume %s.', volume.id) # If the rename fails, _name_id should be set to the new # volume id and provider_location should be set to the # one from the new volume as well. 
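
The hunks above and below all apply the same post-change convention: LOG calls receive a plain string with lazy `%`-style interpolation arguments, while `_()` is kept only for text that reaches the user through an exception. The following minimal sketch restates that convention outside the patch; the `_` stand-in, `FlexvisorError`, and `delete_snapshot` are hypothetical illustrations, not code from any of the drivers touched here.

```python
import logging

LOG = logging.getLogger(__name__)


def _(msg):
    # Stand-in for cinder.i18n._ : translation is kept only for
    # user-facing exception text, never for log messages.
    return msg


class FlexvisorError(Exception):
    """Hypothetical stand-in for a backend API exception."""


def delete_snapshot(snapshot_id, ret):
    # Log messages are plain strings; the _LI/_LE/_LW wrappers are gone.
    # Interpolation stays lazy: the dict is passed as a separate argument,
    # so the string is only formatted when the log level is enabled.
    if ret == 0:
        LOG.info('Deleted snapshot %(id)s.', {'id': snapshot_id})
        return
    LOG.error('Failed to delete snapshot %(id)s: %(status)s.',
              {'id': snapshot_id, 'status': ret})
    # Exception text is still wrapped in _() because it is user facing.
    msg = _('Failed to delete snapshot %(id)s: %(status)s.') % {
        'id': snapshot_id, 'status': ret}
    raise FlexvisorError(msg)
```

Calling `delete_snapshot('snap-1', 0)` after `logging.basicConfig(level=logging.INFO)` logs the info message; a non-zero `ret` logs the error and raises with the translated text, matching the split between log and exception strings seen throughout these hunks.
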
diff --git a/cinder/volume/drivers/reduxio/rdx_cli_api.py b/cinder/volume/drivers/reduxio/rdx_cli_api.py index d90f8bae622..2085691963b 100644 --- a/cinder/volume/drivers/reduxio/rdx_cli_api.py +++ b/cinder/volume/drivers/reduxio/rdx_cli_api.py @@ -22,8 +22,8 @@ import paramiko import six from cinder import exception +from cinder.i18n import _ from cinder import utils -from cinder.i18n import _, _LE, _LI CONNECTION_RETRY_NUM = 5 @@ -157,7 +157,7 @@ class ReduxioAPI(object): raise exception.RdxAPIConnectionException(_( "Authentication Error. Check login credentials")) except Exception: - LOG.exception(_LE("Exception in connecting to Reduxio CLI")) + LOG.exception("Exception in connecting to Reduxio CLI") raise exception.RdxAPIConnectionException(_( "Failed to create ssh connection to Reduxio." " Please check network connection or Reduxio hostname/IP.")) @@ -170,7 +170,7 @@ class ReduxioAPI(object): the function throws an error. """ cmd.set_json_output() - LOG.info(_LI("Running cmd: %s"), cmd) + LOG.info("Running cmd: %s", cmd) success = False for x in range(1, CONNECTION_RETRY_NUM): try: @@ -181,9 +181,9 @@ class ReduxioAPI(object): success = True break except Exception: - LOG.exception(_LE("Error in running Reduxio CLI command")) + LOG.exception("Error in running Reduxio CLI command") LOG.error( - _LE("retrying(%(cur)s/%(overall)s)"), + "retrying(%(cur)s/%(overall)s)", {'cur': x, 'overall': CONNECTION_RETRY_NUM} ) self.connected = False @@ -202,7 +202,7 @@ class ReduxioAPI(object): data = json.loads(str_out) if stdout.channel.recv_exit_status() != 0: - LOG.error(_LE("Failed running cli command: %s"), data["msg"]) + LOG.error("Failed running cli command: %s", data["msg"]) raise exception.RdxAPICommandException(data["msg"]) LOG.debug("Command output is: %s", str_out) diff --git a/cinder/volume/drivers/reduxio/rdx_iscsi_driver.py b/cinder/volume/drivers/reduxio/rdx_iscsi_driver.py index 9b3f7979546..9ccb36dfdc1 100644 --- a/cinder/volume/drivers/reduxio/rdx_iscsi_driver.py +++ b/cinder/volume/drivers/reduxio/rdx_iscsi_driver.py @@ -21,9 +21,9 @@ from oslo_utils import units import six from cinder import exception -from cinder import utils as cinder_utils -from cinder.i18n import _, _LI, _LW +from cinder.i18n import _ import cinder.interface as cinder_interface +from cinder import utils as cinder_utils from cinder.volume.drivers.reduxio import rdx_cli_api from cinder.volume.drivers.san import san @@ -60,7 +60,7 @@ class ReduxioISCSIDriver(san.SanISCSIDriver): def __init__(self, *args, **kwargs): """Initialize Reduxio ISCSI Driver.""" - LOG.info(_LI("Initializing Reduxio ISCSI Driver")) + LOG.info("Initializing Reduxio ISCSI Driver") super(ReduxioISCSIDriver, self).__init__(*args, **kwargs) self.rdxApi = None # type: rdx_cli_api.ReduxioAPI self._stats = {} @@ -124,8 +124,8 @@ class ReduxioISCSIDriver(san.SanISCSIDriver): @cinder_utils.trace def create_volume(self, volume): """Create a new volume.""" - LOG.info(_LI( - "Creating a new volume(%(name)s) with size(%(size)s)"), + LOG.info( + "Creating a new volume(%(name)s) with size(%(size)s)", {'name': volume["name"], 'size': volume["size"]}) vol_name = self._cinder_id_to_rdx(volume["id"]) self.rdxApi.create_volume( @@ -137,8 +137,8 @@ class ReduxioISCSIDriver(san.SanISCSIDriver): @cinder_utils.trace def manage_existing(self, volume, external_ref): """Create a new Cinder volume out of an existing Reduxio volume.""" - LOG.info(_LI("Manage existing volume(%(cinder_vol)s) " - "from Reduxio Volume(%(rdx_vol)s)"), + LOG.info("Manage existing 
volume(%(cinder_vol)s) " + "from Reduxio Volume(%(rdx_vol)s)", {'cinder_vol': volume["id"], 'rdx_vol': external_ref["source-name"]}) # Get the volume name from the external reference @@ -178,7 +178,7 @@ class ReduxioISCSIDriver(san.SanISCSIDriver): @cinder_utils.trace def unmanage(self, volume): """Remove the specified volume from Cinder management.""" - LOG.info(_LI("Unmanaging volume(%s)"), volume["id"]) + LOG.info("Unmanaging volume(%s)", volume["id"]) vol_name = self._cinder_id_to_rdx(volume['id']) cli_vol = self.rdxApi.find_volume_by_name(vol_name) managed_info = self._get_managed_info(cli_vol) @@ -193,7 +193,7 @@ class ReduxioISCSIDriver(san.SanISCSIDriver): @cinder_utils.trace def delete_volume(self, volume): """Delete the specified volume.""" - LOG.info(_LI("Deleting volume(%s)"), volume["id"]) + LOG.info("Deleting volume(%s)", volume["id"]) try: self.rdxApi.delete_volume( name=self._cinder_id_to_rdx(volume["id"])) @@ -207,9 +207,9 @@ class ReduxioISCSIDriver(san.SanISCSIDriver): Extend the volume if the size of the volume is more than the snapshot. """ - LOG.info(_LI( + LOG.info( "cloning new volume(%(new_vol)s) from snapshot(%(snapshot)s)," - " src volume(%(src_vol)s)"), + " src volume(%(src_vol)s)", {'new_vol': volume["name"], 'snapshot': snapshot["name"], 'src_vol': snapshot["volume_name"]} @@ -249,13 +249,13 @@ class ReduxioISCSIDriver(san.SanISCSIDriver): :param src_vref: The source volume to clone from :return: None """ - LOG.info(_LI("cloning new volume(%(clone)s) from src(%(src)s)"), + LOG.info("cloning new volume(%(clone)s) from src(%(src)s)", {'clone': volume['name'], 'src': src_vref['name']}) parent_name = self._cinder_id_to_rdx(src_vref["id"]) clone_name = self._cinder_id_to_rdx(volume["id"]) description = self._create_vol_managed_description(volume) if BACKDATE_META_FIELD in volume[METADATA_KEY]: - LOG.info(_LI("Cloning from backdate %s"), + LOG.info("Cloning from backdate %s", volume[METADATA_KEY][BACKDATE_META_FIELD]) self.rdxApi.clone_volume( @@ -265,7 +265,7 @@ class ReduxioISCSIDriver(san.SanISCSIDriver): str_date=volume[METADATA_KEY][BACKDATE_META_FIELD] ) else: - LOG.info(_LI("Cloning from now")) + LOG.info("Cloning from now") self.rdxApi.clone_volume( parent_name=parent_name, clone_name=clone_name, @@ -296,8 +296,8 @@ class ReduxioISCSIDriver(san.SanISCSIDriver): from the volume's current state. 
:return: None """ - LOG.info(_LI( - "Creating snapshot(%(snap)s) from volume(%(vol)s)"), + LOG.info( + "Creating snapshot(%(snap)s) from volume(%(vol)s)", {'snap': snapshot['name'], 'vol': snapshot['volume_name']}) cli_vol_name = self._cinder_id_to_rdx(snapshot['volume_id']) cli_bookmark_name = self._cinder_id_to_rdx(snapshot['id']) @@ -317,7 +317,7 @@ class ReduxioISCSIDriver(san.SanISCSIDriver): @cinder_utils.trace def delete_snapshot(self, snapshot): """Delete a snapshot.""" - LOG.info(_LI("Deleting snapshot(%(snap)s) from volume(%(vol)s)"), + LOG.info("Deleting snapshot(%(snap)s) from volume(%(vol)s)", {'snap': snapshot['name'], 'vol': snapshot['volume_name']}) volume_name = self._cinder_id_to_rdx(snapshot['volume_id']) @@ -392,8 +392,8 @@ class ReduxioISCSIDriver(san.SanISCSIDriver): @cinder_utils.trace def initialize_connection(self, volume, connector): """Driver entry point to attach a volume to an instance.""" - LOG.info(_LI( - "Assigning volume(%(vol)s) with initiator(%(initiator)s)"), + LOG.info( + "Assigning volume(%(vol)s) with initiator(%(initiator)s)", {'vol': volume['name'], 'initiator': connector['initiator']}) initiator_iqn = connector['initiator'] @@ -405,12 +405,12 @@ class ReduxioISCSIDriver(san.SanISCSIDriver): # if it doesnt exist for host in self.rdxApi.list_hosts(): if host["iscsi_name"] == initiator_iqn: - LOG.info(_LI("initiator exists in Reduxio")) + LOG.info("initiator exists in Reduxio") found = True initiator_name = host["name"] break if not found: - LOG.info(_LI("Initiator doesn't exist in Reduxio, Creating it")) + LOG.info("Initiator doesn't exist in Reduxio, Creating it") initiator_name = self._generate_initiator_name() self.rdxApi.create_host(name=initiator_name, iscsi_name=initiator_iqn) @@ -420,7 +420,7 @@ class ReduxioISCSIDriver(san.SanISCSIDriver): if existing_assignment is None: # Create assignment between the host and the volume - LOG.info(_LI("Creating assignment")) + LOG.info("Creating assignment") self.rdxApi.assign(vol_rdx_name, host_name=initiator_name) else: LOG.debug("Assignment already exists") @@ -470,7 +470,7 @@ class ReduxioISCSIDriver(san.SanISCSIDriver): # Lun num is the same for each path properties['data']['target_luns'] = [target_lun] * 4 - LOG.info(_LI("Assignment complete. Assignment details: %s"), + LOG.info("Assignment complete. 
Assignment details: %s", properties) return properties @@ -479,8 +479,8 @@ class ReduxioISCSIDriver(san.SanISCSIDriver): def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to unattach a volume from an instance.""" iqn = connector['initiator'] - LOG.info(_LI("Deleting assignment volume(%(vol)s) with " - "initiator(%(initiator)s)"), + LOG.info("Deleting assignment volume(%(vol)s) with " + "initiator(%(initiator)s)", {'vol': volume['name'], 'initiator': iqn}) for cli_host in self.rdxApi.list_hosts(): @@ -498,5 +498,5 @@ class ReduxioISCSIDriver(san.SanISCSIDriver): LOG.debug("Assignment doesn't exist") return - LOG.warning(_LW("Did not find matching reduxio host for initiator %s"), + LOG.warning("Did not find matching reduxio host for initiator %s", iqn) diff --git a/cinder/volume/drivers/remotefs.py b/cinder/volume/drivers/remotefs.py index 2735c7252cf..f425a88b67a 100644 --- a/cinder/volume/drivers/remotefs.py +++ b/cinder/volume/drivers/remotefs.py @@ -32,10 +32,10 @@ from cinder import compute from cinder import coordination from cinder import db from cinder import exception +from cinder.i18n import _ +from cinder.image import image_utils from cinder.objects import fields from cinder import utils -from cinder.i18n import _, _LE, _LI, _LW -from cinder.image import image_utils from cinder.volume import driver LOG = logging.getLogger(__name__) @@ -239,7 +239,7 @@ class RemoteFSDriver(driver.BaseVD): volume.provider_location = self._find_share(volume.size) - LOG.info(_LI('casted to %s'), volume.provider_location) + LOG.info('casted to %s', volume.provider_location) self._do_create_volume(volume) @@ -279,7 +279,7 @@ class RemoteFSDriver(driver.BaseVD): self._ensure_share_mounted(share) mounted_shares.append(share) except Exception as exc: - LOG.error(_LE('Exception during mounting %s'), exc) + LOG.error('Exception during mounting %s', exc) self._mounted_shares = mounted_shares @@ -295,9 +295,9 @@ class RemoteFSDriver(driver.BaseVD): LOG.debug('Deleting volume %(vol)s, provider_location: %(loc)s', {'vol': volume.id, 'loc': volume.provider_location}) if not volume.provider_location: - LOG.warning(_LW('Volume %s does not have ' - 'provider_location specified, ' - 'skipping'), volume.name) + LOG.warning('Volume %s does not have ' + 'provider_location specified, ' + 'skipping', volume.name) return self._ensure_share_mounted(volume.provider_location) @@ -380,8 +380,8 @@ class RemoteFSDriver(driver.BaseVD): {'path': path, 'permissions': permissions}) else: permissions = 'ugo+rw' - LOG.warning(_LW('%(path)s is being set with open permissions: ' - '%(perm)s'), {'path': path, 'perm': permissions}) + LOG.warning('%(path)s is being set with open permissions: ' + '%(perm)s', {'path': path, 'perm': permissions}) self._execute('chmod', permissions, path, run_as_root=self._execute_as_root) @@ -495,8 +495,8 @@ class RemoteFSDriver(driver.BaseVD): share_opts = share_info[1].strip() if not re.match(self.SHARE_FORMAT_REGEX, share_address): - LOG.error(_LE("Share %s ignored due to invalid format. " - "Must be of form address:/export."), + LOG.error("Share %s ignored due to invalid format. " + "Must be of form address:/export.", share_address) continue @@ -572,20 +572,20 @@ class RemoteFSDriver(driver.BaseVD): NAS file operations. This base method will set the NAS security options to false. 
""" - doc_html = "http://docs.openstack.org/admin-guide" \ - "/blockstorage_nfs_backend.html" + doc_html = ("http://docs.openstack.org/admin-guide" + "/blockstorage_nfs_backend.html") self.configuration.nas_secure_file_operations = 'false' - LOG.warning(_LW("The NAS file operations will be run as root: " - "allowing root level access at the storage backend. " - "This is considered an insecure NAS environment. " - "Please see %s for information on a secure NAS " - "configuration."), + LOG.warning("The NAS file operations will be run as root: " + "allowing root level access at the storage backend. " + "This is considered an insecure NAS environment. " + "Please see %s for information on a secure NAS " + "configuration.", doc_html) self.configuration.nas_secure_file_permissions = 'false' - LOG.warning(_LW("The NAS file permissions mode will be 666 (allowing " - "other/world read & write access). This is considered " - "an insecure NAS environment. Please see %s for " - "information on a secure NFS configuration."), + LOG.warning("The NAS file permissions mode will be 666 (allowing " + "other/world read & write access). This is considered " + "an insecure NAS environment. Please see %s for " + "information on a secure NFS configuration.", doc_html) def _determine_nas_security_option_setting(self, nas_option, mount_point, @@ -609,8 +609,8 @@ class RemoteFSDriver(driver.BaseVD): file_path = os.path.join(mount_point, file_name) if os.path.isfile(file_path): nas_option = 'true' - LOG.info(_LI('Cinder secure environment ' - 'indicator file exists.')) + LOG.info('Cinder secure environment ' + 'indicator file exists.') else: # The indicator file does not exist. If it is a new # installation, set to 'true' and create the indicator file. @@ -626,11 +626,11 @@ class RemoteFSDriver(driver.BaseVD): # protect from accidental removal (owner write only). self._execute('chmod', '640', file_path, run_as_root=self._execute_as_root) - LOG.info(_LI('New Cinder secure environment indicator' - ' file created at path %s.'), file_path) + LOG.info('New Cinder secure environment indicator' + ' file created at path %s.', file_path) except IOError as err: - LOG.error(_LE('Failed to created Cinder secure ' - 'environment indicator file: %s'), + LOG.error('Failed to created Cinder secure ' + 'environment indicator file: %s', err) else: # For existing installs, we default to 'false'. The @@ -928,7 +928,7 @@ class RemoteFSSnapDriverBase(RemoteFSDriver): return snap_info['active'] def _create_cloned_volume(self, volume, src_vref): - LOG.info(_LI('Cloning volume %(src)s to volume %(dst)s'), + LOG.info('Cloning volume %(src)s to volume %(dst)s', {'src': src_vref.id, 'dst': volume.id}) @@ -986,7 +986,7 @@ class RemoteFSSnapDriverBase(RemoteFSDriver): if (snapshot_file == active_file): return - LOG.info(_LI('Deleting stale snapshot: %s'), snapshot.id) + LOG.info('Deleting stale snapshot: %s', snapshot.id) self._delete(snapshot_path) del(snap_info[snapshot.id]) self._write_info_file(info_path, snap_info) @@ -1032,8 +1032,8 @@ class RemoteFSSnapDriverBase(RemoteFSDriver): # exist, do not attempt to delete. # (This happens, for example, if snapshot_create failed due to lack # of permission to write to the share.) 
- LOG.info(_LI('Snapshot record for %s is not present, allowing ' - 'snapshot_delete to proceed.'), snapshot.id) + LOG.info('Snapshot record for %s is not present, allowing ' + 'snapshot_delete to proceed.', snapshot.id) return snapshot_file = snap_info[snapshot.id] @@ -1050,8 +1050,8 @@ class RemoteFSSnapDriverBase(RemoteFSDriver): if base_file is None: # There should always be at least the original volume # file as base. - LOG.warning(_LW('No backing file found for %s, allowing ' - 'snapshot to be deleted.'), snapshot_path) + LOG.warning('No backing file found for %s, allowing ' + 'snapshot to be deleted.', snapshot_path) # Snapshot may be stale, so just delete it and update the # info file instead of blocking @@ -1370,7 +1370,7 @@ class RemoteFSSnapDriverBase(RemoteFSDriver): connection_info) LOG.debug('nova call result: %s', result) except Exception: - LOG.exception(_LE('Call to Nova to create snapshot failed')) + LOG.exception('Call to Nova to create snapshot failed') raise # Loop and wait for result @@ -1463,7 +1463,7 @@ class RemoteFSSnapDriverBase(RemoteFSDriver): snapshot.id, delete_info) except Exception: - LOG.exception(_LE('Call to Nova delete snapshot failed')) + LOG.exception('Call to Nova delete snapshot failed') raise # Loop and wait for result diff --git a/cinder/volume/drivers/san/san.py b/cinder/volume/drivers/san/san.py index e6946fdd6aa..b297ed82b5a 100644 --- a/cinder/volume/drivers/san/san.py +++ b/cinder/volume/drivers/san/san.py @@ -28,7 +28,7 @@ from oslo_log import log as logging from oslo_utils import excutils from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _ from cinder import ssh_utils from cinder import utils from cinder.volume import driver @@ -147,7 +147,7 @@ class SanDriver(driver.BaseVD): except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error running SSH command: %s"), command) + LOG.error("Error running SSH command: %s", command) def ensure_export(self, context, volume): """Synchronously recreates an export for a logical volume.""" diff --git a/cinder/volume/drivers/sheepdog.py b/cinder/volume/drivers/sheepdog.py index 3b436b01f0b..2998380308f 100644 --- a/cinder/volume/drivers/sheepdog.py +++ b/cinder/volume/drivers/sheepdog.py @@ -32,7 +32,7 @@ from oslo_utils import excutils from oslo_utils import units from cinder import exception -from cinder.i18n import _, _LE, _LW +from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder import utils @@ -117,10 +117,9 @@ class SheepdogClient(object): except OSError as e: with excutils.save_and_reraise_exception(): if e.errno == errno.ENOENT: - msg = _LE('Sheepdog is not installed. ' - 'OSError: command is %s.') + msg = 'Sheepdog is not installed. OSError: command is %s.' else: - msg = _LE('OSError: command is %s.') + msg = 'OSError: command is %s.' LOG.error(msg, cmd) except processutils.ProcessExecutionError as e: _stderr = e.stderr @@ -154,10 +153,10 @@ class SheepdogClient(object): except OSError as e: with excutils.save_and_reraise_exception(): if e.errno == errno.ENOENT: - msg = _LE('Qemu-img is not installed. ' - 'OSError: command is %(cmd)s.') + msg = ('Qemu-img is not installed. OSError: command is ' + '%(cmd)s.') else: - msg = _LE('OSError: command is %(cmd)s.') + msg = 'OSError: command is %(cmd)s.' 
LOG.error(msg, {'cmd': tuple(cmd)}) except processutils.ProcessExecutionError as e: _stderr = e.stderr @@ -178,8 +177,8 @@ class SheepdogClient(object): except exception.SheepdogCmdError as e: cmd = e.kwargs['cmd'] with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to check cluster status.' - '(command: %s)'), cmd) + LOG.error('Failed to check cluster status.' + '(command: %s)', cmd) if _stdout.startswith(self.DOG_RESP_CLUSTER_RUNNING): LOG.debug('Sheepdog cluster is running.') @@ -202,19 +201,19 @@ class SheepdogClient(object): with excutils.save_and_reraise_exception(): if _stderr.rstrip('\\n').endswith( self.DOG_RESP_VDI_ALREADY_EXISTS): - LOG.error(_LE('Volume already exists. %s'), vdiname) + LOG.error('Volume already exists. %s', vdiname) else: - LOG.error(_LE('Failed to create volume. %s'), vdiname) + LOG.error('Failed to create volume. %s', vdiname) def delete(self, vdiname): try: (_stdout, _stderr) = self._run_dog('vdi', 'delete', vdiname) if _stderr.rstrip().endswith(self.DOG_RESP_VDI_NOT_FOUND): - LOG.warning(_LW('Volume not found. %s'), vdiname) + LOG.warning('Volume not found. %s', vdiname) except exception.SheepdogCmdError as e: _stderr = e.kwargs['stderr'] with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to delete volume. %s'), vdiname) + LOG.error('Failed to delete volume. %s', vdiname) def create_snapshot(self, vdiname, snapname): try: @@ -225,15 +224,15 @@ class SheepdogClient(object): with excutils.save_and_reraise_exception(): if _stderr.rstrip('\\n').endswith( self.DOG_RESP_SNAPSHOT_VDI_NOT_FOUND): - LOG.error(_LE('Volume "%s" not found. Please check the ' - 'results of "dog vdi list".'), + LOG.error('Volume "%s" not found. Please check the ' + 'results of "dog vdi list".', vdiname) elif _stderr.rstrip('\\n').endswith( self.DOG_RESP_SNAPSHOT_EXISTED % {'snapname': snapname}): - LOG.error(_LE('Snapshot "%s" already exists.'), snapname) + LOG.error('Snapshot "%s" already exists.', snapname) else: - LOG.error(_LE('Failed to create snapshot. (command: %s)'), + LOG.error('Failed to create snapshot. (command: %s)', cmd) def delete_snapshot(self, vdiname, snapname): @@ -241,14 +240,14 @@ class SheepdogClient(object): (_stdout, _stderr) = self._run_dog('vdi', 'delete', '-s', snapname, vdiname) if _stderr.rstrip().endswith(self.DOG_RESP_SNAPSHOT_NOT_FOUND): - LOG.warning(_LW('Snapshot "%s" not found.'), snapname) + LOG.warning('Snapshot "%s" not found.', snapname) elif _stderr.rstrip().endswith(self.DOG_RESP_VDI_NOT_FOUND): - LOG.warning(_LW('Volume "%s" not found.'), vdiname) + LOG.warning('Volume "%s" not found.', vdiname) except exception.SheepdogCmdError as e: cmd = e.kwargs['cmd'] _stderr = e.kwargs['stderr'] with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to delete snapshot. (command: %s)'), + LOG.error('Failed to delete snapshot. (command: %s)', cmd) def clone(self, src_vdiname, src_snapname, dst_vdiname, size): @@ -263,21 +262,21 @@ class SheepdogClient(object): _stderr = e.kwargs['stderr'] with excutils.save_and_reraise_exception(): if self.QEMU_IMG_RESP_ALREADY_EXISTS in _stderr: - LOG.error(_LE('Clone volume "%s" already exists. ' - 'Please check the results of "dog vdi list".'), + LOG.error('Clone volume "%s" already exists. ' + 'Please check the results of "dog vdi list".', dst_vdiname) elif self.QEMU_IMG_RESP_VDI_NOT_FOUND in _stderr: - LOG.error(_LE('Src Volume "%s" not found. ' - 'Please check the results of "dog vdi list".'), + LOG.error('Src Volume "%s" not found. 
' + 'Please check the results of "dog vdi list".', src_vdiname) elif self.QEMU_IMG_RESP_SNAPSHOT_NOT_FOUND in _stderr: - LOG.error(_LE('Snapshot "%s" not found. ' - 'Please check the results of "dog vdi list".'), + LOG.error('Snapshot "%s" not found. ' + 'Please check the results of "dog vdi list".', src_snapname) elif self.QEMU_IMG_RESP_SIZE_TOO_LARGE in _stderr: - LOG.error(_LE('Volume size "%sG" is too large.'), size) + LOG.error('Volume size "%sG" is too large.', size) else: - LOG.error(_LE('Failed to clone volume.(command: %s)'), cmd) + LOG.error('Failed to clone volume.(command: %s)', cmd) def resize(self, vdiname, size): size = int(size) * units.Gi @@ -288,21 +287,21 @@ class SheepdogClient(object): with excutils.save_and_reraise_exception(): if _stderr.rstrip('\\n').endswith( self.DOG_RESP_VDI_NOT_FOUND): - LOG.error(_LE('Failed to resize vdi. vdi not found. %s'), + LOG.error('Failed to resize vdi. vdi not found. %s', vdiname) elif _stderr.startswith(self.DOG_RESP_VDI_SHRINK_NOT_SUPPORT): - LOG.error(_LE('Failed to resize vdi. ' - 'Shrinking vdi not supported. ' - 'vdi: %(vdiname)s new size: %(size)s'), + LOG.error('Failed to resize vdi. ' + 'Shrinking vdi not supported. ' + 'vdi: %(vdiname)s new size: %(size)s', {'vdiname': vdiname, 'size': size}) elif _stderr.startswith(self.DOG_RESP_VDI_SIZE_TOO_LARGE): - LOG.error(_LE('Failed to resize vdi. ' - 'Too large volume size. ' - 'vdi: %(vdiname)s new size: %(size)s'), + LOG.error('Failed to resize vdi. ' + 'Too large volume size. ' + 'vdi: %(vdiname)s new size: %(size)s', {'vdiname': vdiname, 'size': size}) else: - LOG.error(_LE('Failed to resize vdi. ' - 'vdi: %(vdiname)s new size: %(size)s'), + LOG.error('Failed to resize vdi. ' + 'vdi: %(vdiname)s new size: %(size)s', {'vdiname': vdiname, 'size': size}) def get_volume_stats(self): @@ -310,7 +309,7 @@ class SheepdogClient(object): (_stdout, _stderr) = self._run_dog('node', 'info', '-r') except exception.SheepdogCmdError as e: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to get volume status. %s'), e) + LOG.error('Failed to get volume status. %s', e) return _stdout def get_vdi_info(self, vdiname): @@ -319,7 +318,7 @@ class SheepdogClient(object): (_stdout, _stderr) = self._run_dog('vdi', 'list', vdiname, '-r') except exception.SheepdogCmdError as e: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to get vdi info. %s'), e) + LOG.error('Failed to get vdi info. %s', e) return _stdout def update_node_list(self): @@ -327,7 +326,7 @@ class SheepdogClient(object): (_stdout, _stderr) = self._run_dog('node', 'list', '-r') except exception.SheepdogCmdError as e: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to get node list. %s'), e) + LOG.error('Failed to get node list. 
%s', e) node_list = [] stdout = _stdout.strip('\n') for line in stdout.split('\n'): @@ -520,7 +519,7 @@ class SheepdogDriver(driver.VolumeDriver): volume.name, volume.size) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Failed to create cloned volume %s.'), + LOG.error('Failed to create cloned volume %s.', volume.name) finally: # Delete temp Snapshot diff --git a/cinder/volume/drivers/solidfire.py b/cinder/volume/drivers/solidfire.py index ee888118639..8e5484e00eb 100644 --- a/cinder/volume/drivers/solidfire.py +++ b/cinder/volume/drivers/solidfire.py @@ -34,7 +34,7 @@ import six from cinder import context from cinder import exception -from cinder.i18n import _, _LE, _LW +from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder.objects import fields @@ -216,8 +216,8 @@ class SolidFireDriver(san.SanISCSIDriver): if remote_info: self._set_active_cluster_info(remote_info['endpoint']) else: - LOG.error(_LE('Failed to initialize SolidFire driver to ' - 'a remote cluster specified at id: %s'), + LOG.error('Failed to initialize SolidFire driver to ' + 'a remote cluster specified at id: %s', self.failed_over_id) else: self._set_active_cluster_info() @@ -303,7 +303,7 @@ class SolidFireDriver(san.SanISCSIDriver): LOG.debug('Pairing already exists during init.') else: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Cluster pairing failed: %s'), ex.msg) + LOG.error('Cluster pairing failed: %s', ex.msg) LOG.debug(('Initialized Cluster pair with ID: %s'), pair_id) remote_device['clusterPairID'] = pair_id return pair_id @@ -591,8 +591,8 @@ class SolidFireDriver(san.SanISCSIDriver): iteration_count += 1 if not found_volume: - LOG.error(_LE('Failed to retrieve volume SolidFire-' - 'ID: %s in get_by_account!'), sf_volume_id) + LOG.error('Failed to retrieve volume SolidFire-' + 'ID: %s in get_by_account!', sf_volume_id) raise exception.VolumeNotFound(volume_id=sf_volume_id) model_update = {} @@ -726,8 +726,8 @@ class SolidFireDriver(san.SanISCSIDriver): if i.key == 'sf-qos' and i.value in valid_presets] if len(presets) > 0: if len(presets) > 1: - LOG.warning(_LW('More than one valid preset was ' - 'detected, using %s'), presets[0]) + LOG.warning('More than one valid preset was ' + 'detected, using %s', presets[0]) qos = self.sf_qos_dict[presets[0]] else: # look for explicit settings @@ -827,10 +827,10 @@ class SolidFireDriver(san.SanISCSIDriver): # NOTE(jdg): Previously we would raise here, but there are cases # where this might be a cleanup for a failed delete. 
# Until we get better states we'll just log an error - LOG.error(_LE("Volume %s, not found on SF Cluster."), uuid) + LOG.error("Volume %s, not found on SF Cluster.", uuid) if found_count > 1: - LOG.error(_LE("Found %(count)s volumes mapped to id: %(uuid)s."), + LOG.error("Found %(count)s volumes mapped to id: %(uuid)s.", {'count': found_count, 'uuid': uuid}) raise exception.DuplicateSfVolumeNames(vol_name=uuid) @@ -913,8 +913,8 @@ class SolidFireDriver(san.SanISCSIDriver): file_format}) except Exception as exc: vol = self._get_sf_volume(image_id) - LOG.error(_LE('Failed image conversion during ' - 'cache creation: %s'), + LOG.error('Failed image conversion during ' + 'cache creation: %s', exc) LOG.debug('Removing SolidFire Cache Volume (SF ID): %s', vol['volumeID']) @@ -1204,8 +1204,8 @@ class SolidFireDriver(san.SanISCSIDriver): if image_meta['owner'] == volume['project_id']: public = True if not public: - LOG.warning(_LW("Requested image is not " - "accessible by current Tenant.")) + LOG.warning("Requested image is not " + "accessible by current Tenant.") return None, False try: @@ -1398,11 +1398,11 @@ class SolidFireDriver(san.SanISCSIDriver): sf_vol = None accounts = self._get_sfaccounts_for_tenant(volume['project_id']) if accounts is None: - LOG.error(_LE("Account for Volume ID %s was not found on " - "the SolidFire Cluster while attempting " - "delete_volume operation!"), volume['id']) - LOG.error(_LE("This usually means the volume was never " - "successfully created.")) + LOG.error("Account for Volume ID %s was not found on " + "the SolidFire Cluster while attempting " + "delete_volume operation!", volume['id']) + LOG.error("This usually means the volume was never " + "successfully created.") return for acc in accounts: @@ -1435,9 +1435,9 @@ class SolidFireDriver(san.SanISCSIDriver): if volume.get('multiattach'): self._remove_volume_from_vags(sf_vol['volumeID']) else: - LOG.error(_LE("Volume ID %s was not found on " - "the SolidFire Cluster while attempting " - "delete_volume operation!"), volume['id']) + LOG.error("Volume ID %s was not found on " + "the SolidFire Cluster while attempting " + "delete_volume operation!", volume['id']) def delete_snapshot(self, snapshot): """Delete the specified snapshot from the SolidFire cluster.""" @@ -1465,9 +1465,9 @@ class SolidFireDriver(san.SanISCSIDriver): def create_snapshot(self, snapshot): sfaccount = self._get_sfaccount(snapshot['project_id']) if sfaccount is None: - LOG.error(_LE("Account for Volume ID %s was not found on " - "the SolidFire Cluster while attempting " - "create_snapshot operation!"), snapshot['volume_id']) + LOG.error("Account for Volume ID %s was not found on " + "the SolidFire Cluster while attempting " + "create_snapshot operation!", snapshot['volume_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(snapshot['volume_id'], params) @@ -1708,9 +1708,9 @@ class SolidFireDriver(san.SanISCSIDriver): sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: - LOG.error(_LE("Volume ID %s was not found on " - "the SolidFire Cluster while attempting " - "extend_volume operation!"), volume['id']) + LOG.error("Volume ID %s was not found on " + "the SolidFire Cluster while attempting " + "extend_volume operation!", volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) params = { @@ -1780,9 +1780,9 @@ class SolidFireDriver(san.SanISCSIDriver): sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: - LOG.error(_LE("Volume ID %s was not found on " - "the 
SolidFire Cluster while attempting " - "attach_volume operation!"), volume['id']) + LOG.error("Volume ID %s was not found on " + "the SolidFire Cluster while attempting " + "attach_volume operation!", volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) attributes = sf_vol['attributes'] @@ -1806,9 +1806,9 @@ class SolidFireDriver(san.SanISCSIDriver): sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: - LOG.error(_LE("Volume ID %s was not found on " - "the SolidFire Cluster while attempting " - "detach_volume operation!"), volume['id']) + LOG.error("Volume ID %s was not found on " + "the SolidFire Cluster while attempting " + "detach_volume operation!", volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) attributes = sf_vol['attributes'] @@ -1828,9 +1828,9 @@ class SolidFireDriver(san.SanISCSIDriver): params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: - LOG.error(_LE("Volume ID %s was not found on " - "the SolidFire Cluster while attempting " - "accept_transfer operation!"), volume['id']) + LOG.error("Volume ID %s was not found on " + "the SolidFire Cluster while attempting " + "accept_transfer operation!", volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) if new_project != volume['project_id']: # do a create_sfaccount here as this tenant @@ -1947,9 +1947,9 @@ class SolidFireDriver(san.SanISCSIDriver): """Mark SolidFire Volume as unmanaged (export from Cinder).""" sfaccount = self._get_sfaccount(volume['project_id']) if sfaccount is None: - LOG.error(_LE("Account for Volume ID %s was not found on " - "the SolidFire Cluster while attempting " - "unmanage operation!"), volume['id']) + LOG.error("Account for Volume ID %s was not found on " + "the SolidFire Cluster while attempting " + "unmanage operation!", volume['id']) raise exception.SolidFireAPIException(_("Failed to find account " "for volume.")) @@ -1990,18 +1990,18 @@ class SolidFireDriver(san.SanISCSIDriver): remote = rc break if not remote: - LOG.error(_LE("SolidFire driver received failover_host " - "but was unable to find specified replication " - "pair with id: %s."), secondary_id) + LOG.error("SolidFire driver received failover_host " + "but was unable to find specified replication " + "pair with id: %s.", secondary_id) raise exception.InvalidReplicationTarget else: remote = self.cluster_pairs[0] if not remote or not self.replication_enabled: - LOG.error(_LE("SolidFire driver received failover_host " - "request, however replication is NOT " - "enabled, or there are no available " - "targets to fail-over to.")) + LOG.error("SolidFire driver received failover_host " + "request, however replication is NOT " + "enabled, or there are no available " + "targets to fail-over to.") raise exception.UnableToFailOver(reason=_("Failover requested " "on non replicated " "backend.")) diff --git a/cinder/volume/drivers/synology/synology_common.py b/cinder/volume/drivers/synology/synology_common.py index 55746c633be..1a591a519dd 100644 --- a/cinder/volume/drivers/synology/synology_common.py +++ b/cinder/volume/drivers/synology/synology_common.py @@ -35,10 +35,10 @@ from six.moves import urllib from six import string_types from cinder import exception -from cinder import utils -from cinder.i18n import _, _LE, _LW +from cinder.i18n import _ from cinder.objects import snapshot from cinder.objects import volume +from cinder import utils from cinder.volume import utils as volutils @@ -278,7 +278,7 @@ def 
_connection_checker(func): self.new_session() continue else: - LOG.error(_LE('Try to renew session: [%s]'), e) + LOG.error('Try to renew session: [%s]', e) raise return inner_connection_checker @@ -420,7 +420,7 @@ class SynoCommon(object): except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to _get_node_uuid.')) + LOG.exception('Failed to _get_node_uuid.') if (not self.check_value_valid(out, ['data', 'nodes'], list) or 0 >= len(out['data']['nodes']) @@ -447,7 +447,7 @@ class SynoCommon(object): except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to _get_pool_status.')) + LOG.exception('Failed to _get_pool_status.') if not self.check_value_valid(out, ['data', 'volume'], object): raise exception.MalformedResponse(cmd='_get_pool_info', @@ -485,7 +485,7 @@ class SynoCommon(object): self.check_response(out) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to _get_pool_lun_provisioned_size.')) + LOG.exception('Failed to _get_pool_lun_provisioned_size.') if not self.check_value_valid(out, ['data', 'luns'], list): raise exception.MalformedResponse( @@ -516,7 +516,7 @@ class SynoCommon(object): self.check_response(out, uuid=lun_name) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to _get_lun_info. [%s]'), lun_name) + LOG.exception('Failed to _get_lun_info. [%s]', lun_name) if not self.check_value_valid(out, ['data', 'lun'], object): raise exception.MalformedResponse(cmd='_get_lun_info', @@ -533,7 +533,7 @@ class SynoCommon(object): lun_info = self._get_lun_info(lun_name) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to _get_lun_uuid. [%s]'), lun_name) + LOG.exception('Failed to _get_lun_uuid. [%s]', lun_name) if not self.check_value_valid(lun_info, ['uuid'], string_types): raise exception.MalformedResponse(cmd='_get_lun_uuid', @@ -551,8 +551,7 @@ class SynoCommon(object): ['status', 'is_action_locked']) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to _get_lun_status. [%s]'), - lun_name) + LOG.exception('Failed to _get_lun_status. [%s]', lun_name) if not self.check_value_valid(lun_info, ['status'], string_types): raise exception.MalformedResponse(cmd='_get_lun_status', @@ -582,7 +581,7 @@ class SynoCommon(object): self.check_response(out, snapshot_id=snapshot_uuid) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to _get_snapshot_info. [%s]'), + LOG.exception('Failed to _get_snapshot_info. [%s]', snapshot_uuid) if not self.check_value_valid(out, ['data', 'snapshot'], object): @@ -603,7 +602,7 @@ class SynoCommon(object): 'is_action_locked']) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to _get_snapshot_info. [%s]'), + LOG.exception('Failed to _get_snapshot_info. [%s]', snapshot_uuid) if not self.check_value_valid(snapshot_info, ['status'], string_types): @@ -673,7 +672,7 @@ class SynoCommon(object): except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to _target_create. [%s]'), + LOG.exception('Failed to _target_create. [%s]', identifier) if not self.check_value_valid(out, ['data', 'target_id']): @@ -699,7 +698,7 @@ class SynoCommon(object): except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to _target_delete. [%d]'), trg_id) + LOG.exception('Failed to _target_delete. 
[%d]', trg_id) # is_map True for map, False for ummap def _lun_map_unmap_target(self, volume_name, is_map, trg_id): @@ -719,8 +718,8 @@ class SynoCommon(object): except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to _lun_map_unmap_target.' - '[%(action)s][%(vol)s].'), + LOG.exception('Failed to _lun_map_unmap_target. ' + '[%(action)s][%(vol)s].', {'action': ('map_target' if is_map else 'unmap_target'), 'vol': volume_name}) @@ -743,7 +742,7 @@ class SynoCommon(object): except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to _modify_lun_name [%s].'), name) + LOG.exception('Failed to _modify_lun_name [%s].', name) def _check_lun_status_normal(self, volume_name): status = '' @@ -755,7 +754,7 @@ class SynoCommon(object): eventlet.sleep(2) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to get lun status. [%s]'), + LOG.exception('Failed to get lun status. [%s]', volume_name) LOG.debug('Lun [%(vol)s], status [%(status)s].', @@ -773,7 +772,7 @@ class SynoCommon(object): eventlet.sleep(2) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to get snapshot status. [%s]'), + LOG.exception('Failed to get snapshot status. [%s]', snapshot_uuid) LOG.debug('Lun [%(snapshot)s], status [%(status)s].', @@ -836,7 +835,7 @@ class SynoCommon(object): self.check_response(out) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to _check_ds_version')) + LOG.exception('Failed to _check_ds_version') if not self.check_value_valid(out, ['data', 'firmware_ver'], @@ -869,7 +868,7 @@ class SynoCommon(object): self.check_response(out) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to _check_ds_ability')) + LOG.exception('Failed to _check_ds_ability') if not self.check_value_valid(out, ['data'], dict): raise exception.MalformedResponse(cmd='_check_ds_ability', @@ -915,7 +914,7 @@ class SynoCommon(object): elif (api.startswith('SYNO.Core.Storage.')): message, exc = self._check_storage_response(out, **kwargs) - LOG.exception(_LE('%(message)s'), {'message': message}) + LOG.exception('%(message)s', {'message': message}) raise exc @@ -934,14 +933,14 @@ class SynoCommon(object): curr_obj = obj for key in key_array: if key not in curr_obj: - LOG.error(_LE('key [%(key)s] is not in %(obj)s'), + LOG.error('key [%(key)s] is not in %(obj)s', {'key': key, 'obj': curr_obj}) return False curr_obj = curr_obj[key] if value_type and not isinstance(curr_obj, value_type): - LOG.error(_LE('[%(obj)s] is %(type)s, not %(value_type)s'), + LOG.error('[%(obj)s] is %(type)s, not %(value_type)s', {'obj': curr_obj, 'type': type(curr_obj), 'value_type': value_type}) @@ -975,7 +974,7 @@ class SynoCommon(object): lun_info = self._get_lun_info(lun_name, ['is_mapped']) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to _is_lun_mapped. [%s]'), lun_name) + LOG.exception('Failed to _is_lun_mapped. [%s]', lun_name) if not self.check_value_valid(lun_info, ['is_mapped'], bool): raise exception.MalformedResponse(cmd='_is_lun_mapped', @@ -1047,7 +1046,7 @@ class SynoCommon(object): except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to create_volume. [%s]'), + LOG.exception('Failed to create_volume. 
[%s]', volume['name']) if not self._check_lun_status_normal(volume['name']): @@ -1065,10 +1064,10 @@ class SynoCommon(object): self.check_response(out) except exception.SynoLUNNotExist: - LOG.warning(_LW('LUN does not exist')) + LOG.warning('LUN does not exist') except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to delete_volume. [%s]'), + LOG.exception('Failed to delete_volume. [%s]', volume['name']) def create_cloned_volume(self, volume, src_vref): @@ -1085,7 +1084,7 @@ class SynoCommon(object): except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to create_cloned_volume. [%s]'), + LOG.exception('Failed to create_cloned_volume. [%s]', volume['name']) if not self._check_lun_status_normal(volume['name']): @@ -1107,7 +1106,7 @@ class SynoCommon(object): self.check_response(out) except Exception as e: - LOG.exception(_LE('Failed to extend_volume. [%s]'), + LOG.exception('Failed to extend_volume. [%s]', volume['name']) raise exception.ExtendVolumeError(reason=e.msg) @@ -1137,7 +1136,7 @@ class SynoCommon(object): except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to create_snapshot. [%s]'), + LOG.exception('Failed to create_snapshot. [%s]', snapshot['volume']['name']) if not self.check_value_valid(resp, @@ -1177,7 +1176,7 @@ class SynoCommon(object): return except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to delete_snapshot. [%s]'), + LOG.exception('Failed to delete_snapshot. [%s]', snapshot['id']) def create_volume_from_snapshot(self, volume, snapshot): @@ -1197,12 +1196,11 @@ class SynoCommon(object): except exception.SnapshotMetadataNotFound: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to get snapshot UUID. [%s]'), + LOG.exception('Failed to get snapshot UUID. [%s]', snapshot['id']) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to create_volume_from_snapshot. ' - '[%s]'), + LOG.exception('Failed to create_volume_from_snapshot. [%s]', snapshot['id']) if not self._check_lun_status_normal(volume['name']): @@ -1273,7 +1271,7 @@ class SynoCommon(object): iscsi_properties['auth_username'] = auth_username iscsi_properties['auth_password'] = auth_password except Exception: - LOG.error(_LE('Invalid provider_auth: %s'), auth) + LOG.error('Invalid provider_auth: %s', auth) return iscsi_properties diff --git a/cinder/volume/drivers/synology/synology_iscsi.py b/cinder/volume/drivers/synology/synology_iscsi.py index 6c2c70f2ab9..7c6170b7e9f 100644 --- a/cinder/volume/drivers/synology/synology_iscsi.py +++ b/cinder/volume/drivers/synology/synology_iscsi.py @@ -17,7 +17,6 @@ from oslo_utils import excutils from cinder import exception from cinder import interface -from cinder.i18n import _LE, _LW from cinder.volume import driver from cinder.volume.drivers.synology import synology_common as common @@ -67,8 +66,8 @@ class SynoISCSIDriver(driver.ISCSIDriver): """Extend an existing volume's size.""" if volume['size'] >= new_size: - LOG.error(_LE('New size is smaller than original size. ' - 'New: [%(new)d] Old: [%(old)d]'), + LOG.error('New size is smaller than original size. 
' + 'New: [%(new)d] Old: [%(old)d]', {'new': new_size, 'old': volume['size']}) return @@ -107,7 +106,7 @@ class SynoISCSIDriver(driver.ISCSIDriver): self.stats['driver_version'] = self.VERSION except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to get_volume_stats.')) + LOG.exception('Failed to get_volume_stats.') return self.stats @@ -123,7 +122,7 @@ class SynoISCSIDriver(driver.ISCSIDriver): iqn, trg_id, provider_auth = (self.common.create_iscsi_export (volume['name'], volume['id'])) except Exception as e: - LOG.exception(_LE('Failed to remove_export.')) + LOG.exception('Failed to remove_export.') raise exception.ExportFailure(reason=e) model_update['provider_location'] = (self.common.get_provider_location @@ -137,7 +136,7 @@ class SynoISCSIDriver(driver.ISCSIDriver): if not self.common.is_lun_mapped(volume['name']): return except exception.SynoLUNNotExist: - LOG.warning(_LW("Volume not exist")) + LOG.warning("Volume not exist") return try: @@ -145,7 +144,7 @@ class SynoISCSIDriver(driver.ISCSIDriver): (volume['provider_location'])) self.common.remove_iscsi_export(volume['name'], trg_id) except Exception as e: - LOG.exception(_LE('Failed to remove_export.')) + LOG.exception('Failed to remove_export.') raise exception.RemoveExportException(volume=volume, reason=e.msg) @@ -156,7 +155,7 @@ class SynoISCSIDriver(driver.ISCSIDriver): iscsi_properties = self.common.get_iscsi_properties(volume) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to initialize_connection.')) + LOG.exception('Failed to initialize_connection.') volume_type = self.configuration.safe_get('iscsi_protocol') or 'iscsi' diff --git a/cinder/volume/drivers/tegile.py b/cinder/volume/drivers/tegile.py index 51294f68cd4..35d3a804685 100644 --- a/cinder/volume/drivers/tegile.py +++ b/cinder/volume/drivers/tegile.py @@ -26,9 +26,9 @@ from oslo_utils import units import six from cinder import exception -from cinder import utils -from cinder.i18n import _, _LI, _LW +from cinder.i18n import _ from cinder import interface +from cinder import utils from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.volume import utils as volume_utils @@ -203,7 +203,7 @@ class TegileIntelliFlashVolumeDriver(san.SanDriver): self._api_executor.send_api_request(method='createVolume', params=params) - LOG.info(_LI("Created volume %(volname)s, volume id %(volid)s."), + LOG.info("Created volume %(volname)s, volume id %(volid)s.", {'volname': volume['name'], 'volid': volume['id']}) return self.get_additional_info(volume, pool, self._default_project) @@ -252,8 +252,8 @@ class TegileIntelliFlashVolumeDriver(san.SanDriver): params.append(snap_name) params.append(False) - LOG.info(_LI('Creating snapshot for volume_name=%(vol)s' - ' snap_name=%(name)s snap_description=%(desc)s'), + LOG.info('Creating snapshot for volume_name=%(vol)s' + ' snap_name=%(name)s snap_description=%(desc)s', {'vol': volume_name, 'name': snap_name, 'desc': snap_description}) @@ -377,8 +377,8 @@ class TegileIntelliFlashVolumeDriver(san.SanDriver): self._stats = data except Exception as e: - LOG.warning(_LW('TegileIntelliFlashVolumeDriver(%(clsname)s) ' - '_update_volume_stats failed: %(error)s'), + LOG.warning('TegileIntelliFlashVolumeDriver(%(clsname)s) ' + '_update_volume_stats failed: %(error)s', {'clsname': self.__class__.__name__, 'error': e}) diff --git a/cinder/volume/drivers/tintri.py b/cinder/volume/drivers/tintri.py index 0dbb017772d..d76c3e7906e 100644 --- 
a/cinder/volume/drivers/tintri.py +++ b/cinder/volume/drivers/tintri.py @@ -31,10 +31,10 @@ import requests from six.moves import urllib from cinder import exception -from cinder import utils -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder.image import image_utils from cinder import interface +from cinder import utils from cinder.volume import driver from cinder.volume.drivers import nfs @@ -156,13 +156,13 @@ class TintriDriver(driver.ManageableVD, with self._get_client() as c: c.delete_snapshot(snapshot.provider_id) else: - LOG.info(_LI('Snapshot %s not found'), snapshot.name) + LOG.info('Snapshot %s not found', snapshot.name) def _check_ops(self, required_ops, configuration): """Ensures that the options we care about are set.""" for op in required_ops: if not getattr(configuration, op): - LOG.error(_LE('Configuration value %s is not set.'), op) + LOG.error('Configuration value %s is not set.', op) raise exception.InvalidConfigurationValue(option=op, value=None) @@ -182,7 +182,7 @@ class TintriDriver(driver.ManageableVD, try: self.extend_volume(volume, vol_size) except Exception: - LOG.error(_LE('Resizing %s failed. Cleaning volume.'), + LOG.error('Resizing %s failed. Cleaning volume.', volume.name) self._delete_file(path) raise @@ -270,8 +270,8 @@ class TintriDriver(driver.ManageableVD, try: c.delete_snapshot(uuid) except Exception: - LOG.exception(_LE('Unexpected exception during ' - 'cache cleanup of snapshot %s'), + LOG.exception('Unexpected exception during ' + 'cache cleanup of snapshot %s', uuid) else: LOG.debug('Cache cleanup: nothing to clean') @@ -356,7 +356,7 @@ class TintriDriver(driver.ManageableVD, try: self.extend_volume(volume, vol_size) except Exception: - LOG.error(_LE('Resizing %s failed. Cleaning volume.'), + LOG.error('Resizing %s failed. Cleaning volume.', volume.name) self._delete_file(path) raise @@ -367,7 +367,7 @@ class TintriDriver(driver.ManageableVD, """Fetches the image from image_service and write it to the volume.""" super(TintriDriver, self).copy_image_to_volume( context, volume, image_service, image_id) - LOG.info(_LI('Copied image to volume %s using regular download.'), + LOG.info('Copied image to volume %s using regular download.', volume['name']) self._create_image_snapshot(volume['name'], volume['provider_location'], image_id, @@ -376,7 +376,7 @@ class TintriDriver(driver.ManageableVD, def _create_image_snapshot(self, volume_name, share, image_id, image_name): """Creates an image snapshot.""" snapshot_name = img_prefix + image_id - LOG.info(_LI('Creating image snapshot %s'), snapshot_name) + LOG.info('Creating image snapshot %s', snapshot_name) (host, path) = self._get_export_ip_path(None, share) volume_path = '%s/%s' % (path, volume_name) @@ -392,8 +392,8 @@ class TintriDriver(driver.ManageableVD, try: return _do_snapshot() except Exception as e: - LOG.warning(_LW('Exception while creating image %(image_id)s ' - 'snapshot. Exception: %(exc)s'), + LOG.warning('Exception while creating image %(image_id)s ' + 'snapshot. 
Exception: %(exc)s', {'image_id': image_id, 'exc': e}) def _find_image_snapshot(self, image_id): @@ -405,7 +405,7 @@ class TintriDriver(driver.ManageableVD, """Clones volume from image snapshot.""" file_path = self._get_volume_path(share, dst) if not os.path.exists(file_path): - LOG.info(_LI('Cloning from snapshot to destination %s'), dst) + LOG.info('Cloning from snapshot to destination %s', dst) self._clone_snapshot(snapshot_id, dst, volume_id=None, share=share) @@ -417,7 +417,7 @@ class TintriDriver(driver.ManageableVD, self._execute(*cmd, run_as_root=self._execute_as_root) return True except Exception as e: - LOG.warning(_LW('Exception during deleting %s'), e) + LOG.warning('Exception during deleting %s', e) return False def _move_file(self, source_path, dest_path): @@ -426,7 +426,7 @@ class TintriDriver(driver.ManageableVD, @utils.synchronized(dest_path, external=True) def _do_move(src, dst): if os.path.exists(dst): - LOG.warning(_LW('Destination %s already exists.'), dst) + LOG.warning('Destination %s already exists.', dst) return False self._execute('mv', src, dst, run_as_root=self._execute_as_root) return True @@ -434,7 +434,7 @@ class TintriDriver(driver.ManageableVD, try: return _do_move(source_path, dest_path) except Exception as e: - LOG.warning(_LW('Exception moving file %(src)s. Message: %(e)s'), + LOG.warning('Exception moving file %(src)s. Message: %(e)s', {'src': source_path, 'e': e}) return False @@ -470,8 +470,8 @@ class TintriDriver(driver.ManageableVD, if cloned: post_clone = self._post_clone_image(volume) except Exception as e: - LOG.info(_LI('Image cloning unsuccessful for image ' - '%(image_id)s. Message: %(msg)s'), + LOG.info('Image cloning unsuccessful for image ' + '%(image_id)s. Message: %(msg)s', {'image_id': image_id, 'msg': e}) vol_path = self.local_path(volume) volume['provider_location'] = None @@ -486,7 +486,7 @@ class TintriDriver(driver.ManageableVD, def _clone_from_snapshot(self, volume, image_id, snapshot_id): """Clones a copy from image snapshot.""" cloned = False - LOG.info(_LI('Cloning image %s from snapshot.'), image_id) + LOG.info('Cloning image %s from snapshot.', image_id) for share in self._mounted_shares: # Repeat tries in other shares if failed in some LOG.debug('Image share: %s', share) @@ -499,13 +499,13 @@ class TintriDriver(driver.ManageableVD, volume['provider_location'] = share break except Exception: - LOG.warning(_LW('Unexpected exception during ' - 'image cloning in share %s'), share) + LOG.warning('Unexpected exception during ' + 'image cloning in share %s', share) return cloned def _direct_clone(self, volume, image_location, image_id, image_name): """Clones directly in nfs share.""" - LOG.info(_LI('Checking image clone %s from glance share.'), image_id) + LOG.info('Checking image clone %s from glance share.', image_id) cloned = False image_location = self._get_image_nfs_url(image_location) share = self._is_cloneable_share(image_location) @@ -535,7 +535,7 @@ class TintriDriver(driver.ManageableVD, volume_id=None, share=share, dst=dst_share, image_id=image_id) cloned = True else: - LOG.info(_LI('Image will locally be converted to raw %s'), + LOG.info('Image will locally be converted to raw %s', image_id) dst = '%s/%s' % (dst_path, volume['name']) image_utils.convert_image(img_path, dst, 'raw', @@ -554,7 +554,7 @@ class TintriDriver(driver.ManageableVD, def _post_clone_image(self, volume): """Performs operations post image cloning.""" - LOG.info(_LI('Performing post clone for %s'), volume['name']) + LOG.info('Performing post clone for 
%s', volume['name']) vol_path = self.local_path(volume) self._set_rw_permissions(vol_path) self._resize_image_file(vol_path, volume['size']) @@ -566,7 +566,7 @@ class TintriDriver(driver.ManageableVD, if self._is_file_size_equal(path, new_size): return else: - LOG.info(_LI('Resizing file to %sG'), new_size) + LOG.info('Resizing file to %sG', new_size) image_utils.resize_image(path, new_size, run_as_root=self._execute_as_root) if self._is_file_size_equal(path, new_size): @@ -614,7 +614,7 @@ class TintriDriver(driver.ManageableVD, LOG.debug('Found share match %s', sh) return sh except Exception: - LOG.warning(_LW('Unexpected exception while listing used share.')) + LOG.warning('Unexpected exception while listing used share.') def _get_image_nfs_url(self, image_location): """Gets direct url for nfs backend. @@ -721,7 +721,7 @@ class TintriDriver(driver.ManageableVD, raise exception.VolumeDriverException(msg) self._set_rw_permissions(dst) - LOG.info(_LI('Manage volume %s'), volume['name']) + LOG.info('Manage volume %s', volume['name']) return {'provider_location': nfs_share} def manage_existing_get_size(self, volume, existing_ref): @@ -754,7 +754,7 @@ class TintriDriver(driver.ManageableVD, :param volume: Cinder volume to unmanage """ volume_path = self.local_path(volume) - LOG.info(_LI('Unmanage volume %s'), volume_path) + LOG.info('Unmanage volume %s', volume_path) def _convert_volume_share(self, volume_share): """Converts the share name to IP address.""" @@ -810,8 +810,7 @@ class TintriDriver(driver.ManageableVD, self._ensure_share_mounted(share) mounted_image_shares.append(share) except Exception: - LOG.exception(_LE( - 'Exception during mounting.')) + LOG.exception('Exception during mounting.') self._mounted_image_shares = mounted_image_shares # Mount Cinder shares diff --git a/cinder/volume/drivers/violin/v7000_common.py b/cinder/volume/drivers/violin/v7000_common.py index 7a237c690e0..bb3f81f0087 100644 --- a/cinder/volume/drivers/violin/v7000_common.py +++ b/cinder/volume/drivers/violin/v7000_common.py @@ -42,7 +42,7 @@ from oslo_utils import units from cinder import context from cinder.db.sqlalchemy import api from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import utils from cinder.volume import volume_types @@ -54,7 +54,7 @@ try: except ImportError: vmemclient = None else: - LOG.info(_LI("Running with vmemclient version: %s"), + LOG.info("Running with vmemclient version: %s", vmemclient.__version__) @@ -126,7 +126,7 @@ class V7000Common(object): if (self.config.violin_dedup_only_pools == [] and self.config.violin_dedup_capable_pools == []): - LOG.warning(_LW("Storage pools not configured.")) + LOG.warning("Storage pools not configured.") raise exception.InvalidInput( reason=_('Storage pool configuration is ' 'mandatory for external head')) @@ -137,7 +137,7 @@ class V7000Common(object): msg = _('vmemclient python library not found') raise exception.VolumeBackendAPIException(data=msg) - LOG.info(_LI("CONCERTO version: %s"), self.vmem_mg.version) + LOG.info("CONCERTO version: %s", self.vmem_mg.version) if not self._is_supported_vmos_version(self.vmem_mg.version): msg = _('CONCERTO version is not supported') @@ -196,7 +196,7 @@ class V7000Common(object): LOG.debug("Lun %s already exists, continuing.", volume['id']) except Exception: - LOG.exception(_LE("Lun create for %s failed!"), volume['id']) + LOG.exception("Lun create for %s failed!", volume['id']) raise @utils.synchronized('vmem-lun') @@ -221,12 +221,12 @@ class 
V7000Common(object): LOG.debug("Lun %s already deleted, continuing.", volume['id']) except exception.ViolinBackendErrExists: - LOG.exception(_LE("Lun %s has dependent snapshots, " - "skipping lun deletion."), volume['id']) + LOG.exception("Lun %s has dependent snapshots, " + "skipping lun deletion.", volume['id']) raise exception.VolumeIsBusy(volume_name=volume['id']) except Exception: - LOG.exception(_LE("Lun delete for %s failed!"), volume['id']) + LOG.exception("Lun delete for %s failed!", volume['id']) raise def _extend_lun(self, volume, new_size): @@ -264,7 +264,7 @@ class V7000Common(object): volume['id'], delta_mb) except Exception: - LOG.exception(_LE("LUN extend failed!")) + LOG.exception("LUN extend failed!") raise def _create_lun_snapshot(self, snapshot): @@ -307,8 +307,8 @@ class V7000Common(object): priority=CONCERTO_DEFAULT_PRIORITY, enable_notification=False) except Exception: - LOG.exception(_LE("Lun create snapshot for " - "volume %(vol)s snapshot %(snap)s failed!"), + LOG.exception("Lun create snapshot for " + "volume %(vol)s snapshot %(snap)s failed!", {'vol': cinder_volume_id, 'snap': cinder_snapshot_id}) raise @@ -382,8 +382,8 @@ class V7000Common(object): self._check_error_code(result) except Exception: - LOG.exception(_LE("Copy snapshot to volume for " - "snapshot %(snap)s volume %(vol)s failed!"), + LOG.exception("Copy snapshot to volume for " + "snapshot %(snap)s volume %(vol)s failed!", {'snap': cinder_snapshot_id, 'vol': cinder_volume_id}) raise @@ -431,8 +431,8 @@ class V7000Common(object): self._check_error_code(result) except Exception: - LOG.exception(_LE("Create new lun from lun for source " - "%(src)s => destination %(dest)s failed!"), + LOG.exception("Create new lun from lun for source " + "%(src)s => destination %(dest)s failed!", {'src': src_vol['id'], 'dest': dest_vol['id']}) raise @@ -1064,8 +1064,8 @@ class V7000Common(object): 'snap_id': cinder_snapshot_id}) raise loopingcall.LoopingCallDone(retvalue=True) else: - LOG.warning(_LW("Delete snapshot %(snap)s of %(vol)s " - "encountered temporary error: %(msg)s"), + LOG.warning("Delete snapshot %(snap)s of %(vol)s " + "encountered temporary error: %(msg)s", {'snap': cinder_snapshot_id, 'vol': cinder_volume_id, 'msg': ans['msg']}) diff --git a/cinder/volume/drivers/violin/v7000_fcp.py b/cinder/volume/drivers/violin/v7000_fcp.py index 38fe59bb449..82ebae1390f 100644 --- a/cinder/volume/drivers/violin/v7000_fcp.py +++ b/cinder/volume/drivers/violin/v7000_fcp.py @@ -38,7 +38,7 @@ driver documentation for more information. 
from oslo_log import log as logging from cinder import exception -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume import driver @@ -73,7 +73,7 @@ class V7000FCPDriver(driver.FibreChannelDriver): self.common = v7000_common.V7000Common(self.configuration) self.lookup_service = fczm_utils.create_lookup_service() - LOG.info(_LI("Initialized driver %(name)s version: %(vers)s"), + LOG.info("Initialized driver %(name)s version: %(vers)s", {'name': self.__class__.__name__, 'vers': self.VERSION}) def do_setup(self, context): @@ -222,7 +222,7 @@ class V7000FCPDriver(driver.FibreChannelDriver): [volume['id'], connector['host']]) except exception.ViolinBackendErr: - LOG.exception(_LE("Backend returned err for lun export.")) + LOG.exception("Backend returned err for lun export.") raise except Exception: @@ -230,7 +230,7 @@ class V7000FCPDriver(driver.FibreChannelDriver): reason=_('LUN export failed!')) lun_id = self._get_lun_id(volume['id'], connector['host']) - LOG.info(_LI("Exported lun %(vol_id)s on lun_id %(lun_id)s."), + LOG.info("Exported lun %(vol_id)s on lun_id %(lun_id)s.", {'vol_id': volume['id'], 'lun_id': lun_id}) return lun_id @@ -243,7 +243,7 @@ class V7000FCPDriver(driver.FibreChannelDriver): """ v = self.common.vmem_mg - LOG.info(_LI("Unexporting lun %s."), volume['id']) + LOG.info("Unexporting lun %s.", volume['id']) try: self.common._send_cmd(v.lun.unassign_client_lun, @@ -251,11 +251,11 @@ class V7000FCPDriver(driver.FibreChannelDriver): volume['id'], connector['host'], True) except exception.ViolinBackendErr: - LOG.exception(_LE("Backend returned err for lun export.")) + LOG.exception("Backend returned err for lun export.") raise except Exception: - LOG.exception(_LE("LUN unexport failed!")) + LOG.exception("LUN unexport failed!") raise def _update_volume_stats(self): diff --git a/cinder/volume/drivers/violin/v7000_iscsi.py b/cinder/volume/drivers/violin/v7000_iscsi.py index 57d3f07a0cb..e5a09e6dcff 100644 --- a/cinder/volume/drivers/violin/v7000_iscsi.py +++ b/cinder/volume/drivers/violin/v7000_iscsi.py @@ -35,8 +35,8 @@ import uuid from oslo_log import log as logging from cinder import exception +from cinder.i18n import _ from cinder import interface -from cinder.i18n import _, _LE, _LI, _LW from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.volume.drivers.violin import v7000_common @@ -65,7 +65,7 @@ class V7000ISCSIDriver(driver.ISCSIDriver): self.configuration.append_config_values(san.san_opts) self.common = v7000_common.V7000Common(self.configuration) - LOG.info(_LI("Initialized driver %(name)s version: %(vers)s"), + LOG.info("Initialized driver %(name)s version: %(vers)s", {'name': self.__class__.__name__, 'vers': self.VERSION}) def do_setup(self, context): @@ -82,7 +82,7 @@ class V7000ISCSIDriver(driver.ISCSIDriver): # Getting iscsi IPs from the array is incredibly expensive, # so only do it once. if not self.configuration.violin_iscsi_target_ips: - LOG.warning(_LW("iSCSI target ip addresses not configured. 
")) + LOG.warning("iSCSI target ip addresses not configured.") self.gateway_iscsi_ip_addresses = ( self.common.vmem_mg.utility.get_iscsi_interfaces()) else: @@ -239,7 +239,7 @@ class V7000ISCSIDriver(driver.ISCSIDriver): [volume['id'], connector['host']]) except exception.ViolinBackendErr: - LOG.exception(_LE("Backend returned error for lun export.")) + LOG.exception("Backend returned error for lun export.") raise except Exception: @@ -247,7 +247,7 @@ class V7000ISCSIDriver(driver.ISCSIDriver): reason=_('LUN export failed!')) lun_id = self._get_lun_id(volume['id'], connector['host']) - LOG.info(_LI("Exported lun %(vol_id)s on lun_id %(lun_id)s."), + LOG.info("Exported lun %(vol_id)s on lun_id %(lun_id)s.", {'vol_id': volume['id'], 'lun_id': lun_id}) return lun_id @@ -263,7 +263,7 @@ class V7000ISCSIDriver(driver.ISCSIDriver): """ v = self.common.vmem_mg - LOG.info(_LI("Unexporting lun %(vol)s host is %(host)s."), + LOG.info("Unexporting lun %(vol)s host is %(host)s.", {'vol': volume['id'], 'host': connector['host']}) try: @@ -272,11 +272,11 @@ class V7000ISCSIDriver(driver.ISCSIDriver): volume['id'], target, True) except exception.ViolinBackendErrNotFound: - LOG.info(_LI("Lun %s already unexported, continuing..."), + LOG.info("Lun %s already unexported, continuing...", volume['id']) except Exception: - LOG.exception(_LE("LUN unexport failed!")) + LOG.exception("LUN unexport failed!") msg = _("LUN unexport failed") raise exception.ViolinBackendErr(message=msg) diff --git a/cinder/volume/drivers/vmware/datastore.py b/cinder/volume/drivers/vmware/datastore.py index eab7c7675bc..d9824bbd390 100644 --- a/cinder/volume/drivers/vmware/datastore.py +++ b/cinder/volume/drivers/vmware/datastore.py @@ -23,7 +23,6 @@ from oslo_log import log as logging from oslo_vmware import pbm from oslo_vmware import vim_util -from cinder.i18n import _LE from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions @@ -68,7 +67,7 @@ class DatastoreSelector(object): """ profile_id = pbm.get_profile_id_by_name(self._session, profile_name) if profile_id is None: - LOG.error(_LE("Storage profile: %s cannot be found in vCenter."), + LOG.error("Storage profile: %s cannot be found in vCenter.", profile_name) raise vmdk_exceptions.ProfileNotFoundException( storage_profile=profile_name) diff --git a/cinder/volume/drivers/vmware/vmdk.py b/cinder/volume/drivers/vmware/vmdk.py index bb06e513565..b1ef9cab92b 100644 --- a/cinder/volume/drivers/vmware/vmdk.py +++ b/cinder/volume/drivers/vmware/vmdk.py @@ -41,7 +41,7 @@ from oslo_vmware import pbm from oslo_vmware import vim_util from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.vmware import datastore as hub @@ -333,8 +333,8 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): """ backing = self.volumeops.get_backing(volume['name']) if not backing: - LOG.info(_LI("Backing not available, no operation " - "to be performed.")) + LOG.info("Backing not available, no operation " + "to be performed.") return self.volumeops.delete_backing(backing) @@ -475,14 +475,14 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): elif self._clusters: hosts = self._get_hosts(self._clusters) if not hosts: - LOG.error(_LE("There are no valid hosts available in " - "configured cluster(s): %s."), self._clusters) + LOG.error("There are no valid hosts available in " + "configured cluster(s): %s.", self._clusters) raise vmdk_exceptions.NoValidHostException() 
best_candidate = self.ds_sel.select_datastore(req, hosts=hosts) if not best_candidate: - LOG.error(_LE("There is no valid datastore satisfying " - "requirements: %s."), req) + LOG.error("There is no valid datastore satisfying " + "requirements: %s.", req) raise vmdk_exceptions.NoValidDatastoreException() return best_candidate @@ -582,8 +582,8 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): if not backing: # Create a backing in case it does not exist under the # host managing the instance. - LOG.info(_LI("There is no backing for the volume: %s. " - "Need to create one."), volume.name) + LOG.info("There is no backing for the volume: %s. " + "Need to create one.", volume.name) backing = self._create_backing(volume, host) else: # Relocate volume is necessary @@ -595,7 +595,7 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): if not backing: # Create a backing in case it does not exist. It is a bad use # case to boot from an empty volume. - LOG.warning(_LW("Trying to boot from an empty volume: %s."), + LOG.warning("Trying to boot from an empty volume: %s.", volume.name) # Create backing backing = self._create_backing(volume) @@ -652,12 +652,12 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): raise exception.InvalidVolume(msg) backing = self.volumeops.get_backing(snapshot['volume_name']) if not backing: - LOG.info(_LI("There is no backing, so will not create " - "snapshot: %s."), snapshot['name']) + LOG.info("There is no backing, so will not create " + "snapshot: %s.", snapshot['name']) return self.volumeops.create_snapshot(backing, snapshot['name'], snapshot['display_description']) - LOG.info(_LI("Successfully created snapshot: %s."), snapshot['name']) + LOG.info("Successfully created snapshot: %s.", snapshot['name']) def create_snapshot(self, snapshot): """Creates a snapshot. @@ -765,8 +765,7 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): self.volumeops.delete_vmdk_file( descriptor_ds_file_path, dc_ref) except exceptions.VimException: - LOG.warning(_LW("Error occurred while deleting temporary " - "disk: %s."), + LOG.warning("Error occurred while deleting temporary disk: %s.", descriptor_ds_file_path, exc_info=True) def _copy_temp_virtual_disk(self, src_dc_ref, src_path, dest_dc_ref, @@ -779,8 +778,8 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): dest_path.get_descriptor_ds_file_path(), dest_dc_ref) except exceptions.VimException: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Error occurred while copying %(src)s to " - "%(dst)s."), + LOG.exception("Error occurred while copying %(src)s to " + "%(dst)s.", {'src': src_path.get_descriptor_ds_file_path(), 'dst': dest_path.get_descriptor_ds_file_path()}) finally: @@ -887,8 +886,8 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): except Exception: # Delete the descriptor. 
with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Error occurred while copying image: " - "%(image_id)s to %(path)s."), + LOG.exception("Error occurred while copying image: " + "%(image_id)s to %(path)s.", {'path': path.get_descriptor_ds_file_path(), 'image_id': image_id}) LOG.debug("Deleting descriptor: %s.", @@ -897,8 +896,8 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): self.volumeops.delete_file( path.get_descriptor_ds_file_path(), dc_ref) except exceptions.VimException: - LOG.warning(_LW("Error occurred while deleting " - "descriptor: %s."), + LOG.warning("Error occurred while deleting " + "descriptor: %s.", path.get_descriptor_ds_file_path(), exc_info=True) @@ -930,7 +929,7 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): try: self.volumeops.delete_backing(backing) except exceptions.VimException: - LOG.warning(_LW("Error occurred while deleting backing: %s."), + LOG.warning("Error occurred while deleting backing: %s.", backing, exc_info=True) def _create_volume_from_non_stream_optimized_image( @@ -1027,9 +1026,9 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): except Exception: # Delete backing and virtual disk created from image. with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Error occurred while creating " - "volume: %(id)s" - " from image: %(image_id)s."), + LOG.exception("Error occurred while creating " + "volume: %(id)s" + " from image: %(image_id)s.", {'id': volume['id'], 'image_id': image_id}) self._delete_temp_backing(backing) @@ -1103,15 +1102,15 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): except (exceptions.VimException, exceptions.VMwareDriverException): with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Error occurred while copying image: %(id)s " - "to volume: %(vol)s."), + LOG.exception("Error occurred while copying image: %(id)s " + "to volume: %(vol)s.", {'id': image_id, 'vol': volume['name']}) backing = self.volumeops.get_backing(volume['name']) if backing: # delete the backing self.volumeops.delete_backing(backing) - LOG.info(_LI("Done copying image: %(id)s to volume: %(vol)s."), + LOG.info("Done copying image: %(id)s to volume: %(vol)s.", {'id': image_id, 'vol': volume['name']}) def _extend_backing(self, backing, new_size_in_gb): @@ -1180,8 +1179,8 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): except (exceptions.VimException, exceptions.VMwareDriverException): with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Error occurred while copying image: %(id)s " - "to volume: %(vol)s."), + LOG.exception("Error occurred while copying image: %(id)s " + "to volume: %(vol)s.", {'id': image_id, 'vol': volume['name']}) LOG.debug("Volume: %(id)s created from image: %(image_id)s.", @@ -1229,7 +1228,7 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): # get backing vm of volume and its vmdk path backing = self.volumeops.get_backing(volume['name']) if not backing: - LOG.info(_LI("Backing not found, creating for volume: %s"), + LOG.info("Backing not found, creating for volume: %s", volume['name']) backing = self._create_backing(volume) vmdk_file_path = self.volumeops.get_vmdk_path(backing) @@ -1252,7 +1251,7 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): vmdk_size=volume['size'] * units.Gi, image_name=image_meta['name'], image_version=1) - LOG.info(_LI("Done copying volume %(vol)s to a new image %(img)s"), + LOG.info("Done copying volume %(vol)s to a new image %(img)s", {'vol': volume['name'], 'img': image_meta['name']}) def _in_use(self, volume): @@ -1282,7 +1281,7 @@ class 
VMwareVcVmdkDriver(driver.VolumeDriver): """ # Can't attempt retype if the volume is in use. if self._in_use(volume): - LOG.warning(_LW("Volume: %s is in use, can't retype."), + LOG.warning("Volume: %s is in use, can't retype.", volume['name']) return False @@ -1352,8 +1351,8 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): best_candidate = self._select_datastore(req) except vmdk_exceptions.NoValidDatastoreException: # No candidate datastores; can't retype. - LOG.warning(_LW("There are no datastores matching new " - "requirements; can't retype volume: %s."), + LOG.warning("There are no datastores matching new " + "requirements; can't retype volume: %s.", volume['name']) return False @@ -1392,9 +1391,8 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): volume['id']) except exceptions.VimException: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Error occurred while cloning " - "backing:" - " %s during retype."), + LOG.exception("Error occurred while cloning backing: " + "%s during retype.", backing) if renamed and not new_backing: LOG.debug("Undo rename of backing: %(backing)s; " @@ -1407,10 +1405,10 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): self.volumeops.rename_backing(backing, volume['name']) except exceptions.VimException: - LOG.warning(_LW("Changing backing: " - "%(backing)s name from " - "%(new_name)s to %(old_name)s " - "failed."), + LOG.warning("Changing backing: " + "%(backing)s name from " + "%(new_name)s to %(old_name)s " + "failed.", {'backing': backing, 'new_name': tmp_name, 'old_name': volume['name']}) @@ -1445,27 +1443,27 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): vol_name = volume['name'] backing = self.volumeops.get_backing(vol_name) if not backing: - LOG.info(_LI("There is no backing for volume: %s; no need to " - "extend the virtual disk."), vol_name) + LOG.info("There is no backing for volume: %s; no need to " + "extend the virtual disk.", vol_name) return # try extending vmdk in place try: self._extend_backing(backing, new_size) - LOG.info(_LI("Successfully extended volume: %(vol)s to size: " - "%(size)s GB."), + LOG.info("Successfully extended volume: %(vol)s to size: " + "%(size)s GB.", {'vol': vol_name, 'size': new_size}) return except exceptions.NoDiskSpaceException: - LOG.warning(_LW("Unable to extend volume: %(vol)s to size: " - "%(size)s on current datastore due to insufficient" - " space."), + LOG.warning("Unable to extend volume: %(vol)s to size: " + "%(size)s on current datastore due to insufficient" + " space.", {'vol': vol_name, 'size': new_size}) # Insufficient disk space; relocate the volume to a different datastore # and retry extend. 
- LOG.info(_LI("Relocating volume: %s to a different datastore due to " - "insufficient disk space on current datastore."), + LOG.info("Relocating volume: %s to a different datastore due to " + "insufficient disk space on current datastore.", vol_name) try: create_params = {CREATE_PARAM_DISK_SIZE: new_size} @@ -1477,12 +1475,12 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): self._extend_backing(backing, new_size) except exceptions.VMwareDriverException: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed to extend volume: %(vol)s to size: " - "%(size)s GB."), + LOG.error("Failed to extend volume: %(vol)s to size: " + "%(size)s GB.", {'vol': vol_name, 'size': new_size}) - LOG.info(_LI("Successfully extended volume: %(vol)s to size: " - "%(size)s GB."), + LOG.info("Successfully extended volume: %(vol)s to size: " + "%(size)s GB.", {'vol': vol_name, 'size': new_size}) @contextlib.contextmanager @@ -1598,8 +1596,8 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): return vm_ref except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Error occurred while creating temporary " - "backing.")) + LOG.exception("Error occurred while creating temporary " + "backing.") backing = self.volumeops.get_backing(name) if backing is not None: self._delete_temp_backing(backing) @@ -1666,9 +1664,9 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): self.volumeops.rename_backing(backing, volume['name']) except exceptions.VimException: - LOG.warning(_LW("Cannot undo volume rename; old " - "name was %(old_name)s and new " - "name is %(new_name)s."), + LOG.warning("Cannot undo volume rename; old " + "name was %(old_name)s and new " + "name is %(new_name)s.", {'old_name': volume['name'], 'new_name': tmp_backing_name}, exc_info=True) @@ -1851,11 +1849,11 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): """ version_str = self.configuration.vmware_host_version if version_str: - LOG.info(_LI("Using overridden vmware_host_version from config: " - "%s"), version_str) + LOG.info("Using overridden vmware_host_version from config: %s", + version_str) else: version_str = vim_util.get_vc_version(self.session) - LOG.info(_LI("Fetched vCenter server version: %s"), version_str) + LOG.info("Fetched vCenter server version: %s", version_str) return version_str def _validate_vcenter_version(self, vc_version): @@ -1869,10 +1867,10 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): vc_version, same_major=False): # TODO(vbala): enforce vCenter version 5.5 in Pike release. - LOG.warning(_LW('Running Cinder with a VMware vCenter version ' - 'less than %(ver)s is deprecated. The minimum ' - 'required version of vCenter server will be raised' - ' to %(ver)s in the 11.0.0 release.'), + LOG.warning('Running Cinder with a VMware vCenter version ' + 'less than %(ver)s is deprecated. 
The minimum ' + 'required version of vCenter server will be raised' + ' to %(ver)s in the 11.0.0 release.', {'ver': self.NEXT_MIN_SUPPORTED_VC_VERSION}) def do_setup(self, context): @@ -1890,8 +1888,8 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): same_major=False)): self.pbm_wsdl = pbm.get_pbm_wsdl_location(self._vc_version) if not self.pbm_wsdl: - LOG.error(_LE("Not able to configure PBM for vCenter server: " - "%s"), self._vc_version) + LOG.error("Not able to configure PBM for vCenter server: %s", + self._vc_version) raise exceptions.VMwareDriverException() self._storage_policy_enabled = True # Destroy current session so that it is recreated with pbm enabled @@ -1909,11 +1907,11 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): if cluster_names: self._clusters = self.volumeops.get_cluster_refs( cluster_names).values() - LOG.info(_LI("Using compute cluster(s): %s."), cluster_names) + LOG.info("Using compute cluster(s): %s.", cluster_names) - LOG.info(_LI("Successfully setup driver: %(driver)s for server: " - "%(ip)s."), {'driver': self.__class__.__name__, - 'ip': self.configuration.vmware_host_ip}) + LOG.info("Successfully setup driver: %(driver)s for server: " + "%(ip)s.", {'driver': self.__class__.__name__, + 'ip': self.configuration.vmware_host_ip}) def _get_volume_group_folder(self, datacenter, project_id): """Get inventory folder for organizing volume backings. @@ -2059,7 +2057,7 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): # the size of the source volume to the volume size. if volume['size'] > src_vsize: self._extend_backing(clone, volume['size']) - LOG.info(_LI("Successfully created clone: %s."), clone) + LOG.info("Successfully created clone: %s.", clone) def _create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. @@ -2073,17 +2071,17 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): self._verify_volume_creation(volume) backing = self.volumeops.get_backing(snapshot['volume_name']) if not backing: - LOG.info(_LI("There is no backing for the snapshotted volume: " - "%(snap)s. Not creating any backing for the " - "volume: %(vol)s."), + LOG.info("There is no backing for the snapshotted volume: " + "%(snap)s. Not creating any backing for the " + "volume: %(vol)s.", {'snap': snapshot['name'], 'vol': volume['name']}) return snapshot_moref = self.volumeops.get_snapshot(backing, snapshot['name']) if not snapshot_moref: - LOG.info(_LI("There is no snapshot point for the snapshotted " - "volume: %(snap)s. Not creating any backing for " - "the volume: %(vol)s."), + LOG.info("There is no snapshot point for the snapshotted " + "volume: %(snap)s. Not creating any backing for " + "the volume: %(vol)s.", {'snap': snapshot['name'], 'vol': volume['name']}) return clone_type = VMwareVcVmdkDriver._get_clone_type(volume) @@ -2110,8 +2108,8 @@ class VMwareVcVmdkDriver(driver.VolumeDriver): self._verify_volume_creation(volume) backing = self.volumeops.get_backing(src_vref['name']) if not backing: - LOG.info(_LI("There is no backing for the source volume: %(src)s. " - "Not creating any backing for volume: %(vol)s."), + LOG.info("There is no backing for the source volume: %(src)s. 
" + "Not creating any backing for volume: %(vol)s.", {'src': src_vref['name'], 'vol': volume['name']}) return clone_type = VMwareVcVmdkDriver._get_clone_type(volume) diff --git a/cinder/volume/drivers/vmware/volumeops.py b/cinder/volume/drivers/vmware/volumeops.py index c43c93b3661..f31e742c285 100644 --- a/cinder/volume/drivers/vmware/volumeops.py +++ b/cinder/volume/drivers/vmware/volumeops.py @@ -24,7 +24,7 @@ from oslo_vmware import exceptions from oslo_vmware import vim_util from six.moves import urllib -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions @@ -316,7 +316,7 @@ class VMwareVolumeOps(object): backing) LOG.debug("Initiated deletion of VM backing: %s.", backing) self._session.wait_for_task(task) - LOG.info(_LI("Deleted the VM backing: %s."), backing) + LOG.info("Deleted the VM backing: %s.", backing) # TODO(kartikaditya) Keep the methods not specific to volume in # a different file @@ -562,8 +562,8 @@ class VMwareVolumeOps(object): newCapacityKb=size_in_kb, eagerZero=eager_zero) self._session.wait_for_task(task) - LOG.info(_LI("Successfully extended virtual disk: %(path)s to " - "%(size)s GB."), + LOG.info("Successfully extended virtual disk: %(path)s to " + "%(size)s GB.", {'path': path, 'size': requested_size_in_gb}) def _create_controller_config_spec(self, adapter_type): @@ -741,7 +741,7 @@ class VMwareVolumeOps(object): pool=resource_pool, host=host) task_info = self._session.wait_for_task(task) backing = task_info.result - LOG.info(_LI("Successfully created volume backing: %s."), backing) + LOG.info("Successfully created volume backing: %s.", backing) return backing def create_backing(self, name, size_kb, disk_type, folder, resource_pool, @@ -907,8 +907,8 @@ class VMwareVolumeOps(object): backing, spec=relocate_spec) LOG.debug("Initiated relocation of volume backing: %s.", backing) self._session.wait_for_task(task) - LOG.info(_LI("Successfully relocated volume backing: %(backing)s " - "to datastore: %(ds)s and resource pool: %(rp)s."), + LOG.info("Successfully relocated volume backing: %(backing)s " + "to datastore: %(ds)s and resource pool: %(rp)s.", {'backing': backing, 'ds': datastore, 'rp': resource_pool}) def move_backing_to_folder(self, backing, folder): @@ -925,9 +925,9 @@ class VMwareVolumeOps(object): LOG.debug("Initiated move of volume backing: %(backing)s into the " "folder: %(fol)s.", {'backing': backing, 'fol': folder}) self._session.wait_for_task(task) - LOG.info(_LI("Successfully moved volume " - "backing: %(backing)s into the " - "folder: %(fol)s."), {'backing': backing, 'fol': folder}) + LOG.info("Successfully moved volume " + "backing: %(backing)s into the " + "folder: %(fol)s.", {'backing': backing, 'fol': folder}) def create_snapshot(self, backing, name, description, quiesce=False): """Create snapshot of the backing with given name and description. 
@@ -949,8 +949,8 @@ class VMwareVolumeOps(object): "named: %(name)s.", {'backing': backing, 'name': name}) task_info = self._session.wait_for_task(task) snapshot = task_info.result - LOG.info(_LI("Successfully created snapshot: %(snap)s for volume " - "backing: %(backing)s."), + LOG.info("Successfully created snapshot: %(snap)s for volume " + "backing: %(backing)s.", {'snap': snapshot, 'backing': backing}) return snapshot @@ -1011,8 +1011,8 @@ class VMwareVolumeOps(object): {'name': name, 'backing': backing}) snapshot = self.get_snapshot(backing, name) if not snapshot: - LOG.info(_LI("Did not find the snapshot: %(name)s for backing: " - "%(backing)s. Need not delete anything."), + LOG.info("Did not find the snapshot: %(name)s for backing: " + "%(backing)s. Need not delete anything.", {'name': name, 'backing': backing}) return task = self._session.invoke_api(self._session.vim, @@ -1022,8 +1022,8 @@ class VMwareVolumeOps(object): "%(backing)s.", {'name': name, 'backing': backing}) self._session.wait_for_task(task) - LOG.info(_LI("Successfully deleted snapshot: %(name)s of backing: " - "%(backing)s."), {'backing': backing, 'name': name}) + LOG.info("Successfully deleted snapshot: %(name)s of backing: " + "%(backing)s.", {'backing': backing, 'name': name}) def _get_folder(self, backing): """Get parent folder of the backing. @@ -1121,7 +1121,7 @@ class VMwareVolumeOps(object): LOG.debug("Initiated clone of backing: %s.", name) task_info = self._session.wait_for_task(task) new_backing = task_info.result - LOG.info(_LI("Successfully created clone: %s."), new_backing) + LOG.info("Successfully created clone: %s.", new_backing) return new_backing def _reconfigure_backing(self, backing, reconfig_spec): @@ -1194,7 +1194,7 @@ class VMwareVolumeOps(object): :param backing: VM to be renamed :param new_name: new VM name """ - LOG.info(_LI("Renaming backing VM: %(backing)s to %(new_name)s."), + LOG.info("Renaming backing VM: %(backing)s to %(new_name)s.", {'backing': backing, 'new_name': new_name}) rename_task = self._session.invoke_api(self._session.vim, @@ -1203,7 +1203,7 @@ class VMwareVolumeOps(object): newName=new_name) LOG.debug("Task: %s created for renaming VM.", rename_task) self._session.wait_for_task(rename_task) - LOG.info(_LI("Backing VM: %(backing)s renamed to %(new_name)s."), + LOG.info("Backing VM: %(backing)s renamed to %(new_name)s.", {'backing': backing, 'new_name': new_name}) @@ -1298,7 +1298,7 @@ class VMwareVolumeOps(object): datacenter=datacenter) LOG.debug("Initiated deletion via task: %s.", task) self._session.wait_for_task(task) - LOG.info(_LI("Successfully deleted file: %s."), file_path) + LOG.info("Successfully deleted file: %s.", file_path) def create_datastore_folder(self, ds_name, folder_path, datacenter): """Creates a datastore folder. 
@@ -1318,7 +1318,7 @@ class VMwareVolumeOps(object): fileManager, name=ds_folder_path, datacenter=datacenter) - LOG.info(_LI("Created datastore folder: %s."), folder_path) + LOG.info("Created datastore folder: %s.", folder_path) except exceptions.FileAlreadyExistsException: LOG.debug("Datastore folder: %s already exists.", folder_path) @@ -1354,8 +1354,7 @@ class VMwareVolumeOps(object): if device.__class__.__name__ == "VirtualDisk": return device - LOG.error(_LE("Virtual disk device of " - "backing: %s not found."), backing) + LOG.error("Virtual disk device of backing: %s not found.", backing) raise vmdk_exceptions.VirtualDiskNotFoundException() def get_vmdk_path(self, backing): @@ -1474,7 +1473,7 @@ class VMwareVolumeOps(object): LOG.debug("Initiated copying disk data via task: %s.", task) self._session.wait_for_task(task) - LOG.info(_LI("Successfully copied disk at: %(src)s to: %(dest)s."), + LOG.info("Successfully copied disk at: %(src)s to: %(dest)s.", {'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path}) def move_vmdk_file(self, src_dc_ref, src_vmdk_file_path, @@ -1518,7 +1517,7 @@ class VMwareVolumeOps(object): datacenter=dc_ref) LOG.debug("Initiated deleting vmdk file via task: %s.", task) self._session.wait_for_task(task) - LOG.info(_LI("Deleted vmdk file: %s."), vmdk_file_path) + LOG.info("Deleted vmdk file: %s.", vmdk_file_path) def _get_all_clusters(self): clusters = {} @@ -1544,7 +1543,7 @@ class VMwareVolumeOps(object): clusters = self._get_all_clusters() for name in names: if name not in clusters: - LOG.error(_LE("Compute cluster: %s not found."), name) + LOG.error("Compute cluster: %s not found.", name) raise vmdk_exceptions.ClusterNotFoundException(cluster=name) clusters_ref[name] = clusters[name] diff --git a/cinder/volume/drivers/vzstorage.py b/cinder/volume/drivers/vzstorage.py index 2d177e4bd3b..abfa2c32bdf 100644 --- a/cinder/volume/drivers/vzstorage.py +++ b/cinder/volume/drivers/vzstorage.py @@ -27,7 +27,7 @@ from oslo_utils import imageutils from oslo_utils import units from cinder import exception -from cinder.i18n import _, _LI, _LW +from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder import utils @@ -403,7 +403,7 @@ class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): @remotefs_drv.locked_volume_id_operation def extend_volume(self, volume, size_gb): - LOG.info(_LI('Extending volume %s.'), volume.id) + LOG.info('Extending volume %s.', volume.id) volume_format = self.get_volume_format(volume) self._extend_volume(volume, size_gb, volume_format) @@ -411,7 +411,7 @@ class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): volume_path = self.local_path(volume) self._check_extend_volume_support(volume, size_gb) - LOG.info(_LI('Resizing file to %sG...'), size_gb) + LOG.info('Resizing file to %sG...', size_gb) self._do_extend_volume(volume_path, size_gb, volume_format) @@ -540,8 +540,8 @@ class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): self._delete(mounted_path) self._delete(mounted_path + ".qemu_img_info") else: - LOG.info(_LI("Skipping deletion of volume %s " - "as it does not exist."), mounted_path) + LOG.info("Skipping deletion of volume %s " + "as it does not exist.", mounted_path) info_path = self._local_path_volume_info(volume) self._delete(info_path) @@ -602,7 +602,7 @@ class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): info_path = self._local_path_volume_info(snapshot.volume) snap_info = self._read_info_file(info_path, empty_if_missing=True) if snapshot.id not in snap_info: - 
LOG.warning(_LW("Snapshot %s doesn't exist in snap_info"), + LOG.warning("Snapshot %s doesn't exist in snap_info", snapshot.id) return @@ -679,7 +679,7 @@ class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): image_meta) def _create_cloned_volume(self, volume, src_vref): - LOG.info(_LI('Cloning volume %(src)s to volume %(dst)s'), + LOG.info('Cloning volume %(src)s to volume %(dst)s', {'src': src_vref.id, 'dst': volume.id}) diff --git a/cinder/volume/drivers/windows/smbfs.py b/cinder/volume/drivers/windows/smbfs.py index 21cb9910a81..2104f539fd0 100644 --- a/cinder/volume/drivers/windows/smbfs.py +++ b/cinder/volume/drivers/windows/smbfs.py @@ -27,7 +27,7 @@ from oslo_utils import fileutils from oslo_utils import units from cinder import exception -from cinder.i18n import _, _LI, _LW +from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder.volume.drivers import remotefs as remotefs_drv @@ -415,8 +415,8 @@ class WindowsSmbfsDriver(remotefs_drv.RemoteFSSnapDriver): def delete_volume(self, volume): """Deletes a logical volume.""" if not volume.provider_location: - LOG.warning(_LW('Volume %s does not have provider_location ' - 'specified, skipping.'), volume.name) + LOG.warning('Volume %s does not have provider_location ' + 'specified, skipping.', volume.name) return self._ensure_share_mounted(volume.provider_location) @@ -444,8 +444,8 @@ class WindowsSmbfsDriver(remotefs_drv.RemoteFSSnapDriver): smbfs_share) total_allocated = self._get_total_allocated(smbfs_share) return_value = [total_size, total_available, total_allocated] - LOG.info(_LI('Smb share %(share)s Total size %(size)s ' - 'Total allocated %(allocated)s'), + LOG.info('Smb share %(share)s Total size %(size)s ' + 'Total allocated %(allocated)s', {'share': smbfs_share, 'size': total_size, 'allocated': total_allocated}) return [float(x) for x in return_value] @@ -500,7 +500,7 @@ class WindowsSmbfsDriver(remotefs_drv.RemoteFSSnapDriver): @remotefs_drv.locked_volume_id_operation @update_allocation_data() def extend_volume(self, volume, size_gb): - LOG.info(_LI('Extending volume %s.'), volume.id) + LOG.info('Extending volume %s.', volume.id) self._check_extend_volume_support(volume, size_gb) self._extend_volume(volume, size_gb) @@ -508,7 +508,7 @@ class WindowsSmbfsDriver(remotefs_drv.RemoteFSSnapDriver): def _extend_volume(self, volume, size_gb): volume_path = self.local_path(volume) - LOG.info(_LI('Resizing file %(volume_path)s to %(size_gb)sGB.'), + LOG.info('Resizing file %(volume_path)s to %(size_gb)sGB.', dict(volume_path=volume_path, size_gb=size_gb)) self._vhdutils.resize_vhd(volume_path, size_gb * units.Gi, diff --git a/cinder/volume/drivers/xio.py b/cinder/volume/drivers/xio.py index a6720dd3daf..771b36ae1be 100644 --- a/cinder/volume/drivers/xio.py +++ b/cinder/volume/drivers/xio.py @@ -22,7 +22,6 @@ from six.moves import urllib from cinder import context from cinder import exception -from cinder.i18n import _LE, _LI, _LW from cinder import interface from cinder.volume import driver from cinder.volume.drivers.san import san @@ -95,15 +94,15 @@ class XIOISEDriver(driver.VolumeDriver): LOG.debug("XIOISEDriver check_for_setup_error called.") # The san_ip must always be set if self.configuration.san_ip == "": - LOG.error(_LE("san ip must be configured!")) + LOG.error("san ip must be configured!") RaiseXIODriverException() # The san_login must always be set if self.configuration.san_login == "": - LOG.error(_LE("san_login must be configured!")) + LOG.error("san_login must be 
configured!") RaiseXIODriverException() # The san_password must always be set if self.configuration.san_password == "": - LOG.error(_LE("san_password must be configured!")) + LOG.error("san_password must be configured!") RaiseXIODriverException() return @@ -120,7 +119,7 @@ class XIOISEDriver(driver.VolumeDriver): if status != 200: # unsuccessful - this is fatal as we need the global id # to build REST requests. - LOG.error(_LE("Array query failed - No response (%d)!"), status) + LOG.error("Array query failed - No response (%d)!", status) RaiseXIODriverException() # Successfully fetched QUERY info. Parse out globalid along with # ipaddress for Controller 1 and Controller 2. We assign primary @@ -135,7 +134,7 @@ class XIOISEDriver(driver.VolumeDriver): self.configuration.ise_qos = False capabilities = xml_tree.find('capabilities') if capabilities is None: - LOG.error(_LE("Array query failed. No capabilities in response!")) + LOG.error("Array query failed. No capabilities in response!") RaiseXIODriverException() for node in capabilities: if node.tag != 'capability': @@ -153,19 +152,19 @@ class XIOISEDriver(driver.VolumeDriver): support['thin-clones'] = True # Make sure ISE support necessary features if not support['clones']: - LOG.error(_LE("ISE FW version is not compatible with OpenStack!")) + LOG.error("ISE FW version is not compatible with OpenStack!") RaiseXIODriverException() # set up thin provisioning support self.configuration.san_thin_provision = support['thin-clones'] # Fill in global id, primary and secondary ip addresses globalid = xml_tree.find('globalid') if globalid is None: - LOG.error(_LE("Array query failed. No global id in XML response!")) + LOG.error("Array query failed. No global id in XML response!") RaiseXIODriverException() self.ise_globalid = globalid.text controllers = xml_tree.find('controllers') if controllers is None: - LOG.error(_LE("Array query failed. No controllers in response!")) + LOG.error("Array query failed. No controllers in response!") RaiseXIODriverException() for node in controllers: if node.tag != 'controller': @@ -204,7 +203,7 @@ class XIOISEDriver(driver.VolumeDriver): # this call will populate globalid self._send_query() if self.ise_globalid is None: - LOG.error(_LE("ISE globalid not set!")) + LOG.error("ISE globalid not set!") RaiseXIODriverException() return self.ise_globalid @@ -215,7 +214,7 @@ class XIOISEDriver(driver.VolumeDriver): self.ise_primary_ip = self.configuration.san_ip if self.ise_primary_ip == '': # No IP - fatal. 
- LOG.error(_LE("Primary IP must be set!")) + LOG.error("Primary IP must be set!") RaiseXIODriverException() return self.ise_primary_ip @@ -412,7 +411,7 @@ class XIOISEDriver(driver.VolumeDriver): if secondary_ip is '': # if secondary is not setup yet, then assert # connection on primary and secondary ip failed - LOG.error(_LE("Connection to %s failed and no secondary!"), + LOG.error("Connection to %s failed and no secondary!", primary_ip) RaiseXIODriverException() # swap primary for secondary ip in URL @@ -423,7 +422,7 @@ class XIOISEDriver(driver.VolumeDriver): # connection failed on both IPs - break out of the loop break # connection on primary and secondary ip failed - LOG.error(_LE("Could not connect to %(primary)s or %(secondary)s!"), + LOG.error("Could not connect to %(primary)s or %(secondary)s!", {'primary': primary_ip, 'secondary': secondary_ip}) RaiseXIODriverException() @@ -464,7 +463,7 @@ class XIOISEDriver(driver.VolumeDriver): resp = self._send_cmd('GET', url) status = resp['status'] if status != 200: - LOG.warning(_LW("IOnetworks GET failed (%d)"), status) + LOG.warning("IOnetworks GET failed (%d)", status) return chap # Got a good response. Parse out CHAP info. First check if CHAP is # enabled and if so parse out username and password. @@ -494,7 +493,7 @@ class XIOISEDriver(driver.VolumeDriver): status = resp['status'] if status != 200: # Not good. Throw an exception. - LOG.error(_LE("Controller GET failed (%d)"), status) + LOG.error("Controller GET failed (%d)", status) RaiseXIODriverException() # Good response. Parse out IQN that matches iscsi_ip_address # passed in from cinder.conf. IQN is 'hidden' in globalid field. @@ -519,7 +518,7 @@ class XIOISEDriver(driver.VolumeDriver): if target_iqn != '': return target_iqn # Did not find a matching IQN. Upsetting. - LOG.error(_LE("Failed to get IQN!")) + LOG.error("Failed to get IQN!") RaiseXIODriverException() def find_target_wwns(self): @@ -532,7 +531,7 @@ class XIOISEDriver(driver.VolumeDriver): status = resp['status'] if status != 200: # Not good. Throw an exception. - LOG.error(_LE("Controller GET failed (%d)"), status) + LOG.error("Controller GET failed (%d)", status) RaiseXIODriverException() # Good response. Parse out globalid (WWN) of endpoint that matches # protocol and type (array). @@ -559,7 +558,7 @@ class XIOISEDriver(driver.VolumeDriver): status = resp['status'] if status != 200: # Not good. Throw an exception. - LOG.error(_LE("Failed to get allocation information (%d)!"), + LOG.error("Failed to get allocation information (%d)!", status) RaiseXIODriverException() # Good response. Parse out LUN. @@ -570,7 +569,7 @@ class XIOISEDriver(driver.VolumeDriver): if luntag is not None: return luntag.text # Did not find LUN. Throw an exception. - LOG.error(_LE("Failed to get LUN information!")) + LOG.error("Failed to get LUN information!") RaiseXIODriverException() def _get_volume_info(self, vol_name): @@ -589,21 +588,21 @@ class XIOISEDriver(driver.VolumeDriver): url = '/storage/arrays/%s/volumes' % (self._get_ise_globalid()) resp = self._send_cmd('GET', url, {'name': vol_name}) if resp['status'] != 200: - LOG.warning(_LW("Could not get status for %(name)s (%(status)d)."), + LOG.warning("Could not get status for %(name)s (%(status)d).", {'name': vol_name, 'status': resp['status']}) return vol_info # Good response. Parse down to Volume tag in list of one. 
root = etree.fromstring(resp['content']) volume_node = root.find('volume') if volume_node is None: - LOG.warning(_LW("No volume node in XML content.")) + LOG.warning("No volume node in XML content.") return vol_info # Location can be found as an attribute in the volume node tag. vol_info['location'] = volume_node.attrib['self'] # Find status tag status = volume_node.find('status') if status is None: - LOG.warning(_LW("No status payload for volume %s."), vol_name) + LOG.warning("No status payload for volume %s.", vol_name) return vol_info # Fill in value and string from status tag attributes. vol_info['value'] = status.attrib['value'] @@ -628,7 +627,7 @@ class XIOISEDriver(driver.VolumeDriver): resp = self._send_cmd('GET', url, {'name': volume['name'], 'hostname': hostname}) if resp['status'] != 200: - LOG.error(_LE("Could not GET allocation information (%d)!"), + LOG.error("Could not GET allocation information (%d)!", resp['status']) RaiseXIODriverException() # Good response. Find the allocation based on volume name. @@ -688,12 +687,12 @@ class XIOISEDriver(driver.VolumeDriver): resp = self._send_cmd('POST', url, params) status = resp['status'] if status == 201: - LOG.info(_LI("Volume %s presented."), volume['name']) + LOG.info("Volume %s presented.", volume['name']) elif status == 409: - LOG.warning(_LW("Volume %(name)s already presented (%(status)d)!"), + LOG.warning("Volume %(name)s already presented (%(status)d)!", {'name': volume['name'], 'status': status}) else: - LOG.error(_LE("Failed to present volume %(name)s (%(status)d)!"), + LOG.error("Failed to present volume %(name)s (%(status)d)!", {'name': volume['name'], 'status': status}) RaiseXIODriverException() # Fetch LUN. In theory the LUN should be what caller requested. @@ -719,8 +718,8 @@ class XIOISEDriver(driver.VolumeDriver): resp = self._send_cmd('GET', url, {'hostname': hostname}) status = resp['status'] if status != 200: - LOG.error(_LE("Failed to get allocation information: " - "%(host)s (%(status)d)!"), + LOG.error("Failed to get allocation information: " + "%(host)s (%(status)d)!", {'host': hostname, 'status': status}) RaiseXIODriverException() # Good response. Count the number of allocations. @@ -753,7 +752,7 @@ class XIOISEDriver(driver.VolumeDriver): resp = self._send_cmd('GET', url, params) status = resp['status'] if resp['status'] != 200: - LOG.error(_LE("Could not find any hosts (%s)"), status) + LOG.error("Could not find any hosts (%s)", status) RaiseXIODriverException() # Good response. Try to match up a host based on end point string. host_tree = etree.fromstring(resp['content']) @@ -810,7 +809,7 @@ class XIOISEDriver(driver.VolumeDriver): resp = self._send_cmd('POST', url, params) status = resp['status'] if status != 201 and status != 409: - LOG.error(_LE("POST for host create failed (%s)!"), status) + LOG.error("POST for host create failed (%s)!", status) RaiseXIODriverException() # Successfully created host entry. Return host name. 
return hostname @@ -837,7 +836,7 @@ class XIOISEDriver(driver.VolumeDriver): if vol_info['value'] == '0': LOG.debug('Source volume %s ready.', volume_name) else: - LOG.error(_LE("Source volume %s not ready!"), volume_name) + LOG.error("Source volume %s not ready!", volume_name) RaiseXIODriverException() # Prepare snapshot # get extra_specs and qos specs from source volume @@ -863,7 +862,7 @@ class XIOISEDriver(driver.VolumeDriver): args, retries) if resp['status'] != 202: # clone prepare failed - bummer - LOG.error(_LE("Prepare clone failed for %s."), clone['name']) + LOG.error("Prepare clone failed for %s.", clone['name']) RaiseXIODriverException() # clone prepare request accepted # make sure not to continue until clone prepared @@ -875,13 +874,13 @@ class XIOISEDriver(driver.VolumeDriver): if PREPARED_STATUS in clone_info['details']: LOG.debug('Clone %s prepared.', clone['name']) else: - LOG.error(_LE("Clone %s not in prepared state!"), clone['name']) + LOG.error("Clone %s not in prepared state!", clone['name']) RaiseXIODriverException() # Clone prepared, now commit the create resp = self._send_cmd('PUT', clone_info['location'], {clone_type: 'true'}) if resp['status'] != 201: - LOG.error(_LE("Commit clone failed: %(name)s (%(status)d)!"), + LOG.error("Commit clone failed: %(name)s (%(status)d)!", {'name': clone['name'], 'status': resp['status']}) RaiseXIODriverException() # Clone create request accepted. Make sure not to return until clone @@ -892,9 +891,9 @@ class XIOISEDriver(driver.VolumeDriver): clone_info = self._wait_for_completion(self._help_wait_for_status, args, retries) if OPERATIONAL_STATUS in clone_info['string']: - LOG.info(_LI("Clone %s created."), clone['name']) + LOG.info("Clone %s created.", clone['name']) else: - LOG.error(_LE("Commit failed for %s!"), clone['name']) + LOG.error("Commit failed for %s!", clone['name']) RaiseXIODriverException() return @@ -958,7 +957,7 @@ class XIOISEDriver(driver.VolumeDriver): status = resp['status'] if status != 200: # Request failed. Return what we have, which isn't much. - LOG.warning(_LW("Could not get pool information (%s)!"), status) + LOG.warning("Could not get pool information (%s)!", status) return (pools, vol_cnt) # Parse out available (free) and used. Add them up to get total. xml_tree = etree.fromstring(resp['content']) @@ -1147,7 +1146,7 @@ class XIOISEDriver(driver.VolumeDriver): 'IOPSmax': qos['maxIOPS'], 'IOPSburst': qos['burstIOPS']}) if resp['status'] != 201: - LOG.error(_LE("Failed to create volume: %(name)s (%(status)s)"), + LOG.error("Failed to create volume: %(name)s (%(status)s)", {'name': volume['name'], 'status': resp['status']}) RaiseXIODriverException() # Good response. Make sure volume is in operational state before @@ -1160,9 +1159,9 @@ class XIOISEDriver(driver.VolumeDriver): args, retries) if OPERATIONAL_STATUS in vol_info['string']: # Ready. - LOG.info(_LI("Volume %s created"), volume['name']) + LOG.info("Volume %s created", volume['name']) else: - LOG.error(_LE("Failed to create volume %s."), volume['name']) + LOG.error("Failed to create volume %s.", volume['name']) RaiseXIODriverException() return @@ -1193,7 +1192,7 @@ class XIOISEDriver(driver.VolumeDriver): # in response. Used for DELETE call below. vol_info = self._get_volume_info(volume['name']) if vol_info['location'] == '': - LOG.warning(_LW("%s not found!"), volume['name']) + LOG.warning("%s not found!", volume['name']) return # Make DELETE call. 
args = {} @@ -1204,7 +1203,7 @@ class XIOISEDriver(driver.VolumeDriver): retries = self.configuration.ise_completion_retries resp = self._wait_for_completion(self._help_call_method, args, retries) if resp['status'] != 204: - LOG.warning(_LW("DELETE call failed for %s!"), volume['name']) + LOG.warning("DELETE call failed for %s!", volume['name']) return # DELETE call successful, now wait for completion. # We do that by waiting for the REST call to return Volume Not Found. @@ -1217,11 +1216,11 @@ class XIOISEDriver(driver.VolumeDriver): args, retries) if NOTFOUND_STATUS in vol_info['string']: # Volume no longer present on the backend. - LOG.info(_LI("Successfully deleted %s."), volume['name']) + LOG.info("Successfully deleted %s.", volume['name']) return # If we come here it means the volume is still present # on the backend. - LOG.error(_LE("Timed out deleting %s!"), volume['name']) + LOG.error("Timed out deleting %s!", volume['name']) return def delete_volume(self, volume): @@ -1240,7 +1239,7 @@ class XIOISEDriver(driver.VolumeDriver): # in response. Used for PUT call below. vol_info = self._get_volume_info(volume['name']) if vol_info['location'] == '': - LOG.error(_LE("modify volume: %s does not exist!"), volume['name']) + LOG.error("modify volume: %s does not exist!", volume['name']) RaiseXIODriverException() # Make modify volume REST call using PUT. # Location from above is used as identifier. @@ -1249,7 +1248,7 @@ class XIOISEDriver(driver.VolumeDriver): if status == 201: LOG.debug("Volume %s modified.", volume['name']) return True - LOG.error(_LE("Modify volume PUT failed: %(name)s (%(status)d)."), + LOG.error("Modify volume PUT failed: %(name)s (%(status)d).", {'name': volume['name'], 'status': status}) RaiseXIODriverException() @@ -1258,7 +1257,7 @@ class XIOISEDriver(driver.VolumeDriver): LOG.debug("extend_volume called") ret = self._modify_volume(volume, {'size': new_size}) if ret is True: - LOG.info(_LI("volume %(name)s extended to %(size)d."), + LOG.info("volume %(name)s extended to %(size)d.", {'name': volume['name'], 'size': new_size}) return @@ -1270,14 +1269,14 @@ class XIOISEDriver(driver.VolumeDriver): 'IOPSmax': qos['maxIOPS'], 'IOPSburst': qos['burstIOPS']}) if ret is True: - LOG.info(_LI("Volume %s retyped."), volume['name']) + LOG.info("Volume %s retyped.", volume['name']) return True def manage_existing(self, volume, ise_volume_ref): """Convert an existing ISE volume to a Cinder volume.""" LOG.debug("X-IO manage_existing called") if 'source-name' not in ise_volume_ref: - LOG.error(_LE("manage_existing: No source-name in ref!")) + LOG.error("manage_existing: No source-name in ref!") RaiseXIODriverException() # copy the source-name to 'name' for modify volume use ise_volume_ref['name'] = ise_volume_ref['source-name'] @@ -1289,20 +1288,20 @@ class XIOISEDriver(driver.VolumeDriver): 'IOPSmax': qos['maxIOPS'], 'IOPSburst': qos['burstIOPS']}) if ret is True: - LOG.info(_LI("Volume %s converted."), ise_volume_ref['name']) + LOG.info("Volume %s converted.", ise_volume_ref['name']) return ret def manage_existing_get_size(self, volume, ise_volume_ref): """Get size of an existing ISE volume.""" LOG.debug("X-IO manage_existing_get_size called") if 'source-name' not in ise_volume_ref: - LOG.error(_LE("manage_existing_get_size: No source-name in ref!")) + LOG.error("manage_existing_get_size: No source-name in ref!") RaiseXIODriverException() ref_name = ise_volume_ref['source-name'] # get volume status including size vol_info = self._get_volume_info(ref_name) if vol_info['location'] 
== '': - LOG.error(_LE("manage_existing_get_size: %s does not exist!"), + LOG.error("manage_existing_get_size: %s does not exist!", ref_name) RaiseXIODriverException() return int(vol_info['size']) @@ -1312,7 +1311,7 @@ class XIOISEDriver(driver.VolumeDriver): LOG.debug("X-IO unmanage called") vol_info = self._get_volume_info(volume['name']) if vol_info['location'] == '': - LOG.error(_LE("unmanage: Volume %s does not exist!"), + LOG.error("unmanage: Volume %s does not exist!", volume['name']) RaiseXIODriverException() # This is a noop. ISE does not store any Cinder specific information. @@ -1331,7 +1330,7 @@ class XIOISEDriver(driver.VolumeDriver): host = self._find_host(endpoints) if host['name'] == '': # host still not found, this is fatal. - LOG.error(_LE("Host could not be found!")) + LOG.error("Host could not be found!") RaiseXIODriverException() elif host['type'].upper() != 'OPENSTACK': # Make sure host type is marked as OpenStack host @@ -1339,7 +1338,7 @@ class XIOISEDriver(driver.VolumeDriver): resp = self._send_cmd('PUT', host['locator'], params) status = resp['status'] if status != 201 and status != 409: - LOG.error(_LE("Host PUT failed (%s)."), status) + LOG.error("Host PUT failed (%s).", status) RaiseXIODriverException() # We have a host object. target_lun = '' @@ -1401,7 +1400,7 @@ class XIOISEISCSIDriver(driver.ISCSIDriver): # The iscsi_ip_address must always be set. if self.configuration.iscsi_ip_address == '': - LOG.error(_LE("iscsi_ip_address must be set!")) + LOG.error("iscsi_ip_address must be set!") RaiseXIODriverException() # Setup common driver self.driver = XIOISEDriver(configuration=self.configuration) diff --git a/cinder/volume/drivers/zadara.py b/cinder/volume/drivers/zadara.py index 97603baa7cc..148c2f0c235 100644 --- a/cinder/volume/drivers/zadara.py +++ b/cinder/volume/drivers/zadara.py @@ -25,7 +25,7 @@ import requests import six from cinder import exception -from cinder.i18n import _, _LE, _LW +from cinder.i18n import _ from cinder import interface from cinder.volume import driver @@ -436,8 +436,8 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver): name = self.configuration.zadara_vol_name_template % volume['name'] vpsa_vol = self._get_vpsa_volume_name(name) if not vpsa_vol: - LOG.warning(_LW('Volume %s could not be found. ' - 'It might be already deleted'), name) + LOG.warning('Volume %s could not be found. 
' + 'It might be already deleted', name) return # Check attachment info and detach from all @@ -485,16 +485,16 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver): cg_name = self._get_volume_cg_name(volume_name) if not cg_name: # If the volume isn't present, then don't attempt to delete - LOG.warning(_LW('snapshot: original volume %s not found, ' - 'skipping delete operation'), + LOG.warning('snapshot: original volume %s not found, ' + 'skipping delete operation', volume_name) return snap_id = self._get_snap_id(cg_name, snapshot['name']) if not snap_id: # If the snapshot isn't present, then don't attempt to delete - LOG.warning(_LW('snapshot: snapshot %s not found, ' - 'skipping delete operation'), snapshot['name']) + LOG.warning('snapshot: snapshot %s not found, ' + 'skipping delete operation', snapshot['name']) return self.vpsa.send_cmd('delete_snapshot', @@ -510,12 +510,12 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver): % snapshot['volume_name']) cg_name = self._get_volume_cg_name(volume_name) if not cg_name: - LOG.error(_LE('Volume %(name)s not found'), {'name': volume_name}) + LOG.error('Volume %(name)s not found', {'name': volume_name}) raise exception.VolumeNotFound(volume_id=volume['id']) snap_id = self._get_snap_id(cg_name, snapshot['name']) if not snap_id: - LOG.error(_LE('Snapshot %(name)s not found'), + LOG.error('Snapshot %(name)s not found', {'name': snapshot['name']}) raise exception.SnapshotNotFound(snapshot_id=snapshot['id']) @@ -538,7 +538,7 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver): % src_vref['name']) cg_name = self._get_volume_cg_name(volume_name) if not cg_name: - LOG.error(_LE('Volume %(name)s not found'), {'name': volume_name}) + LOG.error('Volume %(name)s not found', {'name': volume_name}) raise exception.VolumeNotFound(volume_id=volume['id']) self.vpsa.send_cmd('create_clone', diff --git a/cinder/volume/drivers/zfssa/restclient.py b/cinder/volume/drivers/zfssa/restclient.py index 588f4569c6f..50aeeddcfcb 100644 --- a/cinder/volume/drivers/zfssa/restclient.py +++ b/cinder/volume/drivers/zfssa/restclient.py @@ -24,8 +24,6 @@ import six from six.moves import http_client from six.moves import urllib -from cinder.i18n import _LE, _LI - LOG = log.getLogger(__name__) @@ -179,7 +177,7 @@ class RestClientURL(object): self.headers['x-auth-session'] = \ result.get_header('x-auth-session') self.do_logout = True - LOG.info(_LI('ZFSSA version: %s'), + LOG.info('ZFSSA version: %s', result.get_header('x-zfssa-version')) elif result.status == http_client.NOT_FOUND: @@ -299,20 +297,20 @@ class RestClientURL(object): if err.code == http_client.NOT_FOUND: LOG.debug('REST Not Found: %s', err.code) else: - LOG.error(_LE('REST Not Available: %s'), err.code) + LOG.error('REST Not Available: %s', err.code) if err.code == http_client.SERVICE_UNAVAILABLE and \ retry < maxreqretries: retry += 1 time.sleep(1) - LOG.error(_LE('Server Busy retry request: %s'), retry) + LOG.error('Server Busy retry request: %s', retry) continue if (err.code == http_client.UNAUTHORIZED or err.code == http_client.INTERNAL_SERVER_ERROR) and \ '/access/v1' not in zfssaurl: try: - LOG.error(_LE('Authorizing request: %(zfssaurl)s ' - 'retry: %(retry)d .'), + LOG.error('Authorizing request: %(zfssaurl)s ' + 'retry: %(retry)d.', {'zfssaurl': zfssaurl, 'retry': retry}) self._authorize() req.add_header('x-auth-session', @@ -326,7 +324,7 @@ class RestClientURL(object): return RestResult(err=err) except urllib.error.URLError as err: - LOG.error(_LE('URLError: %s'), err.reason) + LOG.error('URLError: %s', 
err.reason) raise RestClientError(-1, name="ERR_URLError", message=err.reason) diff --git a/cinder/volume/drivers/zfssa/webdavclient.py b/cinder/volume/drivers/zfssa/webdavclient.py index 763050f5329..9b04960f3ba 100644 --- a/cinder/volume/drivers/zfssa/webdavclient.py +++ b/cinder/volume/drivers/zfssa/webdavclient.py @@ -22,7 +22,7 @@ from six.moves import http_client from six.moves import urllib from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _ LOG = log.getLogger(__name__) @@ -112,14 +112,14 @@ class ZFSSAWebDAVClient(object): response = urllib.request.urlopen( # nosec request, timeout=None) except urllib.error.HTTPError as err: - LOG.error(_LE('WebDAV returned with %(code)s error during ' - '%(method)s call.'), + LOG.error('WebDAV returned with %(code)s error during ' + '%(method)s call.', {'code': err.code, 'method': method}) if err.code == http_client.INTERNAL_SERVER_ERROR: - LOG.error(_LE('WebDAV operation failed with error code: ' - '%(code)s reason: %(reason)s Retry attempt ' - '%(retry)s in progress.'), + LOG.error('WebDAV operation failed with error code: ' + '%(code)s reason: %(reason)s Retry attempt ' + '%(retry)s in progress.', {'code': err.code, 'reason': err.reason, 'retry': retry}) diff --git a/cinder/volume/drivers/zfssa/zfssaiscsi.py b/cinder/volume/drivers/zfssa/zfssaiscsi.py index c76cb9227c8..43fabc34195 100644 --- a/cinder/volume/drivers/zfssa/zfssaiscsi.py +++ b/cinder/volume/drivers/zfssa/zfssaiscsi.py @@ -25,10 +25,10 @@ from oslo_utils import units import six from cinder import exception -from cinder import utils -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder.image import image_utils from cinder import interface +from cinder import utils from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.volume.drivers.zfssa import zfssarest @@ -145,7 +145,7 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver): Project, initiators, initiatorgroup, target and targetgroup. """ lcfg = self.configuration - LOG.info(_LI('Connecting to host: %s.'), lcfg.san_ip) + LOG.info('Connecting to host: %s.', lcfg.san_ip) self.zfssa = factory_zfssa() self.tgt_zfssa = factory_zfssa() self.zfssa.set_host(lcfg.san_ip, timeout=lcfg.zfssa_rest_timeout) @@ -193,13 +193,13 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver): zfssa_initiator['iqn'], zfssa_initiator_group) else: - LOG.warning(_LW('zfssa_initiator_config not found. ' - 'Using deprecated configuration options.')) + LOG.warning('zfssa_initiator_config not found. 
' + 'Using deprecated configuration options.') if (not lcfg.zfssa_initiator and (not lcfg.zfssa_initiator_group and lcfg.zfssa_initiator_group != 'default')): - LOG.error(_LE('zfssa_initiator cannot be empty when ' - 'creating a zfssa_initiator_group.')) + LOG.error('zfssa_initiator cannot be empty when ' + 'creating a zfssa_initiator_group.') raise exception.InvalidConfigurationValue( value='', option='zfssa_initiator') @@ -207,9 +207,8 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver): if (lcfg.zfssa_initiator != '' and (lcfg.zfssa_initiator_group == '' or lcfg.zfssa_initiator_group == 'default')): - LOG.warning(_LW('zfssa_initiator: %(ini)s' - ' wont be used on ' - 'zfssa_initiator_group= %(inigrp)s.'), + LOG.warning('zfssa_initiator: %(ini)s wont be used on ' + 'zfssa_initiator_group= %(inigrp)s.', {'ini': lcfg.zfssa_initiator, 'inigrp': lcfg.zfssa_initiator_group}) @@ -339,9 +338,9 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver): # if for some reason the volume no longer exists # on the backend if 'Error Getting Volume' in ex.message: - LOG.error(_LE("Volume ID %s was not found on " - "the zfssa device while attempting " - "delete_volume operation."), volume['id']) + LOG.error("Volume ID %s was not found on " + "the zfssa device while attempting " + "delete_volume operation.", volume['id']) return # Delete clone temp snapshot. see create_cloned_volume() @@ -390,7 +389,7 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver): snapshot['volume_name'], snapshot['name']) if numclones > 0: - LOG.error(_LE('Snapshot %s: has clones'), snapshot['name']) + LOG.error('Snapshot %s: has clones', snapshot['name']) raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) self.zfssa.delete_snapshot(lcfg.zfssa_pool, @@ -502,9 +501,8 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver): try: self.create_volume_from_snapshot(volume, zfssa_snapshot) except exception.VolumeBackendAPIException: - LOG.error(_LE('Clone Volume: ' - '%(volume)s failed from source volume: ' - '%(src_vref)s'), + LOG.error('Clone Volume: %(volume)s failed from source volume: ' + '%(src_vref)s', {'volume': volume['name'], 'src_vref': src_vref['name']}) # Cleanup snapshot @@ -549,8 +547,8 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver): cachevol_size = int(math.ceil(float(info.virtual_size) / units.Gi)) if cachevol_size > volume['size']: - exception_msg = (_LE('Image size %(img_size)dGB is larger ' - 'than volume size %(vol_size)dGB.'), + exception_msg = ('Image size %(img_size)dGB is larger ' + 'than volume size %(vol_size)dGB.', {'img_size': cachevol_size, 'vol_size': volume['size']}) LOG.error(exception_msg) @@ -576,8 +574,8 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver): if cachevol_size < volume['size']: self.extend_volume(volume, volume['size']) except exception.VolumeBackendAPIException as exc: - exception_msg = (_LE('Cannot clone image %(image)s to ' - 'volume %(volume)s. Error: %(error)s.'), + exception_msg = ('Cannot clone image %(image)s to ' + 'volume %(volume)s. Error: %(error)s.', {'volume': volume['name'], 'image': image_meta['id'], 'error': exc.msg}) @@ -853,25 +851,24 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver): (tgt_host, auth_str, tgt_pool, tgt_project, tgt_tgtgroup, tgt_repl_ip) = loc_info.split(':') except ValueError: - LOG.error(_LE("Location info needed for backend enabled volume " - "migration not in correct format: %s. Continuing " - "with generic volume migration."), loc_info) + LOG.error("Location info needed for backend enabled volume " + "migration not in correct format: %s. 
Continuing " + "with generic volume migration.", loc_info) return default_ret if tgt_repl_ip == '': - msg = _LE("zfssa_replication_ip not set in cinder.conf. " + LOG.error("zfssa_replication_ip not set in cinder.conf. " "zfssa_replication_ip is needed for backend enabled " "volume migration. Continuing with generic volume " "migration.") - LOG.error(msg) return default_ret src_pool = lcfg.zfssa_pool src_project = lcfg.zfssa_project try: - LOG.info(_LI('Connecting to target host: %s for backend enabled ' - 'migration.'), tgt_host) + LOG.info('Connecting to target host: %s for backend enabled ' + 'migration.', tgt_host) self.tgt_zfssa.set_host(tgt_host) self.tgt_zfssa.login(auth_str) @@ -945,7 +942,7 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver): return(True, None) except Exception: - LOG.error(_LE("Error migrating volume: %s"), volume['name']) + LOG.error("Error migrating volume: %s", volume['name']) raise def update_migrated_volume(self, ctxt, volume, new_volume, @@ -1017,7 +1014,7 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver): lcfg.zfssa_cache_project, cache['share']) except exception.VolumeBackendAPIException: - LOG.warning(_LW("Volume %s exists but can't be deleted."), + LOG.warning("Volume %s exists but can't be deleted.", cache['share']) def manage_existing(self, volume, existing_ref): @@ -1041,8 +1038,8 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver): schema={"custom:cinder_managed": True}) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed to rename volume %(existing)s to " - "%(new)s. Volume manage failed."), + LOG.error("Failed to rename volume %(existing)s to " + "%(new)s. Volume manage failed.", {'existing': existing_vol['name'], 'new': new_vol_name}) return None @@ -1069,8 +1066,8 @@ class ZFSSAISCSIDriver(driver.ISCSIDriver): schema={"custom:cinder_managed": False}) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed to rename volume %(existing)s to " - "%(new)s. Volume unmanage failed."), + LOG.error("Failed to rename volume %(existing)s to " + "%(new)s. 
Volume unmanage failed.", {'existing': volume['name'], 'new': new_name}) return None diff --git a/cinder/volume/drivers/zfssa/zfssanfs.py b/cinder/volume/drivers/zfssa/zfssanfs.py index c047cde8509..f8d17bf5d0f 100644 --- a/cinder/volume/drivers/zfssa/zfssanfs.py +++ b/cinder/volume/drivers/zfssa/zfssanfs.py @@ -27,11 +27,11 @@ from oslo_utils import units import six from cinder import exception -from cinder import utils -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder.objects.volume import Volume +from cinder import utils from cinder.volume.drivers import nfs from cinder.volume.drivers.san import san from cinder.volume.drivers.zfssa import zfssarest @@ -121,13 +121,13 @@ class ZFSSANFSDriver(nfs.NfsDriver): except OSError as exc: if exc.errno != errno.ENOENT: raise - LOG.error(_LE('%s is not installed.'), package) + LOG.error('%s is not installed.', package) else: msg = utils.build_or_str(packages, '%s needs to be installed.') raise exception.NfsException(msg) lcfg = self.configuration - LOG.info(_LI('Connecting to host: %s.'), lcfg.san_ip) + LOG.info('Connecting to host: %s.', lcfg.san_ip) host = lcfg.san_ip user = lcfg.san_login @@ -199,7 +199,7 @@ class ZFSSANFSDriver(nfs.NfsDriver): try: self._ensure_share_mounted(self.mount_path) except Exception as exc: - LOG.error(_LE('Exception during mounting %s.'), exc) + LOG.error('Exception during mounting %s.', exc) self._mounted_shares = [self.mount_path] LOG.debug('Available shares %s', self._mounted_shares) @@ -226,7 +226,7 @@ class ZFSSANFSDriver(nfs.NfsDriver): def create_snapshot(self, snapshot): """Creates a snapshot of a volume.""" - LOG.info(_LI('Creating snapshot: %s'), snapshot['name']) + LOG.info('Creating snapshot: %s', snapshot['name']) lcfg = self.configuration snap_name = self._create_snapshot_name() self.zfssa.create_snapshot(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project, @@ -249,13 +249,13 @@ class ZFSSANFSDriver(nfs.NfsDriver): def delete_snapshot(self, snapshot): """Deletes a snapshot.""" - LOG.info(_LI('Deleting snapshot: %s'), snapshot['name']) + LOG.info('Deleting snapshot: %s', snapshot['name']) self.zfssa.delete_snapshot_of_volume_file(src_file=snapshot['name']) def create_volume_from_snapshot(self, volume, snapshot, method='COPY'): - LOG.info(_LI('Creatng volume from snapshot. volume: %s'), + LOG.info('Creating volume from snapshot. 
volume: %s', volume['name']) - LOG.info(_LI('Source Snapshot: %s'), snapshot['name']) + LOG.info('Source Snapshot: %s', snapshot['name']) self._ensure_shares_mounted() self.zfssa.create_volume_from_snapshot_file(src_file=snapshot['name'], @@ -270,10 +270,10 @@ class ZFSSANFSDriver(nfs.NfsDriver): except Exception: vol_path = self.local_path(volume) with excutils.save_and_reraise_exception(): - LOG.error(_LE('Error in extending volume size: Volume: ' - '%(volume)s Vol_Size: %(vol_size)d with ' - 'Snapshot: %(snapshot)s Snap_Size: ' - '%(snap_size)d'), + LOG.error('Error in extending volume size: Volume: ' + '%(volume)s Vol_Size: %(vol_size)d with ' + 'Snapshot: %(snapshot)s Snap_Size: ' + '%(snap_size)d', {'volume': volume['name'], 'vol_size': volume['size'], 'snapshot': snapshot['name'], @@ -288,8 +288,8 @@ class ZFSSANFSDriver(nfs.NfsDriver): def create_cloned_volume(self, volume, src_vref): """Creates a snapshot and then clones the snapshot into a volume.""" - LOG.info(_LI('new cloned volume: %s'), volume['name']) - LOG.info(_LI('source volume for cloning: %s'), src_vref['name']) + LOG.info('new cloned volume: %s', volume['name']) + LOG.info('source volume for cloning: %s', src_vref['name']) snapshot = {'volume_name': src_vref['name'], 'volume_id': src_vref['id'], @@ -310,7 +310,7 @@ class ZFSSANFSDriver(nfs.NfsDriver): super(ZFSSANFSDriver, self).delete_volume(volume) if vol_props['origin'].startswith(lcfg.zfssa_cache_directory): - LOG.info(_LI('Checking origin %(origin)s of volume %(volume)s.'), + LOG.info('Checking origin %(origin)s of volume %(volume)s.', {'origin': vol_props['origin'], 'volume': volume.name}) self._check_origin(vol_props['origin']) @@ -352,8 +352,8 @@ class ZFSSANFSDriver(nfs.NfsDriver): cachevol_size = int(math.ceil(float(info.virtual_size) / units.Gi)) if cachevol_size > volume['size']: - exception_msg = (_LE('Image size %(img_size)dGB is larger ' - 'than volume size %(vol_size)dGB.'), + exception_msg = ('Image size %(img_size)dGB is larger ' + 'than volume size %(vol_size)dGB.', {'img_size': cachevol_size, 'vol_size': volume['size']}) LOG.error(exception_msg) @@ -382,8 +382,8 @@ class ZFSSANFSDriver(nfs.NfsDriver): clone_vol = self.create_cloned_volume(volume, cache_vol) self._update_origin(volume['name'], cachevol_name) except exception.VolumeBackendAPIException as exc: - exception_msg = (_LE('Cannot clone image %(image)s to ' - 'volume %(volume)s. Error: %(error)s.'), + exception_msg = ('Cannot clone image %(image)s to ' + 'volume %(volume)s. Error: %(error)s.', {'volume': volume['name'], 'image': image_meta['id'], 'error': exc.msg}) @@ -627,16 +627,16 @@ class ZFSSANFSDriver(nfs.NfsDriver): try: (tgt_asn, tgt_share) = loc_info.split(':') except ValueError: - LOG.error(_LE("Location info needed for backend enabled volume " - "migration not in correct format: %s. Continuing " - "with generic volume migration."), loc_info) + LOG.error("Location info needed for backend enabled volume " + "migration not in correct format: %s. Continuing " + "with generic volume migration.", loc_info) return default_ret src_asn = self.zfssa.get_asn() if tgt_asn == src_asn and lcfg.zfssa_nfs_share == tgt_share: - LOG.info(_LI('Source and destination ZFSSA shares are the same. ' - 'Do nothing. volume: %s'), volume['name']) + LOG.info('Source and destination ZFSSA shares are the same. ' + 'Do nothing. 
volume: %s', volume['name']) return (True, None) return (False, None) @@ -684,8 +684,8 @@ class ZFSSANFSDriver(nfs.NfsDriver): try: self.zfssa.rename_volume(existing_vol_name, volume['name']) except Exception: - LOG.error(_LE("Failed to rename volume %(existing)s to %(new)s. " - "Volume manage failed."), + LOG.error("Failed to rename volume %(existing)s to %(new)s. " + "Volume manage failed.", {'existing': existing_vol_name, 'new': volume['name']}) raise @@ -695,8 +695,8 @@ class ZFSSANFSDriver(nfs.NfsDriver): {'cinder_managed': 'True'}) except Exception: self.zfssa.rename_volume(volume['name'], existing_vol_name) - LOG.error(_LE("Failed to set properties for volume %(existing)s. " - "Volume manage failed."), + LOG.error("Failed to set properties for volume %(existing)s. " + "Volume manage failed.", {'existing': volume['name']}) raise @@ -757,8 +757,8 @@ class ZFSSANFSDriver(nfs.NfsDriver): try: self.zfssa.rename_volume(volume['name'], new_name) except Exception: - LOG.error(_LE("Failed to rename volume %(existing)s to %(new)s. " - "Volume unmanage failed."), + LOG.error("Failed to rename volume %(existing)s to %(new)s. " + "Volume unmanage failed.", {'existing': volume['name'], 'new': new_name}) raise @@ -767,8 +767,8 @@ class ZFSSANFSDriver(nfs.NfsDriver): self.zfssa.set_file_props(new_name, {'cinder_managed': 'False'}) except Exception: self.zfssa.rename_volume(new_name, volume['name']) - LOG.error(_LE("Failed to set properties for volume %(existing)s. " - "Volume unmanage failed."), + LOG.error("Failed to set properties for volume %(existing)s. " + "Volume unmanage failed.", {'existing': volume['name']}) raise diff --git a/cinder/volume/drivers/zfssa/zfssarest.py b/cinder/volume/drivers/zfssa/zfssarest.py index b7ac7bd5ead..355900531bf 100644 --- a/cinder/volume/drivers/zfssa/zfssarest.py +++ b/cinder/volume/drivers/zfssa/zfssarest.py @@ -20,7 +20,7 @@ from oslo_log import log from oslo_service import loopingcall from cinder import exception -from cinder.i18n import _, _LE, _LW +from cinder.i18n import _ from cinder.volume.drivers.zfssa import restclient from cinder.volume.drivers.zfssa import webdavclient @@ -772,13 +772,13 @@ class ZFSSAApi(object): ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: - exception_msg = (_LE('Error Getting ' - 'Snapshot: %(snapshot)s of ' - 'Volume: %(lun)s in ' - 'Pool: %(pool)s, ' - 'Project: %(project)s ' - 'Return code: %(ret.status)d, ' - 'Message: %(ret.data)s.'), + exception_msg = ('Error Getting ' + 'Snapshot: %(snapshot)s of ' + 'Volume: %(lun)s in ' + 'Pool: %(pool)s, ' + 'Project: %(project)s ' + 'Return code: %(ret.status)d, ' + 'Message: %(ret.data)s.', {'snapshot': snapshot, 'lun': lun, 'pool': pool, @@ -808,10 +808,10 @@ class ZFSSAApi(object): ret = self.rclient.put(svc, arg) if ret.status != restclient.Status.ACCEPTED: - LOG.error(_LE('Error Setting Volume: %(lun)s to InitiatorGroup: ' - '%(initiatorgroup)s Pool: %(pool)s Project: ' - '%(project)s Return code: %(ret.status)d Message: ' - '%(ret.data)s.'), + LOG.error('Error Setting Volume: %(lun)s to InitiatorGroup: ' + '%(initiatorgroup)s Pool: %(pool)s Project: ' + '%(project)s Return code: %(ret.status)d Message: ' + '%(ret.data)s.', {'lun': lun, 'initiatorgroup': initiatorgroup, 'pool': pool, @@ -995,7 +995,7 @@ class ZFSSAApi(object): svc = "%(base)s/%(prop)s" % {'base': base, 'prop': schema['property']} ret = self.rclient.get(svc) if ret.status == restclient.Status.OK: - LOG.warning(_LW('Property %s already exists.'), schema['property']) + LOG.warning('Property %s 
already exists.', schema['property']) return ret = self.rclient.post(base, schema) @@ -1265,7 +1265,7 @@ class ZFSSANfsApi(ZFSSAApi): try: self.webdavclient.request(src_file=filename, method='DELETE') except Exception: - exception_msg = (_LE('Cannot delete file %s.'), filename) + exception_msg = ('Cannot delete file %s.', filename) LOG.error(exception_msg) def set_file_props(self, file, specs): diff --git a/cinder/volume/drivers/zte/zte_ks.py b/cinder/volume/drivers/zte/zte_ks.py index 01344c64317..bc650300ae7 100644 --- a/cinder/volume/drivers/zte/zte_ks.py +++ b/cinder/volume/drivers/zte/zte_ks.py @@ -26,7 +26,7 @@ import six from six.moves import urllib from cinder import exception -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume import driver @@ -127,7 +127,7 @@ class ZTEVolumeDriver(driver.VolumeDriver): LOG.debug('Response Data: method %(method)s res %(res)s.', {'method': method, 'res': response}) except Exception: - LOG.exception(_LE('Bad response from server.')) + LOG.exception('Bad response from server.') msg = (_('_call failed.')) raise exception.VolumeBackendAPIException(data=msg) res_json = json.loads(response) @@ -205,11 +205,10 @@ class ZTEVolumeDriver(driver.VolumeDriver): if ret['returncode'] == zte_pub.ZTE_SUCCESS: return sid else: - LOG.info(_LI('heartbeat failed. Return code:' - ' %(ret)s.'), + LOG.info('heartbeat failed. Return code: %(ret)s.', {'ret': ret['returncode']}) except Exception: - LOG.exception(_LE('_get_sessionid error.')) + LOG.exception('_get_sessionid error.') self._change_server() return self._user_login() @@ -783,7 +782,7 @@ class ZteISCSIDriver(ZTEVolumeDriver, driver.ISCSIDriver): iscsi_info['Initiator'] = initiator_list except Exception: - LOG.exception(_LE('_get_iscsi_info error.')) + LOG.exception('_get_iscsi_info error.') raise return iscsi_info @@ -820,8 +819,8 @@ class ZteISCSIDriver(ZTEVolumeDriver, driver.ISCSIDriver): ip_ctrl = self._get_target_ip_ctrl(iscsiip) if ip_ctrl is None: - LOG.exception(_LE('_get_tgt_iqn:get iscsi ip ctrl fail, ' - 'IP is %s.'), iscsiip) + LOG.exception('_get_tgt_iqn:get iscsi ip ctrl fail, ' + 'IP is %s.', iscsiip) return None # get the ctrl iqn diff --git a/cinder/volume/flows/api/create_volume.py b/cinder/volume/flows/api/create_volume.py index 54daa222420..d0b96637de2 100644 --- a/cinder/volume/flows/api/create_volume.py +++ b/cinder/volume/flows/api/create_volume.py @@ -22,7 +22,7 @@ from taskflow.types import failure as ft from cinder import exception from cinder import flow_utils -from cinder.i18n import _, _LE, _LW +from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder import policy @@ -260,9 +260,9 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask): context, img_vol_type) except exception.VolumeTypeNotFoundByName: - LOG.warning(_LW("Failed to retrieve volume_type from image " - "metadata. '%(img_vol_type)s' doesn't match " - "any volume types."), + LOG.warning("Failed to retrieve volume_type from image " + "metadata. 
'%(img_vol_type)s' doesn't match " + "any volume types.", {'img_vol_type': img_vol_type}) return None @@ -321,9 +321,9 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask): availability_zone = ( CONF.default_availability_zone or CONF.storage_availability_zone) - LOG.warning(_LW("Availability zone '%(s_az)s' " - "not found, falling back to " - "'%(s_fallback_az)s'."), + LOG.warning("Availability zone '%(s_az)s' " + "not found, falling back to " + "'%(s_fallback_az)s'.", {'s_az': original_az, 's_fallback_az': availability_zone}) else: @@ -395,9 +395,8 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask): if volume_type: current_volume_type_id = volume_type.get('id') if current_volume_type_id != snapshot['volume_type_id']: - msg = _LW("Volume type will be changed to " - "be the same as the source volume.") - LOG.warning(msg) + LOG.warning("Volume type will be changed to " + "be the same as the source volume.") return snapshot['volume_type_id'] else: return volume_type.get('id') @@ -589,7 +588,7 @@ class EntryCreateTask(flow_utils.CinderTask): # # NOTE(harlowja): Being unable to destroy a volume is pretty # bad though!! - LOG.exception(_LE("Failed destroying volume entry %s"), volume.id) + LOG.exception("Failed destroying volume entry %s", volume.id) class QuotaReserveTask(flow_utils.CinderTask): @@ -650,8 +649,8 @@ class QuotaReserveTask(flow_utils.CinderTask): except exception.CinderException: # We are already reverting, therefore we should silence this # exception since a second exception being active will be bad. - LOG.exception(_LE("Failed rolling back quota for" - " %s reservations"), reservations) + LOG.exception("Failed rolling back quota for" + " %s reservations", reservations) class QuotaCommitTask(flow_utils.CinderTask): @@ -697,8 +696,8 @@ class QuotaCommitTask(flow_utils.CinderTask): QUOTAS.commit(context, reservations, project_id=context.project_id) except Exception: - LOG.exception(_LE("Failed to update quota for deleting " - "volume: %s"), volume['id']) + LOG.exception("Failed to update quota for deleting " + "volume: %s", volume['id']) class VolumeCastTask(flow_utils.CinderTask): @@ -804,11 +803,11 @@ class VolumeCastTask(flow_utils.CinderTask): # Restore the source volume status and set the volume to error status. 
common.restore_source_status(context, self.db, kwargs) common.error_out(volume) - LOG.error(_LE("Volume %s: create failed"), volume.id) + LOG.error("Volume %s: create failed", volume.id) exc_info = False if all(flow_failures[-1].exc_info): exc_info = flow_failures[-1].exc_info - LOG.error(_LE('Unexpected build error:'), exc_info=exc_info) + LOG.error('Unexpected build error:', exc_info=exc_info) def get_flow(db_api, image_service_api, availability_zones, create_what, diff --git a/cinder/volume/flows/api/manage_existing.py b/cinder/volume/flows/api/manage_existing.py index ca23c006e09..9c7cb543770 100644 --- a/cinder/volume/flows/api/manage_existing.py +++ b/cinder/volume/flows/api/manage_existing.py @@ -18,7 +18,6 @@ from taskflow.types import failure as ft from cinder import exception from cinder import flow_utils -from cinder.i18n import _LE from cinder import objects from cinder.objects import fields from cinder.volume.flows import common @@ -87,7 +86,7 @@ class EntryCreateTask(flow_utils.CinderTask): try: self.db.volume_destroy(context.elevated(), vol_id) except exception.CinderException: - LOG.exception(_LE("Failed destroying volume entry: %s."), vol_id) + LOG.exception("Failed destroying volume entry: %s.", vol_id) class ManageCastTask(flow_utils.CinderTask): @@ -116,11 +115,11 @@ class ManageCastTask(flow_utils.CinderTask): def revert(self, context, result, flow_failures, volume, **kwargs): # Restore the source volume status and set the volume to error status. common.error_out(volume, status='error_managing') - LOG.error(_LE("Volume %s: manage failed."), volume.id) + LOG.error("Volume %s: manage failed.", volume.id) exc_info = False if all(flow_failures[-1].exc_info): exc_info = flow_failures[-1].exc_info - LOG.error(_LE('Unexpected build error:'), exc_info=exc_info) + LOG.error('Unexpected build error:', exc_info=exc_info) def get_flow(scheduler_rpcapi, db_api, create_what): diff --git a/cinder/volume/flows/common.py b/cinder/volume/flows/common.py index 2b81c4a74dc..939c5ca920b 100644 --- a/cinder/volume/flows/common.py +++ b/cinder/volume/flows/common.py @@ -20,8 +20,6 @@ from oslo_log import log as logging import six from cinder import exception -from cinder.i18n import _LE - LOG = logging.getLogger(__name__) @@ -58,9 +56,9 @@ def restore_source_status(context, db, volume_spec): except exception.CinderException: # NOTE(harlowja): Don't let this cause further exceptions since this is # a non-critical failure. - LOG.exception(_LE("Failed setting source " - "volume %(source_volid)s back to" - " its initial %(source_status)s status") % + LOG.exception("Failed setting source " + "volume %(source_volid)s back to" + " its initial %(source_status)s status", {'source_status': source_status, 'source_volid': source_volid}) @@ -87,8 +85,8 @@ def error_out(resource, reason=None, status='error'): resource.save() except Exception: # Don't let this cause further exceptions. 
- LOG.exception(_LE("Failed setting %(object_type)s %(object_id)s to " - " %(status)s status."), + LOG.exception("Failed setting %(object_type)s %(object_id)s to " + "%(status)s status.", {'object_type': resource.obj_name(), 'object_id': resource.id, 'status': status}) diff --git a/cinder/volume/flows/manager/create_volume.py b/cinder/volume/flows/manager/create_volume.py index d57ebda9818..660b36f82f8 100644 --- a/cinder/volume/flows/manager/create_volume.py +++ b/cinder/volume/flows/manager/create_volume.py @@ -24,7 +24,7 @@ from taskflow.types import failure as ft from cinder import context as cinder_context from cinder import exception from cinder import flow_utils -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder.image import glance from cinder.image import image_utils from cinder import objects @@ -118,7 +118,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask): volume.save() except exception.CinderException: # Don't let updating the state cause the rescheduling to fail. - LOG.exception(_LE("Volume %s: update volume state failed."), + LOG.exception("Volume %s: update volume state failed.", volume.id) def _reschedule(self, context, cause, request_spec, filter_properties, @@ -174,7 +174,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask): # error and return. if not self.do_reschedule: common.error_out(volume) - LOG.error(_LE("Volume %s: create failed"), volume.id) + LOG.error("Volume %s: create failed", volume.id) return False # Check if we have a cause which can tell us not to reschedule and @@ -182,7 +182,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask): for failure in flow_failures.values(): if failure.check(*self.no_reschedule_types): common.error_out(volume) - LOG.error(_LE("Volume %s: create failed"), volume.id) + LOG.error("Volume %s: create failed", volume.id) return False # Use a different context when rescheduling. 
@@ -195,8 +195,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask): self._post_reschedule(volume) return True except exception.CinderException: - LOG.exception(_LE("Volume %s: rescheduling failed"), - volume.id) + LOG.exception("Volume %s: rescheduling failed", volume.id) return False @@ -227,7 +226,7 @@ class ExtractVolumeRefTask(flow_utils.CinderTask): reason = _('Volume create failed while extracting volume ref.') common.error_out(volume, reason) - LOG.error(_LE("Volume %s: create failed"), volume.id) + LOG.error("Volume %s: create failed", volume.id) class ExtractVolumeSpecTask(flow_utils.CinderTask): @@ -355,8 +354,8 @@ class NotifyVolumeActionTask(flow_utils.CinderTask): # If notification sending of volume database entry reading fails # then we shouldn't error out the whole workflow since this is # not always information that must be sent for volumes to operate - LOG.exception(_LE("Failed notifying about the volume" - " action %(event)s for volume %(volume_id)s"), + LOG.exception("Failed notifying about the volume" + " action %(event)s for volume %(volume_id)s", {'event': self.event_suffix, 'volume_id': volume.id}) @@ -458,10 +457,9 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): snapshot.volume_id) make_bootable = originating_vref.bootable except exception.CinderException as ex: - LOG.exception(_LE("Failed fetching snapshot %(snapshot_id)s " - "bootable" - " flag using the provided glance snapshot " - "%(snapshot_ref_id)s volume reference"), + LOG.exception("Failed fetching snapshot %(snapshot_id)s bootable" + " flag using the provided glance snapshot " + "%(snapshot_ref_id)s volume reference", {'snapshot_id': snapshot_id, 'snapshot_ref_id': snapshot.volume_id}) raise exception.MetadataUpdateFailure(reason=ex) @@ -476,8 +474,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): volume.bootable = True volume.save() except exception.CinderException as ex: - LOG.exception(_LE("Failed updating volume %(volume_id)s bootable " - "flag to true"), {'volume_id': volume.id}) + LOG.exception("Failed updating volume %(volume_id)s bootable " + "flag to true", {'volume_id': volume.id}) raise exception.MetadataUpdateFailure(reason=ex) def _create_from_source_volume(self, context, volume, source_volid, @@ -536,17 +534,17 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): self.driver.copy_image_to_volume( context, volume, image_service, image_id) except processutils.ProcessExecutionError as ex: - LOG.exception(_LE("Failed to copy image %(image_id)s to volume: " - "%(volume_id)s"), + LOG.exception("Failed to copy image %(image_id)s to volume: " + "%(volume_id)s", {'volume_id': volume.id, 'image_id': image_id}) raise exception.ImageCopyFailure(reason=ex.stderr) except exception.ImageUnacceptable as ex: - LOG.exception(_LE("Failed to copy image to volume: %(volume_id)s"), + LOG.exception("Failed to copy image to volume: %(volume_id)s", {'volume_id': volume.id}) raise exception.ImageUnacceptable(ex) except Exception as ex: - LOG.exception(_LE("Failed to copy image %(image_id)s to " - "volume: %(volume_id)s"), + LOG.exception("Failed to copy image %(image_id)s to " + "volume: %(volume_id)s", {'volume_id': volume.id, 'image_id': image_id}) if not isinstance(ex, exception.ImageCopyFailure): raise exception.ImageCopyFailure(reason=ex) @@ -607,7 +605,7 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): if (image_meta.get('container_format') != 'bare' or image_meta.get('disk_format') != 'raw'): - LOG.info(_LI("Requested image %(id)s is not in raw format."), + 
LOG.info("Requested image %(id)s is not in raw format.", {'id': image_meta.get('id')}) return None, False @@ -629,13 +627,13 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): image_owner = m['value'] if (image_meta['owner'] != volume['project_id'] and image_meta['owner'] != image_owner): - LOG.info(_LI("Skipping image volume %(id)s because " - "it is not accessible by current Tenant."), + LOG.info("Skipping image volume %(id)s because " + "it is not accessible by current Tenant.", {'id': image_volume.id}) continue - LOG.info(_LI("Will clone a volume from the image volume " - "%(id)s."), {'id': image_volume.id}) + LOG.info("Will clone a volume from the image volume " + "%(id)s.", {'id': image_volume.id}) break else: LOG.debug("No accessible image volume for image %(id)s found.", @@ -647,7 +645,7 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): self._cleanup_cg_in_volume(volume) return ret, True except (NotImplementedError, exception.CinderException): - LOG.exception(_LE('Failed to clone image volume %(id)s.'), + LOG.exception('Failed to clone image volume %(id)s.', {'id': image_volume['id']}) return None, False @@ -666,8 +664,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): volume.update(model_update) volume.save() except exception.CinderException: - LOG.exception(_LE("Failed updating volume %(volume_id)s with " - "%(updates)s"), + LOG.exception("Failed updating volume %(volume_id)s with " + "%(updates)s", {'volume_id': volume.id, 'updates': model_update}) self._copy_image_to_volume(context, volume, image_id, image_location, @@ -705,12 +703,12 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): ) return model_update, True except NotImplementedError: - LOG.warning(_LW('Backend does not support creating image-volume ' - 'clone. Image will be downloaded from Glance.')) + LOG.warning('Backend does not support creating image-volume ' + 'clone. Image will be downloaded from Glance.') except exception.CinderException as e: - LOG.warning(_LW('Failed to create volume from image-volume cache, ' - 'image will be downloaded from Glance. Error: ' - '%(exception)s'), {'exception': e}) + LOG.warning('Failed to create volume from image-volume cache, ' + 'image will be downloaded from Glance. Error: ' + '%(exception)s', {'exception': e}) return None, False def _create_from_image(self, context, volume, @@ -765,8 +763,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): if self.image_volume_cache and not cloned: internal_context = cinder_context.get_internal_tenant_context() if not internal_context: - LOG.info(_LI('Unable to get Cinder internal context, will ' - 'not use image-volume cache.')) + LOG.info('Unable to get Cinder internal context, will ' + 'not use image-volume cache.') else: model_update, cloned = self._create_from_image_cache( context, @@ -847,8 +845,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): # we can't do anything if the driver didn't init if not self.driver.initialized: driver_name = self.driver.__class__.__name__ - LOG.error(_LE("Unable to create volume. " - "Volume driver %s not initialized"), driver_name) + LOG.error("Unable to create volume. 
" + "Volume driver %s not initialized", driver_name) raise exception.DriverNotInitialized() # NOTE(xyang): Populate consistencygroup_id and consistencygroup @@ -861,8 +859,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): volume.consistencygroup = cg create_type = volume_spec.pop('type', None) - LOG.info(_LI("Volume %(volume_id)s: being created as %(create_type)s " - "with specification: %(volume_spec)s"), + LOG.info("Volume %(volume_id)s: being created as %(create_type)s " + "with specification: %(volume_spec)s", {'volume_spec': volume_spec, 'volume_id': volume_id, 'create_type': create_type}) if create_type == 'raw': @@ -893,8 +891,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): # If somehow the update failed we want to ensure that the # failure is logged (but not try rescheduling since the volume at # this point has been created). - LOG.exception(_LE("Failed updating model of volume %(volume_id)s " - "with creation provided model %(model)s"), + LOG.exception("Failed updating model of volume %(volume_id)s " + "with creation provided model %(model)s", {'volume_id': volume_id, 'model': model_update}) raise @@ -945,12 +943,12 @@ class CreateVolumeOnFinishTask(NotifyVolumeActionTask): # Now use the parent to notify. super(CreateVolumeOnFinishTask, self).execute(context, volume) except exception.CinderException: - LOG.exception(_LE("Failed updating volume %(volume_id)s with " - "%(update)s"), {'volume_id': volume.id, - 'update': update}) + LOG.exception("Failed updating volume %(volume_id)s with " + "%(update)s", {'volume_id': volume.id, + 'update': update}) # Even if the update fails, the volume is ready. - LOG.info(_LI("Volume %(volume_name)s (%(volume_id)s): " - "created successfully"), + LOG.info("Volume %(volume_name)s (%(volume_id)s): " + "created successfully", {'volume_name': volume_spec['volume_name'], 'volume_id': volume.id}) diff --git a/cinder/volume/flows/manager/manage_existing.py b/cinder/volume/flows/manager/manage_existing.py index 0b54b07265c..d55f9cec0e1 100644 --- a/cinder/volume/flows/manager/manage_existing.py +++ b/cinder/volume/flows/manager/manage_existing.py @@ -19,7 +19,7 @@ from taskflow.patterns import linear_flow from cinder import exception from cinder import flow_utils -from cinder.i18n import _, _LE +from cinder.i18n import _ from cinder.volume.flows.api import create_volume as create_api from cinder.volume.flows import common as flow_common from cinder.volume.flows.manager import create_volume as create_mgr @@ -43,8 +43,8 @@ class PrepareForQuotaReservationTask(flow_utils.CinderTask): def execute(self, context, volume, manage_existing_ref): driver_name = self.driver.__class__.__name__ if not self.driver.initialized: - LOG.error(_LE("Unable to manage existing volume. " - "Volume driver %s not initialized.") % driver_name) + LOG.error("Unable to manage existing volume. 
" + "Volume driver %s not initialized.", driver_name) flow_common.error_out(volume, _("Volume driver %s not " "initialized.") % driver_name, status='error_managing') @@ -71,7 +71,7 @@ class PrepareForQuotaReservationTask(flow_utils.CinderTask): reason = _('Volume manage failed.') flow_common.error_out(volume, reason=reason, status='error_managing') - LOG.error(_LE("Volume %s: manage failed."), volume.id) + LOG.error("Volume %s: manage failed.", volume.id) class ManageExistingTask(flow_utils.CinderTask): @@ -95,8 +95,8 @@ class ManageExistingTask(flow_utils.CinderTask): volume.update(model_update) volume.save() except exception.CinderException: - LOG.exception(_LE("Failed updating model of volume %(volume_id)s" - " with creation provided model %(model)s") % + LOG.exception("Failed updating model of volume %(volume_id)s" + " with creation provided model %(model)s", {'volume_id': volume.id, 'model': model_update}) raise diff --git a/cinder/volume/flows/manager/manage_existing_snapshot.py b/cinder/volume/flows/manager/manage_existing_snapshot.py index 735e332cd88..9f4cab26aa3 100644 --- a/cinder/volume/flows/manager/manage_existing_snapshot.py +++ b/cinder/volume/flows/manager/manage_existing_snapshot.py @@ -20,7 +20,7 @@ from taskflow.types import failure as ft from cinder import exception from cinder import flow_utils -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder import objects from cinder import quota from cinder import quota_utils @@ -61,7 +61,7 @@ class ExtractSnapshotRefTask(flow_utils.CinderTask): return flow_common.error_out(result) - LOG.error(_LE("Snapshot %s: create failed"), result.id) + LOG.error("Snapshot %s: create failed", result.id) class NotifySnapshotActionTask(flow_utils.CinderTask): @@ -87,8 +87,8 @@ class NotifySnapshotActionTask(flow_utils.CinderTask): # If notification sending of snapshot database entry reading fails # then we shouldn't error out the whole workflow since this is # not always information that must be sent for snapshots to operate - LOG.exception(_LE("Failed notifying about the snapshot " - "action %(event)s for snapshot %(snp_id)s."), + LOG.exception("Failed notifying about the snapshot " + "action %(event)s for snapshot %(snp_id)s.", {'event': self.event_suffix, 'snp_id': snapshot_id}) @@ -107,8 +107,8 @@ class PrepareForQuotaReservationTask(flow_utils.CinderTask): if not self.driver.initialized: driver_name = (self.driver.configuration. safe_get('volume_backend_name')) - LOG.error(_LE("Unable to manage existing snapshot. " - "Volume driver %s not initialized."), driver_name) + LOG.error("Unable to manage existing snapshot. " + "Volume driver %s not initialized.", driver_name) flow_common.error_out(snapshot_ref, reason=_("Volume driver %s " "not initialized.") % driver_name) @@ -174,8 +174,8 @@ class QuotaReserveTask(flow_utils.CinderTask): except exception.CinderException: # We are already reverting, therefore we should silence this # exception since a second exception being active will be bad. 
- LOG.exception(_LE("Failed rolling back quota for" - " %s reservations."), reservations) + LOG.exception("Failed rolling back quota for" + " %s reservations.", reservations) class QuotaCommitTask(flow_utils.CinderTask): @@ -218,8 +218,8 @@ class QuotaCommitTask(flow_utils.CinderTask): QUOTAS.commit(context, reservations, project_id=context.project_id) except Exception: - LOG.exception(_LE("Failed to update quota while deleting " - "snapshots: %s"), snapshot['id']) + LOG.exception("Failed to update quota while deleting " + "snapshots: %s", snapshot['id']) class ManageExistingTask(flow_utils.CinderTask): @@ -245,9 +245,9 @@ class ManageExistingTask(flow_utils.CinderTask): snapshot_object.update(model_update) snapshot_object.save() except exception.CinderException: - LOG.exception(_LE("Failed updating model of snapshot " - "%(snapshot_id)s with creation provided model " - "%(model)s."), + LOG.exception("Failed updating model of snapshot " + "%(snapshot_id)s with creation provided model " + "%(model)s.", {'snapshot_id': snapshot_ref['id'], 'model': model_update}) raise @@ -287,11 +287,11 @@ class CreateSnapshotOnFinishTask(NotifySnapshotActionTask): # Now use the parent to notify. super(CreateSnapshotOnFinishTask, self).execute(context, snapshot) except exception.CinderException: - LOG.exception(_LE("Failed updating snapshot %(snapshot_id)s with " - "%(update)s."), {'snapshot_id': snapshot_id, - 'update': update}) + LOG.exception("Failed updating snapshot %(snapshot_id)s with " + "%(update)s.", {'snapshot_id': snapshot_id, + 'update': update}) # Even if the update fails, the snapshot is ready. - LOG.info(_LI("Snapshot %s created successfully."), snapshot_id) + LOG.info("Snapshot %s created successfully.", snapshot_id) def get_flow(context, db, driver, host, snapshot_id, ref): diff --git a/cinder/volume/group_types.py b/cinder/volume/group_types.py index 70c72b4c1d5..26bacda0da6 100644 --- a/cinder/volume/group_types.py +++ b/cinder/volume/group_types.py @@ -22,7 +22,7 @@ from oslo_log import log as logging from cinder import context from cinder import db from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _ CONF = cfg.CONF LOG = logging.getLogger(__name__) @@ -47,7 +47,7 @@ def create(context, description=description), projects=projects) except db_exc.DBError: - LOG.exception(_LE('DB error:')) + LOG.exception('DB error:') raise exception.GroupTypeCreateFailed(name=name, group_specs=group_specs) return type_ref @@ -64,7 +64,7 @@ def update(context, id, name, description, is_public=None): dict(name=name, description=description, is_public=is_public)) except db_exc.DBError: - LOG.exception(_LE('DB error:')) + LOG.exception('DB error:') raise exception.GroupTypeUpdateFailed(id=id) @@ -127,8 +127,8 @@ def get_default_group_type(): except exception.GroupTypeNotFoundByName: # Couldn't find group type with the name in default_group_type # flag, record this issue and move on - LOG.exception(_LE('Default group type is not found. ' - 'Please check default_group_type config.')) + LOG.exception('Default group type is not found. ' + 'Please check default_group_type config.') return grp_type @@ -148,8 +148,8 @@ def get_default_cgsnapshot_type(): except exception.GroupTypeNotFoundByName: # Couldn't find DEFAULT_CGSNAPSHOT_TYPE group type. # Record this issue and move on. 
- LOG.exception(_LE('Default cgsnapshot type %s is not found.') - % DEFAULT_CGSNAPSHOT_TYPE) + LOG.exception('Default cgsnapshot type %s is not found.', + DEFAULT_CGSNAPSHOT_TYPE) return grp_type diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py index d2316db7809..a78bf8ad05b 100644 --- a/cinder/volume/manager.py +++ b/cinder/volume/manager.py @@ -60,11 +60,11 @@ from cinder import coordination from cinder import db from cinder import exception from cinder import flow_utils -from cinder import keymgr as key_manager -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder.image import cache as image_cache from cinder.image import glance from cinder.image import image_utils +from cinder import keymgr as key_manager from cinder import manager from cinder.message import api as message_api from cinder.message import defined_messages @@ -202,8 +202,8 @@ class VolumeManager(manager.CleanableManager, # if its not using the multi backend volume_driver = self.configuration.volume_driver if volume_driver in MAPPING: - LOG.warning(_LW("Driver path %s is deprecated, update your " - "configuration to the new path."), volume_driver) + LOG.warning("Driver path %s is deprecated, update your " + "configuration to the new path.", volume_driver) volume_driver = MAPPING[volume_driver] vol_db_empty = self._set_voldb_empty_at_startup_indicator( @@ -221,14 +221,14 @@ class VolumeManager(manager.CleanableManager, constants.VOLUME_BINARY) except exception.ServiceNotFound: # NOTE(jdg): This is to solve problems with unit tests - LOG.info(_LI("Service not found for updating " - "active_backend_id, assuming default " - "for driver init.")) + LOG.info("Service not found for updating " + "active_backend_id, assuming default " + "for driver init.") else: curr_active_backend_id = service.active_backend_id if self.configuration.suppress_requests_ssl_warnings: - LOG.warning(_LW("Suppressing requests library SSL Warnings")) + LOG.warning("Suppressing requests library SSL Warnings") requests.packages.urllib3.disable_warnings( requests.packages.urllib3.exceptions.InsecureRequestWarning) requests.packages.urllib3.disable_warnings( @@ -245,8 +245,8 @@ class VolumeManager(manager.CleanableManager, active_backend_id=curr_active_backend_id) if self.cluster and not self.driver.SUPPORTS_ACTIVE_ACTIVE: - msg = _LE('Active-Active configuration is not currently supported ' - 'by driver %s.') % volume_driver + msg = _('Active-Active configuration is not currently supported ' + 'by driver %s.') % volume_driver LOG.error(msg) raise exception.VolumeDriverException(message=msg) @@ -261,7 +261,7 @@ class VolumeManager(manager.CleanableManager, self.extra_capabilities = {} except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Invalid JSON: %s"), + LOG.error("Invalid JSON: %s", self.driver.configuration.extra_capabilities) # Check if a per-backend AZ has been specified @@ -284,10 +284,10 @@ class VolumeManager(manager.CleanableManager, max_cache_size, max_cache_entries ) - LOG.info(_LI('Image-volume cache enabled for host %(host)s.'), + LOG.info('Image-volume cache enabled for host %(host)s.', {'host': self.host}) else: - LOG.info(_LI('Image-volume cache disabled for host %(host)s.'), + LOG.info('Image-volume cache disabled for host %(host)s.', {'host': self.host}) self.image_volume_cache = None @@ -301,7 +301,7 @@ class VolumeManager(manager.CleanableManager, try: pool = self.driver.get_pool(volume) except Exception: - LOG.exception(_LE('Fetch volume pool name failed.'), + 
LOG.exception('Fetch volume pool name failed.', resource=volume) return @@ -342,10 +342,10 @@ class VolumeManager(manager.CleanableManager, vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None) if len(vol_entries) == 0: - LOG.info(_LI("Determined volume DB was empty at startup.")) + LOG.info("Determined volume DB was empty at startup.") return True else: - LOG.info(_LI("Determined volume DB was not empty at startup.")) + LOG.info("Determined volume DB was not empty at startup.") return False def _sync_provider_info(self, ctxt, volumes, snapshots): @@ -390,8 +390,8 @@ class VolumeManager(manager.CleanableManager, def _include_resources_in_cluster(self, ctxt): - LOG.info(_LI('Including all resources from host %(host)s in cluster ' - '%(cluster)s.'), + LOG.info('Including all resources from host %(host)s in cluster ' + '%(cluster)s.', {'host': self.host, 'cluster': self.cluster}) num_vols = objects.VolumeList.include_in_cluster( ctxt, self.cluster, host=self.host) @@ -399,9 +399,9 @@ class VolumeManager(manager.CleanableManager, ctxt, self.cluster, host=self.host) num_cache = db.image_volume_cache_include_in_cluster( ctxt, self.cluster, host=self.host) - LOG.info(_LI('%(num_vols)s volumes, %(num_cgs)s consistency groups, ' - 'and %(num_cache)s image volume caches from host ' - '%(host)s have been included in cluster %(cluster)s.'), + LOG.info('%(num_vols)s volumes, %(num_cgs)s consistency groups, ' + 'and %(num_cache)s image volume caches from host ' + '%(host)s have been included in cluster %(cluster)s.', {'num_vols': num_vols, 'num_cgs': num_cgs, 'host': self.host, 'cluster': self.cluster, 'num_cache': num_cache}) @@ -413,10 +413,10 @@ class VolumeManager(manager.CleanableManager, utils.log_unsupported_driver_warning(self.driver) if not self.configuration.enable_unsupported_driver: - LOG.error(_LE("Unsupported drivers are disabled." - " You can re-enable by adding " - "enable_unsupported_driver=True to the " - "driver section in cinder.conf"), + LOG.error("Unsupported drivers are disabled." 
+ " You can re-enable by adding " + "enable_unsupported_driver=True to the " + "driver section in cinder.conf", resource={'type': 'driver', 'id': self.__class__.__name__}) return @@ -426,14 +426,14 @@ class VolumeManager(manager.CleanableManager, if added_to_cluster: self._include_resources_in_cluster(ctxt) - LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)"), + LOG.info("Starting volume driver %(driver_name)s (%(version)s)", {'driver_name': self.driver.__class__.__name__, 'version': self.driver.get_version()}) try: self.driver.do_setup(ctxt) self.driver.check_for_setup_error() except Exception: - LOG.exception(_LE("Failed to initialize driver."), + LOG.exception("Failed to initialize driver.", resource={'type': 'driver', 'id': self.__class__.__name__}) # we don't want to continue since we failed @@ -462,15 +462,15 @@ class VolumeManager(manager.CleanableManager, if volume['status'] in ['in-use']: self.driver.ensure_export(ctxt, volume) except Exception: - LOG.exception(_LE("Failed to re-export volume, " - "setting to ERROR."), + LOG.exception("Failed to re-export volume, " + "setting to ERROR.", resource=volume) volume.conditional_update({'status': 'error'}, {'status': 'in-use'}) # All other cleanups are processed by parent class CleanableManager except Exception: - LOG.exception(_LE("Error during re-export on driver init."), + LOG.exception("Error during re-export on driver init.", resource=volume) return @@ -487,7 +487,7 @@ class VolumeManager(manager.CleanableManager, # collect and publish service capabilities self.publish_service_capabilities(ctxt) - LOG.info(_LI("Driver initialization completed successfully."), + LOG.info("Driver initialization completed successfully.", resource={'type': 'driver', 'id': self.driver.__class__.__name__}) @@ -496,8 +496,8 @@ class VolumeManager(manager.CleanableManager, **kwargs) def init_host_with_rpc(self): - LOG.info(_LI("Initializing RPC dependent components of volume " - "driver %(driver_name)s (%(version)s)"), + LOG.info("Initializing RPC dependent components of volume " + "driver %(driver_name)s (%(version)s)", {'driver_name': self.driver.__class__.__name__, 'version': self.driver.get_version()}) @@ -506,8 +506,8 @@ class VolumeManager(manager.CleanableManager, utils.log_unsupported_driver_warning(self.driver) utils.require_driver_initialized(self.driver) except exception.DriverNotInitialized: - LOG.error(_LE("Cannot complete RPC initialization because " - "driver isn't initialized properly."), + LOG.error("Cannot complete RPC initialization because " + "driver isn't initialized properly.", resource={'type': 'driver', 'id': self.driver.__class__.__name__}) return @@ -521,8 +521,7 @@ class VolumeManager(manager.CleanableManager, constants.VOLUME_BINARY) except exception.ServiceNotFound: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Service not found for updating " - "replication_status.")) + LOG.error("Service not found for updating replication_status.") if service.replication_status != ( fields.ReplicationStatus.FAILED_OVER): @@ -532,7 +531,7 @@ class VolumeManager(manager.CleanableManager, service.replication_status = fields.ReplicationStatus.DISABLED service.save() - LOG.info(_LI("Driver post RPC initialization completed successfully."), + LOG.info("Driver post RPC initialization completed successfully.", resource={'type': 'driver', 'id': self.driver.__class__.__name__}) @@ -675,7 +674,7 @@ class VolumeManager(manager.CleanableManager, # volume stats as these are decremented on delete. 
self._update_allocated_capacity(volume) - LOG.info(_LI("Created volume successfully."), resource=volume) + LOG.info("Created volume successfully.", resource=volume) return volume.id def _check_is_our_resource(self, resource): @@ -773,7 +772,7 @@ class VolumeManager(manager.CleanableManager, else: self.driver.delete_volume(volume) except exception.VolumeIsBusy: - LOG.error(_LE("Unable to delete busy volume."), + LOG.error("Unable to delete busy volume.", resource=volume) # If this is a destination volume, we have to clear the database # record to avoid user confusion. @@ -807,7 +806,7 @@ class VolumeManager(manager.CleanableManager, project_id=project_id, **reserve_opts) except Exception: - LOG.exception(_LE("Failed to update usages deleting volume."), + LOG.exception("Failed to update usages deleting volume.", resource=volume) # Delete glance metadata if it exists @@ -843,9 +842,9 @@ class VolumeManager(manager.CleanableManager, self.publish_service_capabilities(context) - msg = _LI("Deleted volume successfully.") + msg = "Deleted volume successfully." if unmanage_only: - msg = _LI("Unmanaged volume successfully.") + msg = "Unmanaged volume successfully." LOG.info(msg, resource=volume) def _clear_db(self, context, is_migrating_dest, volume_ref, status): @@ -854,9 +853,9 @@ class VolumeManager(manager.CleanableManager, # in the exception handling part. if is_migrating_dest: volume_ref.destroy() - LOG.error(_LE("Unable to delete the destination volume " - "during volume migration, (NOTE: database " - "record needs to be deleted)."), resource=volume_ref) + LOG.error("Unable to delete the destination volume " + "during volume migration, (NOTE: database " + "record needs to be deleted).", resource=volume_ref) else: volume_ref.status = status volume_ref.save() @@ -900,9 +899,9 @@ class VolumeManager(manager.CleanableManager, # volume glance metadata table pass except exception.CinderException as ex: - LOG.exception(_LE("Failed updating snapshot" - " metadata using the provided volumes" - " %(volume_id)s metadata"), + LOG.exception("Failed updating snapshot" + " metadata using the provided volumes" + " %(volume_id)s metadata", {'volume_id': snapshot.volume_id}, resource=snapshot) snapshot.status = fields.SnapshotStatus.ERROR @@ -914,7 +913,7 @@ class VolumeManager(manager.CleanableManager, snapshot.save() self._notify_about_snapshot_usage(context, snapshot, "create.end") - LOG.info(_LI("Create snapshot completed successfully"), + LOG.info("Create snapshot completed successfully", resource=snapshot) return snapshot.id @@ -944,7 +943,7 @@ class VolumeManager(manager.CleanableManager, else: self.driver.delete_snapshot(snapshot) except exception.SnapshotIsBusy: - LOG.error(_LE("Delete snapshot failed, due to snapshot busy."), + LOG.error("Delete snapshot failed, due to snapshot busy.", resource=snapshot) snapshot.status = fields.SnapshotStatus.AVAILABLE snapshot.save() @@ -972,7 +971,7 @@ class VolumeManager(manager.CleanableManager, **reserve_opts) except Exception: reservations = None - LOG.exception(_LE("Update snapshot usages failed."), + LOG.exception("Update snapshot usages failed.", resource=snapshot) self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id) snapshot.destroy() @@ -982,9 +981,9 @@ class VolumeManager(manager.CleanableManager, if reservations: QUOTAS.commit(context, reservations, project_id=project_id) - msg = _LI("Delete snapshot completed successfully.") + msg = "Delete snapshot completed successfully." 
if unmanage_only: - msg = _LI("Unmanage snapshot completed successfully.") + msg = "Unmanage snapshot completed successfully." LOG.info(msg, resource=snapshot) @coordination.synchronized('{volume_id}') @@ -1057,9 +1056,9 @@ class VolumeManager(manager.CleanableManager, # and the volume status updated. utils.require_driver_initialized(self.driver) - LOG.info(_LI('Attaching volume %(volume_id)s to instance ' - '%(instance)s at mountpoint %(mount)s on host ' - '%(host)s.'), + LOG.info('Attaching volume %(volume_id)s to instance ' + '%(instance)s at mountpoint %(mount)s on host ' + '%(host)s.', {'volume_id': volume_id, 'instance': instance_uuid, 'mount': mountpoint, 'host': host_name_sanitized}, resource=volume) @@ -1081,7 +1080,7 @@ class VolumeManager(manager.CleanableManager, mode) self._notify_about_volume_usage(context, volume, "attach.end") - LOG.info(_LI("Attach volume completed successfully."), + LOG.info("Attach volume completed successfully.", resource=volume) return attachment @@ -1101,7 +1100,7 @@ class VolumeManager(manager.CleanableManager, attachment = objects.VolumeAttachment.get_by_id(context, attachment_id) except exception.VolumeAttachmentNotFound: - LOG.info(_LI("Volume detach called, but volume not attached."), + LOG.info("Volume detach called, but volume not attached.", resource=volume) # We need to make sure the volume status is set to the correct # status. It could be in detaching status now, and we don't @@ -1125,7 +1124,7 @@ class VolumeManager(manager.CleanableManager, else: # there aren't any attachments for this volume. # so set the status to available and move on. - LOG.info(_LI("Volume detach called, but volume not attached."), + LOG.info("Volume detach called, but volume not attached.", resource=volume) volume.status = 'available' volume.attach_status = fields.VolumeAttachStatus.DETACHED @@ -1139,8 +1138,8 @@ class VolumeManager(manager.CleanableManager, # and the volume status updated. utils.require_driver_initialized(self.driver) - LOG.info(_LI('Detaching volume %(volume_id)s from instance ' - '%(instance)s.'), + LOG.info('Detaching volume %(volume_id)s from instance ' + '%(instance)s.', {'volume_id': volume_id, 'instance': attachment.get('instance_uuid')}, resource=volume) @@ -1164,19 +1163,19 @@ class VolumeManager(manager.CleanableManager, self.driver.remove_export(context.elevated(), volume) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Detach volume failed, due to " - "uninitialized driver."), + LOG.exception("Detach volume failed, due to " + "uninitialized driver.", resource=volume) except Exception as ex: - LOG.exception(_LE("Detach volume failed, due to " - "remove-export failure."), + LOG.exception("Detach volume failed, due to " + "remove-export failure.", resource=volume) raise exception.RemoveExportException(volume=volume_id, reason=six.text_type(ex)) volume.finish_detach(attachment.id) self._notify_about_volume_usage(context, volume, "detach.end") - LOG.info(_LI("Detach volume completed successfully."), resource=volume) + LOG.info("Detach volume completed successfully.", resource=volume) def _create_image_cache_volume_entry(self, ctx, volume_ref, image_id, image_meta): @@ -1188,9 +1187,9 @@ class VolumeManager(manager.CleanableManager, image_volume = None try: if not self.image_volume_cache.ensure_space(ctx, volume_ref): - LOG.warning(_LW('Unable to ensure space for image-volume in' - ' cache. 
Will skip creating entry for image' - ' %(image)s on %(service)s.'), + LOG.warning('Unable to ensure space for image-volume in' + ' cache. Will skip creating entry for image' + ' %(image)s on %(service)s.', {'image': image_id, 'service': volume_ref.service_topic_queue}) return @@ -1199,8 +1198,8 @@ class VolumeManager(manager.CleanableManager, volume_ref, image_meta) if not image_volume: - LOG.warning(_LW('Unable to clone image_volume for image ' - '%(image_id)s will not create cache entry.'), + LOG.warning('Unable to clone image_volume for image ' + '%(image_id)s will not create cache entry.', {'image_id': image_id}) return @@ -1211,8 +1210,8 @@ class VolumeManager(manager.CleanableManager, image_meta ) except exception.CinderException as e: - LOG.warning(_LW('Failed to create new image-volume cache entry.' - ' Error: %(exception)s'), {'exception': e}) + LOG.warning('Failed to create new image-volume cache entry.' + ' Error: %(exception)s', {'exception': e}) if image_volume: self.delete_volume(ctx, image_volume) @@ -1236,9 +1235,9 @@ class VolumeManager(manager.CleanableManager, image_volume = objects.Volume(context=ctx, **new_vol_values) image_volume.create() except Exception as ex: - LOG.exception(_LE('Create clone_image_volume: %(volume_id)s' - 'for image %(image_id)s, ' - 'failed (Exception: %(except)s)'), + LOG.exception('Create clone_image_volume: %(volume_id)s' + 'for image %(image_id)s, ' + 'failed (Exception: %(except)s)', {'volume_id': volume.id, 'image_id': image_meta['id'], 'except': ex}) @@ -1260,14 +1259,14 @@ class VolumeManager(manager.CleanableManager, False) return image_volume except exception.CinderException: - LOG.exception(_LE('Failed to clone volume %(volume_id)s for ' - 'image %(image_id)s.'), + LOG.exception('Failed to clone volume %(volume_id)s for ' + 'image %(image_id)s.', {'volume_id': volume.id, 'image_id': image_meta['id']}) try: self.delete_volume(ctx, image_volume) except exception.CinderException: - LOG.exception(_LE('Could not delete the image volume %(id)s.'), + LOG.exception('Could not delete the image volume %(id)s.', {'id': volume.id}) return @@ -1305,18 +1304,18 @@ class VolumeManager(manager.CleanableManager, ctx, image_meta['id'], uri, {}) except (exception.NotAuthorized, exception.Invalid, exception.NotFound): - LOG.exception(_LE('Failed to register image volume location ' - '%(uri)s.'), {'uri': uri}) + LOG.exception('Failed to register image volume location ' + '%(uri)s.', {'uri': uri}) if not image_registered: - LOG.warning(_LW('Registration of image volume URI %(uri)s ' - 'to image %(image_id)s failed.'), + LOG.warning('Registration of image volume URI %(uri)s ' + 'to image %(image_id)s failed.', {'uri': uri, 'image_id': image_meta['id']}) try: self.delete_volume(image_volume_context, image_volume) except exception.CinderException: - LOG.exception(_LE('Could not delete failed image volume ' - '%(id)s.'), {'id': image_volume.id}) + LOG.exception('Could not delete failed image volume ' + '%(id)s.', {'id': image_volume.id}) return False image_volume_meta['glance_image_id'] = image_meta['id'] @@ -1359,8 +1358,8 @@ class VolumeManager(manager.CleanableManager, {'image_id': image_meta['id']}, resource=volume) except Exception as error: - LOG.error(_LE("Upload volume to image encountered an error " - "(image-id: %(image_id)s)."), + LOG.error("Upload volume to image encountered an error " + "(image-id: %(image_id)s).", {'image_id': image_meta['id']}, resource=volume) if image_service is not None: @@ -1379,7 +1378,7 @@ class 
VolumeManager(manager.CleanableManager, finally: self.db.volume_update_status_based_on_attachment(context, volume_id) - LOG.info(_LI("Copy volume to image completed successfully."), + LOG.info("Copy volume to image completed successfully.", resource=volume) def _delete_image(self, context, image_id, image_service): @@ -1388,13 +1387,13 @@ class VolumeManager(manager.CleanableManager, image_meta = image_service.show(context, image_id) image_status = image_meta.get('status') if image_status == 'queued' or image_status == 'saving': - LOG.warning(_LW("Deleting image in unexpected status: " - "%(image_status)s."), + LOG.warning("Deleting image in unexpected status: " + "%(image_status)s.", {'image_status': image_status}, resource={'type': 'image', 'id': image_id}) image_service.delete(context, image_id) except Exception: - LOG.warning(_LW("Image delete encountered an error."), + LOG.warning("Image delete encountered an error.", exc_info=True, resource={'type': 'image', 'id': image_id}) @@ -1504,7 +1503,7 @@ class VolumeManager(manager.CleanableManager, volume.update(model_update) volume.save() except exception.CinderException as ex: - LOG.exception(_LE("Model update failed."), resource=volume) + LOG.exception("Model update failed.", resource=volume) raise exception.ExportFailure(reason=six.text_type(ex)) try: @@ -1519,7 +1518,7 @@ class VolumeManager(manager.CleanableManager, raise exception.VolumeBackendAPIException(data=err_msg) conn_info = self._parse_connection_options(context, volume, conn_info) - LOG.info(_LI("Initialize volume connection completed successfully."), + LOG.info("Initialize volume connection completed successfully.", resource=volume) return conn_info @@ -1542,7 +1541,7 @@ class VolumeManager(manager.CleanableManager, % {'err': six.text_type(err)}) LOG.exception(err_msg, resource=volume_ref) raise exception.VolumeBackendAPIException(data=err_msg) - LOG.info(_LI("Terminate volume connection completed successfully."), + LOG.info("Terminate volume connection completed successfully.", resource=volume_ref) def remove_export(self, context, volume_id): @@ -1556,7 +1555,7 @@ class VolumeManager(manager.CleanableManager, LOG.exception(msg, resource=volume_ref) raise exception.VolumeBackendAPIException(data=msg) - LOG.info(_LI("Remove volume export completed successfully."), + LOG.info("Remove volume export completed successfully.", resource=volume_ref) def accept_transfer(self, context, volume_id, new_user, new_project): @@ -1583,14 +1582,14 @@ class VolumeManager(manager.CleanableManager, model_update) except exception.CinderException: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Update volume model for " - "transfer operation failed."), + LOG.exception("Update volume model for " + "transfer operation failed.", resource=volume_ref) self.db.volume_update(context.elevated(), volume_id, {'status': 'error'}) - LOG.info(_LI("Transfer volume completed successfully."), + LOG.info("Transfer volume completed successfully.", resource=volume_ref) return model_update @@ -1632,7 +1631,7 @@ class VolumeManager(manager.CleanableManager, conn = rpcapi.initialize_connection(ctxt, volume, properties) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed to attach volume %(vol)s."), + LOG.error("Failed to attach volume %(vol)s.", {'vol': volume['id']}) self.db.volume_update(ctxt, volume['id'], {'status': status}) @@ -1652,8 +1651,8 @@ class VolumeManager(manager.CleanableManager, encryption) except Exception: with 
excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed to attach volume encryptor" - " %(vol)s."), {'vol': volume['id']}) + LOG.error("Failed to attach volume encryptor" + " %(vol)s.", {'vol': volume['id']}) self._detach_volume(ctxt, attach_info, volume, properties) return attach_info @@ -1682,8 +1681,8 @@ class VolumeManager(manager.CleanableManager, self.remove_export(ctxt, volume['id']) except Exception as err: with excutils.save_and_reraise_exception(): - LOG.error(_LE('Unable to terminate volume connection: ' - '%(err)s.') % {'err': err}) + LOG.error('Unable to terminate volume connection: ' + '%(err)s.' % {'err': err}) def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None): """Copy data from src_vol to dest_vol.""" @@ -1714,7 +1713,7 @@ class VolumeManager(manager.CleanableManager, attach_encryptor=attach_encryptor) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed to attach source volume for copy.")) + LOG.error("Failed to attach source volume for copy.") self._detach_volume(ctxt, dest_attach_info, dest_vol, properties, remote=dest_remote, attach_encryptor=attach_encryptor) @@ -1739,7 +1738,7 @@ class VolumeManager(manager.CleanableManager, copy_error = False except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed to copy volume %(src)s to %(dest)s."), + LOG.error("Failed to copy volume %(src)s to %(dest)s.", {'src': src_vol['id'], 'dest': dest_vol['id']}) finally: try: @@ -1841,8 +1840,8 @@ class VolumeManager(manager.CleanableManager, new_volume.id) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE( - "Failed to copy volume %(vol1)s to %(vol2)s"), { + LOG.exception( + "Failed to copy volume %(vol1)s to %(vol2)s", { 'vol1': volume.id, 'vol2': new_volume.id}) self._clean_temporary_volume(ctxt, volume, new_volume) @@ -1862,9 +1861,9 @@ class VolumeManager(manager.CleanableManager, rpcapi = volume_rpcapi.VolumeAPI() rpcapi.delete_volume(ctxt, new_volume) except exception.VolumeNotFound: - LOG.info(_LI("Couldn't find the temporary volume " - "%(vol)s in the database. There is no need " - "to clean up this volume."), + LOG.info("Couldn't find the temporary volume " + "%(vol)s in the database. There is no need " + "to clean up this volume.", {'vol': new_volume.id}) else: # If we're in the completing phase don't delete the @@ -1875,15 +1874,15 @@ class VolumeManager(manager.CleanableManager, new_volume.migration_status = None new_volume.save() except exception.VolumeNotFound: - LOG.info(_LI("Couldn't find destination volume " - "%(vol)s in the database. The entry might be " - "successfully deleted during migration " - "completion phase."), + LOG.info("Couldn't find destination volume " + "%(vol)s in the database. The entry might be " + "successfully deleted during migration " + "completion phase.", {'vol': new_volume.id}) - LOG.warning(_LW("Failed to migrate volume. The destination " - "volume %(vol)s is not deleted since the " - "source volume may have been deleted."), + LOG.warning("Failed to migrate volume. 
The destination " + "volume %(vol)s is not deleted since the " + "source volume may have been deleted.", {'vol': new_volume.id}) def migrate_volume_completion(self, ctxt, volume, new_volume, error=False): @@ -1905,8 +1904,8 @@ class VolumeManager(manager.CleanableManager, orig_volume_status = volume.previous_status if error: - LOG.info(_LI("migrate_volume_completion is cleaning up an error " - "for volume %(vol1)s (temporary volume %(vol2)s"), + LOG.info("migrate_volume_completion is cleaning up an error " + "for volume %(vol1)s (temporary volume %(vol2)s", {'vol1': volume['id'], 'vol2': new_volume.id}) rpcapi.delete_volume(ctxt, new_volume) updates = {'migration_status': 'error', @@ -1929,9 +1928,9 @@ class VolumeManager(manager.CleanableManager, try: self.detach_volume(ctxt, volume.id, attachment.id) except Exception as ex: - LOG.error(_LE("Detach migration source volume " - "%(volume.id)s from instance " - "%(instance_id)s failed: %(err)s"), + LOG.error("Detach migration source volume " + "%(volume.id)s from instance " + "%(instance_id)s failed: %(err)s", {'err': ex, 'volume.id': volume.id, 'instance_id': attachment.id}, @@ -1970,11 +1969,11 @@ class VolumeManager(manager.CleanableManager, try: rpcapi.delete_volume(ctxt, updated_new) except Exception as ex: - LOG.error(_LE('Failed to request async delete of migration source ' - 'vol %(vol)s: %(err)s'), + LOG.error('Failed to request async delete of migration source ' + 'vol %(vol)s: %(err)s', {'vol': volume.id, 'err': ex}) - LOG.info(_LI("Complete-Migrate volume completed successfully."), + LOG.info("Complete-Migrate volume completed successfully.", resource=volume) return volume.id @@ -2034,7 +2033,7 @@ class VolumeManager(manager.CleanableManager, updates.update(status_update) volume.update(updates) volume.save() - LOG.info(_LI("Migrate volume completed successfully."), + LOG.info("Migrate volume completed successfully.", resource=volume) @periodic_task.periodic_task @@ -2046,8 +2045,8 @@ class VolumeManager(manager.CleanableManager, config_group = ('(config name %s)' % self.driver.configuration.config_group) - LOG.warning(_LW("Update driver status failed: %(config_group)s " - "is uninitialized."), + LOG.warning("Update driver status failed: %(config_group)s " + "is uninitialized.", {'config_group': config_group}, resource={'type': 'driver', 'id': self.driver.__class__.__name__}) @@ -2209,7 +2208,7 @@ class VolumeManager(manager.CleanableManager, try: self.driver.extend_volume(volume, new_size) except Exception: - LOG.exception(_LE("Extend volume failed."), + LOG.exception("Extend volume failed.", resource=volume) try: self.db.volume_update(context, volume.id, @@ -2240,7 +2239,7 @@ class VolumeManager(manager.CleanableManager, self._notify_about_volume_usage( context, volume, "resize.end", extra_usage_info={'size': int(new_size)}) - LOG.info(_LI("Extend volume completed successfully."), + LOG.info("Extend volume completed successfully.", resource=volume) def _is_our_backend(self, host, cluster_name): @@ -2352,12 +2351,12 @@ class VolumeManager(manager.CleanableManager, retyped = ret if retyped: - LOG.info(_LI("Volume %s: retyped successfully"), volume.id) + LOG.info("Volume %s: retyped successfully.", volume.id) except Exception: retyped = False - LOG.exception(_LE("Volume %s: driver error when trying to " - "retype, falling back to generic " - "mechanism."), volume.id) + LOG.exception("Volume %s: driver error when trying to " + "retype, falling back to generic " + "mechanism.", volume.id) # We could not change the type, so we need to 
migrate the volume, where # the destination volume will be of the new type @@ -2415,7 +2414,7 @@ class VolumeManager(manager.CleanableManager, context, volume, "retype", extra_usage_info={'volume_type': new_type_id}) self.publish_service_capabilities(context) - LOG.info(_LI("Retype volume completed successfully."), + LOG.info("Retype volume completed successfully.", resource=volume) @staticmethod @@ -2441,7 +2440,7 @@ class VolumeManager(manager.CleanableManager, self._update_stats_for_managed(vol_ref) - LOG.info(_LI("Manage existing volume completed successfully."), + LOG.info("Manage existing volume completed successfully.", resource=vol_ref) return vol_ref.id @@ -2503,8 +2502,8 @@ class VolumeManager(manager.CleanableManager, utils.require_driver_initialized(self.driver) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Listing manageable volumes failed, due " - "to uninitialized driver.")) + LOG.exception("Listing manageable volumes failed, due " + "to uninitialized driver.") cinder_volumes = self._get_my_volumes(ctxt) try: @@ -2515,8 +2514,8 @@ class VolumeManager(manager.CleanableManager, from_primitives(ctxt, driver_entries)) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Listing manageable volumes failed, due " - "to driver error.")) + LOG.exception("Listing manageable volumes failed, due " + "to driver error.") return driver_entries def create_consistencygroup(self, context, group): @@ -2546,7 +2545,7 @@ class VolumeManager(manager.CleanableManager, try: utils.require_driver_initialized(self.driver) - LOG.info(_LI("Group %s: creating"), group.name) + LOG.info("Group %s: creating", group.name) if is_generic_group: try: model_update = self.driver.create_group(context, @@ -2579,14 +2578,13 @@ class VolumeManager(manager.CleanableManager, with excutils.save_and_reraise_exception(): group.status = fields.GroupStatus.ERROR group.save() - LOG.error(_LE("Group %s: create failed"), + LOG.error("Group %s: create failed", group.name) group.status = status group.created_at = timeutils.utcnow() group.save() - LOG.info(_LI("Group %s: created successfully"), - group.name) + LOG.info("Group %s: created successfully", group.name) if is_generic_group: self._notify_about_group_usage( @@ -2595,7 +2593,7 @@ class VolumeManager(manager.CleanableManager, self._notify_about_consistencygroup_usage( context, group, "create.end") - LOG.info(_LI("Create group completed successfully."), + LOG.info("Create group completed successfully.", resource={'type': 'group', 'id': group.id}) return group @@ -2618,9 +2616,9 @@ class VolumeManager(manager.CleanableManager, cgsnapshot = objects.CGSnapshot.get_by_id( context, cgsnapshot.id) except exception.CgSnapshotNotFound: - LOG.error(_LE("Create consistency group " - "from snapshot-%(snap)s failed: " - "SnapshotNotFound."), + LOG.error("Create consistency group " + "from snapshot-%(snap)s failed: " + "SnapshotNotFound.", {'snap': cgsnapshot.id}, resource={'type': 'consistency_group', 'id': group.id}) @@ -2646,9 +2644,9 @@ class VolumeManager(manager.CleanableManager, source_cg = objects.ConsistencyGroup.get_by_id( context, source_cg.id) except exception.ConsistencyGroupNotFound: - LOG.error(_LE("Create consistency group " - "from source cg-%(cg)s failed: " - "ConsistencyGroupNotFound."), + LOG.error("Create consistency group " + "from source cg-%(cg)s failed: " + "ConsistencyGroupNotFound.", {'cg': source_cg.id}, resource={'type': 'consistency_group', 'id': group.id}) @@ -2705,8 
+2703,8 @@ class VolumeManager(manager.CleanableManager, with excutils.save_and_reraise_exception(): group.status = 'error' group.save() - LOG.error(_LE("Create consistency group " - "from source %(source)s failed."), + LOG.error("Create consistency group " + "from source %(source)s failed.", {'source': source_name}, resource={'type': 'consistency_group', 'id': group.id}) @@ -2728,8 +2726,8 @@ class VolumeManager(manager.CleanableManager, self._notify_about_consistencygroup_usage( context, group, "create.end") - LOG.info(_LI("Create consistency group " - "from source-%(source)s completed successfully."), + LOG.info("Create consistency group " + "from source-%(source)s completed successfully.", {'source': source_name}, resource={'type': 'consistency_group', 'id': group.id}) @@ -2753,9 +2751,8 @@ class VolumeManager(manager.CleanableManager, group_snapshot = objects.GroupSnapshot.get_by_id( context, group_snapshot.id) except exception.GroupSnapshotNotFound: - LOG.error(_LE("Create group " - "from snapshot-%(snap)s failed: " - "SnapshotNotFound."), + LOG.error("Create group from snapshot-%(snap)s failed: " + "SnapshotNotFound.", {'snap': group_snapshot.id}, resource={'type': 'group', 'id': group.id}) @@ -2781,9 +2778,9 @@ class VolumeManager(manager.CleanableManager, source_group = objects.Group.get_by_id( context, source_group.id) except exception.GroupNotFound: - LOG.error(_LE("Create group " - "from source group-%(group)s failed: " - "GroupNotFound."), + LOG.error("Create group " + "from source group-%(group)s failed: " + "GroupNotFound.", {'group': source_group.id}, resource={'type': 'group', 'id': group.id}) @@ -2866,8 +2863,8 @@ class VolumeManager(manager.CleanableManager, with excutils.save_and_reraise_exception(): group.status = 'error' group.save() - LOG.error(_LE("Create group " - "from source %(source)s failed."), + LOG.error("Create group " + "from source %(source)s failed.", {'source': source_name}, resource={'type': 'group', 'id': group.id}) @@ -2890,8 +2887,8 @@ class VolumeManager(manager.CleanableManager, self._notify_about_group_usage( context, group, "create.end") - LOG.info(_LI("Create group " - "from source-%(source)s completed successfully."), + LOG.info("Create group " + "from source-%(source)s completed successfully.", {'source': source_name}, resource={'type': 'group', 'id': group.id}) @@ -2945,8 +2942,8 @@ class VolumeManager(manager.CleanableManager, found_snaps = [snap for snap in snapshots if snap['id'] == vol['snapshot_id']] if not found_snaps: - LOG.error(_LE("Source snapshot cannot be found for target " - "volume %(volume_id)s."), + LOG.error("Source snapshot cannot be found for target " + "volume %(volume_id)s.", {'volume_id': vol['id']}) raise exception.SnapshotNotFound( snapshot_id=vol['snapshot_id']) @@ -2968,8 +2965,8 @@ class VolumeManager(manager.CleanableManager, found_source_vols = [source_vol for source_vol in source_vols if source_vol['id'] == vol['source_volid']] if not found_source_vols: - LOG.error(_LE("Source volumes cannot be found for target " - "volume %(volume_id)s."), + LOG.error("Source volumes cannot be found for target " + "volume %(volume_id)s.", {'volume_id': vol['id']}) raise exception.VolumeNotFound( volume_id=vol['source_volid']) @@ -2999,7 +2996,7 @@ class VolumeManager(manager.CleanableManager, update['multiattach'] = True except exception.SnapshotNotFound: - LOG.error(_LE("Source snapshot %(snapshot_id)s cannot be found."), + LOG.error("Source snapshot %(snapshot_id)s cannot be found.", {'snapshot_id': vol['snapshot_id']}) 
self.db.volume_update(context, vol['id'], {'status': 'error'}) @@ -3008,8 +3005,8 @@ class VolumeManager(manager.CleanableManager, group.save() raise except exception.VolumeNotFound: - LOG.error(_LE("The source volume %(volume_id)s " - "cannot be found."), + LOG.error("The source volume %(volume_id)s " + "cannot be found.", {'volume_id': snapshot.volume_id}) self.db.volume_update(context, vol['id'], {'status': 'error'}) @@ -3018,9 +3015,9 @@ class VolumeManager(manager.CleanableManager, group.save() raise except exception.CinderException as ex: - LOG.error(_LE("Failed to update %(volume_id)s" - " metadata using the provided snapshot" - " %(snapshot_id)s metadata."), + LOG.error("Failed to update %(volume_id)s" + " metadata using the provided snapshot" + " %(snapshot_id)s metadata.", {'volume_id': vol['id'], 'snapshot_id': vol['snapshot_id']}) self.db.volume_update(context, vol['id'], @@ -3120,8 +3117,8 @@ class VolumeManager(manager.CleanableManager, **reserve_opts) except Exception: cgreservations = None - LOG.exception(_LE("Delete consistency group " - "failed to update usages."), + LOG.exception("Delete consistency group " + "failed to update usages.", resource={'type': 'consistency_group', 'id': group.id}) @@ -3138,8 +3135,8 @@ class VolumeManager(manager.CleanableManager, **reserve_opts) except Exception: reservations = None - LOG.exception(_LE("Delete consistency group " - "failed to update usages."), + LOG.exception("Delete consistency group " + "failed to update usages.", resource={'type': 'consistency_group', 'id': group.id}) @@ -3162,8 +3159,8 @@ class VolumeManager(manager.CleanableManager, self._notify_about_consistencygroup_usage( context, group, "delete.end", volumes) self.publish_service_capabilities(context) - LOG.info(_LI("Delete consistency group " - "completed successfully."), + LOG.info("Delete consistency group " + "completed successfully.", resource={'type': 'consistency_group', 'id': group.id}) @@ -3251,8 +3248,8 @@ class VolumeManager(manager.CleanableManager, **reserve_opts) except Exception: grpreservations = None - LOG.exception(_LE("Delete group " - "failed to update usages."), + LOG.exception("Delete group " + "failed to update usages.", resource={'type': 'group', 'id': group.id}) @@ -3269,8 +3266,8 @@ class VolumeManager(manager.CleanableManager, **reserve_opts) except Exception: reservations = None - LOG.exception(_LE("Delete group " - "failed to update usages."), + LOG.exception("Delete group " + "failed to update usages.", resource={'type': 'group', 'id': group.id}) @@ -3293,8 +3290,8 @@ class VolumeManager(manager.CleanableManager, self._notify_about_group_usage( context, group, "delete.end") self.publish_service_capabilities(context) - LOG.info(_LI("Delete group " - "completed successfully."), + LOG.info("Delete group " + "completed successfully.", resource={'type': 'group', 'id': group.id}) @@ -3395,9 +3392,9 @@ class VolumeManager(manager.CleanableManager, try: add_vol_ovo = objects.Volume.get_by_id(context, add_vol) except exception.VolumeNotFound: - LOG.error(_LE("Update consistency group " - "failed to add volume-%(volume_id)s: " - "VolumeNotFound."), + LOG.error("Update consistency group " + "failed to add volume-%(volume_id)s: " + "VolumeNotFound.", {'volume_id': add_vol}, resource={'type': 'consistency_group', 'id': group.id}) @@ -3418,9 +3415,9 @@ class VolumeManager(manager.CleanableManager, try: remove_vol_ref = self.db.volume_get(context, remove_vol) except exception.VolumeNotFound: - LOG.error(_LE("Update consistency group " - "failed to 
remove volume-%(volume_id)s: " - "VolumeNotFound."), + LOG.error("Update consistency group " + "failed to remove volume-%(volume_id)s: " + "VolumeNotFound.", {'volume_id': remove_vol}, resource={'type': 'consistency_group', 'id': group.id}) @@ -3468,8 +3465,8 @@ class VolumeManager(manager.CleanableManager, except exception.VolumeDriverException: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error occurred in the volume driver when " - "updating consistency group %(group_id)s."), + LOG.error("Error occurred in the volume driver when " + "updating consistency group %(group_id)s.", {'group_id': group.id}) group.status = 'error' group.save() @@ -3481,8 +3478,8 @@ class VolumeManager(manager.CleanableManager, {'status': 'error'}) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error occurred when updating consistency " - "group %(group_id)s."), + LOG.error("Error occurred when updating consistency " + "group %(group_id)s.", {'group_id': group.id}) group.status = 'error' group.save() @@ -3508,8 +3505,7 @@ class VolumeManager(manager.CleanableManager, self._notify_about_consistencygroup_usage( context, group, "update.end") - LOG.info(_LI("Update consistency group " - "completed successfully."), + LOG.info("Update consistency group completed successfully.", resource={'type': 'consistency_group', 'id': group.id}) @@ -3533,9 +3529,9 @@ class VolumeManager(manager.CleanableManager, try: add_vol_ref = objects.Volume.get_by_id(context, add_vol) except exception.VolumeNotFound: - LOG.error(_LE("Update group " - "failed to add volume-%(volume_id)s: " - "VolumeNotFound."), + LOG.error("Update group " + "failed to add volume-%(volume_id)s: " + "VolumeNotFound.", {'volume_id': add_vol_ref.id}, resource={'type': 'group', 'id': group.id}) @@ -3556,9 +3552,9 @@ class VolumeManager(manager.CleanableManager, try: remove_vol_ref = objects.Volume.get_by_id(context, remove_vol) except exception.VolumeNotFound: - LOG.error(_LE("Update group " - "failed to remove volume-%(volume_id)s: " - "VolumeNotFound."), + LOG.error("Update group " + "failed to remove volume-%(volume_id)s: " + "VolumeNotFound.", {'volume_id': remove_vol_ref.id}, resource={'type': 'group', 'id': group.id}) @@ -3624,8 +3620,8 @@ class VolumeManager(manager.CleanableManager, except exception.VolumeDriverException: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error occurred in the volume driver when " - "updating group %(group_id)s."), + LOG.error("Error occurred in the volume driver when " + "updating group %(group_id)s.", {'group_id': group.id}) group.status = 'error' group.save() @@ -3639,8 +3635,7 @@ class VolumeManager(manager.CleanableManager, rem_vol.save() except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error occurred when updating " - "group %(group_id)s."), + LOG.error("Error occurred when updating group %(group_id)s.", {'group_id': group.id}) group.status = 'error' group.save() @@ -3662,7 +3657,7 @@ class VolumeManager(manager.CleanableManager, self._notify_about_group_usage( context, group, "update.end") - LOG.info(_LI("Update group completed successfully."), + LOG.info("Update group completed successfully.", resource={'type': 'group', 'id': group.id}) @@ -3671,7 +3666,7 @@ class VolumeManager(manager.CleanableManager, caller_context = context context = context.elevated() - LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot.id) + LOG.info("Cgsnapshot %s: creating.", cgsnapshot.id) snapshots = objects.SnapshotList.get_all_for_cgsnapshot( 
context, cgsnapshot.id) @@ -3748,9 +3743,9 @@ class VolumeManager(manager.CleanableManager, # volume glance metadata table pass except exception.CinderException as ex: - LOG.error(_LE("Failed updating %(snapshot_id)s" - " metadata using the provided volumes" - " %(volume_id)s metadata"), + LOG.error("Failed updating %(snapshot_id)s" + " metadata using the provided volumes" + " %(volume_id)s metadata", {'volume_id': volume_id, 'snapshot_id': snapshot_id}) @@ -3770,7 +3765,7 @@ class VolumeManager(manager.CleanableManager, cgsnapshot.status = 'available' cgsnapshot.save() - LOG.info(_LI("cgsnapshot %s: created successfully"), + LOG.info("cgsnapshot %s: created successfully", cgsnapshot.id) self._notify_about_cgsnapshot_usage( context, cgsnapshot, "create.end") @@ -3781,7 +3776,7 @@ class VolumeManager(manager.CleanableManager, caller_context = context context = context.elevated() - LOG.info(_LI("GroupSnapshot %s: creating."), group_snapshot.id) + LOG.info("GroupSnapshot %s: creating.", group_snapshot.id) snapshots = objects.SnapshotList.get_all_for_group_snapshot( context, group_snapshot.id) @@ -3873,9 +3868,9 @@ class VolumeManager(manager.CleanableManager, # volume glance metadata table pass except exception.CinderException as ex: - LOG.error(_LE("Failed updating %(snapshot_id)s" - " metadata using the provided volumes" - " %(volume_id)s metadata"), + LOG.error("Failed updating %(snapshot_id)s" + " metadata using the provided volumes" + " %(volume_id)s metadata.", {'volume_id': volume_id, 'snapshot_id': snapshot_id}) snapshot.status = fields.SnapshotStatus.ERROR @@ -3890,7 +3885,7 @@ class VolumeManager(manager.CleanableManager, group_snapshot.status = 'available' group_snapshot.save() - LOG.info(_LI("group_snapshot %s: created successfully"), + LOG.info("group_snapshot %s: created successfully", group_snapshot.id) self._notify_about_group_snapshot_usage( context, group_snapshot, "create.end") @@ -3938,7 +3933,7 @@ class VolumeManager(manager.CleanableManager, context = context.elevated() project_id = cgsnapshot.project_id - LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot.id) + LOG.info("cgsnapshot %s: deleting", cgsnapshot.id) snapshots = objects.SnapshotList.get_all_for_cgsnapshot( context, cgsnapshot.id) @@ -4022,7 +4017,7 @@ class VolumeManager(manager.CleanableManager, except Exception: reservations = None - LOG.exception(_LE("Failed to update usages deleting snapshot")) + LOG.exception("Failed to update usages deleting snapshot") self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot['id']) @@ -4036,7 +4031,7 @@ class VolumeManager(manager.CleanableManager, QUOTAS.commit(context, reservations, project_id=project_id) cgsnapshot.destroy() - LOG.info(_LI("cgsnapshot %s: deleted successfully"), cgsnapshot.id) + LOG.info("cgsnapshot %s: deleted successfully", cgsnapshot.id) self._notify_about_cgsnapshot_usage(context, cgsnapshot, "delete.end", snapshots) @@ -4046,7 +4041,7 @@ class VolumeManager(manager.CleanableManager, context = context.elevated() project_id = group_snapshot.project_id - LOG.info(_LI("group_snapshot %s: deleting"), group_snapshot.id) + LOG.info("group_snapshot %s: deleting", group_snapshot.id) snapshots = objects.SnapshotList.get_all_for_group_snapshot( context, group_snapshot.id) @@ -4148,7 +4143,7 @@ class VolumeManager(manager.CleanableManager, except Exception: reservations = None - LOG.exception(_LE("Failed to update usages deleting snapshot")) + LOG.exception("Failed to update usages deleting snapshot") 
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id) @@ -4159,7 +4154,7 @@ class VolumeManager(manager.CleanableManager, QUOTAS.commit(context, reservations, project_id=project_id) group_snapshot.destroy() - LOG.info(_LI("group_snapshot %s: deleted successfully"), + LOG.info("group_snapshot %s: deleted successfully", group_snapshot.id) self._notify_about_group_snapshot_usage(context, group_snapshot, "delete.end", @@ -4248,11 +4243,11 @@ class VolumeManager(manager.CleanableManager, secondary_id=secondary_backend_id) exception_encountered = False except exception.UnableToFailOver: - LOG.exception(_LE("Failed to perform replication failover")) + LOG.exception("Failed to perform replication failover") updates['replication_status'] = repl_status.FAILOVER_ERROR except exception.InvalidReplicationTarget: - LOG.exception(_LE("Invalid replication target specified " - "for failover")) + LOG.exception("Invalid replication target specified " + "for failover") # Preserve the replication_status: Status should be failed over if # we were failing back or if we were failing over from one # secondary to another secondary. In both cases active_backend_id @@ -4266,14 +4261,14 @@ class VolumeManager(manager.CleanableManager, # a failover sequence, we're expecting them to cleanup # and make sure the driver state is such that the original # backend is still set as primary as per driver memory - LOG.error(_LE("Driver reported error during " - "replication failover.")) + LOG.error("Driver reported error during " + "replication failover.") updates.update(disabled=True, replication_status=repl_status.FAILOVER_ERROR) if exception_encountered: LOG.error( - _LE("Error encountered during failover on host: " - "%(host)s invalid target ID %(backend_id)s"), + "Error encountered during failover on host: " + "%(host)s invalid target ID %(backend_id)s", {'host': self.host, 'backend_id': secondary_backend_id}) self.finish_failover(context, service, updates) @@ -4306,7 +4301,7 @@ class VolumeManager(manager.CleanableManager, vobj.update(update.get('updates', {})) vobj.save() - LOG.info(_LI("Failed over to replication target successfully.")) + LOG.info("Failed over to replication target successfully.") # TODO(geguileo): In P - remove this failover_host = failover @@ -4374,9 +4369,9 @@ class VolumeManager(manager.CleanableManager, # need the backend's consent or anything, we'll just # disable the service, so we can just log this and # go about our business - LOG.warning(_LW('Error encountered on Cinder backend during ' - 'freeze operation, service is frozen, however ' - 'notification to driver has failed.')) + LOG.warning('Error encountered on Cinder backend during ' + 'freeze operation, service is frozen, however ' + 'notification to driver has failed.') svc_host = vol_utils.extract_host(self.host, 'backend') service = objects.Service.get_by_args( @@ -4386,7 +4381,7 @@ class VolumeManager(manager.CleanableManager, service.disabled = True service.disabled_reason = "frozen" service.save() - LOG.info(_LI("Set backend status to frozen successfully.")) + LOG.info("Set backend status to frozen successfully.") return True def thaw_host(self, context): @@ -4407,8 +4402,8 @@ class VolumeManager(manager.CleanableManager, except exception.VolumeDriverException: # NOTE(jdg): Thaw actually matters, if this call # to the backend fails, we're stuck and can't re-enable - LOG.error(_LE('Error encountered on Cinder backend during ' - 'thaw operation, service will remain frozen.')) + LOG.error('Error encountered on Cinder 
backend during ' + 'thaw operation, service will remain frozen.') return False svc_host = vol_utils.extract_host(self.host, 'backend') @@ -4419,7 +4414,7 @@ class VolumeManager(manager.CleanableManager, service.disabled = False service.disabled_reason = "" service.save() - LOG.info(_LI("Thawed backend successfully.")) + LOG.info("Thawed backend successfully.") return True def manage_existing_snapshot(self, ctxt, snapshot, ref=None): @@ -4433,10 +4428,10 @@ class VolumeManager(manager.CleanableManager, snapshot.id, ref) except Exception: - msg = _LE("Failed to create manage_existing flow: " - "%(object_type)s %(object_id)s.") - LOG.exception(msg, {'object_type': 'snapshot', - 'object_id': snapshot.id}) + LOG.exception("Failed to create manage_existing flow: " + "%(object_type)s %(object_id)s.", + {'object_type': 'snapshot', + 'object_id': snapshot.id}) raise exception.CinderException( _("Failed to create manage existing flow.")) @@ -4450,8 +4445,8 @@ class VolumeManager(manager.CleanableManager, utils.require_driver_initialized(self.driver) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Listing manageable snapshots failed, due " - "to uninitialized driver.")) + LOG.exception("Listing manageable snapshots failed, due " + "to uninitialized driver.") cinder_snapshots = self._get_my_snapshots(ctxt) try: @@ -4462,8 +4457,8 @@ class VolumeManager(manager.CleanableManager, from_primitives(ctxt, driver_entries)) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Listing manageable snapshots failed, due " - "to driver error.")) + LOG.exception("Listing manageable snapshots failed, due " + "to driver error.") return driver_entries def get_capabilities(self, context, discover): @@ -4515,7 +4510,7 @@ class VolumeManager(manager.CleanableManager, volume.update(model_update) volume.save() except exception.CinderException as ex: - LOG.exception(_LE("Model update failed."), resource=volume) + LOG.exception("Model update failed.", resource=volume) raise exception.ExportFailure(reason=six.text_type(ex)) try: @@ -4607,7 +4602,7 @@ class VolumeManager(manager.CleanableManager, mode) vref.refresh() self._notify_about_volume_usage(context, vref, "attach.end") - LOG.info(_LI("Attach volume completed successfully."), + LOG.info("Attach volume completed successfully.", resource=vref) attachment_ref = objects.VolumeAttachment.get_by_id(context, attachment_id) @@ -4635,7 +4630,7 @@ class VolumeManager(manager.CleanableManager, % {'err': six.text_type(err)}) LOG.exception(err_msg, resource=volume) raise exception.VolumeBackendAPIException(data=err_msg) - LOG.info(_LI("Terminate volume connection completed successfully."), + LOG.info("Terminate volume connection completed successfully.", resource=volume) # NOTE(jdg): Return True/False if there are other outstanding # attachments that share this connection. 
If True should signify diff --git a/cinder/volume/qos_specs.py b/cinder/volume/qos_specs.py index 6f0d439d2ac..737e7208674 100644 --- a/cinder/volume/qos_specs.py +++ b/cinder/volume/qos_specs.py @@ -22,8 +22,8 @@ from oslo_log import log as logging from cinder import context from cinder import db from cinder import exception +from cinder.i18n import _ from cinder import objects -from cinder.i18n import _, _LE, _LW from cinder.volume import volume_types @@ -82,7 +82,7 @@ def update(context, qos_specs_id, specs): qos_spec.save() except db_exc.DBError: - LOG.exception(_LE('DB error:')) + LOG.exception('DB error:') raise exception.QoSSpecsUpdateFailed(specs_id=qos_specs_id, qos_specs=specs) @@ -138,7 +138,7 @@ def get_associations(context, qos_specs_id): types = objects.VolumeTypeList.get_all_types_for_qos(context, qos_specs_id) except db_exc.DBError: - LOG.exception(_LE('DB error:')) + LOG.exception('DB error:') msg = _('Failed to get all associations of ' 'qos specs %s') % qos_specs_id LOG.warning(msg) @@ -181,9 +181,9 @@ def associate_qos_with_type(context, specs_id, type_id): else: db.qos_specs_associate(context, specs_id, type_id) except db_exc.DBError: - LOG.exception(_LE('DB error:')) - LOG.warning(_LW('Failed to associate qos specs ' - '%(id)s with type: %(vol_type_id)s'), + LOG.exception('DB error:') + LOG.warning('Failed to associate qos specs ' + '%(id)s with type: %(vol_type_id)s', dict(id=specs_id, vol_type_id=type_id)) raise exception.QoSSpecsAssociateFailed(specs_id=specs_id, type_id=type_id) @@ -195,9 +195,9 @@ def disassociate_qos_specs(context, specs_id, type_id): get_qos_specs(context, specs_id) db.qos_specs_disassociate(context, specs_id, type_id) except db_exc.DBError: - LOG.exception(_LE('DB error:')) - LOG.warning(_LW('Failed to disassociate qos specs ' - '%(id)s with type: %(vol_type_id)s'), + LOG.exception('DB error:') + LOG.warning('Failed to disassociate qos specs ' + '%(id)s with type: %(vol_type_id)s', dict(id=specs_id, vol_type_id=type_id)) raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id, type_id=type_id) @@ -209,8 +209,8 @@ def disassociate_all(context, specs_id): get_qos_specs(context, specs_id) db.qos_specs_disassociate_all(context, specs_id) except db_exc.DBError: - LOG.exception(_LE('DB error:')) - LOG.warning(_LW('Failed to disassociate qos specs %s.'), specs_id) + LOG.exception('DB error:') + LOG.warning('Failed to disassociate qos specs %s.', specs_id) raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id, type_id=None) diff --git a/cinder/volume/targets/cxt.py b/cinder/volume/targets/cxt.py index ad3b7fe5468..44d7701ff0e 100644 --- a/cinder/volume/targets/cxt.py +++ b/cinder/volume/targets/cxt.py @@ -22,7 +22,6 @@ from oslo_utils import fileutils from oslo_utils import netutils from cinder import exception -from cinder.i18n import _LI, _LW, _LE from cinder import utils from cinder.volume.targets import iscsi @@ -128,8 +127,8 @@ class CxtAdm(iscsi.ISCSITarget): volume_path = os.path.join(volumes_dir, vol_id) if os.path.exists(volume_path): - LOG.warning(_LW('Persistence file already exists for volume, ' - 'found file at: %s'), volume_path) + LOG.warning('Persistence file already exists for volume, ' + 'found file at: %s', volume_path) utils.robust_file_write(volumes_dir, vol_id, volume_conf) LOG.debug('Created volume path %(vp)s,\n' 'content: %(vc)s', @@ -153,8 +152,8 @@ class CxtAdm(iscsi.ISCSITarget): '-x', self.config, run_as_root=True) except putils.ProcessExecutionError as e: - LOG.error(_LE("Failed to create iscsi target for 
volume " - "id:%(vol_id)s: %(e)s"), + LOG.error("Failed to create iscsi target for volume " + "id:%(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) # Don't forget to remove the persistent file we created @@ -174,11 +173,11 @@ class CxtAdm(iscsi.ISCSITarget): iqn = '%s%s' % (self.iscsi_target_prefix, vol_id) tid = self._get_target(iqn) if tid is None: - LOG.error(_LE("Failed to create iscsi target for volume " - "id:%(vol_id)s. Please verify your configuration " - "in %(volumes_dir)s'"), { - 'vol_id': vol_id, - 'volumes_dir': volumes_dir, }) + LOG.error("Failed to create iscsi target for volume " + "id:%(vol_id)s. Please verify your configuration " + "in %(volumes_dir)s'", + {'vol_id': vol_id, + 'volumes_dir': volumes_dir, }) raise exception.NotFound() if old_persist_file is not None and os.path.exists(old_persist_file): @@ -187,12 +186,12 @@ class CxtAdm(iscsi.ISCSITarget): return tid def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): - LOG.info(_LI('Removing iscsi_target for: %s'), vol_id) + LOG.info('Removing iscsi_target for: %s', vol_id) vol_uuid_file = vol_name volume_path = os.path.join(self._get_volumes_dir(), vol_uuid_file) if not os.path.exists(volume_path): - LOG.warning(_LW('Volume path %s does not exist, ' - 'nothing to remove.'), volume_path) + LOG.warning('Volume path %s does not exist, ' + 'nothing to remove.', volume_path) return if os.path.isfile(volume_path): @@ -211,8 +210,8 @@ class CxtAdm(iscsi.ISCSITarget): LOG.debug("StdErr from iscsictl -c: %s", err) except putils.ProcessExecutionError as e: if "NOT found" in e.stdout: - LOG.info(_LI("No iscsi target present for volume " - "id:%(vol_id)s: %(e)s"), + LOG.info("No iscsi target present for volume " + "id:%(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) return else: @@ -231,13 +230,13 @@ class CxtAdm(iscsi.ISCSITarget): # for a target successfully but it is gone before we can remove # it, fail silently if "is not found" in e.stderr and target_exists: - LOG.info(_LI("No iscsi target present for volume " - "id:%(vol_id)s: %(e)s"), + LOG.info("No iscsi target present for volume " + "id:%(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) return else: - LOG.error(_LE("Failed to remove iscsi target for volume " - "id:%(vol_id)s: %(e)s"), + LOG.error("Failed to remove iscsi target for volume " + "id:%(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) diff --git a/cinder/volume/targets/iet.py b/cinder/volume/targets/iet.py index abd5da05c21..2bff2ac88fe 100644 --- a/cinder/volume/targets/iet.py +++ b/cinder/volume/targets/iet.py @@ -18,7 +18,6 @@ from oslo_concurrency import processutils as putils from oslo_log import log as logging from cinder import exception -from cinder.i18n import _LI, _LE, _LW from cinder import utils from cinder.volume.targets import iscsi @@ -51,7 +50,7 @@ class IetAdm(iscsi.ISCSITarget): with open(self.iet_sessions, 'r') as f: sessions = f.read() except Exception: - LOG.exception(_LE("Failed to open iet session list for %s"), iqn) + LOG.exception("Failed to open iet session list for %s", iqn) raise session_list = re.split('^tid:(?m)', sessions)[1:] @@ -101,8 +100,8 @@ class IetAdm(iscsi.ISCSITarget): config_auth = ' '.join((self.auth_type,) + chap_auth) self._new_auth(tid, self.auth_type, username, password) except putils.ProcessExecutionError: - LOG.exception(_LE("Failed to create iscsi target for volume " - "id:%s"), vol_id) + LOG.exception("Failed to create iscsi target for volume " + "id:%s", vol_id) raise 
exception.ISCSITargetCreateFailed(volume_id=vol_id) # Update config file only if new scsi target is created. @@ -123,8 +122,8 @@ class IetAdm(iscsi.ISCSITarget): utils.execute("truncate", conf_file, "--size=0", run_as_root=True) except putils.ProcessExecutionError: - LOG.exception(_LE("Failed to create %(conf)s for volume " - "id:%(vol_id)s"), + LOG.exception("Failed to create %(conf)s for volume " + "id:%(vol_id)s", {'conf': conf_file, 'vol_id': vol_id}) raise exception.ISCSITargetCreateFailed(volume_id=vol_id) @@ -139,13 +138,13 @@ class IetAdm(iscsi.ISCSITarget): with open(conf_file, 'a+') as f: f.write(volume_conf) except Exception: - LOG.exception(_LE("Failed to update %(conf)s for volume " - "id:%(vol_id)s"), + LOG.exception("Failed to update %(conf)s for volume " + "id:%(vol_id)s", {'conf': conf_file, 'vol_id': vol_id}) raise exception.ISCSITargetCreateFailed(volume_id=vol_id) def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): - LOG.info(_LI("Removing iscsi_target for volume: %s"), vol_id) + LOG.info("Removing iscsi_target for volume: %s", vol_id) try: self._delete_logicalunit(tid, lun) @@ -156,8 +155,8 @@ class IetAdm(iscsi.ISCSITarget): self._delete_target(tid) except putils.ProcessExecutionError: - LOG.exception(_LE("Failed to remove iscsi target for volume " - "id:%s"), vol_id) + LOG.exception("Failed to remove iscsi target for volume " + "id:%s", vol_id) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) vol_uuid_file = vol_name @@ -183,14 +182,14 @@ class IetAdm(iscsi.ISCSITarget): iet_conf_text.truncate(0) iet_conf_text.writelines(new_iet_conf_txt) except Exception: - LOG.exception(_LE("Failed to update %(conf)s for volume id " - "%(vol_id)s after removing iscsi target"), + LOG.exception("Failed to update %(conf)s for volume id " + "%(vol_id)s after removing iscsi target", {'conf': conf_file, 'vol_id': vol_id}) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) else: - LOG.warning(_LW("Failed to update %(conf)s for volume id " - "%(vol_id)s after removing iscsi target. " - "%(conf)s does not exist."), + LOG.warning("Failed to update %(conf)s for volume id " + "%(vol_id)s after removing iscsi target. " + "%(conf)s does not exist.", {'conf': conf_file, 'vol_id': vol_id}) def _find_sid_cid_for_target(self, tid, name, vol_id): @@ -200,8 +199,8 @@ class IetAdm(iscsi.ISCSITarget): with open(self.iet_sessions, 'r') as f: sessions = f.read() except Exception as e: - LOG.info(_LI("Failed to open iet session list for " - "%(vol_id)s: %(e)s"), + LOG.info("Failed to open iet session list for " + "%(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) return None diff --git a/cinder/volume/targets/iscsi.py b/cinder/volume/targets/iscsi.py index 47d38786bdb..cb6b6fe065e 100644 --- a/cinder/volume/targets/iscsi.py +++ b/cinder/volume/targets/iscsi.py @@ -16,7 +16,7 @@ from oslo_concurrency import processutils from oslo_log import log as logging from cinder import exception -from cinder.i18n import _, _LI, _LW, _LE +from cinder.i18n import _ from cinder import utils from cinder.volume.targets import driver from cinder.volume import utils as vutils @@ -151,7 +151,7 @@ class ISCSITarget(driver.Target): def _do_iscsi_discovery(self, volume): # TODO(justinsb): Deprecate discovery and use stored info # NOTE(justinsb): Discovery won't work with CHAP-secured targets (?) 
- LOG.warning(_LW("ISCSI provider_location not stored, using discovery")) + LOG.warning("ISCSI provider_location not stored, using discovery") volume_id = volume['id'] @@ -164,9 +164,9 @@ class ISCSITarget(driver.Target): volume['host'].split('@')[0], run_as_root=True) except processutils.ProcessExecutionError as ex: - LOG.error(_LE("ISCSI discovery attempt failed for:%s") % + LOG.error("ISCSI discovery attempt failed for: %s", volume['host'].split('@')[0]) - LOG.debug(("Error from iscsiadm -m discovery: %s") % ex.stderr) + LOG.debug("Error from iscsiadm -m discovery: %s", ex.stderr) return None for target in out.splitlines(): @@ -221,8 +221,8 @@ class ISCSITarget(driver.Target): try: iscsi_target, lun = self._get_target_and_lun(context, volume) except exception.NotFound: - LOG.info(_LI("Skipping remove_export. No iscsi_target " - "provisioned for volume: %s"), volume['id']) + LOG.info("Skipping remove_export. No iscsi_target " + "provisioned for volume: %s", volume['id']) return try: @@ -236,8 +236,8 @@ class ISCSITarget(driver.Target): self.show_target(iscsi_target, iqn=iqn) except Exception: - LOG.info(_LI("Skipping remove_export. No iscsi_target " - "is presently exported for volume: %s"), volume['id']) + LOG.info("Skipping remove_export. No iscsi_target " + "is presently exported for volume: %s", volume['id']) return # NOTE: For TgtAdm case volume['id'] is the ONLY param we need @@ -293,8 +293,8 @@ class ISCSITarget(driver.Target): def validate_connector(self, connector): # NOTE(jdg): api passes in connector which is initiator info if 'initiator' not in connector: - err_msg = (_LE('The volume driver requires the iSCSI initiator ' - 'name in the connector.')) + err_msg = ('The volume driver requires the iSCSI initiator ' + 'name in the connector.') LOG.error(err_msg) raise exception.InvalidConnectorException(missing='initiator') return True diff --git a/cinder/volume/targets/lio.py b/cinder/volume/targets/lio.py index dfed1807f27..85d1f220523 100644 --- a/cinder/volume/targets/lio.py +++ b/cinder/volume/targets/lio.py @@ -14,7 +14,6 @@ from oslo_concurrency import processutils as putils from oslo_log import log as logging from cinder import exception -from cinder.i18n import _LE, _LI, _LW from cinder import utils from cinder.volume.targets import iscsi @@ -38,7 +37,7 @@ class LioAdm(iscsi.ISCSITarget): # This call doesn't need locking utils.execute('cinder-rtstool', 'verify') except (OSError, putils.ProcessExecutionError): - LOG.error(_LE('cinder-rtstool is not installed correctly')) + LOG.error('cinder-rtstool is not installed correctly') raise @staticmethod @@ -83,8 +82,8 @@ class LioAdm(iscsi.ISCSITarget): # On persistence failure we don't raise an exception, as target has # been successfully created. except putils.ProcessExecutionError: - LOG.warning(_LW("Failed to save iscsi LIO configuration when " - "modifying volume id: %(vol_id)s."), + LOG.warning("Failed to save iscsi LIO configuration when " + "modifying volume id: %(vol_id)s.", {'vol_id': vol_id}) def _restore_configuration(self): @@ -94,7 +93,7 @@ class LioAdm(iscsi.ISCSITarget): # On persistence failure we don't raise an exception, as target has # been successfully created. 
except putils.ProcessExecutionError: - LOG.warning(_LW("Failed to restore iscsi LIO configuration.")) + LOG.warning("Failed to restore iscsi LIO configuration.") def create_iscsi_target(self, name, tid, lun, path, chap_auth=None, **kwargs): @@ -102,7 +101,7 @@ class LioAdm(iscsi.ISCSITarget): vol_id = name.split(':')[1] - LOG.info(_LI('Creating iscsi_target for volume: %s'), vol_id) + LOG.info('Creating iscsi_target for volume: %s', vol_id) chap_auth_userid = "" chap_auth_password = "" @@ -126,16 +125,16 @@ class LioAdm(iscsi.ISCSITarget): self.iscsi_protocol == 'iser'] + optional_args self._execute(*command_args, run_as_root=True) except putils.ProcessExecutionError: - LOG.exception(_LE("Failed to create iscsi target for volume " - "id:%s."), vol_id) + LOG.exception("Failed to create iscsi target for volume " + "id:%s.", vol_id) raise exception.ISCSITargetCreateFailed(volume_id=vol_id) iqn = '%s%s' % (self.iscsi_target_prefix, vol_id) tid = self._get_target(iqn) if tid is None: - LOG.error(_LE("Failed to create iscsi target for volume " - "id:%s."), vol_id) + LOG.error("Failed to create iscsi target for volume id:%s.", + vol_id) raise exception.NotFound() # We make changes persistent @@ -144,7 +143,7 @@ class LioAdm(iscsi.ISCSITarget): return tid def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): - LOG.info(_LI('Removing iscsi_target: %s'), vol_id) + LOG.info('Removing iscsi_target: %s', vol_id) vol_uuid_name = vol_name iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_name) @@ -154,8 +153,8 @@ class LioAdm(iscsi.ISCSITarget): iqn, run_as_root=True) except putils.ProcessExecutionError: - LOG.exception(_LE("Failed to remove iscsi target for volume " - "id:%s."), vol_id) + LOG.exception("Failed to remove iscsi target for volume id:%s.", + vol_id) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) # We make changes persistent @@ -176,7 +175,7 @@ class LioAdm(iscsi.ISCSITarget): connector['initiator'], run_as_root=True) except putils.ProcessExecutionError: - LOG.exception(_LE("Failed to add initiator iqn %s to target"), + LOG.exception("Failed to add initiator iqn %s to target", connector['initiator']) raise exception.ISCSITargetAttachFailed( volume_id=volume['id']) @@ -197,7 +196,7 @@ class LioAdm(iscsi.ISCSITarget): run_as_root=True) except putils.ProcessExecutionError: LOG.exception( - _LE("Failed to delete initiator iqn %s from target."), + "Failed to delete initiator iqn %s from target.", connector['initiator']) raise exception.ISCSITargetDetachFailed(volume_id=volume['id']) @@ -209,8 +208,8 @@ class LioAdm(iscsi.ISCSITarget): # Restore saved configuration file if no target exists. if not self._get_targets(): - LOG.info(_LI('Restoring iSCSI target from configuration file')) + LOG.info('Restoring iSCSI target from configuration file') self._restore_configuration() return - LOG.info(_LI("Skipping ensure_export. Found existing iSCSI target.")) + LOG.info("Skipping ensure_export. 
Found existing iSCSI target.") diff --git a/cinder/volume/targets/scst.py b/cinder/volume/targets/scst.py index 21ac4af4931..b72ac0571e9 100644 --- a/cinder/volume/targets/scst.py +++ b/cinder/volume/targets/scst.py @@ -14,8 +14,8 @@ from oslo_concurrency import processutils as putils from oslo_log import log as logging from cinder import exception +from cinder.i18n import _ from cinder import utils -from cinder.i18n import _, _LE from cinder.volume.targets import iscsi from cinder.volume import utils as vutils @@ -118,8 +118,7 @@ class SCSTAdm(iscsi.ISCSITarget): 'enabled=1') LOG.debug('StdOut from set driver attribute: %s', out) except putils.ProcessExecutionError as e: - LOG.error(_LE("Failed to set attribute for enable target driver " - "%s"), e) + LOG.error("Failed to set attribute for enable target driver %s", e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to enable SCST Target driver.") @@ -129,16 +128,16 @@ class SCSTAdm(iscsi.ISCSITarget): '-driver', self.target_driver) LOG.debug("StdOut from scstadmin create target: %s", out) except putils.ProcessExecutionError as e: - LOG.error(_LE("Failed to create iscsi target for volume " - "id:%(vol_id)s: %(e)s"), {'vol_id': name, 'e': e}) + LOG.error("Failed to create iscsi target for volume " + "id:%(vol_id)s: %(e)s", {'vol_id': name, 'e': e}) raise exception.ISCSITargetCreateFailed(volume_id=vol_name) try: (out, _err) = self.scst_execute('-enable_target', name, '-driver', self.target_driver) LOG.debug("StdOut from scstadmin enable target: %s", out) except putils.ProcessExecutionError as e: - LOG.error(_LE("Failed to set 'enable' attribute for " - "SCST target %s"), e) + LOG.error("Failed to set 'enable' attribute for " + "SCST target %s", e) raise exception.ISCSITargetHelperCommandFailed( error_mesage="Failed to enable SCST Target.") if chap_auth and self.target_name: @@ -169,8 +168,7 @@ class SCSTAdm(iscsi.ISCSITarget): '-target', name) LOG.debug("StdOut from scstadmin create group: %s", out) except putils.ProcessExecutionError as e: - LOG.error(_LE("Failed to create group to SCST target " - "%s"), e) + LOG.error("Failed to create group to SCST target %s", e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to create group to SCST target.") try: @@ -181,8 +179,8 @@ class SCSTAdm(iscsi.ISCSITarget): '-group', scst_group) LOG.debug("StdOut from scstadmin add initiator: %s", out) except putils.ProcessExecutionError as e: - LOG.error(_LE("Failed to add initiator to group " - " for SCST target %s"), e) + LOG.error("Failed to add initiator to group " + " for SCST target %s", e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to add Initiator to group for " "SCST target.") @@ -198,7 +196,7 @@ class SCSTAdm(iscsi.ISCSITarget): '-handler', 'vdisk_fileio', '-attributes', 'filename=%s' % path) except putils.ProcessExecutionError as e: - LOG.error(_LE("Failed to add device to handler %s"), e) + LOG.error("Failed to add device to handler %s", e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to add device to SCST handler.") @@ -215,8 +213,8 @@ class SCSTAdm(iscsi.ISCSITarget): '-target', name, '-device', disk_id) except putils.ProcessExecutionError as e: - LOG.error(_LE("Failed to add lun to SCST target " - "id:%(vol_id)s: %(e)s"), {'vol_id': name, 'e': e}) + LOG.error("Failed to add lun to SCST target " + "id:%(vol_id)s: %(e)s", {'vol_id': name, 'e': e}) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to add LUN to SCST Target for " 
"volume " + vol_name) @@ -226,7 +224,7 @@ class SCSTAdm(iscsi.ISCSITarget): try: self.scst_execute('-write_config', '/etc/scst.conf') except putils.ProcessExecutionError as e: - LOG.error(_LE("Failed to write in /etc/scst.conf.")) + LOG.error("Failed to write in /etc/scst.conf.") raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to write in /etc/scst.conf.") @@ -312,8 +310,8 @@ class SCSTAdm(iscsi.ISCSITarget): self.show_target(iscsi_target, iqn) except Exception: - LOG.error(_LE("Skipping remove_export. No iscsi_target is" - "presently exported for volume: %s"), volume['id']) + LOG.error("Skipping remove_export. No iscsi_target is" + "presently exported for volume: %s", volume['id']) return vol = self.db.volume_get(context, volume['id']) lun = "".join(vol['provider_location'].split(" ")[-1:]) @@ -335,15 +333,15 @@ class SCSTAdm(iscsi.ISCSITarget): '-rem_target', iqn, '-driver', 'iscsi') except putils.ProcessExecutionError as e: - LOG.error(_LE("Failed to remove iscsi target for volume " - "id:%(vol_id)s: %(e)s"), {'vol_id': vol_id, 'e': e}) + LOG.error("Failed to remove iscsi target for volume " + "id:%(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) try: self.scst_execute('-noprompt', '-close_dev', "disk%s" % tid, '-handler', 'vdisk_fileio') except putils.ProcessExecutionError as e: - LOG.error(_LE("Failed to close disk device %s"), e) + LOG.error("Failed to close disk device %s", e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to close disk device for " "SCST handler.") @@ -354,8 +352,8 @@ class SCSTAdm(iscsi.ISCSITarget): '-rem_target', iqn, '-driver', self.target_driver) except putils.ProcessExecutionError as e: - LOG.error(_LE("Failed to remove iscsi target for " - "volume id:%(vol_id)s: %(e)s"), + LOG.error("Failed to remove iscsi target for " + "volume id:%(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) else: @@ -369,7 +367,7 @@ class SCSTAdm(iscsi.ISCSITarget): '-target', iqn, '-group', scst_group) except putils.ProcessExecutionError as e: - LOG.error(_LE("Failed to remove LUN %s"), e) + LOG.error("Failed to remove LUN %s", e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to remove LUN for SCST Target.") @@ -378,7 +376,7 @@ class SCSTAdm(iscsi.ISCSITarget): '-close_dev', disk_id, '-handler', 'vdisk_fileio') except putils.ProcessExecutionError as e: - LOG.error(_LE("Failed to close disk device %s"), e) + LOG.error("Failed to close disk device %s", e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to close disk device for " "SCST handler.") diff --git a/cinder/volume/targets/tgt.py b/cinder/volume/targets/tgt.py index c7cf10980d6..b54a7a83c55 100644 --- a/cinder/volume/targets/tgt.py +++ b/cinder/volume/targets/tgt.py @@ -19,7 +19,6 @@ from oslo_log import log as logging from oslo_utils import fileutils from cinder import exception -from cinder.i18n import _LI, _LW, _LE from cinder import utils from cinder.volume.targets import iscsi @@ -78,7 +77,7 @@ class TgtAdm(iscsi.ISCSITarget): return backing_lun def _recreate_backing_lun(self, iqn, tid, name, path): - LOG.warning(_LW('Attempting recreate of backing lun...')) + LOG.warning('Attempting recreate of backing lun...') # Since we think the most common case of this is a dev busy # (create vol from snapshot) we're going to add a sleep here @@ -95,9 +94,9 @@ class TgtAdm(iscsi.ISCSITarget): tid, '--lun', '1', '-b', path, 
run_as_root=True) except putils.ProcessExecutionError as e: - LOG.error(_LE("Failed recovery attempt to create " - "iscsi backing lun for Volume " - "ID:%(vol_id)s: %(e)s"), + LOG.error("Failed recovery attempt to create " + "iscsi backing lun for Volume " + "ID:%(vol_id)s: %(e)s", {'vol_id': name, 'e': e}) finally: LOG.debug('StdOut from recreate backing lun: %s', out) @@ -187,13 +186,13 @@ class TgtAdm(iscsi.ISCSITarget): if "target already exists" in e.stderr: # Adding the additional Warning message below for a clear # ER marker (Ref bug: #1398078). - LOG.warning(_LW('Could not create target because ' - 'it already exists for volume: %s'), vol_id) + LOG.warning('Could not create target because ' + 'it already exists for volume: %s', vol_id) LOG.debug('Exception was: %s', e) else: - LOG.error(_LE("Failed to create iscsi target for Volume " - "ID: %(vol_id)s: %(e)s"), + LOG.error("Failed to create iscsi target for Volume " + "ID: %(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) # Don't forget to remove the persistent file we created @@ -216,13 +215,13 @@ class TgtAdm(iscsi.ISCSITarget): iqn = '%s%s' % (self.iscsi_target_prefix, vol_id) tid = self._get_target(iqn) if tid is None: - LOG.warning(_LW("Failed to create iscsi target for Volume " - "ID: %(vol_id)s. It could be caused by problem " - "with concurrency. " - "Also please ensure your tgtd config " - "file contains 'include %(volumes_dir)s/*'"), { - 'vol_id': vol_id, - 'volumes_dir': volumes_dir, }) + LOG.warning("Failed to create iscsi target for Volume " + "ID: %(vol_id)s. It could be caused by problem " + "with concurrency. " + "Also please ensure your tgtd config " + "file contains 'include %(volumes_dir)s/*'", + {'vol_id': vol_id, + 'volumes_dir': volumes_dir, }) raise exception.NotFound() # NOTE(jdg): Sometimes we have some issues with the backing lun @@ -248,12 +247,12 @@ class TgtAdm(iscsi.ISCSITarget): return tid def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): - LOG.info(_LI('Removing iscsi_target for Volume ID: %s'), vol_id) + LOG.info('Removing iscsi_target for Volume ID: %s', vol_id) vol_uuid_file = vol_name volume_path = os.path.join(self.volumes_dir, vol_uuid_file) if not os.path.exists(volume_path): - LOG.warning(_LW('Volume path %s does not exist, ' - 'nothing to remove.'), volume_path) + LOG.warning('Volume path %s does not exist, ' + 'nothing to remove.', volume_path) return if os.path.isfile(volume_path): @@ -274,11 +273,11 @@ class TgtAdm(iscsi.ISCSITarget): "access control rule does not exist") if any(error in e.stderr for error in non_fatal_errors): - LOG.warning(_LW("Failed target removal because target or " - "ACL's couldn't be found for iqn: %s."), iqn) + LOG.warning("Failed target removal because target or " + "ACL's couldn't be found for iqn: %s.", iqn) else: - LOG.error(_LE("Failed to remove iscsi target for Volume " - "ID: %(vol_id)s: %(e)s"), + LOG.error("Failed to remove iscsi target for Volume " + "ID: %(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) # NOTE(jdg): There's a bug in some versions of tgt that @@ -293,15 +292,15 @@ class TgtAdm(iscsi.ISCSITarget): # https://bugs.launchpad.net/cinder/+bug/1304122 if self._get_target(iqn): try: - LOG.warning(_LW('Silent failure of target removal ' - 'detected, retry....')) + LOG.warning('Silent failure of target removal ' + 'detected, retry....') utils.execute('tgt-admin', '--delete', iqn, run_as_root=True) except putils.ProcessExecutionError as e: - LOG.error(_LE("Failed to remove 
iscsi target for Volume " - "ID: %(vol_id)s: %(e)s"), + LOG.error("Failed to remove iscsi target for Volume " + "ID: %(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) diff --git a/cinder/volume/throttling.py b/cinder/volume/throttling.py index b8ce5705ab0..39cbbeb9900 100644 --- a/cinder/volume/throttling.py +++ b/cinder/volume/throttling.py @@ -22,7 +22,6 @@ from oslo_concurrency import processutils from oslo_log import log as logging from cinder import exception -from cinder.i18n import _LW, _LE from cinder import utils @@ -69,7 +68,7 @@ class BlkioCgroup(Throttle): utils.execute('cgcreate', '-g', 'blkio:%s' % self.cgroup, run_as_root=True) except processutils.ProcessExecutionError: - LOG.error(_LE('Failed to create blkio cgroup \'%(name)s\'.'), + LOG.error('Failed to create blkio cgroup \'%(name)s\'.', {'name': cgroup_name}) raise @@ -77,16 +76,16 @@ class BlkioCgroup(Throttle): try: return utils.get_blkdev_major_minor(path) except exception.Error as e: - LOG.error(_LE('Failed to get device number for throttling: ' - '%(error)s'), {'error': e}) + LOG.error('Failed to get device number for throttling: ' + '%(error)s', {'error': e}) def _limit_bps(self, rw, dev, bps): try: utils.execute('cgset', '-r', 'blkio.throttle.%s_bps_device=%s %d' % (rw, dev, bps), self.cgroup, run_as_root=True) except processutils.ProcessExecutionError: - LOG.warning(_LW('Failed to setup blkio cgroup to throttle the ' - 'device \'%(device)s\'.'), {'device': dev}) + LOG.warning('Failed to setup blkio cgroup to throttle the ' + 'device \'%(device)s\'.', {'device': dev}) def _set_limits(self, rw, devs): total = sum(devs.values()) diff --git a/cinder/volume/utils.py b/cinder/volume/utils.py index f7066ecb737..a84d5ef317a 100644 --- a/cinder/volume/utils.py +++ b/cinder/volume/utils.py @@ -39,7 +39,7 @@ from cinder.brick.local_dev import lvm as brick_lvm from cinder import context from cinder import db from cinder import exception -from cinder.i18n import _, _LI, _LW, _LE +from cinder.i18n import _ from cinder import objects from cinder import rpc from cinder import utils @@ -399,9 +399,9 @@ def _check_blocksize(blocksize): raise ValueError strutils.string_to_bytes('%sB' % blocksize) except ValueError: - LOG.warning(_LW("Incorrect value error: %(blocksize)s, " - "it may indicate that \'volume_dd_blocksize\' " - "was configured incorrectly. Fall back to default."), + LOG.warning("Incorrect value error: %(blocksize)s, " + "it may indicate that \'volume_dd_blocksize\' " + "was configured incorrectly. 
Fall back to default.", {'blocksize': blocksize}) # Fall back to default blocksize CONF.clear_override('volume_dd_blocksize') @@ -478,7 +478,7 @@ def _copy_volume_with_path(prefix, srcstr, deststr, size_in_m, blocksize, "dest": deststr, "sz": size_in_m, "duration": duration}) - LOG.info(_LI("Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s"), + LOG.info("Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s", {'size_in_m': size_in_m, 'mbps': mbps}) @@ -488,7 +488,7 @@ def _open_volume_with_path(path, mode): handle = open(path, mode) return handle except Exception: - LOG.error(_LE("Failed to open volume from %(path)s."), {'path': path}) + LOG.error("Failed to open volume from %(path)s.", {'path': path}) def _transfer_data(src, dest, length, chunk_size): @@ -551,8 +551,8 @@ def _copy_volume_with_file(src, dest, size_in_m): dest_handle.close() mbps = (size_in_m / duration) - LOG.info(_LI("Volume copy completed (%(size_in_m).2f MB at " - "%(mbps).2f MB/s)."), + LOG.info("Volume copy completed (%(size_in_m).2f MB at " + "%(mbps).2f MB/s).", {'size_in_m': size_in_m, 'mbps': mbps}) @@ -601,7 +601,7 @@ def clear_volume(volume_size, volume_path, volume_clear=None, if volume_clear_ionice is None: volume_clear_ionice = CONF.volume_clear_ionice - LOG.info(_LI("Performing secure delete on volume: %s"), volume_path) + LOG.info("Performing secure delete on volume: %s", volume_path) # We pass sparse=False explicitly here so that zero blocks are not # skipped in order to clear the volume. @@ -861,8 +861,8 @@ def convert_config_string_to_dict(config_string): st = st.replace(" ", ", ") resultant_dict = ast.literal_eval(st) except Exception: - LOG.warning(_LW("Error encountered translating config_string: " - "%(config_string)s to dict"), + LOG.warning("Error encountered translating config_string: " + "%(config_string)s to dict", {'config_string': config_string}) return resultant_dict diff --git a/cinder/volume/volume_types.py b/cinder/volume/volume_types.py index f2010f75416..cb77316ee68 100644 --- a/cinder/volume/volume_types.py +++ b/cinder/volume/volume_types.py @@ -28,7 +28,7 @@ from oslo_utils import uuidutils from cinder import context from cinder import db from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _ from cinder import quota from cinder import rpc from cinder import utils @@ -58,7 +58,7 @@ def create(context, description=description), projects=projects) except db_exc.DBError: - LOG.exception(_LE('DB error:')) + LOG.exception('DB error:') raise exception.VolumeTypeCreateFailed(name=name, extra_specs=extra_specs) return type_ref @@ -83,7 +83,7 @@ def update(context, id, name, description, is_public=None): old_type_name, name) except db_exc.DBError: - LOG.exception(_LE('DB error:')) + LOG.exception('DB error:') raise exception.VolumeTypeUpdateFailed(id=id) @@ -159,8 +159,8 @@ def get_default_volume_type(): # Couldn't find volume type with the name in default_volume_type # flag, record this issue and move on # TODO(zhiteng) consider add notification to warn admin - LOG.exception(_LE('Default volume type is not found. ' - 'Please check default_volume_type config:')) + LOG.exception('Default volume type is not found. 
' + 'Please check default_volume_type config:') return vol_type diff --git a/cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py b/cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py index a9fb92b94c4..a4dee42721b 100644 --- a/cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py +++ b/cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py @@ -19,7 +19,7 @@ from oslo_utils import excutils from oslo_utils import importutils from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _ from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as fabric_opts from cinder.zonemanager import fc_san_lookup_service as fc_service from cinder.zonemanager import utils as fczm_utils @@ -117,8 +117,8 @@ class BrcdFCSanLookupService(fc_service.FCSanLookupService): nsinfo = conn.get_nameserver_info() except exception.FCSanLookupServiceException: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed collecting name server info from" - " fabric %s"), fabric_ip) + LOG.error("Failed collecting name server info from" + " fabric %s", fabric_ip) except Exception as e: msg = _("SSH connection failed " "for %(fabric)s with error: %(err)s" diff --git a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py b/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py index dc932ffe413..884aa509d16 100644 --- a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py +++ b/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py @@ -29,7 +29,7 @@ from oslo_utils import excutils import six from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _ from cinder import ssh_utils from cinder import utils import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant @@ -84,8 +84,8 @@ class BrcdFCZoneClientCLI(object): [zone_constant.GET_ACTIVE_ZONE_CFG]) except exception.BrocadeZoningCliException: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed getting active zone set " - "from fabric %s"), self.switch_ip) + LOG.error("Failed getting active zone set " + "from fabric %s", self.switch_ip) try: for line in switch_data: line_split = re.split('\\t', line) @@ -333,8 +333,8 @@ class BrcdFCZoneClientCLI(object): cli_output = self._get_switch_info([cmd]) except exception.BrocadeZoningCliException: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed collecting nsshow " - "info for fabric %s"), self.switch_ip) + LOG.error("Failed collecting nsshow " + "info for fabric %s", self.switch_ip) if (cli_output): return_list = self._parse_ns_output(cli_output) cli_output = None @@ -405,7 +405,7 @@ class BrcdFCZoneClientCLI(object): firmware = int(ver[0] + ver[1]) return firmware > 63 else: - LOG.error(_LE("No CLI output for firmware version check")) + LOG.error("No CLI output for firmware version check") return False except processutils.ProcessExecutionError as e: msg = _("Error while getting data via ssh: (command=%(cmd)s " @@ -474,7 +474,7 @@ class BrcdFCZoneClientCLI(object): command, check_exit_code=check_exit_code) except Exception as e: - LOG.exception(_LE('Error executing SSH command.')) + LOG.exception('Error executing SSH command.') last_exception = e greenthread.sleep(random.randint(20, 500) / 100.0) try: @@ -491,7 +491,7 @@ class BrcdFCZoneClientCLI(object): cmd=command) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error running SSH command: %s"), command) + LOG.error("Error running SSH command: %s", 
command) def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1): """Execute cli with status update. @@ -537,7 +537,7 @@ class BrcdFCZoneClientCLI(object): else: return True except Exception as e: - LOG.exception(_LE('Error executing SSH command.')) + LOG.exception('Error executing SSH command.') last_exception = e greenthread.sleep(random.randint(20, 500) / 100.0) LOG.debug("Handling error case after " @@ -556,7 +556,7 @@ class BrcdFCZoneClientCLI(object): cmd=command) except Exception as e: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error executing command via ssh: %s"), e) + LOG.error("Error executing command via ssh: %s", e) finally: if stdin: stdin.flush() @@ -615,7 +615,7 @@ exit try: channel.close() except Exception: - LOG.exception(_LE('Error closing channel.')) + LOG.exception('Error closing channel.') LOG.debug("_execute_cmd: stdout to return: %s", stdout) LOG.debug("_execute_cmd: stderr to return: %s", stderr) return (stdout, stderr) diff --git a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py b/cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py index 64bb4f24959..69c9edf5fc5 100644 --- a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py +++ b/cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py @@ -37,7 +37,7 @@ import six import string from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import interface from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as fabric_opts from cinder.zonemanager.drivers.brocade import fc_zone_constants @@ -124,8 +124,8 @@ class BrcdFCZoneDriver(fc_zone_driver.FCZoneDriver): :param fabric: Fabric name from cinder.conf file :param initiator_target_map: Mapping of initiator to list of targets """ - LOG.info(_LI("BrcdFCZoneDriver - Add connection for fabric " - "%(fabric)s for I-T map: %(i_t_map)s"), + LOG.info("BrcdFCZoneDriver - Add connection for fabric " + "%(fabric)s for I-T map: %(i_t_map)s", {'fabric': fabric, 'i_t_map': initiator_target_map}) zoning_policy = self.configuration.zoning_policy @@ -137,12 +137,12 @@ class BrcdFCZoneDriver(fc_zone_driver.FCZoneDriver): 'zone_activate') if zoning_policy_fab: zoning_policy = zoning_policy_fab - LOG.info(_LI("Zoning policy for Fabric %(policy)s"), + LOG.info("Zoning policy for Fabric %(policy)s", {'policy': zoning_policy}) if (zoning_policy != 'initiator' and zoning_policy != 'initiator-target'): - LOG.info(_LI("Zoning policy is not valid, " - "no zoning will be performed.")) + LOG.info("Zoning policy is not valid, " + "no zoning will be performed.") return client = self._get_southbound_client(fabric) @@ -175,8 +175,8 @@ class BrcdFCZoneDriver(fc_zone_driver.FCZoneDriver): zone_map[zone_name] = zone_members else: # This is I-T zoning, skip if zone already exists. - LOG.info(_LI("Zone exists in I-T mode. Skipping " - "zone creation for %(zonename)s"), + LOG.info("Zone exists in I-T mode. 
Skipping " + "zone creation for %(zonename)s", {'zonename': zone_name}) elif zoning_policy == 'initiator': zone_members = [utils.get_formatted_wwn(initiator)] @@ -208,9 +208,9 @@ class BrcdFCZoneDriver(fc_zone_driver.FCZoneDriver): else: zone_map[zone_name] = zone_members - LOG.info(_LI("Zone map to create: %(zonemap)s"), + LOG.info("Zone map to create: %(zonemap)s", {'zonemap': zone_map}) - LOG.info(_LI("Zone map to update: %(zone_update_map)s"), + LOG.info("Zone map to update: %(zone_update_map)s", {'zone_update_map': zone_update_map}) try: @@ -247,8 +247,8 @@ class BrcdFCZoneDriver(fc_zone_driver.FCZoneDriver): :param fabric: Fabric name from cinder.conf file :param initiator_target_map: Mapping of initiator to list of targets """ - LOG.info(_LI("BrcdFCZoneDriver - Delete connection for fabric " - "%(fabric)s for I-T map: %(i_t_map)s"), + LOG.info("BrcdFCZoneDriver - Delete connection for fabric " + "%(fabric)s for I-T map: %(i_t_map)s", {'fabric': fabric, 'i_t_map': initiator_target_map}) zoning_policy = self.configuration.zoning_policy @@ -260,7 +260,7 @@ class BrcdFCZoneDriver(fc_zone_driver.FCZoneDriver): 'zone_activate') if zoning_policy_fab: zoning_policy = zoning_policy_fab - LOG.info(_LI("Zoning policy for fabric %(policy)s"), + LOG.info("Zoning policy for fabric %(policy)s", {'policy': zoning_policy}) conn = self._get_southbound_client(fabric) cfgmap_from_fabric = self._get_active_zone_set(conn) @@ -344,7 +344,7 @@ class BrcdFCZoneDriver(fc_zone_driver.FCZoneDriver): else: zones_to_delete.append(zone_name) else: - LOG.warning(_LW("Zoning policy not recognized: %(policy)s"), + LOG.warning("Zoning policy not recognized: %(policy)s", {'policy': zoning_policy}) LOG.debug("Zone map to update: %(zonemap)s", {'zonemap': zone_map}) @@ -420,7 +420,7 @@ class BrcdFCZoneDriver(fc_zone_driver.FCZoneDriver): LOG.exception(msg) raise exception.FCZoneDriverException(msg) with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Error getting name server info.")) + LOG.exception("Error getting name server info.") except Exception: msg = _("Failed to get name server info.") LOG.exception(msg) @@ -432,7 +432,7 @@ class BrcdFCZoneDriver(fc_zone_driver.FCZoneDriver): nsinfo) if visible_targets: - LOG.info(_LI("Filtered targets for SAN is: %(targets)s"), + LOG.info("Filtered targets for SAN is: %(targets)s", {'targets': visible_targets}) # getting rid of the ':' before returning for idx, elem in enumerate(visible_targets): @@ -460,7 +460,7 @@ class BrcdFCZoneDriver(fc_zone_driver.FCZoneDriver): LOG.error(msg) raise exception.FCZoneDriverException(msg) with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Error getting name server info.")) + LOG.exception("Error getting name server info.") except Exception as e: msg = (_("Failed to retrieve active zoning configuration %s") % six.text_type(e)) diff --git a/cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py b/cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py index 36af58c1961..6045cd16bf8 100644 --- a/cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py +++ b/cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py @@ -26,7 +26,7 @@ import six import time from cinder import exception -from cinder.i18n import _, _LI +from cinder.i18n import _ import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant @@ -310,7 +310,7 @@ class BrcdHTTPFCZoneClient(object): session_LF_Id = self.get_nvp_value(parsed_info, zone_constant.SESSION_LF_ID) if session_LF_Id == vfid: - LOG.info(_LI("VF 
context is changed in the session.")) + LOG.info("VF context is changed in the session.") else: msg = _("Cannot change VF context in the session.") LOG.error(msg) diff --git a/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py b/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py index 834d900ba85..943bd8aadf3 100644 --- a/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py +++ b/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py @@ -24,7 +24,7 @@ from oslo_utils import excutils import six from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _ from cinder import ssh_utils from cinder import utils from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as fabric_opts @@ -185,8 +185,7 @@ class CiscoFCSanLookupService(fc_service.FCSanLookupService): cli_output = self._get_switch_info(cmd) except exception.FCSanLookupServiceException: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed collecting show fcns database for" - " fabric")) + LOG.error("Failed collecting show fcns database for fabric") if cli_output: nsinfo_list = self._parse_ns_output(cli_output) @@ -270,7 +269,7 @@ class CiscoFCSanLookupService(fc_service.FCSanLookupService): cmd=command) except Exception: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Error running SSH command: %s"), command) + LOG.error("Error running SSH command: %s", command) def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1): """Execute cli with status update. diff --git a/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py b/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py index 1bf5f7d2707..844cb0fbd1c 100644 --- a/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py +++ b/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py @@ -28,7 +28,7 @@ from oslo_utils import excutils import six from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder import ssh_utils from cinder import utils import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant @@ -90,8 +90,8 @@ class CiscoFCZoneClientCLI(object): ' | no-more']) except exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed getting active zone set " - "from fabric %s"), self.switch_ip) + LOG.error("Failed getting active zone set " + "from fabric %s", self.switch_ip) try: for line in switch_data: # Split on non-word characters, @@ -286,8 +286,8 @@ class CiscoFCZoneClientCLI(object): [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan]) except exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed getting zone status " - "from fabric %s"), self.switch_ip) + LOG.error("Failed getting zone status " + "from fabric %s", self.switch_ip) try: for line in switch_data: # Split on non-word characters, @@ -367,13 +367,13 @@ class CiscoFCZoneClientCLI(object): self.fabric_vsan]) except exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): - LOG.error(_LE("Failed collecting fcns database " - "info for fabric %s"), self.switch_ip) + LOG.error("Failed collecting fcns database " + "info for fabric %s", self.switch_ip) if (cli_output): return_list = self._parse_ns_output(cli_output) - LOG.info(_LI("Connector returning fcnsinfo-%s"), return_list) + LOG.info("Connector returning fcnsinfo-%s", return_list) return return_list @@ -439,7 +439,7 @@ class CiscoFCZoneClientCLI(object): 
except Exception: with excutils.save_and_reraise_exception(): - LOG.warning(_LW("Error running SSH command: %s"), command) + LOG.warning("Error running SSH command: %s", command) def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1): """Execute cli with status update. @@ -492,7 +492,7 @@ class CiscoFCZoneClientCLI(object): else: return True except Exception as e: - LOG.exception(_LE('Error executing SSH command.')) + LOG.exception('Error executing SSH command.') last_exception = e greenthread.sleep(random.randint(20, 500) / 100.0) LOG.debug("Handling error case after SSH: %s", last_exception) @@ -510,7 +510,7 @@ class CiscoFCZoneClientCLI(object): cmd=command) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Error executing command via ssh.")) + LOG.exception("Error executing command via ssh.") finally: if stdin: stdin.flush() diff --git a/cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py b/cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py index 607ac1a9717..993cbe90001 100644 --- a/cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py +++ b/cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py @@ -36,7 +36,7 @@ import six import string from cinder import exception -from cinder.i18n import _, _LE, _LI +from cinder.i18n import _ from cinder import interface from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as fabric_opts from cinder.zonemanager.drivers.cisco import fc_zone_constants as ZoneConstant @@ -134,8 +134,8 @@ class CiscoFCZoneDriver(fc_zone_driver.FCZoneDriver): """ LOG.debug("Add connection for Fabric: %s", fabric) - LOG.info(_LI("CiscoFCZoneDriver - Add connection " - "for I-T map: %s"), initiator_target_map) + LOG.info("CiscoFCZoneDriver - Add connection " + "for I-T map: %s", initiator_target_map) fabric_ip = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_address') fabric_user = self.fabric_configs[fabric].safe_get( @@ -152,7 +152,7 @@ class CiscoFCZoneDriver(fc_zone_driver.FCZoneDriver): zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan') - LOG.info(_LI("Zoning policy for Fabric %s"), zoning_policy) + LOG.info("Zoning policy for Fabric %s", zoning_policy) statusmap_from_fabric = self.get_zoning_status( fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) @@ -191,8 +191,8 @@ class CiscoFCZoneDriver(fc_zone_driver.FCZoneDriver): zone_map[zone_name] = zone_members else: # This is I-T zoning, skip if zone exists. - LOG.info(_LI("Zone exists in I-T mode. " - "Skipping zone creation %s"), + LOG.info("Zone exists in I-T mode. 
" + "Skipping zone creation %s", zone_name) elif zoning_policy == 'initiator': zone_members = [ @@ -228,9 +228,9 @@ class CiscoFCZoneDriver(fc_zone_driver.FCZoneDriver): LOG.error(msg) raise exception.FCZoneDriverException(msg) - LOG.info(_LI("Zone map to add: %(zone_map)s"), + LOG.info("Zone map to add: %(zone_map)s", {'zone_map': zone_map}) - LOG.info(_LI("Zone map to update add: %(zone_update_map)s"), + LOG.info("Zone map to update add: %(zone_update_map)s", {'zone_update_map': zone_update_map}) if zone_map or zone_update_map: conn = None @@ -283,7 +283,7 @@ class CiscoFCZoneDriver(fc_zone_driver.FCZoneDriver): :param initiator_target_map: Mapping of initiator to list of targets """ LOG.debug("Delete connection for fabric: %s", fabric) - LOG.info(_LI("CiscoFCZoneDriver - Delete connection for I-T map: %s"), + LOG.info("CiscoFCZoneDriver - Delete connection for I-T map: %s", initiator_target_map) fabric_ip = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_address') @@ -302,7 +302,7 @@ class CiscoFCZoneDriver(fc_zone_driver.FCZoneDriver): zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan') - LOG.info(_LI("Zoning policy for fabric %s"), zoning_policy) + LOG.info("Zoning policy for fabric %s", zoning_policy) statusmap_from_fabric = self.get_zoning_status( fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) @@ -391,7 +391,7 @@ class CiscoFCZoneDriver(fc_zone_driver.FCZoneDriver): else: zones_to_delete.append(zone_name) else: - LOG.info(_LI("Zoning Policy: %s, not recognized"), + LOG.info("Zoning Policy: %s, not recognized", zoning_policy) LOG.debug("Zone map to remove update: %s", zone_update_map) LOG.debug("Final Zone list to delete: %s", zones_to_delete) @@ -484,8 +484,7 @@ class CiscoFCZoneDriver(fc_zone_driver.FCZoneDriver): conn.cleanup() except exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Error getting show fcns database " - "info.")) + LOG.exception("Error getting show fcns database info.") except Exception: msg = _("Failed to get show fcns database info.") LOG.exception(msg) @@ -494,7 +493,7 @@ class CiscoFCZoneDriver(fc_zone_driver.FCZoneDriver): lambda x: x in formatted_target_list, nsinfo) if visible_targets: - LOG.info(_LI("Filtered targets for SAN is: %s"), + LOG.info("Filtered targets for SAN is: %s", {fabric_name: visible_targets}) # getting rid of the ':' before returning for idx, elem in enumerate(visible_targets): diff --git a/cinder/zonemanager/drivers/driver_utils.py b/cinder/zonemanager/drivers/driver_utils.py index 1a88a9a2a17..2927ed4b58b 100644 --- a/cinder/zonemanager/drivers/driver_utils.py +++ b/cinder/zonemanager/drivers/driver_utils.py @@ -20,8 +20,6 @@ import re from oslo_log import log -from cinder.i18n import _LI - LOG = log.getLogger(__name__) @@ -61,8 +59,8 @@ def get_friendly_zone_name(zoning_policy, initiator, target, zone_name = (zone_name_prefix + initiator.replace(':', '') + target.replace(':', '')) - LOG.info(_LI("Zone name created using prefix because either " - "host name or storage system is none.")) + LOG.info("Zone name created using prefix because either " + "host name or storage system is none.") else: host_name = host_name[:47] if len(host_name) > 0: @@ -71,10 +69,10 @@ def get_friendly_zone_name(zoning_policy, initiator, target, else: zone_name = (zone_name_prefix + initiator.replace(':', '')) - LOG.info(_LI("Zone name created using prefix because host " - "name is none.")) + LOG.info("Zone name created using prefix because host " + "name is none.") - 
LOG.info(_LI("Friendly zone name after forming: %(zonename)s"), + LOG.info("Friendly zone name after forming: %(zonename)s", {'zonename': zone_name}) zone_name = re.sub('[^%s]' % supported_chars, '', zone_name) return zone_name diff --git a/cinder/zonemanager/fc_san_lookup_service.py b/cinder/zonemanager/fc_san_lookup_service.py index 9e1b58dd3a5..5ac46235a3f 100644 --- a/cinder/zonemanager/fc_san_lookup_service.py +++ b/cinder/zonemanager/fc_san_lookup_service.py @@ -27,7 +27,7 @@ from oslo_log import log as logging from oslo_utils import importutils from cinder import exception -from cinder.i18n import _, _LE +from cinder.i18n import _ from cinder.volume import configuration as config from cinder.zonemanager import fc_common from cinder.zonemanager import fc_zone_manager @@ -92,6 +92,6 @@ class FCSanLookupService(fc_common.FCCommon): device_map = self.lookup_service.get_device_mapping_from_network( initiator_list, target_list) except Exception as e: - LOG.exception(_LE('Unable to get device mapping from network.')) + LOG.exception('Unable to get device mapping from network.') raise exception.FCSanLookupServiceException(e) return device_map diff --git a/cinder/zonemanager/fc_zone_manager.py b/cinder/zonemanager/fc_zone_manager.py index 2e5b52960c3..e161ec9ac60 100644 --- a/cinder/zonemanager/fc_zone_manager.py +++ b/cinder/zonemanager/fc_zone_manager.py @@ -37,7 +37,7 @@ from oslo_utils import importutils import six from cinder import exception -from cinder.i18n import _, _LE, _LI, _LW +from cinder.i18n import _ from cinder.volume import configuration as config from cinder.zonemanager import fc_common import cinder.zonemanager.fczm_constants as zone_constant @@ -122,10 +122,10 @@ class ZoneManager(fc_common.FCCommon): self._log_unsupported_driver_warning() if not self.configuration.enable_unsupported_driver: - LOG.error(_LE("Unsupported drivers are disabled." - " You can re-enable by adding " - "enable_unsupported_driver=True to the " - "fc-zone-manager section in cinder.conf"), + LOG.error("Unsupported drivers are disabled." + " You can re-enable by adding " + "enable_unsupported_driver=True to the " + "fc-zone-manager section in cinder.conf", resource={'type': 'zone_manager', 'id': self.__class__.__name__}) return @@ -142,7 +142,7 @@ class ZoneManager(fc_common.FCCommon): def _require_initialized(self): """Verifies that the zone manager has been properly initialized.""" if not self.initialized: - LOG.error(_LE("Fibre Channel Zone Manager is not initialized.""")) + LOG.error("Fibre Channel Zone Manager is not initialized.") raise exception.ZoneManagerNotInitialized() else: self._log_unsupported_driver_warning() @@ -150,10 +150,10 @@ class ZoneManager(fc_common.FCCommon): def _log_unsupported_driver_warning(self): """Annoy the log about unsupported fczm drivers.""" if not self.driver.supported: - LOG.warning(_LW("Zone Manager driver (%(driver_name)s %(version)s)" - " is currently unsupported and may be removed in " - "the next release of OpenStack. Use at your own " - "risk."), + LOG.warning("Zone Manager driver (%(driver_name)s %(version)s)" + " is currently unsupported and may be removed in " + "the next release of OpenStack. 
Use at your own " + "risk.", {'driver_name': self.driver.__class__.__name__, 'version': self.driver.get_version()}, resource={'type': 'zone_manager', @@ -193,8 +193,8 @@ class ZoneManager(fc_common.FCCommon): self._log_unsupported_driver_warning() self._require_initialized() except exception.ZoneManagerNotInitialized: - LOG.error(_LE("Cannot add Fibre Channel Zone because the " - "Zone Manager is not initialized properly."), + LOG.error("Cannot add Fibre Channel Zone because the " + "Zone Manager is not initialized properly.", resource={'type': 'zone_manager', 'id': self.__class__.__name__}) return @@ -231,15 +231,15 @@ class ZoneManager(fc_common.FCCommon): i_t_map = {initiator: t_list} valid_i_t_map = self.get_valid_initiator_target_map( i_t_map, True) - LOG.info(_LI("Final filtered map for fabric: %(i_t_map)s"), + LOG.info("Final filtered map for fabric: %(i_t_map)s", {'i_t_map': valid_i_t_map}) # Call driver to add connection control self.driver.add_connection(fabric, valid_i_t_map, host_name, storage_system) - LOG.info(_LI("Add connection: finished iterating " - "over all target list")) + LOG.info("Add connection: finished iterating " + "over all target list") except Exception as e: msg = _("Failed adding connection for fabric=%(fabric)s: " "Error: %(err)s") % {'fabric': connected_fabric, @@ -270,8 +270,8 @@ class ZoneManager(fc_common.FCCommon): self._log_unsupported_driver_warning() self._require_initialized() except exception.ZoneManagerNotInitialized: - LOG.error(_LE("Cannot delete fibre channel zone because the " - "Zone Manager is not initialized properly."), + LOG.error("Cannot delete fibre channel zone because the " + "Zone Manager is not initialized properly.", resource={'type': 'zone_manager', 'id': self.__class__.__name__}) return @@ -291,7 +291,7 @@ class ZoneManager(fc_common.FCCommon): for initiator in initiator_target_map.keys(): target_list = initiator_target_map[initiator] - LOG.info(_LI("Delete connection target list: %(targets)s"), + LOG.info("Delete connection target list: %(targets)s", {'targets': target_list}) # get SAN context for the target list @@ -307,8 +307,8 @@ class ZoneManager(fc_common.FCCommon): i_t_map = {initiator: t_list} valid_i_t_map = self.get_valid_initiator_target_map( i_t_map, False) - LOG.info(_LI("Final filtered map for delete connection: " - "%(i_t_map)s"), {'i_t_map': valid_i_t_map}) + LOG.info("Final filtered map for delete connection: " + "%(i_t_map)s", {'i_t_map': valid_i_t_map}) # Call driver to delete connection control if len(valid_i_t_map) > 0: @@ -361,7 +361,7 @@ class ZoneManager(fc_common.FCCommon): if t_list: filtered_i_t_map[initiator] = t_list else: - LOG.info(_LI("No targets to add or remove connection for " - "initiator: %(init_wwn)s"), + LOG.info("No targets to add or remove connection for " + "initiator: %(init_wwn)s", {'init_wwn': initiator}) return filtered_i_t_map diff --git a/cinder/zonemanager/utils.py b/cinder/zonemanager/utils.py index 6e6b4d9825d..7628491bcdf 100644 --- a/cinder/zonemanager/utils.py +++ b/cinder/zonemanager/utils.py @@ -19,7 +19,6 @@ Utility functions related to the Zone Manager. 
""" from oslo_log import log -from cinder.i18n import _LI, _LW from cinder.volume import configuration from cinder.volume import manager from cinder.zonemanager import fc_san_lookup_service @@ -36,8 +35,8 @@ def create_zone_manager(): LOG.debug("FC Zone Manager enabled.") zm = fc_zone_manager.ZoneManager() if zm.initialized: - LOG.info(_LI("Using FC Zone Manager %(zm_version)s," - " Driver %(drv_name)s %(drv_version)s."), + LOG.info("Using FC Zone Manager %(zm_version)s," + " Driver %(drv_name)s %(drv_version)s.", {'zm_version': zm.get_version(), 'drv_name': zm.driver.__class__.__name__, 'drv_version': zm.driver.get_version()}) @@ -58,7 +57,7 @@ def create_lookup_service(): if config.safe_get('zoning_mode') == 'fabric': LOG.debug("FC Lookup Service enabled.") lookup = fc_san_lookup_service.FCSanLookupService() - LOG.info(_LI("Using FC lookup service %s."), lookup.lookup_service) + LOG.info("Using FC lookup service %s.", lookup.lookup_service) return lookup else: LOG.debug("FC Lookup Service not enabled in cinder.conf.") @@ -80,8 +79,8 @@ def add_fc_zone(initialize_connection): def decorator(self, *args, **kwargs): conn_info = initialize_connection(self, *args, **kwargs) if not conn_info: - LOG.warning(_LW("Driver didn't return connection info, " - "can't add zone.")) + LOG.warning("Driver didn't return connection info, " + "can't add zone.") return None vol_type = conn_info.get('driver_volume_type', None) @@ -104,8 +103,8 @@ def remove_fc_zone(terminate_connection): def decorator(self, *args, **kwargs): conn_info = terminate_connection(self, *args, **kwargs) if not conn_info: - LOG.warning(_LW("Driver didn't return connection info from " - "terminate_connection call.")) + LOG.warning("Driver didn't return connection info from " + "terminate_connection call.") return None vol_type = conn_info.get('driver_volume_type', None)