Remove str() from LOG.* and exceptions
This commit removes the use of str() from LOG.* messages and exceptions. str() has to go because it raises an error when it is handed a Unicode string containing characters that do not translate to ASCII, and when that happens the message in question is lost. In most cases str() is unnecessary for LOG.* and exception messages anyway: %s formatting is smart enough to figure out what to do with whatever it is passed. It first tries str(), and if that fails it falls back to unicode. Either way, the result is something that gettextutils can handle and translate.

Change-Id: I6eb81043edd9fa5e035d81ee81e8439340546d24
Closes-bug: 1274245
Related-bp: i18n-messages
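As a reader aid (not part of the original commit), here is a minimal Python 2 sketch of the failure mode described above; the exception class and message text are invented for illustration:

# Sketch only: why str() loses a non-ASCII message while %s keeps it.
class FakeError(Exception):
    """Stand-in for a Cinder exception carrying a translated message."""
    def __init__(self, msg):
        super(FakeError, self).__init__(msg)
        self.msg = msg

    def __unicode__(self):
        return self.msg

err = FakeError(u'volume cr\xe9ation failed')   # message with a non-ASCII char

try:
    str(err)                     # forces an ASCII encode of the unicode message
except UnicodeEncodeError:
    print('str(err) raised UnicodeEncodeError - the message would be lost')

formatted = u'error: %s' % err   # %s falls back to unicode(err) and succeeds
print('%%s formatting kept the message (%d characters)' % len(formatted))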
Parent: ee371dfc56
Commit: cbe1d5f5e2
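Where an explicit string is still needed, for example when a test compares an exception's message as the test hunks below do, the diff switches to six.text_type rather than str(). A small sketch of that pattern (the ValueError and its message are invented for the example):

import six

try:
    raise ValueError(u'\xe9chec de la sauvegarde')   # invented non-ASCII message
except ValueError as exc:
    message = six.text_type(exc)       # unicode on Python 2, str on Python 3
    assert message == u'\xe9chec de la sauvegarde'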
@@ -119,23 +119,23 @@ class QoSSpecsController(wsgi.Controller):
'QoSSpecs.create',
notifier_api.INFO, notifier_info)
except exception.InvalidInput as err:
notifier_err = dict(name=name, error_message=str(err))
notifier_err = dict(name=name, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.create',
notifier_err)
raise webob.exc.HTTPBadRequest(explanation=str(err))
raise webob.exc.HTTPBadRequest(explanation=err)
except exception.QoSSpecsExists as err:
notifier_err = dict(name=name, error_message=str(err))
notifier_err = dict(name=name, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.create',
notifier_err)
raise webob.exc.HTTPConflict(explanation=str(err))
raise webob.exc.HTTPConflict(explanation=err)
except exception.QoSSpecsCreateFailed as err:
notifier_err = dict(name=name, error_message=str(err))
notifier_err = dict(name=name, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.create',
notifier_err)
raise webob.exc.HTTPInternalServerError(explanation=str(err))
raise webob.exc.HTTPInternalServerError(explanation=err)

return self._view_builder.detail(req, spec)

@@ -154,23 +154,23 @@ class QoSSpecsController(wsgi.Controller):
'qos_specs.update',
notifier_api.INFO, notifier_info)
except exception.QoSSpecsNotFound as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.update',
notifier_err)
raise webob.exc.HTTPNotFound(explanation=str(err))
raise webob.exc.HTTPNotFound(explanation=err)
except exception.InvalidQoSSpecs as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.update',
notifier_err)
raise webob.exc.HTTPBadRequest(explanation=str(err))
raise webob.exc.HTTPBadRequest(explanation=err)
except exception.QoSSpecsUpdateFailed as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.update',
notifier_err)
raise webob.exc.HTTPInternalServerError(explanation=str(err))
raise webob.exc.HTTPInternalServerError(explanation=err)

return body

@@ -183,7 +183,7 @@ class QoSSpecsController(wsgi.Controller):
try:
spec = qos_specs.get_qos_specs(context, id)
except exception.QoSSpecsNotFound as err:
raise webob.exc.HTTPNotFound(explanation=str(err))
raise webob.exc.HTTPNotFound(explanation=err)

return self._view_builder.detail(req, spec)

@@ -206,13 +206,13 @@ class QoSSpecsController(wsgi.Controller):
'qos_specs.delete',
notifier_api.INFO, notifier_info)
except exception.QoSSpecsNotFound as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.delete',
notifier_err)
raise webob.exc.HTTPNotFound(explanation=str(err))
raise webob.exc.HTTPNotFound(explanation=err)
except exception.QoSSpecsInUse as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.delete',
notifier_err)
@@ -244,17 +244,17 @@ class QoSSpecsController(wsgi.Controller):
'qos_specs.delete_keys',
notifier_api.INFO, notifier_info)
except exception.QoSSpecsNotFound as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.delete_keys',
notifier_err)
raise webob.exc.HTTPNotFound(explanation=str(err))
raise webob.exc.HTTPNotFound(explanation=err)
except exception.QoSSpecsKeyNotFound as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.delete_keys',
notifier_err)
raise webob.exc.HTTPBadRequest(explanation=str(err))
raise webob.exc.HTTPBadRequest(explanation=err)

return webob.Response(status_int=202)

@@ -273,17 +273,17 @@ class QoSSpecsController(wsgi.Controller):
'qos_specs.associations',
notifier_api.INFO, notifier_info)
except exception.QoSSpecsNotFound as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.associations',
notifier_err)
raise webob.exc.HTTPNotFound(explanation=str(err))
raise webob.exc.HTTPNotFound(explanation=err)
except exception.CinderException as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.associations',
notifier_err)
raise webob.exc.HTTPInternalServerError(explanation=str(err))
raise webob.exc.HTTPInternalServerError(explanation=err)

return self._view_builder.associations(req, associates)

@@ -311,32 +311,32 @@ class QoSSpecsController(wsgi.Controller):
'qos_specs.associate',
notifier_api.INFO, notifier_info)
except exception.VolumeTypeNotFound as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.associate',
notifier_err)
raise webob.exc.HTTPNotFound(explanation=str(err))
raise webob.exc.HTTPNotFound(explanation=err)
except exception.QoSSpecsNotFound as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.associate',
notifier_err)
raise webob.exc.HTTPNotFound(explanation=str(err))
raise webob.exc.HTTPNotFound(explanation=err)
except exception.InvalidVolumeType as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.associate',
notifier_err)
self._notify_qos_specs_error(context,
'qos_specs.associate',
notifier_err)
raise webob.exc.HTTPBadRequest(explanation=str(err))
raise webob.exc.HTTPBadRequest(explanation=err)
except exception.QoSSpecsAssociateFailed as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.associate',
notifier_err)
raise webob.exc.HTTPInternalServerError(explanation=str(err))
raise webob.exc.HTTPInternalServerError(explanation=err)

return webob.Response(status_int=202)

@@ -364,23 +364,23 @@ class QoSSpecsController(wsgi.Controller):
'qos_specs.disassociate',
notifier_api.INFO, notifier_info)
except exception.VolumeTypeNotFound as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.disassociate',
notifier_err)
raise webob.exc.HTTPNotFound(explanation=str(err))
raise webob.exc.HTTPNotFound(explanation=err)
except exception.QoSSpecsNotFound as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.disassociate',
notifier_err)
raise webob.exc.HTTPNotFound(explanation=str(err))
raise webob.exc.HTTPNotFound(explanation=err)
except exception.QoSSpecsDisassociateFailed as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.disassociate',
notifier_err)
raise webob.exc.HTTPInternalServerError(explanation=str(err))
raise webob.exc.HTTPInternalServerError(explanation=err)

return webob.Response(status_int=202)

@@ -398,17 +398,17 @@ class QoSSpecsController(wsgi.Controller):
'qos_specs.disassociate_all',
notifier_api.INFO, notifier_info)
except exception.QoSSpecsNotFound as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.disassociate_all',
notifier_err)
raise webob.exc.HTTPNotFound(explanation=str(err))
raise webob.exc.HTTPNotFound(explanation=err)
except exception.QoSSpecsDisassociateFailed as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_qos_specs_error(context,
'qos_specs.disassociate_all',
notifier_err)
raise webob.exc.HTTPInternalServerError(explanation=str(err))
raise webob.exc.HTTPInternalServerError(explanation=err)

return webob.Response(status_int=202)
@@ -67,14 +67,14 @@ class VolumeTypesManageController(wsgi.Controller):
notifier_api.INFO, notifier_info)

except exception.VolumeTypeExists as err:
notifier_err = dict(volume_types=vol_type, error_message=str(err))
notifier_err = dict(volume_types=vol_type, error_message=err)
self._notify_volume_type_error(context,
'volume_type.create',
notifier_err)

raise webob.exc.HTTPConflict(explanation=str(err))
raise webob.exc.HTTPConflict(explanation=err)
except exception.NotFound as err:
notifier_err = dict(volume_types=vol_type, error_message=str(err))
notifier_err = dict(volume_types=vol_type, error_message=err)
self._notify_volume_type_error(context,
'volume_type.create',
notifier_err)
@@ -96,14 +96,14 @@ class VolumeTypesManageController(wsgi.Controller):
'volume_type.delete',
notifier_api.INFO, notifier_info)
except exception.VolumeTypeInUse as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_volume_type_error(context,
'volume_type.delete',
notifier_err)
msg = 'Target volume type is still in use.'
raise webob.exc.HTTPBadRequest(explanation=msg)
except exception.NotFound as err:
notifier_err = dict(id=id, error_message=str(err))
notifier_err = dict(id=id, error_message=err)
self._notify_volume_type_error(context,
'volume_type.delete',
notifier_err)
@@ -814,7 +814,7 @@ class CephBackupDriver(BackupDriver):
vol_meta_backup = VolumeMetadataBackup(client, backup['id'])
vol_meta_backup.set(json_meta)
except exception.VolumeMetadataBackupExists as e:
msg = _("Failed to backup volume metadata - %s") % (str(e))
msg = (_("Failed to backup volume metadata - %s") % (e))
raise exception.BackupOperationError(msg)

def backup(self, backup, volume_file, backup_metadata=True):
@@ -33,11 +33,11 @@
import hashlib
import json
import os
import six
import socket

import eventlet
from oslo.config import cfg
import six

from cinder.backup.driver import BackupDriver
from cinder import exception
@@ -225,7 +225,7 @@ class SwiftBackupDriver(BackupDriver):
try:
container = self._create_container(self.context, backup)
except socket.error as err:
raise exception.SwiftConnectionFailed(reason=str(err))
raise exception.SwiftConnectionFailed(reason=err)

object_prefix = self._generate_swift_object_name_prefix(backup)
backup['service_metadata'] = object_prefix
@@ -282,7 +282,7 @@ class SwiftBackupDriver(BackupDriver):
etag = self.conn.put_object(container, object_name, reader,
content_length=len(data))
except socket.error as err:
raise exception.SwiftConnectionFailed(reason=str(err))
raise exception.SwiftConnectionFailed(reason=err)
LOG.debug(_('swift MD5 for %(object_name)s: %(etag)s') %
{'object_name': object_name, 'etag': etag, })
md5 = hashlib.md5(data).hexdigest()
@@ -313,7 +313,7 @@ class SwiftBackupDriver(BackupDriver):
object_list,
volume_meta)
except socket.error as err:
raise exception.SwiftConnectionFailed(reason=str(err))
raise exception.SwiftConnectionFailed(reason=err)
self.db.backup_update(self.context, backup['id'],
{'object_count': object_id})
LOG.debug(_('backup %s finished.') % backup['id'])
@@ -388,7 +388,7 @@ class SwiftBackupDriver(BackupDriver):
try:
(resp, body) = self.conn.get_object(container, object_name)
except socket.error as err:
raise exception.SwiftConnectionFailed(reason=str(err))
raise exception.SwiftConnectionFailed(reason=err)
compression_algorithm = metadata_object[object_name]['compression']
decompressor = self._get_compressor(compression_algorithm)
if decompressor is not None:
@@ -435,7 +435,7 @@ class SwiftBackupDriver(BackupDriver):
try:
metadata = self._read_metadata(backup)
except socket.error as err:
raise exception.SwiftConnectionFailed(reason=str(err))
raise exception.SwiftConnectionFailed(reason=err)
metadata_version = metadata['version']
LOG.debug(_('Restoring swift backup version %s'), metadata_version)
try:
@@ -479,7 +479,7 @@ class SwiftBackupDriver(BackupDriver):
try:
self.conn.delete_object(container, swift_object_name)
except socket.error as err:
raise exception.SwiftConnectionFailed(reason=str(err))
raise exception.SwiftConnectionFailed(reason=err)
except Exception:
LOG.warn(_('swift error while deleting object %s, '
'continuing with delete') % swift_object_name)
@@ -27,7 +27,6 @@ import time
from cinder.brick import exception
from cinder.brick import executor
from cinder.openstack.common import fileutils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils as putils

@@ -154,7 +153,7 @@ class TgtAdm(TargetAdmin):
LOG.error(_("Failed to recover attempt to create "
"iscsi backing lun for volume "
"id:%(vol_id)s: %(e)s")
% {'vol_id': name, 'e': str(e)})
% {'vol_id': name, 'e': e})

def create_iscsi_target(self, name, tid, lun, path,
chap_auth=None, **kwargs):
@@ -211,7 +210,7 @@ class TgtAdm(TargetAdmin):
except putils.ProcessExecutionError as e:
LOG.warning(_("Failed to create iscsi target for volume "
"id:%(vol_id)s: %(e)s")
% {'vol_id': vol_id, 'e': str(e)})
% {'vol_id': vol_id, 'e': e})

#Don't forget to remove the persistent file we created
os.unlink(volume_path)
@@ -274,7 +273,7 @@ class TgtAdm(TargetAdmin):
except putils.ProcessExecutionError as e:
LOG.error(_("Failed to remove iscsi target for volume "
"id:%(vol_id)s: %(e)s")
% {'vol_id': vol_id, 'e': str(e)})
% {'vol_id': vol_id, 'e': e})
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)

# NOTE(jdg): This *should* be there still but incase
@@ -367,7 +366,7 @@ class IetAdm(TargetAdmin):
vol_id = name.split(':')[1]
LOG.error(_("Failed to create iscsi target for volume "
"id:%(vol_id)s: %(e)s")
% {'vol_id': vol_id, 'e': str(e)})
% {'vol_id': vol_id, 'e': e})
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
return tid

@@ -512,7 +511,7 @@ class LioAdm(TargetAdmin):
except putils.ProcessExecutionError as e:
LOG.error(_("Failed to create iscsi target for volume "
"id:%s.") % vol_id)
LOG.error("%s" % str(e))
LOG.error("%s" % e)

raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

@@ -538,7 +537,7 @@ class LioAdm(TargetAdmin):
except putils.ProcessExecutionError as e:
LOG.error(_("Failed to remove iscsi target for volume "
"id:%s.") % vol_id)
LOG.error("%s" % str(e))
LOG.error("%s" % e)
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)

def show_target(self, tid, iqn=None, **kwargs):
@@ -173,7 +173,7 @@ class GlanceClientWrapper(object):
'extra': extra,
}
LOG.exception(error_msg)
raise exception.GlanceConnectionFailed(reason=str(e))
raise exception.GlanceConnectionFailed(reason=e)
LOG.exception(error_msg)
time.sleep(1)
@@ -225,8 +225,8 @@ class TestCase(testtools.TestCase):

"""
def raise_assertion(msg):
d1str = str(d1)
d2str = str(d2)
d1str = d1
d2str = d2
base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '
'd2: %(d2str)s' %
{'msg': msg, 'd1str': d1str, 'd2str': d2str})
@@ -82,7 +82,7 @@ def return_qos_specs_update(context, id, specs):
if id == "777":
raise exception.QoSSpecsNotFound(specs_id=id)
elif id == "888":
raise exception.InvalidQoSSpecs(reason=str(id))
raise exception.InvalidQoSSpecs(reason=id)
elif id == "999":
raise exception.QoSSpecsUpdateFailed(specs_id=id,
qos_specs=specs)
@@ -18,10 +18,10 @@ Tests dealing with HTTP rate-limiting.
"""

import httplib
import six
from xml.dom import minidom

from lxml import etree
import six
import webob

from cinder.api.v1 import limits
@@ -362,7 +362,7 @@ class ParseLimitsTest(BaseLimitTestSuite):
'(POST, /bar*, /bar.*, 5, second);'
'(Say, /derp*, /derp.*, 1, day)')
except ValueError as e:
assert False, str(e)
assert False, e

# Make sure the number of returned limits are correct
self.assertEqual(len(l), 4)
@@ -17,6 +17,7 @@
import hashlib
import mock
import os
import six
import tempfile
import uuid

@@ -813,7 +814,7 @@ class BackupCephTestCase(test.TestCase):
self.service._restore_metadata(self.backup, self.volume_id)
except exception.BackupOperationError as exc:
msg = _("Metadata restore failed due to incompatible version")
self.assertEqual(str(exc), msg)
self.assertEqual(six.text_type(exc), msg)
else:
# Force a test failure
self.assertFalse(True)
@@ -839,7 +840,7 @@ class BackupCephTestCase(test.TestCase):
msg = (_("Failed to backup volume metadata - Metadata backup "
"object 'backup.%s.meta' already exists") %
(self.backup_id))
self.assertEqual(str(e), msg)
self.assertEqual(six.text_type(e), msg)
else:
# Make the test fail
self.assertFalse(True)
@@ -152,12 +152,12 @@ def check_ssh_injection(cmd_list):
if quoted:
if (re.match('[\'"]', quoted) or
re.search('[^\\\\][\'"]', quoted)):
raise exception.SSHInjectionThreat(command=str(cmd_list))
raise exception.SSHInjectionThreat(command=cmd_list)
else:
# We only allow spaces within quoted arguments, and that
# is the only special character allowed within quotes
if len(arg.split()) > 1:
raise exception.SSHInjectionThreat(command=str(cmd_list))
raise exception.SSHInjectionThreat(command=cmd_list)

# Second, check whether danger character in command. So the shell
# special operator must be a single argument.
@@ -696,7 +696,7 @@ def tempdir(**kwargs):
try:
shutil.rmtree(tmpdir)
except OSError as e:
LOG.debug(_('Could not remove tmpdir: %s'), str(e))
LOG.debug(_('Could not remove tmpdir: %s'), e)


def walk_class_hierarchy(clazz, encountered=None):
@@ -296,7 +296,7 @@ class API(base.Base):
filters['no_migration_targets'] = True

if filters:
LOG.debug(_("Searching by: %s") % str(filters))
LOG.debug(_("Searching by: %s") % filters)

def _check_metadata_match(volume, searchdict):
volume_metadata = {}
@@ -361,7 +361,7 @@ class API(base.Base):
context, context.project_id)

if search_opts:
LOG.debug(_("Searching by: %s") % str(search_opts))
LOG.debug(_("Searching by: %s") % search_opts)

results = []
not_found = object()
@@ -908,7 +908,7 @@ class API(base.Base):

if migration_policy and migration_policy not in ['on-demand', 'never']:
msg = _('migration_policy must be \'on-demand\' or \'never\', '
'passed: %s') % str(new_type)
'passed: %s') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)

@@ -920,7 +920,7 @@ class API(base.Base):
vol_type = volume_types.get_volume_type_by_name(context,
new_type)
except exception.InvalidVolumeType:
msg = _('Invalid volume_type passed: %s') % str(new_type)
msg = _('Invalid volume_type passed: %s') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)

@@ -933,7 +933,7 @@ class API(base.Base):

# Error if the original and new type are the same
if volume['volume_type_id'] == vol_type_id:
msg = _('New volume_type same as original: %s') % str(new_type)
msg = (_('New volume_type same as original: %s') % new_type)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
@ -281,8 +281,8 @@ class CoraidAppliance(object):
|
||||
try:
|
||||
self.rpc('fetch', {}, None, allow_empty_response=True)
|
||||
except Exception as e:
|
||||
LOG.debug(_('Coraid Appliance ping failed: %s'), str(e))
|
||||
raise exception.CoraidESMNotAvailable(reason=str(e))
|
||||
LOG.debug(_('Coraid Appliance ping failed: %s'), e)
|
||||
raise exception.CoraidESMNotAvailable(reason=e)
|
||||
|
||||
def create_lun(self, repository_name, volume_name, volume_size_in_gb):
|
||||
request = {'addr': 'cms',
|
||||
|
@ -101,7 +101,7 @@ class EMCSMISCommon():
|
||||
LOG.debug(_('Create Volume: %(volume)s Pool: %(pool)s '
|
||||
'Storage System: %(storage_system)s')
|
||||
% {'volume': volumename,
|
||||
'pool': str(pool),
|
||||
'pool': pool,
|
||||
'storage_system': storage_system})
|
||||
|
||||
configservice = self._find_storage_configuration_service(
|
||||
@ -121,9 +121,9 @@ class EMCSMISCommon():
|
||||
'CreateOrModifyElementFromStoragePool ConfigServicie: '
|
||||
'%(service)s ElementName: %(name)s InPool: %(pool)s '
|
||||
'ElementType: %(provisioning)s Size: %(size)lu')
|
||||
% {'service': str(configservice),
|
||||
% {'service': configservice,
|
||||
'name': volumename,
|
||||
'pool': str(pool),
|
||||
'pool': pool,
|
||||
'provisioning': provisioning,
|
||||
'size': volumesize})
|
||||
|
||||
@ -193,7 +193,7 @@ class EMCSMISCommon():
|
||||
'%(snapshotinstance)s Storage System: %(storage_system)s.')
|
||||
% {'volumename': volumename,
|
||||
'snapshotname': snapshotname,
|
||||
'snapshotinstance': str(snapshot_instance.path),
|
||||
'snapshotinstance': snapshot_instance.path,
|
||||
'storage_system': storage_system})
|
||||
|
||||
isVMAX = storage_system.find('SYMMETRIX')
|
||||
@ -225,9 +225,9 @@ class EMCSMISCommon():
|
||||
'%(sourceelement)s')
|
||||
% {'volumename': volumename,
|
||||
'snapshotname': snapshotname,
|
||||
'service': str(repservice),
|
||||
'service': repservice,
|
||||
'elementname': volumename,
|
||||
'sourceelement': str(snapshot_instance.path)})
|
||||
'sourceelement': snapshot_instance.path})
|
||||
|
||||
# Create a Clone from snapshot
|
||||
rc, job = self.conn.InvokeMethod(
|
||||
@ -284,8 +284,8 @@ class EMCSMISCommon():
|
||||
'Synchronization: %(sync_name)s')
|
||||
% {'volumename': volumename,
|
||||
'snapshotname': snapshotname,
|
||||
'service': str(repservice),
|
||||
'sync_name': str(sync_name)})
|
||||
'service': repservice,
|
||||
'sync_name': sync_name})
|
||||
|
||||
rc, job = self.conn.InvokeMethod(
|
||||
'ModifyReplicaSynchronization',
|
||||
@ -345,7 +345,7 @@ class EMCSMISCommon():
|
||||
'%(src_instance)s Storage System: %(storage_system)s.')
|
||||
% {'volumename': volumename,
|
||||
'srcname': srcname,
|
||||
'src_instance': str(src_instance.path),
|
||||
'src_instance': src_instance.path,
|
||||
'storage_system': storage_system})
|
||||
|
||||
repservice = self._find_replication_service(storage_system)
|
||||
@ -366,9 +366,9 @@ class EMCSMISCommon():
|
||||
'%(sourceelement)s')
|
||||
% {'volumename': volumename,
|
||||
'srcname': srcname,
|
||||
'service': str(repservice),
|
||||
'service': repservice,
|
||||
'elementname': volumename,
|
||||
'sourceelement': str(src_instance.path)})
|
||||
'sourceelement': src_instance.path})
|
||||
|
||||
# Create a Clone from source volume
|
||||
rc, job = self.conn.InvokeMethod(
|
||||
@ -425,8 +425,8 @@ class EMCSMISCommon():
|
||||
'Synchronization: %(sync_name)s')
|
||||
% {'volumename': volumename,
|
||||
'srcname': srcname,
|
||||
'service': str(repservice),
|
||||
'sync_name': str(sync_name)})
|
||||
'service': repservice,
|
||||
'sync_name': sync_name})
|
||||
|
||||
rc, job = self.conn.InvokeMethod(
|
||||
'ModifyReplicaSynchronization',
|
||||
@ -499,9 +499,9 @@ class EMCSMISCommon():
|
||||
|
||||
LOG.debug(_('Delete Volume: %(name)s Method: EMCReturnToStoragePool '
|
||||
'ConfigServic: %(service)s TheElement: %(vol_instance)s')
|
||||
% {'service': str(configservice),
|
||||
% {'service': configservice,
|
||||
'name': volumename,
|
||||
'vol_instance': str(vol_instance.path)})
|
||||
'vol_instance': vol_instance.path})
|
||||
|
||||
rc, job =\
|
||||
self.conn.InvokeMethod('EMCReturnToStoragePool',
|
||||
@ -561,9 +561,9 @@ class EMCSMISCommon():
|
||||
"Type: 7 SourceElement: %(sourceelement)s.")
|
||||
% {'snapshot': snapshotname,
|
||||
'volume': volumename,
|
||||
'service': str(repservice),
|
||||
'service': repservice,
|
||||
'elementname': snapshotname,
|
||||
'sourceelement': str(vol_instance.path)})
|
||||
'sourceelement': vol_instance.path})
|
||||
|
||||
rc, job =\
|
||||
self.conn.InvokeMethod('CreateElementReplica', repservice,
|
||||
@ -650,8 +650,8 @@ class EMCSMISCommon():
|
||||
"Synchronization: %(sync_name)s.")
|
||||
% {'snapshot': snapshotname,
|
||||
'volume': volumename,
|
||||
'service': str(repservice),
|
||||
'sync_name': str(sync_name)})
|
||||
'service': repservice,
|
||||
'sync_name': sync_name})
|
||||
|
||||
rc, job =\
|
||||
self.conn.InvokeMethod('ModifyReplicaSynchronization',
|
||||
@ -704,8 +704,8 @@ class EMCSMISCommon():
|
||||
LOG.debug(_('ExposePaths: %(vol)s ConfigServicie: %(service)s '
|
||||
'LUNames: %(lun_name)s InitiatorPortIDs: %(initiator)s '
|
||||
'DeviceAccesses: 2')
|
||||
% {'vol': str(vol_instance.path),
|
||||
'service': str(configservice),
|
||||
% {'vol': vol_instance.path,
|
||||
'service': configservice,
|
||||
'lun_name': lun_name,
|
||||
'initiator': initiators})
|
||||
|
||||
@ -719,7 +719,7 @@ class EMCSMISCommon():
|
||||
LOG.debug(_('ExposePaths parameter '
|
||||
'LunMaskingSCSIProtocolController: '
|
||||
'%(lunmasking)s')
|
||||
% {'lunmasking': str(lunmask_ctrl)})
|
||||
% {'lunmasking': lunmask_ctrl})
|
||||
rc, controller =\
|
||||
self.conn.InvokeMethod('ExposePaths',
|
||||
configservice, LUNames=[lun_name],
|
||||
@ -750,10 +750,10 @@ class EMCSMISCommon():
|
||||
LOG.debug(_('HidePaths: %(vol)s ConfigServicie: %(service)s '
|
||||
'LUNames: %(device_id)s LunMaskingSCSIProtocolController: '
|
||||
'%(lunmasking)s')
|
||||
% {'vol': str(vol_instance.path),
|
||||
'service': str(configservice),
|
||||
% {'vol': vol_instance.path,
|
||||
'service': configservice,
|
||||
'device_id': device_id,
|
||||
'lunmasking': str(lunmask_ctrl)})
|
||||
'lunmasking': lunmask_ctrl})
|
||||
|
||||
rc, controller = self.conn.InvokeMethod(
|
||||
'HidePaths', configservice,
|
||||
@ -779,9 +779,9 @@ class EMCSMISCommon():
|
||||
|
||||
LOG.debug(_('AddMembers: ConfigServicie: %(service)s MaskingGroup: '
|
||||
'%(masking_group)s Members: %(vol)s')
|
||||
% {'service': str(configservice),
|
||||
'masking_group': str(masking_group),
|
||||
'vol': str(vol_instance.path)})
|
||||
% {'service': configservice,
|
||||
'masking_group': masking_group,
|
||||
'vol': vol_instance.path})
|
||||
|
||||
rc, job =\
|
||||
self.conn.InvokeMethod('AddMembers',
|
||||
@ -812,9 +812,9 @@ class EMCSMISCommon():
|
||||
|
||||
LOG.debug(_('RemoveMembers: ConfigServicie: %(service)s '
|
||||
'MaskingGroup: %(masking_group)s Members: %(vol)s')
|
||||
% {'service': str(configservice),
|
||||
'masking_group': str(masking_group),
|
||||
'vol': str(vol_instance.path)})
|
||||
% {'service': configservice,
|
||||
'masking_group': masking_group,
|
||||
'vol': vol_instance.path})
|
||||
|
||||
rc, job = self.conn.InvokeMethod('RemoveMembers', configservice,
|
||||
MaskingGroup=masking_group,
|
||||
@ -948,7 +948,7 @@ class EMCSMISCommon():
|
||||
'CreateOrModifyElementFromStoragePool ConfigServicie: '
|
||||
'%(service)s ElementType: %(provisioning)s Size: %(size)lu'
|
||||
'Volume path: %(volumepath)s')
|
||||
% {'service': str(configservice),
|
||||
% {'service': configservice,
|
||||
'name': volumename,
|
||||
'provisioning': provisioning,
|
||||
'size': volumesize,
|
||||
@ -1104,7 +1104,7 @@ class EMCSMISCommon():
|
||||
if storage_system == repservice['SystemName']:
|
||||
foundRepService = repservice
|
||||
LOG.debug(_("Found Replication Service: %s")
|
||||
% (str(repservice)))
|
||||
% (repservice))
|
||||
break
|
||||
|
||||
return foundRepService
|
||||
@ -1117,7 +1117,7 @@ class EMCSMISCommon():
|
||||
if storage_system == configservice['SystemName']:
|
||||
foundConfigService = configservice
|
||||
LOG.debug(_("Found Storage Configuration Service: %s")
|
||||
% (str(configservice)))
|
||||
% (configservice))
|
||||
break
|
||||
|
||||
return foundConfigService
|
||||
@ -1130,7 +1130,7 @@ class EMCSMISCommon():
|
||||
if storage_system == configservice['SystemName']:
|
||||
foundConfigService = configservice
|
||||
LOG.debug(_("Found Controller Configuration Service: %s")
|
||||
% (str(configservice)))
|
||||
% (configservice))
|
||||
break
|
||||
|
||||
return foundConfigService
|
||||
@ -1143,7 +1143,7 @@ class EMCSMISCommon():
|
||||
if storage_system == configservice['SystemName']:
|
||||
foundConfigService = configservice
|
||||
LOG.debug(_("Found Storage Hardware ID Management Service: %s")
|
||||
% (str(configservice)))
|
||||
% (configservice))
|
||||
break
|
||||
|
||||
return foundConfigService
|
||||
@ -1199,7 +1199,8 @@ class EMCSMISCommon():
|
||||
raise exception.VolumeBackendAPIException(data=exception_message)
|
||||
|
||||
LOG.debug(_("Pool: %(pool)s SystemName: %(systemname)s.")
|
||||
% {'pool': str(foundPool), 'systemname': systemname})
|
||||
% {'pool': foundPool,
|
||||
'systemname': systemname})
|
||||
return foundPool, systemname
|
||||
|
||||
def _parse_pool_instance_id(self, instanceid):
|
||||
@ -1235,7 +1236,7 @@ class EMCSMISCommon():
|
||||
LOG.debug(_("Volume name: %(volumename)s Volume instance: "
|
||||
"%(vol_instance)s.")
|
||||
% {'volumename': volumename,
|
||||
'vol_instance': str(foundinstance.path)})
|
||||
'vol_instance': foundinstance.path})
|
||||
|
||||
return foundinstance
|
||||
|
||||
@ -1267,7 +1268,7 @@ class EMCSMISCommon():
|
||||
LOG.debug(_("Storage system: %(storage_system)s "
|
||||
"Storage Synchronized instance: %(sync)s.")
|
||||
% {'storage_system': storage_system,
|
||||
'sync': str(foundsyncname)})
|
||||
'sync': foundsyncname})
|
||||
# Wait for SE_StorageSynchronized_SV_SV to be fully synced
|
||||
while waitforsync and percent_synced < 100:
|
||||
time.sleep(10)
|
||||
@ -1359,7 +1360,7 @@ class EMCSMISCommon():
|
||||
"%(ctrl)s.")
|
||||
% {'storage_system': storage_system,
|
||||
'initiator': initiators,
|
||||
'ctrl': str(foundCtrl)})
|
||||
'ctrl': foundCtrl})
|
||||
return foundCtrl
|
||||
|
||||
# Find LunMaskingSCSIProtocolController for the local host and the
|
||||
@ -1397,8 +1398,9 @@ class EMCSMISCommon():
|
||||
|
||||
LOG.debug(_("LunMaskingSCSIProtocolController for storage volume "
|
||||
"%(vol)s and initiator %(initiator)s is %(ctrl)s.")
|
||||
% {'vol': str(vol_instance.path), 'initiator': initiators,
|
||||
'ctrl': str(foundCtrl)})
|
||||
% {'vol': vol_instance.path,
|
||||
'initiator': initiators,
|
||||
'ctrl': foundCtrl})
|
||||
return foundCtrl
|
||||
|
||||
# Find out how many volumes are mapped to a host
|
||||
@ -1424,7 +1426,7 @@ class EMCSMISCommon():
|
||||
"%(storage)s and %(connector)s is %(ctrl)s.")
|
||||
% {'storage': storage_system,
|
||||
'connector': connector,
|
||||
'ctrl': str(ctrl)})
|
||||
'ctrl': ctrl})
|
||||
|
||||
associators = self.conn.Associators(
|
||||
ctrl,
|
||||
@ -1513,13 +1515,13 @@ class EMCSMISCommon():
|
||||
LOG.info(_("Device number not found for volume "
|
||||
"%(volumename)s %(vol_instance)s.") %
|
||||
{'volumename': volumename,
|
||||
'vol_instance': str(vol_instance.path)})
|
||||
'vol_instance': vol_instance.path})
|
||||
else:
|
||||
LOG.debug(_("Found device number %(device)d for volume "
|
||||
"%(volumename)s %(vol_instance)s.") %
|
||||
{'device': out_num_device_number,
|
||||
'volumename': volumename,
|
||||
'vol_instance': str(vol_instance.path)})
|
||||
'vol_instance': vol_instance.path})
|
||||
|
||||
data = {'hostlunid': out_num_device_number,
|
||||
'storagesystem': storage_system,
|
||||
@ -1549,7 +1551,7 @@ class EMCSMISCommon():
|
||||
|
||||
LOG.debug(_("Masking view: %(view)s DeviceMaskingGroup: %(masking)s.")
|
||||
% {'view': maskingview_name,
|
||||
'masking': str(foundMaskingGroup)})
|
||||
'masking': foundMaskingGroup})
|
||||
|
||||
return foundMaskingGroup
|
||||
|
||||
@ -1570,7 +1572,7 @@ class EMCSMISCommon():
|
||||
owningsp == sp):
|
||||
foundSystem = system
|
||||
LOG.debug(_("Found Storage Processor System: %s")
|
||||
% (str(system)))
|
||||
% (system))
|
||||
break
|
||||
|
||||
return foundSystem
|
||||
@ -1602,7 +1604,7 @@ class EMCSMISCommon():
|
||||
"%(endpoint)s.")
|
||||
% {'storage_system': storage_system,
|
||||
'sp': owningsp,
|
||||
'endpoint': str(foundEndpoints)})
|
||||
'endpoint': foundEndpoints})
|
||||
return foundEndpoints
|
||||
|
||||
def _getnum(self, num, datatype):
|
||||
@ -1646,8 +1648,8 @@ class EMCSMISCommon():
|
||||
|
||||
LOG.debug(_('EMCGetTargetEndpoints: Service: %(service)s '
|
||||
'Storage HardwareIDs: %(hardwareids)s.')
|
||||
% {'service': str(configservice),
|
||||
'hardwareids': str(hardwareids)})
|
||||
% {'service': configservice,
|
||||
'hardwareids': hardwareids})
|
||||
|
||||
for hardwareid in hardwareids:
|
||||
rc, targetendpoints = self.conn.InvokeMethod(
|
||||
@ -1686,8 +1688,8 @@ class EMCSMISCommon():
|
||||
|
||||
LOG.debug(_("Storage Hardware IDs for %(wwpns)s is "
|
||||
"%(foundInstances)s.")
|
||||
% {'wwpns': str(wwpns),
|
||||
'foundInstances': str(foundInstances)})
|
||||
% {'wwpns': wwpns,
|
||||
'foundInstances': foundInstances})
|
||||
|
||||
return foundInstances
|
||||
|
||||
|
@ -1034,7 +1034,7 @@ class GlusterfsDriver(nfs.RemoteFsDriver):
|
||||
except Exception as exc:
|
||||
LOG.warning(_('Exception during mounting %s') % (exc,))
|
||||
|
||||
LOG.debug(_('Available shares: %s') % str(self._mounted_shares))
|
||||
LOG.debug(_('Available shares: %s') % self._mounted_shares)
|
||||
|
||||
def _ensure_share_writable(self, path):
|
||||
"""Ensure that the Cinder user can write to the share.
|
||||
|
@ -85,8 +85,8 @@ class HuaweiVolumeDriver(object):
|
||||
'be set to either T, Dorado or HVS. "Protocol" should '
|
||||
'be set to either iSCSI or FC. Product: %(product)s '
|
||||
'Protocol: %(protocol)s')
|
||||
% {'product': str(product),
|
||||
'protocol': str(protocol)})
|
||||
% {'product': product,
|
||||
'protocol': protocol})
|
||||
raise exception.InvalidInput(reason=msg)
|
||||
|
||||
def __setattr__(self, name, value):
|
||||
|
@ -473,8 +473,8 @@ class HVSCommon():
|
||||
|
||||
LOG.debug(_('_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)s'
|
||||
'view_id: %(view_id)s')
|
||||
% {'lun_group': str(lungroup_id),
|
||||
'view_id': str(view_id)})
|
||||
% {'lun_group': lungroup_id,
|
||||
'view_id': view_id})
|
||||
|
||||
try:
|
||||
if view_id is None:
|
||||
|
@ -317,7 +317,7 @@ class GPFSDriver(driver.VolumeDriver):
|
||||
self._cluster_id = self._get_gpfs_cluster_id()
|
||||
except Exception as setup_exception:
|
||||
msg = (_('Could not find GPFS cluster id: %s.') %
|
||||
str(setup_exception))
|
||||
setup_exception)
|
||||
LOG.error(msg)
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
try:
|
||||
@ -325,7 +325,7 @@ class GPFSDriver(driver.VolumeDriver):
|
||||
self._gpfs_device = self._get_filesystem_from_path(gpfs_base)
|
||||
except Exception as setup_exception:
|
||||
msg = (_('Could not find GPFS file system device: %s.') %
|
||||
str(setup_exception))
|
||||
setup_exception)
|
||||
LOG.error(msg)
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
|
||||
|
@ -134,7 +134,7 @@ class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
|
||||
def _create_ibmnas_snap(self, src, dest, mount_path):
|
||||
"""Create volume clones and snapshots."""
|
||||
LOG.debug(_("Enter _create_ibmnas_snap: src %(src)s, dest %(dest)s")
|
||||
% {'src': str(src), 'dest': str(dest)})
|
||||
% {'src': src, 'dest': dest})
|
||||
if mount_path is not None:
|
||||
tmp_file_path = dest + '.snap'
|
||||
ssh_cmd = ['mkclone', '-p', dest, '-s', src, '-t', tmp_file_path]
|
||||
@ -164,8 +164,9 @@ class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
|
||||
def _create_ibmnas_copy(self, src, dest, snap):
|
||||
"""Create a cloned volume, parent & the clone both remain writable."""
|
||||
LOG.debug(_('Enter _create_ibmnas_copy: src %(src)s, dest %(dest)s, '
|
||||
'snap %(snap)s') % {'src': str(src), 'dest': str(dest),
|
||||
'snap': str(snap)})
|
||||
'snap %(snap)s') % {'src': src,
|
||||
'dest': dest,
|
||||
'snap': snap})
|
||||
ssh_cmd = ['mkclone', '-p', snap, '-s', src, '-t', dest]
|
||||
try:
|
||||
self._run_ssh(ssh_cmd)
|
||||
@ -198,7 +199,8 @@ class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
|
||||
def _delete_snapfiles(self, fchild, mount_point):
|
||||
LOG.debug(_('Enter _delete_snapfiles: fchild %(fchild)s, '
|
||||
'mount_point %(mount_point)s')
|
||||
% {'fchild': str(fchild), 'mount_point': str(mount_point)})
|
||||
% {'fchild': fchild,
|
||||
'mount_point': mount_point})
|
||||
ssh_cmd = ['lsclone', fchild]
|
||||
try:
|
||||
(out, _err) = self._run_ssh(ssh_cmd, check_exit_code=False)
|
||||
|
@ -303,8 +303,7 @@ class StorwizeSVCDriver(san.SanDriver):
|
||||
"""
|
||||
|
||||
LOG.debug(_('enter: initialize_connection: volume %(vol)s with '
|
||||
'connector %(conn)s') % {'vol': str(volume),
|
||||
'conn': str(connector)})
|
||||
'connector %(conn)s') % {'vol': volume, 'conn': connector})
|
||||
|
||||
vol_opts = self._get_vdisk_params(volume['volume_type_id'])
|
||||
volume_name = volume['name']
|
||||
@ -352,7 +351,7 @@ class StorwizeSVCDriver(san.SanDriver):
|
||||
IO_group = volume_attributes['IO_group_id']
|
||||
except KeyError as e:
|
||||
LOG.error(_('Did not find expected column name in '
|
||||
'lsvdisk: %s') % str(e))
|
||||
'lsvdisk: %s') % e)
|
||||
msg = (_('initialize_connection: Missing volume '
|
||||
'attribute for volume %s') % volume_name)
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
@ -423,14 +422,12 @@ class StorwizeSVCDriver(san.SanDriver):
|
||||
self.terminate_connection(volume, connector)
|
||||
LOG.error(_('initialize_connection: Failed to collect return '
|
||||
'properties for volume %(vol)s and connector '
|
||||
'%(conn)s.\n') % {'vol': str(volume),
|
||||
'conn': str(connector)})
|
||||
'%(conn)s.\n') % {'vol': volume,
|
||||
'conn': connector})
|
||||
|
||||
LOG.debug(_('leave: initialize_connection:\n volume: %(vol)s\n '
|
||||
'connector %(conn)s\n properties: %(prop)s')
|
||||
% {'vol': str(volume),
|
||||
'conn': str(connector),
|
||||
'prop': str(properties)})
|
||||
% {'vol': volume, 'conn': connector, 'prop': properties})
|
||||
|
||||
return {'driver_volume_type': type_str, 'data': properties, }
|
||||
|
||||
@ -456,8 +453,7 @@ class StorwizeSVCDriver(san.SanDriver):
|
||||
automatically by this driver when mappings are created)
|
||||
"""
|
||||
LOG.debug(_('enter: terminate_connection: volume %(vol)s with '
|
||||
'connector %(conn)s') % {'vol': str(volume),
|
||||
'conn': str(connector)})
|
||||
'connector %(conn)s') % {'vol': volume, 'conn': connector})
|
||||
|
||||
vol_name = volume['name']
|
||||
if 'host' in connector:
|
||||
@ -482,8 +478,7 @@ class StorwizeSVCDriver(san.SanDriver):
|
||||
self._helpers.unmap_vol_from_host(vol_name, host_name)
|
||||
|
||||
LOG.debug(_('leave: terminate_connection: volume %(vol)s with '
|
||||
'connector %(conn)s') % {'vol': str(volume),
|
||||
'conn': str(connector)})
|
||||
'connector %(conn)s') % {'vol': volume, 'conn': connector})
|
||||
|
||||
return info
|
||||
|
||||
|
@ -61,7 +61,7 @@ class StorwizeHelpers(object):
|
||||
level = resp['code_level']
|
||||
match_obj = re.search('([0-9].){3}[0-9]', level)
|
||||
if match_obj is None:
|
||||
msg = _('Failed to get code level (%s).') % str(level)
|
||||
msg = _('Failed to get code level (%s).') % level
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
code_level = match_obj.group().split('.')
|
||||
return {'code_level': tuple([int(x) for x in code_level]),
|
||||
@ -81,7 +81,7 @@ class StorwizeHelpers(object):
|
||||
if int(iogrp['node_count']) > 0:
|
||||
iogrps.append(int(iogrp['id']))
|
||||
except KeyError:
|
||||
self.handle_keyerror('lsiogrp', str(iogrp))
|
||||
self.handle_keyerror('lsiogrp', iogrp)
|
||||
except ValueError:
|
||||
msg = (_('Expected integer for node_count, '
|
||||
'svcinfo lsiogrp returned: %(node)s') %
|
||||
@ -111,7 +111,7 @@ class StorwizeHelpers(object):
|
||||
node['enabled_protocols'] = []
|
||||
nodes[node['id']] = node
|
||||
except KeyError:
|
||||
self.handle_keyerror('lsnode', str(node_data))
|
||||
self.handle_keyerror('lsnode', node_data)
|
||||
return nodes
|
||||
|
||||
def add_iscsi_ip_addrs(self, storage_nodes):
|
||||
@ -128,7 +128,7 @@ class StorwizeHelpers(object):
|
||||
if len(ip_data['IP_address_6']):
|
||||
node['ipv6'].append(ip_data['IP_address_6'])
|
||||
except KeyError:
|
||||
self.handle_keyerror('lsportip', str(ip_data))
|
||||
self.handle_keyerror('lsportip', ip_data)
|
||||
|
||||
def add_fc_wwpns(self, storage_nodes):
|
||||
"""Add FC WWPNs to system node information."""
|
||||
@ -160,7 +160,7 @@ class StorwizeHelpers(object):
|
||||
if host_data['iscsi_auth_method'] == 'chap':
|
||||
return host_data['iscsi_chap_secret']
|
||||
except KeyError:
|
||||
self.handle_keyerror('lsiscsiauth', str(host_data))
|
||||
self.handle_keyerror('lsiscsiauth', host_data)
|
||||
if not host_found:
|
||||
msg = _('Failed to find host %s') % host_name
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
@ -176,7 +176,7 @@ class StorwizeHelpers(object):
|
||||
|
||||
def get_host_from_connector(self, connector):
|
||||
"""Return the Storwize host described by the connector."""
|
||||
LOG.debug(_('enter: get_host_from_connector: %s') % str(connector))
|
||||
LOG.debug(_('enter: get_host_from_connector: %s') % connector)
|
||||
|
||||
# If we have FC information, we have a faster lookup option
|
||||
host_name = None
|
||||
@ -191,7 +191,7 @@ class StorwizeHelpers(object):
|
||||
wwpn.lower()):
|
||||
host_name = wwpn_info['name']
|
||||
except KeyError:
|
||||
self.handle_keyerror('lsfabric', str(wwpn_info))
|
||||
self.handle_keyerror('lsfabric', wwpn_info)
|
||||
|
||||
# That didn't work, so try exhaustive search
|
||||
if host_name is None:
|
||||
|
@ -378,7 +378,8 @@ class CLIResponse(object):
|
||||
if len(hds) != len(row):
|
||||
msg = (_('Unexpected CLI response: header/row mismatch. '
|
||||
'header: %(header)s, row: %(row)s')
|
||||
% {'header': str(hds), 'row': str(row)})
|
||||
% {'header': hds,
|
||||
'row': row})
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
for k, v in zip(hds, row):
|
||||
CLIResponse.append_dict(cur, k, v)
|
||||
|
@ -177,7 +177,7 @@ class RemoteFsDriver(driver.VolumeDriver):
|
||||
except Exception as exc:
|
||||
LOG.warning(_('Exception during mounting %s') % (exc,))
|
||||
|
||||
LOG.debug('Available shares %s' % str(self._mounted_shares))
|
||||
LOG.debug('Available shares %s' % self._mounted_shares)
|
||||
|
||||
def create_cloned_volume(self, volume, src_vref):
|
||||
raise NotImplementedError()
|
||||
|
@ -585,7 +585,7 @@ class RBDDriver(driver.VolumeDriver):
|
||||
|
||||
def delete_volume(self, volume):
|
||||
"""Deletes a logical volume."""
|
||||
volume_name = str(volume['name'])
|
||||
volume_name = volume['name']
|
||||
with RADOSClient(self) as client:
|
||||
try:
|
||||
rbd_image = self.rbd.Image(client.ioctx, volume_name)
|
||||
@ -659,7 +659,7 @@ class RBDDriver(driver.VolumeDriver):
|
||||
def delete_snapshot(self, snapshot):
|
||||
"""Deletes an rbd snapshot."""
|
||||
with RBDVolumeProxy(self, snapshot['volume_name']) as volume:
|
||||
snap = str(snapshot['name'])
|
||||
snap = snapshot['name']
|
||||
if self._supports_layering():
|
||||
try:
|
||||
volume.unprotect_snap(snap)
|
||||
|
@ -196,7 +196,7 @@ class HP3PARCommon(object):
|
||||
self.config.hp3par_password)
|
||||
except hpexceptions.HTTPUnauthorized as ex:
|
||||
msg = (_("Failed to Login to 3PAR (%(url)s) because %(err)s") %
|
||||
{'url': self.config.hp3par_api_url, 'err': str(ex)})
|
||||
{'url': self.config.hp3par_api_url, 'err': ex})
|
||||
LOG.error(msg)
|
||||
raise exception.InvalidInput(reason=msg)
|
||||
|
||||
@ -208,7 +208,7 @@ class HP3PARCommon(object):
|
||||
try:
|
||||
self.client = self._create_client()
|
||||
except hpexceptions.UnsupportedVersion as ex:
|
||||
raise exception.InvalidInput(str(ex))
|
||||
raise exception.InvalidInput(ex)
|
||||
LOG.info(_("HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s")
|
||||
% {"common_ver": self.VERSION,
|
||||
"rest_ver": hp3parclient.get_version_string()})
|
||||
@ -713,24 +713,24 @@ class HP3PARCommon(object):
|
||||
except exception.InvalidInput as ex:
|
||||
# Delete the volume if unable to add it to the volume set
|
||||
self.client.deleteVolume(volume_name)
|
||||
LOG.error(str(ex))
|
||||
raise exception.CinderException(str(ex))
|
||||
LOG.error(ex)
|
||||
raise exception.CinderException(ex)
|
||||
except hpexceptions.HTTPConflict:
|
||||
msg = _("Volume (%s) already exists on array") % volume_name
|
||||
LOG.error(msg)
|
||||
raise exception.Duplicate(msg)
|
||||
except hpexceptions.HTTPBadRequest as ex:
|
||||
LOG.error(str(ex))
|
||||
LOG.error(ex)
|
||||
raise exception.Invalid(ex.get_description())
|
||||
except exception.InvalidInput as ex:
|
||||
LOG.error(str(ex))
|
||||
LOG.error(ex)
|
||||
raise ex
|
||||
except exception.CinderException as ex:
|
||||
LOG.error(str(ex))
|
||||
LOG.error(ex)
|
||||
raise ex
|
||||
except Exception as ex:
|
||||
LOG.error(str(ex))
|
||||
raise exception.CinderException(str(ex))
|
||||
LOG.error(ex)
|
||||
raise exception.CinderException(ex)
|
||||
|
||||
def _copy_volume(self, src_name, dest_name, cpg, snap_cpg=None,
|
||||
tpvv=True):
|
||||
@ -778,7 +778,7 @@ class HP3PARCommon(object):
|
||||
except hpexceptions.HTTPNotFound:
|
||||
raise exception.NotFound()
|
||||
except Exception as ex:
|
||||
LOG.error(str(ex))
|
||||
LOG.error(ex)
|
||||
raise exception.CinderException(ex)
|
||||
|
||||
def delete_volume(self, volume):
|
||||
@ -799,10 +799,10 @@ class HP3PARCommon(object):
|
||||
# the volume once it stops the copy.
|
||||
self.client.stopOnlinePhysicalCopy(volume_name)
|
||||
else:
|
||||
LOG.error(str(ex))
|
||||
LOG.error(ex)
|
||||
raise ex
|
||||
else:
|
||||
LOG.error(str(ex))
|
||||
LOG.error(ex)
|
||||
raise ex
|
||||
except hpexceptions.HTTPConflict as ex:
|
||||
if ex.get_code() == 34:
|
||||
@ -823,7 +823,7 @@ class HP3PARCommon(object):
|
||||
volume_name)
|
||||
self.client.deleteVolume(volume_name)
|
||||
else:
|
||||
LOG.error(str(ex))
|
||||
LOG.error(ex)
|
||||
raise ex
|
||||
|
||||
except hpexceptions.HTTPNotFound as ex:
|
||||
@ -833,13 +833,13 @@ class HP3PARCommon(object):
|
||||
"%(id)s Ex: %(msg)s") % {'id': volume['id'], 'msg': ex}
|
||||
LOG.warning(msg)
|
||||
except hpexceptions.HTTPForbidden as ex:
|
||||
LOG.error(str(ex))
|
||||
LOG.error(ex)
|
||||
raise exception.NotAuthorized(ex.get_description())
|
||||
except hpexceptions.HTTPConflict as ex:
|
||||
LOG.error(str(ex))
|
||||
LOG.error(ex)
|
||||
raise exception.VolumeIsBusy(ex.get_description())
|
||||
except Exception as ex:
|
||||
LOG.error(str(ex))
|
||||
LOG.error(ex)
|
||||
raise exception.CinderException(ex)
|
||||
|
||||
def create_volume_from_snapshot(self, volume, snapshot):
|
||||
@ -901,7 +901,7 @@ class HP3PARCommon(object):
|
||||
self.client.growVolume(volume_name, growth_size_mib)
|
||||
except Exception as ex:
|
||||
LOG.error(_("Error extending volume %(id)s. Ex: %(ex)s") %
|
||||
{'id': volume['id'], 'ex': str(ex)})
|
||||
{'id': volume['id'], 'ex': ex})
|
||||
# Delete the volume if unable to grow it
|
||||
self.client.deleteVolume(volume_name)
|
||||
raise exception.CinderException(ex)
|
||||
@ -915,16 +915,16 @@ class HP3PARCommon(object):
|
||||
except Exception as ex:
|
||||
# Delete the volume if unable to add it to the volume set
|
||||
self.client.deleteVolume(volume_name)
|
||||
LOG.error(str(ex))
|
||||
LOG.error(ex)
|
||||
raise exception.CinderException(ex)
|
||||
except hpexceptions.HTTPForbidden as ex:
|
||||
LOG.error(str(ex))
|
||||
LOG.error(ex)
|
||||
raise exception.NotAuthorized()
|
||||
except hpexceptions.HTTPNotFound as ex:
|
||||
LOG.error(str(ex))
|
||||
LOG.error(ex)
|
||||
raise exception.NotFound()
|
||||
except Exception as ex:
|
||||
LOG.error(str(ex))
|
||||
LOG.error(ex)
|
||||
raise exception.CinderException(ex)
|
||||
|
||||
def create_snapshot(self, snapshot):
|
||||
@ -961,10 +961,10 @@ class HP3PARCommon(object):
|
||||
|
||||
self.client.createSnapshot(snap_name, vol_name, optional)
|
||||
except hpexceptions.HTTPForbidden as ex:
|
||||
LOG.error(str(ex))
|
||||
LOG.error(ex)
|
||||
raise exception.NotAuthorized()
|
||||
except hpexceptions.HTTPNotFound as ex:
|
||||
LOG.error(str(ex))
|
||||
LOG.error(ex)
|
||||
raise exception.NotFound()
|
||||
|
||||
def update_volume_key_value_pair(self, volume, key, value):
|
||||
@ -976,15 +976,15 @@ class HP3PARCommon(object):
|
||||
(volume['display_name'],
|
||||
volume['name'],
|
||||
self._get_3par_vol_name(volume['id']),
|
||||
str(key),
|
||||
str(value)))
|
||||
key,
|
||||
value))
|
||||
try:
|
||||
volume_name = self._get_3par_vol_name(volume['id'])
|
||||
if value is None:
|
||||
value = ''
|
||||
self.client.setVolumeMetaData(volume_name, key, value)
|
||||
except Exception as ex:
|
||||
msg = _('Failure in update_volume_key_value_pair:%s') % str(ex)
|
||||
msg = _('Failure in update_volume_key_value_pair:%s') % ex
|
||||
LOG.error(msg)
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
|
||||
@ -993,12 +993,12 @@ class HP3PARCommon(object):
|
||||
|
||||
LOG.debug("VOLUME (%s : %s %s) Clearing Key : %s)" %
|
||||
(volume['display_name'], volume['name'],
|
||||
self._get_3par_vol_name(volume['id']), str(key)))
|
||||
                   self._get_3par_vol_name(volume['id']), key))
        try:
            volume_name = self._get_3par_vol_name(volume['id'])
            self.client.removeVolumeMetaData(volume_name, key)
        except Exception as ex:
            msg = _('Failure in clear_volume_key_value_pair:%s') % str(ex)
            msg = _('Failure in clear_volume_key_value_pair:%s') % ex
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

@ -1146,16 +1146,16 @@ class HP3PARCommon(object):
            LOG.error(msg)
            raise exception.Duplicate(msg)
        except hpexceptions.HTTPBadRequest as ex:
            LOG.error(str(ex))
            LOG.error(ex)
            raise exception.Invalid(ex.get_description())
        except exception.InvalidInput as ex:
            LOG.error(str(ex))
            LOG.error(ex)
            raise ex
        except exception.CinderException as ex:
            LOG.error(str(ex))
            LOG.error(ex)
            raise ex
        except Exception as ex:
            LOG.error(str(ex))
            LOG.error(ex)
            raise exception.CinderException(ex)

    def delete_snapshot(self, snapshot):
@ -1166,7 +1166,7 @@ class HP3PARCommon(object):
            snap_name = self._get_3par_snap_name(snapshot['id'])
            self.client.deleteVolume(snap_name)
        except hpexceptions.HTTPForbidden as ex:
            LOG.error(str(ex))
            LOG.error(ex)
            raise exception.NotAuthorized()
        except hpexceptions.HTTPNotFound as ex:
            # We'll let this act as if it worked
@ -1175,7 +1175,7 @@ class HP3PARCommon(object):
                    "%(id)s Ex: %(msg)s") % {'id': snapshot['id'], 'msg': ex}
            LOG.warning(msg)
        except hpexceptions.HTTPConflict as ex:
            LOG.error(str(ex))
            LOG.error(ex)
            raise exception.SnapshotIsBusy(snapshot_name=snapshot['id'])

    def _get_3par_hostname_from_wwn_iqn(self, wwns, iqns):
@ -1215,11 +1215,11 @@ class HP3PARCommon(object):
                hostname = self._get_3par_hostname_from_wwn_iqn(wwn, iqn)
                # no 3par host, re-throw
                if (hostname is None):
                    LOG.error(str(e))
                    LOG.error(e)
                    raise
            else:
                # not a 'host does not exist' HTTPNotFound exception, re-throw
                LOG.error(str(e))
                LOG.error(e)
                raise

        # try again with name retrieved from 3par

@ -19,6 +19,7 @@ HP LeftHand SAN ISCSI Driver.

The driver communicates to the backend aka Cliq via SSH to perform all the
operations on the SAN.
"""

from lxml import etree

from cinder import exception
@ -333,9 +334,9 @@ class HPLeftHandCLIQProxy(SanISCSIDriver):
        except Exception as ex:
            in_use_msg = 'cannot be deleted because it is a clone point'
            if in_use_msg in ex.message:
                raise exception.SnapshotIsBusy(str(ex))
                raise exception.SnapshotIsBusy(ex)

            raise exception.VolumeBackendAPIException(str(ex))
            raise exception.VolumeBackendAPIException(ex)

    def local_path(self, volume):
        msg = _("local_path not supported")

@ -123,7 +123,7 @@ class HPLeftHandRESTProxy(ISCSIDriver):
                raise exception.DriverNotInitialized(
                    _('LeftHand cluster not found'))
        except Exception as ex:
            raise exception.DriverNotInitialized(str(ex))
            raise exception.DriverNotInitialized(ex)

    def check_for_setup_error(self):
        pass
@ -160,7 +160,7 @@ class HPLeftHandRESTProxy(ISCSIDriver):

            return self._update_provider(volume_info)
        except Exception as ex:
            raise exception.VolumeBackendAPIException(str(ex))
            raise exception.VolumeBackendAPIException(ex)

    def delete_volume(self, volume):
        """Deletes a volume."""
@ -170,7 +170,7 @@ class HPLeftHandRESTProxy(ISCSIDriver):
        except hpexceptions.HTTPNotFound:
            LOG.error(_("Volume did not exist. It will not be deleted"))
        except Exception as ex:
            raise exception.VolumeBackendAPIException(str(ex))
            raise exception.VolumeBackendAPIException(ex)

    def extend_volume(self, volume, new_size):
        """Extend the size of an existing volume."""
@ -181,7 +181,7 @@ class HPLeftHandRESTProxy(ISCSIDriver):
            options = {'size': int(new_size) * units.GiB}
            self.client.modifyVolume(volume_info['id'], options)
        except Exception as ex:
            raise exception.VolumeBackendAPIException(str(ex))
            raise exception.VolumeBackendAPIException(ex)

    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
@ -193,7 +193,7 @@ class HPLeftHandRESTProxy(ISCSIDriver):
                                       volume_info['id'],
                                       option)
        except Exception as ex:
            raise exception.VolumeBackendAPIException(str(ex))
            raise exception.VolumeBackendAPIException(ex)

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
@ -205,12 +205,12 @@ class HPLeftHandRESTProxy(ISCSIDriver):
        except hpexceptions.HTTPServerError as ex:
            in_use_msg = 'cannot be deleted because it is a clone point'
            if in_use_msg in ex.get_description():
                raise exception.SnapshotIsBusy(str(ex))
                raise exception.SnapshotIsBusy(ex)

            raise exception.VolumeBackendAPIException(str(ex))
            raise exception.VolumeBackendAPIException(ex)

        except Exception as ex:
            raise exception.VolumeBackendAPIException(str(ex))
            raise exception.VolumeBackendAPIException(ex)

    def get_volume_stats(self, refresh):
        """Gets volume stats."""
@ -264,7 +264,7 @@ class HPLeftHandRESTProxy(ISCSIDriver):

            return {'driver_volume_type': 'iscsi', 'data': iscsi_properties}
        except Exception as ex:
            raise exception.VolumeBackendAPIException(str(ex))
            raise exception.VolumeBackendAPIException(ex)

    def terminate_connection(self, volume, connector, **kwargs):
        """Unassign the volume from the host."""
@ -275,7 +275,7 @@ class HPLeftHandRESTProxy(ISCSIDriver):
                                          volume_info['id'],
                                          server_info['id'])
        except Exception as ex:
            raise exception.VolumeBackendAPIException(str(ex))
            raise exception.VolumeBackendAPIException(ex)

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
@ -286,14 +286,14 @@ class HPLeftHandRESTProxy(ISCSIDriver):
                                    snap_info['id'])
            return self._update_provider(volume_info)
        except Exception as ex:
            raise exception.VolumeBackendAPIException(str(ex))
            raise exception.VolumeBackendAPIException(ex)

    def create_cloned_volume(self, volume, src_vref):
        try:
            volume_info = self.client.getVolumeByName(src_vref['name'])
            self.client.cloneVolume(volume['name'], volume_info['id'])
        except Exception as ex:
            raise exception.VolumeBackendAPIException(str(ex))
            raise exception.VolumeBackendAPIException(ex)

    def _get_volume_extra_specs(self, volume):
        """Get extra specs from a volume."""
@ -423,7 +423,7 @@ class HPLeftHandRESTProxy(ISCSIDriver):
                return True

        except Exception as ex:
            LOG.warning("%s" % str(ex))
            LOG.warning("%s" % ex)

        return False

@ -502,7 +502,7 @@ class HPLeftHandRESTProxy(ISCSIDriver):
                              "management group.") % volume['name'])
                return false_ret
        except hpexceptions.HTTPServerError as ex:
            LOG.error(str(ex))
            LOG.error(ex)
            return false_ret

        return (True, None)

@ -183,7 +183,7 @@ class Vim(object):
                fault_list = []
                for child in detail.getChildren():
                    fault_list.append(child.get('type'))
                raise error_util.VimFaultException(fault_list, str(excep))
                raise error_util.VimFaultException(fault_list, excep)

            except AttributeError as excep:
                raise error_util.VimAttributeException(_("No such SOAP method "

@ -24,7 +24,7 @@ from cinder.volume.drivers.xenapi import tools

class XenAPIException(Exception):
    def __init__(self, original_exception):
        super(XenAPIException, self).__init__(str(original_exception))
        super(XenAPIException, self).__init__(original_exception)
        self.original_exception = original_exception


@ -16,6 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.

import six

from cinder import exception
from cinder.openstack.common import log as logging
@ -66,7 +67,7 @@ def error_out_volume(context, db, volume_id, reason=None):
def _clean_reason(reason):
    if reason is None:
        return '???'
    reason = str(reason)
    reason = six.text_type(reason)
    if len(reason) <= REASON_LENGTH:
        return reason
    else:

@ -757,7 +757,7 @@ class VolumeManager(manager.SchedulerDependentManager):
            self.driver.validate_connector(connector)
        except Exception as err:
            err_msg = (_('Unable to fetch connection information from '
                         'backend: %(err)s') % {'err': str(err)})
                         'backend: %(err)s') % {'err': err})
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)

@ -782,7 +782,7 @@ class VolumeManager(manager.SchedulerDependentManager):
        except Exception as err:
            self.driver.remove_export(context, volume)
            err_msg = (_('Unable to fetch connection information from '
                         'backend: %(err)s') % {'err': str(err)})
                         'backend: %(err)s') % {'err': err})
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)

@ -845,7 +845,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                self._add_or_delete_fc_connection(conn_info, 0)
        except Exception as err:
            err_msg = (_('Unable to terminate volume connection: %(err)s')
                       % {'err': str(err)})
                       % {'err': err})
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)

@ -1282,4 +1282,4 @@ class VolumeManager(manager.SchedulerDependentManager):
                    self.zonemanager.delete_connection(_initiator_target_map)
                except exception.ZoneManagerException as e:
                    with excutils.save_and_reraise_exception():
                        LOG.error(str(e))
                        LOG.error(e)

@ -237,7 +237,7 @@ def get_all_specs(context, inactive=False, search_opts={}):
    qos_specs = db.qos_specs_get_all(context, inactive)

    if search_opts:
        LOG.debug(_("Searching by: %s") % str(search_opts))
        LOG.debug(_("Searching by: %s") % search_opts)

        def _check_specs_match(qos_specs, searchdict):
            for k, v in searchdict.iteritems():

@ -64,7 +64,7 @@ def get_all_types(context, inactive=0, search_opts={}):
    vol_types = db.volume_type_get_all(context, inactive)

    if search_opts:
        LOG.debug(_("Searching by: %s") % str(search_opts))
        LOG.debug(_("Searching by: %s") % search_opts)

        def _check_extra_specs_match(vol_type, searchdict):
            for k, v in searchdict.iteritems():

@ -133,7 +133,7 @@ class BrcdFCSanLookupService(FCSanLookupService):
            except Exception as e:
                msg = _("SSH connection failed "
                        "for %(fabric) with error: %(err)"
                        ) % {'fabric': fabric_ip, 'err': str(e)}
                        ) % {'fabric': fabric_ip, 'err': e}
                LOG.error(msg)
                raise exception.FCSanLookupServiceException(message=msg)
            finally:
@ -213,7 +213,8 @@ class BrcdFCSanLookupService(FCSanLookupService):
            switch_data = stdout.readlines()
        except paramiko.SSHException as e:
            msg = (_("SSH Command failed with error '%(err)r' "
                     "'%(command)s'") % {'err': str(e), 'command': cmd})
                     "'%(command)s'") % {'err': e,
                                         'command': cmd})
            LOG.error(msg)
            raise exception.FCSanLookupServiceException(message=msg)
        finally:

@ -178,7 +178,7 @@ class BrcdFCZoneClientCLI(object):
            self._cfg_trans_abort()
            msg = _("Creating and activating zone set failed: "
                    "(Zone set=%(cfg_name)s error=%(err)s)."
                    ) % {'cfg_name': cfg_name, 'err': str(e)}
                    ) % {'cfg_name': cfg_name, 'err': e}
            LOG.error(msg)
            raise exception.BrocadeZoningCliException(reason=msg)

@ -229,7 +229,7 @@ class BrcdFCZoneClientCLI(object):
                self.activate_zoneset(active_zoneset_name)
        except Exception as e:
            msg = _("Deleting zones failed: (command=%(cmd)s error=%(err)s)."
                    ) % {'cmd': cmd, 'err': str(e)}
                    ) % {'cmd': cmd, 'err': e}
            LOG.error(msg)
            self._cfg_trans_abort()
            raise exception.BrocadeZoningCliException(reason=msg)
@ -330,7 +330,7 @@ class BrcdFCZoneClientCLI(object):
            return False
        except processutils.ProcessExecutionError as e:
            msg = _("Error while getting data via ssh: (command=%(cmd)s "
                    "error=%(err)s).") % {'cmd': cmd, 'err': str(e)}
                    "error=%(err)s).") % {'cmd': cmd, 'err': e}
            LOG.error(msg)
            raise exception.BrocadeZoningCliException(reason=msg)

@ -343,7 +343,8 @@ class BrcdFCZoneClientCLI(object):
            return sw_data
        except processutils.ProcessExecutionError as e:
            msg = _("Error while getting data via ssh: (command=%(cmd)s "
                    "error=%(err)s).") % {'cmd': cmd_list, 'err': str(e)}
                    "error=%(err)s).") % {'cmd': cmd_list,
                                          'err': e}
            LOG.error(msg)
            raise exception.BrocadeZoningCliException(reason=msg)

@ -460,7 +461,7 @@ class BrcdFCZoneClientCLI(object):
                last_exception = e
                greenthread.sleep(random.randint(20, 500) / 100.0)
        LOG.debug(_("Handling error case after "
                    "SSH:%s"), str(last_exception))
                    "SSH:%s"), last_exception)
        try:
            raise processutils.ProcessExecutionError(
                exit_code=last_exception.exit_code,
@ -475,7 +476,7 @@ class BrcdFCZoneClientCLI(object):
                cmd=command)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error executing command via ssh: %s"), str(e))
                LOG.error(_("Error executing command via ssh: %s"), e)
        finally:
            if stdin:
                stdin.flush()

@ -157,11 +157,10 @@ class BrcdFCZoneDriver(FCZoneDriver):
                LOG.error(msg)
                raise exception.FCZoneDriverException(msg)
        except exception.BrocadeZoningCliException as brocade_ex:
            raise exception.FCZoneDriverException(str(brocade_ex))
            raise exception.FCZoneDriverException(brocade_ex)
        except Exception as e:
            LOG.error(str(e))
            msg = _("Failed to add zoning configuration %s"
                    ) % str(e)
            LOG.error(e)
            msg = _("Failed to add zoning configuration %s") % e
            raise exception.FCZoneDriverException(msg)

        cfgmap_from_fabric = self.get_active_zone_set(
@ -220,11 +219,10 @@ class BrcdFCZoneDriver(FCZoneDriver):
                        zone_map, self.configuration.zone_activate)
                    cli_client.cleanup()
                except exception.BrocadeZoningCliException as brocade_ex:
                    raise exception.FCZoneDriverException(str(brocade_ex))
                    raise exception.FCZoneDriverException(brocade_ex)
                except Exception as e:
                    LOG.error(str(e))
                    msg = _("Failed to add zoning configuration %s"
                            ) % str(e)
                    LOG.error(e)
                    msg = _("Failed to add zoning configuration %s") % e
                    raise exception.FCZoneDriverException(msg)
            LOG.debug(_("Zones added successfully: %s"), zone_map)

@ -268,11 +266,10 @@ class BrcdFCZoneDriver(FCZoneDriver):
                LOG.error(msg)
                raise exception.FCZoneDriverException(msg)
        except exception.BrocadeZoningCliException as brocade_ex:
            raise exception.FCZoneDriverException(str(brocade_ex))
            raise exception.FCZoneDriverException(brocade_ex)
        except Exception as e:
            LOG.error(str(e))
            msg = _("Failed to delete zoning configuration %s"
                    ) % str(e)
            LOG.error(e)
            msg = _("Failed to delete zoning configuration %s") % e
            raise exception.FCZoneDriverException(msg)

        cfgmap_from_fabric = self.get_active_zone_set(
@ -363,7 +360,7 @@ class BrcdFCZoneDriver(FCZoneDriver):
                        zone_name_string, self.configuration.zone_activate)
                conn.cleanup()
            except Exception as e:
                LOG.error(str(e))
                LOG.error(e)
                msg = _("Failed to update or delete zoning configuration")
                raise exception.FCZoneDriverException(msg)

@ -408,11 +405,10 @@ class BrcdFCZoneDriver(FCZoneDriver):
                LOG.error(msg)
                raise exception.FCZoneDriverException(msg)
        except exception.BrocadeZoningCliException as brocade_ex:
            raise exception.FCZoneDriverException(str(brocade_ex))
            raise exception.FCZoneDriverException(brocade_ex)
        except Exception as e:
            LOG.error(str(e))
            msg = _("Failed to get SAN context %s"
                    ) % str(e)
            LOG.error(e)
            msg = _("Failed to get SAN context %s") % e
            raise exception.FCZoneDriverException(msg)

        # Get name server data from fabric and get the targets
@ -425,9 +421,9 @@ class BrcdFCZoneDriver(FCZoneDriver):
        except exception.BrocadeZoningCliException as ex:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error getting name server "
                            "info: %s"), str(ex))
                            "info: %s"), ex)
        except Exception as e:
            msg = _("Failed to get name server info:%s") % str(e)
            msg = (_("Failed to get name server info:%s") % e)
            LOG.error(msg)
            raise exception.FCZoneDriverException(msg)
        visible_targets = filter(
@ -469,9 +465,9 @@ class BrcdFCZoneDriver(FCZoneDriver):
            cfgmap = conn.get_active_zone_set()
            conn.cleanup()
        except exception.BrocadeZoningCliException as brocade_ex:
            raise exception.FCZoneDriverException(str(brocade_ex))
            raise exception.FCZoneDriverException(brocade_ex)
        except Exception as e:
            msg = _("Failed to access active zoning configuration:%s") % str(e)
            msg = (_("Failed to access active zoning configuration:%s") % e)
            LOG.error(msg)
            raise exception.FCZoneDriverException(msg)
        LOG.debug(_("Active zone set from fabric: %s"), cfgmap)

@ -23,7 +23,6 @@ defined in this class.

"""


from cinder import exception
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
@ -89,6 +88,6 @@ class FCSanLookupService(fc_common.FCCommon):
            device_map = self.lookup_service.get_device_mapping_from_network(
                initiator_list, target_list)
        except Exception as e:
            LOG.error(str(e))
            raise exception.FCSanLookupServiceException(str(e))
            LOG.error(e)
            raise exception.FCSanLookupServiceException(e)
        return device_map

@ -31,7 +31,6 @@ detach operation.

"""


from oslo.config import cfg

from cinder import exception
@ -139,7 +138,7 @@ class ZoneManager(fc_common.FCCommon):
        except Exception as e:
            msg = _("Failed adding connection for fabric=%(fabric)s: "
                    "Error:%(err)s") % {'fabric': connected_fabric,
                                        'err': str(e)}
                                        'err': e}
            LOG.error(msg)
            raise exception.ZoneManagerException(reason=msg)

@ -186,7 +185,7 @@ class ZoneManager(fc_common.FCCommon):
        except Exception as e:
            msg = _("Failed removing connection for fabric=%(fabric)s: "
                    "Error:%(err)s") % {'fabric': connected_fabric,
                                        'err': str(e)}
                                        'err': e}
            LOG.error(msg)
            raise exception.ZoneManagerException(reason=msg)
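Illustrative sketch (not part of the commit; the function, client, and exception names below are hypothetical stand-ins): every hunk above applies the same pattern, passing the exception object straight to a %s placeholder or an exception constructor rather than wrapping it in str() first.

# Minimal before/after sketch of the pattern shown in the hunks above.
import logging

LOG = logging.getLogger(__name__)


class VolumeBackendAPIException(Exception):
    """Stand-in for cinder.exception.VolumeBackendAPIException."""


def remove_metadata_key(client, volume_name, key):
    try:
        client.removeVolumeMetaData(volume_name, key)
    except Exception as ex:
        # Before: msg = 'Failure in clear_volume_key_value_pair:%s' % str(ex)
        # After: pass the exception object and let %s do the conversion.
        msg = 'Failure in clear_volume_key_value_pair:%s' % ex
        LOG.error(msg)
        raise VolumeBackendAPIException(msg)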