Fix missing translations for log messages

Fixed log translations in the volume.drivers and volume.manager
modules by wrapping error, info, and warning messages with the
appropriate marker functions (_LE, _LI, _LW), following the logging
guidelines [1].

As LOG.warn is deprecated [2], all LOG.warn calls have been changed
to LOG.warning.

[1] http://docs.openstack.org/developer/oslo.i18n/guidelines.html
[2] http://bugs.python.org/issue13235

Note: any new occurrences added to master while this change is under
review can be handled separately if they are not caught by the
hacking checks.
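
For reference, a minimal sketch of the pattern applied throughout
(the logger setup and message texts here are illustrative; _LE, _LI
and _LW are the cinder.i18n marker functions used in this change):

    from oslo_log import log as logging

    from cinder.i18n import _LE, _LI, _LW

    LOG = logging.getLogger(__name__)

    vol_id = 'vol-01'  # illustrative value

    # Before: no level-specific marker, eager %-interpolation, and
    # the deprecated LOG.warn:
    #     LOG.warn(_("Volume %s was not found.") % vol_id)

    # After: level-matched marker function, with interpolation
    # deferred to the logger so it only runs when the level is
    # enabled:
    LOG.warning(_LW("Volume %s was not found."), vol_id)
    LOG.info(_LI("Created volume %(vol)s."), {'vol': vol_id})
    LOG.error(_LE("Failed to attach volume %(vol)s."), {'vol': vol_id})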

Partial-Bug: 1431256
Change-Id: I9b7b89097b296ea62b43f1d948dcf59e2e5a40c4
ankitagrawal 2015-03-16 06:26:11 -07:00
parent 86b384ca2e
commit a09c4e1747
91 changed files with 1512 additions and 1674 deletions


@ -214,9 +214,7 @@ def validate_log_translations(logical_line, filename):
# TODO(smcginnis): The following is temporary as a series
# of patches are done to address these issues. It should be
# removed completely when bug 1433216 is closed.
ignore_dirs = [
"cinder/openstack",
"cinder/volume"]
ignore_dirs = ["cinder/openstack"]
for directory in ignore_dirs:
if directory in filename:
return


@ -632,13 +632,13 @@ class GlusterFsDriverTestCase(test.TestCase):
self._driver._refresh_mounts()
self.assertTrue(mock_unmount_shares.called)
self.assertTrue(mock_logger.warn.called)
self.assertTrue(mock_logger.warning.called)
self.assertTrue(mock_ensure_shares_mounted.called)
mock_unmount_shares.reset_mock()
mock_ensure_shares_mounted.reset_mock()
mock_logger.reset_mock()
mock_logger.warn.reset_mock()
mock_logger.warning.reset_mock()
mock_stderr = _("umount: <mnt_path>: some other error")
mock_unmount_shares.side_effect = \


@ -1201,9 +1201,11 @@ class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase):
with mock.patch.object(drv, '_ensure_shares_mounted'):
with mock.patch.object(drv, '_do_create_volume'):
self._driver.create_volume(FakeVolume(host, 1))
warn_msg = 'Extra spec netapp:raid_type is obsolete. ' \
'Use netapp_raid_type instead.'
utils.LOG.warning.assert_called_once_with(warn_msg)
warn_msg = ('Extra spec %(old)s is obsolete. Use %(new)s '
'instead.')
utils.LOG.warning.assert_called_once_with(
warn_msg, {'new': 'netapp_raid_type',
'old': 'netapp:raid_type'})
@mock.patch.object(utils, 'LOG', mock.Mock())
def test_create_volume_deprecated_extra_spec(self):
@ -1220,9 +1222,11 @@ class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase):
with mock.patch.object(drv, '_ensure_shares_mounted'):
with mock.patch.object(drv, '_do_create_volume'):
self._driver.create_volume(FakeVolume(host, 1))
warn_msg = 'Extra spec netapp_thick_provisioned is ' \
'deprecated. Use netapp_thin_provisioned instead.'
utils.LOG.warning.assert_called_once_with(warn_msg)
warn_msg = ('Extra spec %(old)s is deprecated. Use %(new)s '
'instead.')
utils.LOG.warning.assert_called_once_with(
warn_msg, {'new': 'netapp_thin_provisioned',
'old': 'netapp_thick_provisioned'})
def test_create_volume_no_pool_specified(self):
drv = self._driver


@ -119,7 +119,7 @@ class RemoteFsDriverTestCase(test.TestCase):
drv._set_rw_permissions(self.TEST_FILE_NAME)
self.assertFalse(LOG.warn.called)
self.assertFalse(LOG.warning.called)
@mock.patch.object(remotefs, 'LOG')
def test_set_rw_permissions_without_secure_file_permissions(self, LOG):
@ -129,10 +129,10 @@ class RemoteFsDriverTestCase(test.TestCase):
drv._set_rw_permissions(self.TEST_FILE_NAME)
self.assertTrue(LOG.warn.called)
warn_msg = "%s is being set with open permissions: ugo+rw" % \
self.TEST_FILE_NAME
LOG.warn.assert_called_once_with(warn_msg)
self.assertTrue(LOG.warning.called)
warn_msg = "%(path)s is being set with open permissions: %(perm)s"
LOG.warning.assert_called_once_with(
warn_msg, {'path': self.TEST_FILE_NAME, 'perm': 'ugo+rw'})
@mock.patch('os.path.join')
@mock.patch('os.path.isfile', return_value=False)
@ -309,7 +309,7 @@ class RemoteFsDriverTestCase(test.TestCase):
self.assertEqual('false', drv.configuration.nas_secure_file_operations)
self.assertEqual('false',
drv.configuration.nas_secure_file_permissions)
self.assertTrue(LOG.warn.called)
self.assertTrue(LOG.warning.called)
def test_secure_file_operations_enabled_true(self):
"""Test nas_secure_file_operations = 'true'
@ -1003,7 +1003,7 @@ class NfsDriverTestCase(test.TestCase):
self.assertEqual('true', drv.configuration.nas_secure_file_operations)
self.assertEqual('true', drv.configuration.nas_secure_file_permissions)
self.assertFalse(LOG.warn.called)
self.assertFalse(LOG.warning.called)
@mock.patch.object(nfs, 'LOG')
def test_set_nas_security_options_when_false(self, LOG):
@ -1027,7 +1027,7 @@ class NfsDriverTestCase(test.TestCase):
self.assertEqual('false', drv.configuration.nas_secure_file_operations)
self.assertEqual('false',
drv.configuration.nas_secure_file_permissions)
self.assertTrue(LOG.warn.called)
self.assertTrue(LOG.warning.called)
def test_set_nas_security_options_exception_if_no_mounted_shares(self):
"""Ensure proper exception is raised if there are no mounted shares."""


@ -195,8 +195,8 @@ class QuobyteDriverTestCase(test.TestCase):
mock_execute.assert_has_calls([mkdir_call, mount_call],
any_order=False)
mock_LOG.warn.assert_called_once_with('%s is already mounted',
self.TEST_QUOBYTE_VOLUME)
mock_LOG.warning.assert_called_once_with('%s is already mounted',
self.TEST_QUOBYTE_VOLUME)
def test_mount_quobyte_should_reraise_already_mounted_error(self):
"""Same as


@ -362,9 +362,9 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
'id': uuid.uuid4(),
'host': 'hostname@backend#vol1'})
warn_msg = 'Extra spec netapp:raid_type is obsolete. ' \
'Use netapp_raid_type instead.'
na_utils.LOG.warning.assert_called_once_with(warn_msg)
warn_msg = 'Extra spec %(old)s is obsolete. Use %(new)s instead.'
na_utils.LOG.warning.assert_called_once_with(
warn_msg, {'new': 'netapp_raid_type', 'old': 'netapp:raid_type'})
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_create_lun', mock.Mock())
@ -383,9 +383,10 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
'id': uuid.uuid4(),
'host': 'hostname@backend#vol1'})
warn_msg = 'Extra spec netapp_thick_provisioned is deprecated. ' \
'Use netapp_thin_provisioned instead.'
na_utils.LOG.warning.assert_called_once_with(warn_msg)
warn_msg = "Extra spec %(old)s is deprecated. Use %(new)s instead."
na_utils.LOG.warning.assert_called_once_with(
warn_msg, {'new': 'netapp_thin_provisioned',
'old': 'netapp_thick_provisioned'})
@mock.patch.object(na_utils, 'check_flags')
def test_do_setup_san_configured(self, mock_check_flags):


@ -351,12 +351,12 @@ class BaseVD(object):
self.terminate_connection(volume, properties, force=force)
except Exception as err:
err_msg = (_('Unable to terminate volume connection: %(err)s')
% {'err': err})
% {'err': six.text_type(err)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
LOG.debug(("volume %s: removing export"), volume['id'])
LOG.debug("volume %s: removing export", volume['id'])
self.remove_export(context, volume)
except Exception as ex:
LOG.exception(_LE("Error detaching volume %(volume)s, "
@ -390,7 +390,7 @@ class BaseVD(object):
cgroup_name)
except processutils.ProcessExecutionError as err:
LOG.warning(_LW('Failed to activate volume copy throttling: '
'%(err)s'), {'err': six.text_type(err)})
'%(err)s'), {'err': err})
throttling.Throttle.set_default(self._throttle)
def get_version(self):
@ -482,8 +482,8 @@ class BaseVD(object):
def copy_volume_data(self, context, src_vol, dest_vol, remote=None):
"""Copy data from src_vol to dest_vol."""
LOG.debug(('copy_data_between_volumes %(src)s -> %(dest)s.')
% {'src': src_vol['name'], 'dest': dest_vol['name']})
LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.', {
'src': src_vol['name'], 'dest': dest_vol['name']})
use_multipath = self.configuration.use_multipath_for_image_xfer
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
@ -499,8 +499,8 @@ class BaseVD(object):
remote=dest_remote)
except Exception:
with excutils.save_and_reraise_exception():
msg = _("Failed to attach volume %(vol)s")
LOG.error(msg % {'vol': dest_vol['id']})
LOG.error(_LE("Failed to attach volume %(vol)s"),
{'vol': dest_vol['id']})
self.db.volume_update(context, dest_vol['id'],
{'status': dest_orig_status})
@ -513,8 +513,8 @@ class BaseVD(object):
remote=src_remote)
except Exception:
with excutils.save_and_reraise_exception():
msg = _("Failed to attach volume %(vol)s")
LOG.error(msg % {'vol': src_vol['id']})
LOG.error(_LE("Failed to attach volume %(vol)s"),
{'vol': src_vol['id']})
self.db.volume_update(context, src_vol['id'],
{'status': src_orig_status})
self._detach_volume(context, dest_attach_info, dest_vol,
@ -532,8 +532,8 @@ class BaseVD(object):
copy_error = False
except Exception:
with excutils.save_and_reraise_exception():
msg = _("Failed to copy volume %(src)s to %(dest)s.")
LOG.error(msg % {'src': src_vol['id'], 'dest': dest_vol['id']})
LOG.error(_LE("Failed to copy volume %(src)s to %(dest)s."),
{'src': src_vol['id'], 'dest': dest_vol['id']})
finally:
self._detach_volume(context, dest_attach_info, dest_vol,
properties, force=copy_error,
@ -544,7 +544,7 @@ class BaseVD(object):
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
LOG.debug(('copy_image_to_volume %s.') % volume['name'])
LOG.debug('copy_image_to_volume %s.', volume['name'])
use_multipath = self.configuration.use_multipath_for_image_xfer
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
@ -564,7 +564,7 @@ class BaseVD(object):
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
LOG.debug(('copy_volume_to_image %s.') % volume['name'])
LOG.debug('copy_volume_to_image %s.', volume['name'])
use_multipath = self.configuration.use_multipath_for_image_xfer
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
@ -663,7 +663,7 @@ class BaseVD(object):
# clean this up in the future.
model_update = None
try:
LOG.debug(("Volume %s: creating export"), volume['id'])
LOG.debug("Volume %s: creating export", volume['id'])
model_update = self.create_export(context, volume)
if model_update:
volume = self.db.volume_update(context, volume['id'],
@ -672,7 +672,7 @@ class BaseVD(object):
if model_update:
LOG.exception(_LE("Failed updating model of volume "
"%(volume_id)s with driver provided "
"model %(model)s") %
"model %(model)s"),
{'volume_id': volume['id'],
'model': model_update})
raise exception.ExportFailure(reason=ex)
@ -682,13 +682,15 @@ class BaseVD(object):
except Exception as err:
try:
err_msg = (_('Unable to fetch connection information from '
'backend: %(err)s') % {'err': err})
'backend: %(err)s') %
{'err': six.text_type(err)})
LOG.error(err_msg)
LOG.debug("Cleaning up failed connect initialization.")
self.remove_export(context, volume)
except Exception as ex:
ex_msg = (_('Error encountered during cleanup '
'of a failed attach: %(ex)s') % {'ex': ex})
'of a failed attach: %(ex)s') %
{'ex': six.text_type(ex)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=ex_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
@ -728,8 +730,7 @@ class BaseVD(object):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup['volume_id'])
LOG.debug(('Creating a new backup for volume %s.') %
volume['name'])
LOG.debug('Creating a new backup for volume %s.', volume['name'])
use_multipath = self.configuration.use_multipath_for_image_xfer
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
@ -755,7 +756,7 @@ class BaseVD(object):
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
LOG.debug(('Restoring backup %(backup)s to '
'volume %(volume)s.') %
'volume %(volume)s.'),
{'backup': backup['id'],
'volume': volume['name']})
@ -1391,8 +1392,8 @@ class ISCSIDriver(VolumeDriver):
def _do_iscsi_discovery(self, volume):
# TODO(justinsb): Deprecate discovery and use stored info
# NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
LOG.warn(_LW("ISCSI provider_location not "
"stored, using discovery"))
LOG.warning(_LW("ISCSI provider_location not "
"stored, using discovery"))
volume_name = volume['name']
@ -1405,9 +1406,9 @@ class ISCSIDriver(VolumeDriver):
volume['host'].split('@')[0],
run_as_root=True)
except processutils.ProcessExecutionError as ex:
LOG.error(_LE("ISCSI discovery attempt failed for:%s") %
LOG.error(_LE("ISCSI discovery attempt failed for:%s"),
volume['host'].split('@')[0])
LOG.debug("Error from iscsiadm -m discovery: %s" % ex.stderr)
LOG.debug("Error from iscsiadm -m discovery: %s", ex.stderr)
return None
for target in out.splitlines():
@ -1468,7 +1469,7 @@ class ISCSIDriver(VolumeDriver):
(volume['name']))
raise exception.InvalidVolume(reason=msg)
LOG.debug("ISCSI Discovery: Found %s" % (location))
LOG.debug("ISCSI Discovery: Found %s", location)
properties['target_discovered'] = True
results = location.split(" ")
@ -1524,8 +1525,8 @@ class ISCSIDriver(VolumeDriver):
'-p', iscsi_properties['target_portal'],
*iscsi_command, run_as_root=True,
check_exit_code=check_exit_code)
LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
(iscsi_command, out, err))
LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s",
{'command': iscsi_command, 'out': out, 'err': err})
return (out, err)
def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
@ -1534,8 +1535,8 @@ class ISCSIDriver(VolumeDriver):
*iscsi_command,
run_as_root=True,
check_exit_code=check_exit_code)
LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
(iscsi_command, out, err))
LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s",
{'command': iscsi_command, 'out': out, 'err': err})
return (out, err)
def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
@ -1597,9 +1598,8 @@ class ISCSIDriver(VolumeDriver):
# iSCSI drivers require the initiator information
required = 'initiator'
if required not in connector:
err_msg = (_LE('The volume driver requires %(data)s '
'in the connector.'), {'data': required})
LOG.error(*err_msg)
LOG.error(_LE('The volume driver requires %(data)s '
'in the connector.'), {'data': required})
raise exception.InvalidConnectorException(missing=required)
def terminate_connection(self, volume, connector, **kwargs):
@ -1847,11 +1847,10 @@ class FibreChannelDriver(VolumeDriver):
def validate_connector_has_setting(connector, setting):
"""Test for non-empty setting in connector."""
if setting not in connector or not connector[setting]:
msg = (_LE(
LOG.error(_LE(
"FibreChannelDriver validate_connector failed. "
"No '%(setting)s'. Make sure HBA state is Online."),
{'setting': setting})
LOG.error(*msg)
raise exception.InvalidConnectorException(missing=setting)
def get_volume_stats(self, refresh=False):


@ -61,7 +61,8 @@ class BlockDeviceDriver(driver.VolumeDriver):
def create_volume(self, volume):
device = self.find_appropriate_size_device(volume['size'])
LOG.info("Create %s on %s" % (volume['name'], device))
LOG.info(_LI("Create %(volume)s on %(device)s"),
{"volume": volume['name'], "device": device})
return {
'provider_location': device,
}
@ -103,7 +104,7 @@ class BlockDeviceDriver(driver.VolumeDriver):
self.local_path(volume))
def create_cloned_volume(self, volume, src_vref):
LOG.info(_LI('Creating clone of volume: %s') % src_vref['id'])
LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
device = self.find_appropriate_size_device(src_vref['size'])
volutils.copy_volume(
self.local_path(src_vref), device,


@ -20,9 +20,10 @@ from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import requests
import six
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import versionutils
from cinder import utils
from cinder.volume.drivers.san import san
@ -160,9 +161,9 @@ class DateraDriver(san.SanISCSIDriver):
try:
self._issue_api_request('volumes', 'delete', volume['id'])
except exception.NotFound:
msg = _("Tried to delete volume %s, but it was not found in the "
"Datera cluster. Continuing with delete.")
LOG.info(msg, volume['id'])
LOG.info(_LI("Tried to delete volume %s, but it was not found in "
"the Datera cluster. Continuing with delete."),
volume['id'])
def _do_export(self, context, volume):
"""Gets the associated account, retrieves CHAP info and updates."""
@ -203,18 +204,17 @@ class DateraDriver(san.SanISCSIDriver):
self._issue_api_request('volumes', 'delete', resource=volume['id'],
action='export')
except exception.NotFound:
msg = _("Tried to delete export for volume %s, but it was not "
"found in the Datera cluster. Continuing with volume "
"detach")
LOG.info(msg, volume['id'])
LOG.info(_LI("Tried to delete export for volume %s, but it was "
"not found in the Datera cluster. Continuing with "
"volume detach"), volume['id'])
def delete_snapshot(self, snapshot):
try:
self._issue_api_request('snapshots', 'delete', snapshot['id'])
except exception.NotFound:
msg = _("Tried to delete snapshot %s, but was not found in Datera "
"cluster. Continuing with delete.")
LOG.info(msg, snapshot['id'])
LOG.info(_LI("Tried to delete snapshot %s, but was not found in "
"Datera cluster. Continuing with delete."),
snapshot['id'])
def create_snapshot(self, snapshot):
body = {
@ -244,7 +244,8 @@ class DateraDriver(san.SanISCSIDriver):
try:
self._update_cluster_stats()
except exception.DateraAPIException:
LOG.error('Failed to get updated stats from Datera cluster.')
LOG.error(_LE('Failed to get updated stats from Datera '
'cluster.'))
pass
return self.cluster_stats
@ -360,7 +361,7 @@ class DateraDriver(san.SanISCSIDriver):
verify=False, cert=cert_data)
except requests.exceptions.RequestException as ex:
msg = _('Failed to make a request to Datera cluster endpoint due '
'to the following reason: %s') % ex.message
'to the following reason: %s') % six.text_type(ex.message)
LOG.error(msg)
raise exception.DateraAPIException(msg)


@ -166,8 +166,8 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
conn_info = self.cli.initialize_connection(volume,
connector)
LOG.debug("Exit initialize_connection"
" - Returning FC connection info: %(conn_info)s."
% {'conn_info': conn_info})
" - Returning FC connection info: %(conn_info)s.",
{'conn_info': conn_info})
return conn_info
@zm_utils.RemoveFCZone
@ -175,8 +175,8 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
"""Disallow connection from connector."""
conn_info = self.cli.terminate_connection(volume, connector)
LOG.debug("Exit terminate_connection"
" - Returning FC connection info: %(conn_info)s."
% {'conn_info': conn_info})
" - Returning FC connection info: %(conn_info)s.",
{'conn_info': conn_info})
return conn_info
def get_volume_stats(self, refresh=False):
@ -211,7 +211,7 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
'id':lun_id
}
"""
LOG.debug("Reference lun id %s." % existing_ref['id'])
LOG.debug("Reference lun id %s.", existing_ref['id'])
self.cli.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):


@ -190,7 +190,7 @@ class EMCCLIISCSIDriver(driver.ISCSIDriver):
'id':lun_id
}
"""
LOG.debug("Reference lun id %s." % existing_ref['id'])
LOG.debug("Reference lun id %s.", existing_ref['id'])
self.cli.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):


@ -334,7 +334,7 @@ class EMCVMAXCommon(object):
device_number = device_info['hostlunid']
if device_number is None:
LOG.info(_LI("Volume %s is not mapped. No volume to unmap."),
(volumename))
volumename)
return
vol_instance = self._find_lun(volume)
@ -444,7 +444,7 @@ class EMCVMAXCommon(object):
(self.masking
._check_if_rollback_action_for_masking_required(
self.conn, rollbackDict))
exception_message = ("Error Attaching volume %(vol)s."
exception_message = (_("Error Attaching volume %(vol)s.")
% {'vol': volumeName})
raise exception.VolumeBackendAPIException(
data=exception_message)
@ -673,12 +673,12 @@ class EMCVMAXCommon(object):
:returns: boolean -- Always returns True
:returns: dict -- Empty dict {}
"""
LOG.warn(_LW("The VMAX plugin only supports Retype. "
"If a pool based migration is necessary "
"this will happen on a Retype "
"From the command line: "
"cinder --os-volume-api-version 2 retype "
"<volumeId> <volumeType> --migration-policy on-demand"))
LOG.warning(_LW("The VMAX plugin only supports Retype. "
"If a pool based migration is necessary "
"this will happen on a Retype "
"From the command line: "
"cinder --os-volume-api-version 2 retype <volumeId> "
"<volumeType> --migration-policy on-demand"))
return True, {}
def _migrate_volume(
@ -710,7 +710,7 @@ class EMCVMAXCommon(object):
if moved is False and sourceFastPolicyName is not None:
# Return the volume to the default source fast policy storage
# group because the migrate was unsuccessful.
LOG.warn(_LW(
LOG.warning(_LW(
"Failed to migrate: %(volumeName)s from "
"default source storage group "
"for FAST policy: %(sourceFastPolicyName)s. "
@ -738,7 +738,7 @@ class EMCVMAXCommon(object):
if not self._migrate_volume_fast_target(
volumeInstance, storageSystemName,
targetFastPolicyName, volumeName, extraSpecs):
LOG.warn(_LW(
LOG.warning(_LW(
"Attempting a rollback of: %(volumeName)s to "
"original pool %(sourcePoolInstanceName)s."),
{'volumeName': volumeName,
@ -770,8 +770,8 @@ class EMCVMAXCommon(object):
:param extraSpecs: extra specifications
"""
LOG.warn(_LW("_migrate_rollback on : %(volumeName)s."),
{'volumeName': volumeName})
LOG.warning(_LW("_migrate_rollback on : %(volumeName)s."),
{'volumeName': volumeName})
storageRelocationService = self.utils.find_storage_relocation_service(
conn, storageSystemName)
@ -805,8 +805,8 @@ class EMCVMAXCommon(object):
:param extraSpecs: extra specifications
"""
LOG.warn(_LW("_migrate_cleanup on : %(volumeName)s."),
{'volumeName': volumeName})
LOG.warning(_LW("_migrate_cleanup on : %(volumeName)s."),
{'volumeName': volumeName})
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
@ -934,11 +934,10 @@ class EMCVMAXCommon(object):
rc = self.provision.migrate_volume_to_storage_pool(
self.conn, storageRelocationService, volumeInstance.path,
targetPoolInstanceName, extraSpecs)
except Exception as e:
except Exception:
# Rollback by deleting the volume if adding the volume to the
# default storage group were to fail.
LOG.error(_LE("Exception: %s"), e)
LOG.error(_LE(
LOG.exception(_LE(
"Error migrating volume: %(volumename)s. "
"to target pool %(targetPoolName)s."),
{'volumename': volumeName,
@ -993,8 +992,7 @@ class EMCVMAXCommon(object):
conn, controllerConfigurationService,
volumeInstance.path, volumeName, sourceFastPolicyName,
extraSpecs))
except Exception as ex:
LOG.error(_LE("Exception: %s"), ex)
except Exception:
exceptionMessage = (_(
"Failed to remove: %(volumename)s. "
"from the default storage group for "
@ -1002,11 +1000,11 @@ class EMCVMAXCommon(object):
% {'volumename': volumeName,
'fastPolicyName': sourceFastPolicyName})
LOG.error(exceptionMessage)
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
if defaultStorageGroupInstanceName is None:
LOG.warn(_LW(
LOG.warning(_LW(
"The volume: %(volumename)s "
"was not first part of the default storage "
"group for FAST policy %(fastPolicyName)s."),
@ -1098,7 +1096,7 @@ class EMCVMAXCommon(object):
self.utils.get_storage_group_from_volume(
self.conn, volumeInstanceName))
if foundStorageGroupInstanceName is None:
LOG.warn(_LW(
LOG.warning(_LW(
"Volume: %(volumeName)s is not currently "
"belonging to any storage group."),
{'volumeName': volumeName})
@ -1484,13 +1482,12 @@ class EMCVMAXCommon(object):
_rc, targetEndpoints = (
self.provision.get_target_endpoints(
self.conn, storageHardwareService, hardwareIdInstance))
except Exception as ex:
LOG.error(_LE("Exception: %s"), ex)
except Exception:
errorMessage = (_(
"Unable to get target endpoints for hardwareId "
"%(hardwareIdInstance)s.")
% {'hardwareIdInstance': hardwareIdInstance})
LOG.error(errorMessage)
LOG.exception(errorMessage)
raise exception.VolumeBackendAPIException(data=errorMessage)
if targetEndpoints:
@ -1795,14 +1792,13 @@ class EMCVMAXCommon(object):
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
except Exception as e:
except Exception:
# Rollback by deleting the volume if adding the volume to the
# default storage group were to fail.
LOG.error(_LE("Exception: %s"), e)
errorMessage = (_(
"Rolling back %(volumeName)s by deleting it.")
% {'volumeName': volumeName})
LOG.error(errorMessage)
LOG.exception(errorMessage)
self.provision.delete_volume_from_pool(
self.conn, storageConfigService, volumeInstance.path,
volumeName, extraSpecs)
@ -2126,7 +2122,7 @@ class EMCVMAXCommon(object):
self.masking.get_associated_masking_groups_from_device(
self.conn, volumeInstanceName))
if storageGroupInstanceNames:
LOG.warn(_LW(
LOG.warning(_LW(
"Pre check for deletion. "
"Volume: %(volumeName)s is part of a storage group. "
"Attempting removal from %(storageGroupInstanceNames)s."),
@ -2289,10 +2285,9 @@ class EMCVMAXCommon(object):
repservice = self.utils.find_replication_service(self.conn,
storageSystem)
if repservice is None:
exception_message = (_LE(
exception_message = _(
"Cannot find Replication Service to"
" delete snapshot %s.") %
snapshotname)
" delete snapshot %s.") % snapshotname
raise exception.VolumeBackendAPIException(
data=exception_message)
# Break the replication relationship
@ -2339,12 +2334,11 @@ class EMCVMAXCommon(object):
self.conn, storageSystem)
self.provision.create_consistency_group(
self.conn, replicationService, cgName, extraSpecs)
except Exception as ex:
LOG.error(_LE("Exception: %(ex)s"), {'ex': ex})
except Exception:
exceptionMessage = (_("Failed to create consistency group:"
" %(cgName)s.")
% {'cgName': cgName})
LOG.error(exceptionMessage)
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return modelUpdate
@ -2402,12 +2396,11 @@ class EMCVMAXCommon(object):
storageSystem, memberInstanceNames, storageConfigservice,
volumes, modelUpdate, extraSpecs[ISV3], extraSpecs)
except Exception as ex:
LOG.error(_LE("Exception: %s"), ex)
except Exception:
exceptionMessage = (_(
"Failed to delete consistency group: %(cgName)s.")
% {'cgName': cgName})
LOG.error(exceptionMessage)
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return modelUpdate, volumes
@ -2574,15 +2567,14 @@ class EMCVMAXCommon(object):
rgSyncInstanceName,
extraSpecs)
except Exception as ex:
except Exception:
modelUpdate['status'] = 'error'
self.utils.populate_cgsnapshot_status(
context, db, cgsnapshot['id'], modelUpdate['status'])
LOG.error(_LE("Exception: %(ex)s"), {'ex': ex})
exceptionMessage = (_("Failed to create snapshot for cg:"
" %(cgName)s.")
% {'cgName': cgName})
LOG.error(exceptionMessage)
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
snapshots = self.utils.populate_cgsnapshot_status(
@ -2623,15 +2615,14 @@ class EMCVMAXCommon(object):
modelUpdate, snapshots = self._delete_cg_and_members(
storageSystem, targetCgName, modelUpdate,
snapshots, extraSpecs)
except Exception as ex:
except Exception:
modelUpdate['status'] = 'error_deleting'
self.utils.populate_cgsnapshot_status(
context, db, cgsnapshot['id'], modelUpdate['status'])
LOG.error(_LE("Exception: %(ex)s"), {'ex': ex})
exceptionMessage = (_("Failed to delete snapshot for cg: "
"%(cgId)s.")
% {'cgId': cgsnapshot['consistencygroup_id']})
LOG.error(exceptionMessage)
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
snapshots = self.utils.populate_cgsnapshot_status(
@ -2819,7 +2810,7 @@ class EMCVMAXCommon(object):
extraSpecs))
if not self.utils.is_in_range(
volumeSize, maximumVolumeSize, minimumVolumeSize):
LOG.warn(_LW(
LOG.warning(_LW(
"Volume: %(volume)s with size: %(volumeSize)s bits "
"is not in the Performance Capacity range: "
"%(minimumVolumeSize)s-%(maximumVolumeSize)s bits. "
@ -3012,7 +3003,7 @@ class EMCVMAXCommon(object):
self.utils.get_storage_group_from_volume(
self.conn, volumeInstance.path))
if foundStorageGroupInstanceName is None:
LOG.warn(_LW(
LOG.warning(_LW(
"Volume : %(volumeName)s is not currently "
"belonging to any storage group."),
{'volumeName': volumeName})
@ -3314,7 +3305,7 @@ class EMCVMAXCommon(object):
volumeInstance.path, volumeName, fastPolicyName,
extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.warn(_LW(
LOG.warning(_LW(
"The volume: %(volumename)s. was not first part of the "
"default storage group for FAST policy %(fastPolicyName)s"
"."),
@ -3343,7 +3334,7 @@ class EMCVMAXCommon(object):
self.conn, storageConfigService, volumeInstance.path,
volumeName, extraSpecs)
except Exception as e:
except Exception:
# If we cannot successfully delete the volume then we want to
# return the volume to the default storage group.
if (fastPolicyName is not None and
@ -3365,10 +3356,9 @@ class EMCVMAXCommon(object):
{'volumeName': volumeName,
'fastPolicyName': fastPolicyName})
LOG.error(_LE("Exception: %s."), e)
errorMessage = (_("Failed to delete volume %(volumeName)s.") %
{'volumeName': volumeName})
LOG.error(errorMessage)
LOG.exception(errorMessage)
raise exception.VolumeBackendAPIException(data=errorMessage)
return rc
@ -3410,7 +3400,7 @@ class EMCVMAXCommon(object):
self.conn, storageConfigService, volumeInstance.path,
volumeName, extraSpecs)
except Exception as e:
except Exception:
# If we cannot successfully delete the volume, then we want to
# return the volume to the default storage group,
# which should be the SG it previously belonged to.
@ -3432,10 +3422,9 @@ class EMCVMAXCommon(object):
storageGroupInstanceName, volumeInstance, volumeName,
storageGroupName, extraSpecs)
LOG.error(_LE("Exception: %s."), e)
errorMessage = (_("Failed to delete volume %(volumeName)s.") %
{'volumeName': volumeName})
LOG.error(errorMessage)
LOG.exception(errorMessage)
raise exception.VolumeBackendAPIException(data=errorMessage)
return rc

View File

@ -135,7 +135,7 @@ class EMCVMAXFast(object):
foundDefaultStorageGroupInstanceName = (
assocStorageGroupInstanceName)
else:
LOG.warn(_LW(
LOG.warning(_LW(
"Volume: %(volumeName)s Does not belong "
"to storage storage group %(defaultSgGroupName)s."),
{'volumeName': volumeName,
@ -406,7 +406,7 @@ class EMCVMAXFast(object):
if len(storageTierInstanceNames) == 0:
storageTierInstanceNames = None
LOG.warn(_LW(
LOG.warning(_LW(
"Unable to get storage tiers from tier policy rule."))
return storageTierInstanceNames
@ -519,9 +519,8 @@ class EMCVMAXFast(object):
conn, tierPolicyServiceInstanceName,
storageGroupInstanceName, tierPolicyRuleInstanceName,
storageGroupName, fastPolicyName, extraSpecs)
except Exception as ex:
LOG.error(_LE("Exception: %s"), ex)
LOG.error(_LE(
except Exception:
LOG.exception(_LE(
"Failed to add storage group %(storageGroupInstanceName)s "
"to tier policy rule %(tierPolicyRuleInstanceName)s."),
{'storageGroupInstanceName': storageGroupInstanceName,


@ -224,8 +224,8 @@ class EMCVMAXFCDriver(driver.FibreChannelDriver):
LOG.debug("Return FC data for zone removal: %(data)s.",
{'data': data})
else:
LOG.warn(_LW("Volume %(volume)s is not in any masking view."),
{'volume': volume['name']})
LOG.warning(_LW("Volume %(volume)s is not in any masking view."),
{'volume': volume['name']})
return data
def _build_initiator_target_map(self, storage_system, volume, connector):


@ -151,7 +151,7 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
iscsi_properties = self.smis_get_iscsi_properties(
volume, connector)
LOG.info(_LI("Leaving initialize_connection: %s"), (iscsi_properties))
LOG.info(_LI("Leaving initialize_connection: %s"), iscsi_properties)
return {
'driver_volume_type': 'iscsi',
'data': iscsi_properties
@ -201,7 +201,7 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
" for volume %(volumeName)s.")
% {'volumeName': volume['name']})
LOG.debug("ISCSI Discovery: Found %s", (location))
LOG.debug("ISCSI Discovery: Found %s", location)
properties['target_discovered'] = True
device_info = self.common.find_device_number(volume)
@ -243,7 +243,7 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
properties['auth_username'] = auth_username
properties['auth_password'] = auth_secret
LOG.info(_LI("AUTH properties: %s."), (properties))
LOG.info(_LI("AUTH properties: %s."), properties)
return properties


@ -103,7 +103,7 @@ class EMCVMAXMasking(object):
maskingViewDict['workload'])
if assocStorageGroupName != defaultSgGroupName:
LOG.warn(_LW(
LOG.warning(_LW(
"Volume: %(volumeName)s Does not belong "
"to storage storage group %(defaultSgGroupName)s."),
{'volumeName': volumeName,
@ -472,7 +472,7 @@ class EMCVMAXMasking(object):
if self._is_volume_in_storage_group(
conn, storageGroupInstanceName,
volumeInstance):
LOG.warn(_LW(
LOG.warning(_LW(
"Volume: %(volumeName)s is already part "
"of storage group %(sgGroupName)s."),
{'volumeName': volumeName,
@ -1049,8 +1049,8 @@ class EMCVMAXMasking(object):
{'view': maskingViewName,
'masking': foundStorageGroupInstanceName})
else:
LOG.warn(_LW("Unable to find Masking view: %(view)s."),
{'view': maskingViewName})
LOG.warning(_LW("Unable to find Masking view: %(view)s."),
{'view': maskingViewName})
return foundStorageGroupInstanceName
@ -1212,7 +1212,7 @@ class EMCVMAXMasking(object):
# Volume is not associated with any storage group so add
# it back to the default.
if len(foundStorageGroupInstanceName) == 0:
LOG.warn(_LW(
LOG.warning(_LW(
"No storage group found. "
"Performing rollback on Volume: %(volumeName)s "
"To return it to the default storage group for FAST "
@ -1257,8 +1257,7 @@ class EMCVMAXMasking(object):
rollbackDict['fastPolicyName'],
rollbackDict['volumeName'], rollbackDict['extraSpecs'],
False)
except Exception as e:
LOG.error(_LE("Exception: %s."), e)
except Exception:
errorMessage = (_(
"Rollback for Volume: %(volumeName)s has failed. "
"Please contact your system administrator to manually return "
@ -1266,7 +1265,7 @@ class EMCVMAXMasking(object):
"%(fastPolicyName)s failed.")
% {'volumeName': rollbackDict['volumeName'],
'fastPolicyName': rollbackDict['fastPolicyName']})
LOG.error(errorMessage)
LOG.exception(errorMessage)
raise exception.VolumeBackendAPIException(data=errorMessage)
def _find_new_initiator_group(self, conn, maskingGroupDict):
@ -1307,8 +1306,8 @@ class EMCVMAXMasking(object):
{'view': maskingViewName,
'masking': foundInitiatorMaskingGroupInstanceName})
else:
LOG.warn(_LW("Unable to find Masking view: %(view)s."),
{'view': maskingViewName})
LOG.warning(_LW("Unable to find Masking view: %(view)s."),
{'view': maskingViewName})
return foundInitiatorMaskingGroupInstanceName
@ -1582,7 +1581,7 @@ class EMCVMAXMasking(object):
volumeName, fastPolicyName))
if defaultStorageGroupInstanceName is None:
LOG.warn(_LW(
LOG.warning(_LW(
"Volume %(volumeName)s was not first part of the default "
"storage group for the FAST Policy."),
{'volumeName': volumeName})
@ -1733,15 +1732,15 @@ class EMCVMAXMasking(object):
if numVolInMaskingView == 1:
# Last volume in the storage group.
LOG.warn(_LW("Only one volume remains in storage group "
"%(sgname)s. Driver will attempt cleanup."),
{'sgname': storageGroupName})
LOG.warning(_LW("Only one volume remains in storage group "
"%(sgname)s. Driver will attempt cleanup."),
{'sgname': storageGroupName})
mvInstanceName = self.get_masking_view_from_storage_group(
conn, storageGroupInstanceName)
if mvInstanceName is None:
LOG.warn(_LW("Unable to get masking view %(maskingView)s "
"from storage group."),
{'maskingView': mvInstanceName})
LOG.warning(_LW("Unable to get masking view %(maskingView)s "
"from storage group."),
{'maskingView': mvInstanceName})
else:
maskingViewInstance = conn.GetInstance(
mvInstanceName, LocalOnly=False)
@ -2053,10 +2052,10 @@ class EMCVMAXMasking(object):
ResultClass='Symm_FCSCSIProtocolEndpoint')
numberOfPorts = len(targetPortInstanceNames)
if numberOfPorts <= 0:
LOG.warn(_LW("No target ports found in "
"masking view %(maskingView)s."),
{'numPorts': len(targetPortInstanceNames),
'maskingView': mvInstanceName})
LOG.warning(_LW("No target ports found in "
"masking view %(maskingView)s."),
{'numPorts': len(targetPortInstanceNames),
'maskingView': mvInstanceName})
for targetPortInstanceName in targetPortInstanceNames:
targetWwns.append(targetPortInstanceName['Name'])
return targetWwns
@ -2107,8 +2106,8 @@ class EMCVMAXMasking(object):
'mv': maskingViewInstanceName})
return portGroupInstanceNames[0]
else:
LOG.warn(_LW("No port group found in masking view %(mv)s."),
{'mv': maskingViewInstanceName})
LOG.warning(_LW("No port group found in masking view %(mv)s."),
{'mv': maskingViewInstanceName})
def get_initiator_group_from_masking_view(
self, conn, maskingViewInstanceName):
@ -2126,8 +2125,8 @@ class EMCVMAXMasking(object):
'mv': maskingViewInstanceName})
return initiatorGroupInstanceNames[0]
else:
LOG.warn(_LW("No port group found in masking view %(mv)s."),
{'mv': maskingViewInstanceName})
LOG.warning(_LW("No port group found in masking view %(mv)s."),
{'mv': maskingViewInstanceName})
def _get_sg_or_mv_associated_with_initiator(
self, conn, controllerConfigService, volumeInstanceName,


@ -18,7 +18,7 @@ from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _, _LE
from cinder.i18n import _
from cinder.volume.drivers.emc import emc_vmax_utils
@ -613,11 +613,10 @@ class EMCVMAXProvision(object):
try:
rc = self._terminate_migrate_session(
conn, volumeInstanceName, extraSpecs)
except Exception as ex:
LOG.error(_LE('Exception: %s.'), ex)
except Exception:
exceptionMessage = (_(
"Failed to terminate migrate session."))
LOG.error(exceptionMessage)
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
try:
@ -625,19 +624,17 @@ class EMCVMAXProvision(object):
conn, storageRelocationServiceInstanceName,
volumeInstanceName, targetPoolInstanceName,
extraSpecs)
except Exception as ex:
LOG.error(_LE('Exception: %s'), ex)
except Exception:
exceptionMessage = (_(
"Failed to migrate volume for the second time."))
LOG.error(exceptionMessage)
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
else:
LOG.error(_LE('Exception: %s'), ex)
exceptionMessage = (_(
"Failed to migrate volume for the first time."))
LOG.error(exceptionMessage)
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)


@ -141,10 +141,11 @@ class EMCVMAXUtils(object):
for elementCompositionService in elementCompositionServices:
if storageSystemName == elementCompositionService['SystemName']:
foundElementCompositionService = elementCompositionService
LOG.debug("Found Element Composition Service:"
"%(elementCompositionService)s."
% {'elementCompositionService':
elementCompositionService})
LOG.debug(
"Found Element Composition Service: "
"%(elementCompositionService)s.", {
'elementCompositionService':
elementCompositionService})
break
if foundElementCompositionService is None:
exceptionMessage = (_("Element Composition Service not found "
@ -324,10 +325,9 @@ class EMCVMAXUtils(object):
if not wait_for_job_called:
if self._is_job_finished(conn, job):
kwargs['wait_for_job_called'] = True
except Exception as e:
LOG.error(_LE("Exception: %s.") % six.text_type(e))
except Exception:
exceptionMessage = (_("Issue encountered waiting for job."))
LOG.error(exceptionMessage)
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(exceptionMessage)
kwargs = {'retries': 0,
@ -415,11 +415,10 @@ class EMCVMAXUtils(object):
if not wait_for_sync_called:
if self._is_sync_complete(conn, syncName):
kwargs['wait_for_sync_called'] = True
except Exception as e:
LOG.error(_LE("Exception: %s") % six.text_type(e))
except Exception:
exceptionMessage = (_("Issue encountered waiting for "
"synchronization."))
LOG.error(exceptionMessage)
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(exceptionMessage)
kwargs = {'retries': 0,


@ -286,7 +286,7 @@ class CommandLineHelper(object):
self.primary_storage_ip = self.active_storage_ip
self.secondary_storage_ip = configuration.san_secondary_ip
if self.secondary_storage_ip == self.primary_storage_ip:
LOG.warning(_LE("san_secondary_ip is configured as "
LOG.warning(_LW("san_secondary_ip is configured as "
"the same value as san_ip."))
self.secondary_storage_ip = None
if not configuration.san_ip:
@ -394,7 +394,7 @@ class CommandLineHelper(object):
with excutils.save_and_reraise_exception():
self.delete_lun(name)
LOG.error(_LE("Error on enable compression on lun %s."),
six.text_type(ex))
ex)
# handle consistency group
try:
@ -405,7 +405,7 @@ class CommandLineHelper(object):
with excutils.save_and_reraise_exception():
self.delete_lun(name)
LOG.error(_LE("Error on adding lun to consistency"
" group. %s"), six.text_type(ex))
" group. %s"), ex)
return data
def create_lun_by_cmd(self, cmd, name):
@ -514,7 +514,7 @@ class CommandLineHelper(object):
'_wait_for_a_condition: %(method_name)s '
'execution failed for %(exception)s',
{'method_name': testmethod.__name__,
'exception': six.text_type(ex)})
'exception': ex})
if test_value:
raise loopingcall.LoopingCallDone()
@ -2260,9 +2260,7 @@ class EMCVnxCliBase(object):
self._client.delete_consistencygroup(cg_name)
except Exception:
with excutils.save_and_reraise_exception():
msg = (_('Delete consistency group %s failed.')
% cg_name)
LOG.error(msg)
LOG.error(_LE('Delete consistency group %s failed.'), cg_name)
for volume_ref in volumes:
try:
@ -3114,8 +3112,8 @@ class MigrateLunTask(task.Task):
dest_vol_lun_id,
None)
if not migrated:
msg = (_LE("Migrate volume failed between source vol %(src)s"
" and dest vol %(dst)s."),
msg = (_("Migrate volume failed between source vol %(src)s"
" and dest vol %(dst)s.") %
{'src': new_vol_name, 'dst': dest_vol_name})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)


@ -208,7 +208,7 @@ class DellEQLSanISCSIDriver(san.SanISCSIDriver):
if any(ln.startswith(('% Error', 'Error:')) for ln in out):
desc = _("Error executing EQL command")
cmdout = '\n'.join(out)
LOG.error(cmdout)
LOG.error(_LE("%s"), cmdout)
raise processutils.ProcessExecutionError(
stdout=cmdout, cmd=command, description=desc)
return out
@ -412,8 +412,8 @@ class DellEQLSanISCSIDriver(san.SanISCSIDriver):
self._eql_execute('volume', 'select', volume['name'], 'offline')
self._eql_execute('volume', 'delete', volume['name'])
except exception.VolumeNotFound:
LOG.warn(_LW('Volume %s was not found while trying to delete it.'),
volume['name'])
LOG.warning(_LW('Volume %s was not found while trying to delete '
'it.'), volume['name'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to delete '
@ -527,8 +527,8 @@ class DellEQLSanISCSIDriver(san.SanISCSIDriver):
try:
self._check_volume(volume)
except exception.VolumeNotFound:
LOG.warn(_LW('Volume %s is not found!, it may have been deleted.'),
volume['name'])
LOG.warning(_LW('Volume %s is not found!, it may have been '
'deleted.'), volume['name'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to ensure export of volume "%s".'),


@ -92,12 +92,12 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
if not config:
msg = (_("There's no Gluster config file configured (%s)") %
'glusterfs_shares_config')
LOG.warn(msg)
LOG.warning(msg)
raise exception.GlusterfsException(msg)
if not os.path.exists(config):
msg = (_("Gluster config file at %(config)s doesn't exist") %
{'config': config})
LOG.warn(msg)
LOG.warning(msg)
raise exception.GlusterfsException(msg)
self.shares = {}
@ -119,7 +119,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
try:
self._do_umount(True, share)
except Exception as exc:
LOG.warning(_LE('Exception during unmounting %s') % (exc))
LOG.warning(_LW('Exception during unmounting %s'), exc)
def _do_umount(self, ignore_not_mounted, share):
mount_path = self._get_mount_point_for_share(share)
@ -139,8 +139,8 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
self._unmount_shares()
except processutils.ProcessExecutionError as exc:
if 'target is busy' in exc.stderr:
LOG.warn(_LW("Failed to refresh mounts, reason=%s") %
exc.stderr)
LOG.warning(_LW("Failed to refresh mounts, reason=%s"),
exc.stderr)
else:
raise
@ -168,7 +168,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
volume['provider_location'] = self._find_share(volume['size'])
LOG.info(_LI('casted to %s') % volume['provider_location'])
LOG.info(_LI('casted to %s'), volume['provider_location'])
self._do_create_volume(volume)
@ -182,10 +182,10 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
"""
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
"volume_size: %(size)s"
% {'snap': snapshot['id'],
'vol': volume['id'],
'size': volume_size})
"volume_size: %(size)s",
{'snap': snapshot['id'],
'vol': volume['id'],
'size': volume_size})
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path)
@ -201,7 +201,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
path_to_new_vol = self._local_path_volume(volume)
LOG.debug("will copy from snapshot at %s" % path_to_snap_img)
LOG.debug("will copy from snapshot at %s", path_to_snap_img)
if self.configuration.glusterfs_qcow2_volumes:
out_format = 'qcow2'
@ -219,9 +219,9 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
"""Deletes a logical volume."""
if not volume['provider_location']:
LOG.warn(_LW('Volume %s does not have '
'provider_location specified, '
'skipping'), volume['name'])
LOG.warning(_LW('Volume %s does not have '
'provider_location specified, '
'skipping'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
@ -324,7 +324,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
volume_path = self.local_path(volume)
volume_size = volume['size']
LOG.debug("creating new volume at %s" % volume_path)
LOG.debug("creating new volume at %s", volume_path)
if os.path.exists(volume_path):
msg = _('file already exists at %s') % volume_path
@ -353,9 +353,9 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
self._ensure_share_mounted(share)
self._mounted_shares.append(share)
except Exception as exc:
LOG.error(_LE('Exception during mounting %s') % (exc,))
LOG.error(_LE('Exception during mounting %s'), exc)
LOG.debug('Available shares: %s' % self._mounted_shares)
LOG.debug('Available shares: %s', self._mounted_shares)
def _ensure_share_mounted(self, glusterfs_share):
"""Mount GlusterFS share.
@ -440,11 +440,10 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
info = self._qemu_img_info(active_file_path, volume['name'])
if info.backing_file is not None:
msg = _('No snapshots found in database, but '
'%(path)s has backing file '
'%(backing_file)s!') % {'path': active_file_path,
'backing_file': info.backing_file}
LOG.error(msg)
LOG.error(_LE('No snapshots found in database, but %(path)s has '
'backing file %(backing_file)s!'),
{'path': active_file_path,
'backing_file': info.backing_file})
raise exception.InvalidVolume(snap_error_msg)
if info.file_format != 'raw':


@ -26,7 +26,7 @@ from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.i18n import _LE, _LI
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.hds import hus_backend
@ -70,9 +70,8 @@ def _loc_info(loc):
def _do_lu_range_check(start, end, maxlun):
"""Validate array allocation range."""
LOG.debug("Range: start LU: %(start)s, end LU: %(end)s"
% {'start': start,
'end': end})
LOG.debug("Range: start LU: %(start)s, end LU: %(end)s",
{'start': start, 'end': end})
if int(start) < 0:
msg = 'start LU limit too low: ' + start
raise exception.InvalidInput(reason=msg)
@ -84,7 +83,7 @@ def _do_lu_range_check(start, end, maxlun):
raise exception.InvalidInput(reason=msg)
if int(end) > int(maxlun):
end = maxlun
LOG.debug("setting LU upper (end) limit to %s" % maxlun)
LOG.debug("setting LU upper (end) limit to %s", maxlun)
return (start, end)
@ -92,9 +91,8 @@ def _xml_read(root, element, check=None):
"""Read an xml element."""
try:
val = root.findtext(element)
LOG.info(_LI("%(element)s: %(val)s")
% {'element': element,
'val': val})
LOG.info(_LI("%(element)s: %(val)s"),
{'element': element, 'val': val})
if val:
return val.strip()
if check:
@ -103,9 +101,9 @@ def _xml_read(root, element, check=None):
except ETree.ParseError:
if check:
with excutils.save_and_reraise_exception():
LOG.error(_LE("XML exception reading parameter: %s") % element)
LOG.error(_LE("XML exception reading parameter: %s"), element)
else:
LOG.info(_LI("XML exception reading parameter: %s") % element)
LOG.info(_LI("XML exception reading parameter: %s"), element)
return None
@ -178,12 +176,9 @@ class HUSDriver(driver.ISCSIDriver):
conf[ip]['ctl'] = ctl
conf[ip]['port'] = port
conf[ip]['iscsi_port'] = ipp # HUS default: 3260
msg = _('portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s')
LOG.debug(msg
% {'ip': ip,
'ipp': ipp,
'ctl': ctl,
'port': port})
LOG.debug('portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: '
'%(port)s', {'ip': ip, 'ipp': ipp,
'ctl': ctl, 'port': port})
return conf
def _get_service(self, volume):
@ -197,7 +192,7 @@ class HUSDriver(driver.ISCSIDriver):
service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'],
svc['port'], svc['hdp']) # ip, ipp, ctl, port, hdp
else:
LOG.error(_LE("No configuration found for service: %s") % label)
LOG.error(_LE("No configuration found for service: %s"), label)
raise exception.ParameterNotFound(param=label)
return service
@ -250,7 +245,7 @@ class HUSDriver(driver.ISCSIDriver):
lst.extend([self.config['snapshot_hdp'], ])
for hdp in lst:
if hdp not in hdpl:
LOG.error(_LE("HDP not found: %s") % hdp)
LOG.error(_LE("HDP not found: %s"), hdp)
err = "HDP not found: " + hdp
raise exception.ParameterNotFound(param=err)
@ -290,7 +285,7 @@ class HUSDriver(driver.ISCSIDriver):
iscsi_info[svc_ip]['iscsi_port'])
else: # config iscsi address not found on device!
LOG.error(_LE("iSCSI portal not found "
"for service: %s") % svc_ip)
"for service: %s"), svc_ip)
raise exception.ParameterNotFound(param=svc_ip)
return
@ -328,9 +323,8 @@ class HUSDriver(driver.ISCSIDriver):
'%s' % (int(volume['size']) * 1024))
lun = self.arid + '.' + out.split()[1]
sz = int(out.split()[5])
LOG.debug("LUN %(lun)s of size %(sz)s MB is created."
% {'lun': lun,
'sz': sz})
LOG.debug("LUN %(lun)s of size %(sz)s MB is created.",
{'lun': lun, 'sz': sz})
return {'provider_location': lun}
@utils.synchronized('hds_hus', external=True)
@ -356,9 +350,8 @@ class HUSDriver(driver.ISCSIDriver):
'%s' % (size))
lun = self.arid + '.' + out.split()[1]
size = int(out.split()[5])
LOG.debug("LUN %(lun)s of size %(size)s MB is cloned."
% {'lun': lun,
'size': size})
LOG.debug("LUN %(lun)s of size %(size)s MB is cloned.",
{'lun': lun, 'size': size})
return {'provider_location': lun}
@utils.synchronized('hds_hus', external=True)
@ -373,9 +366,8 @@ class HUSDriver(driver.ISCSIDriver):
self.config['password'],
arid, lun,
'%s' % (new_size * 1024))
LOG.debug("LUN %(lun)s extended to %(size)s GB."
% {'lun': lun,
'size': new_size})
LOG.debug("LUN %(lun)s extended to %(size)s GB.",
{'lun': lun, 'size': new_size})
@utils.synchronized('hds_hus', external=True)
def delete_volume(self, volume):
@ -396,9 +388,8 @@ class HUSDriver(driver.ISCSIDriver):
arid, lun, ctl, port, iqn,
'')
name = self.hus_name
LOG.debug("delete lun %(lun)s on %(name)s"
% {'lun': lun,
'name': name})
LOG.debug("delete lun %(lun)s on %(name)s",
{'lun': lun, 'name': name})
self.bend.delete_lu(self.config['hus_cmd'],
HDS_VERSION,
self.config['mgmt_ip0'],
@ -481,9 +472,8 @@ class HUSDriver(driver.ISCSIDriver):
'%s' % (size))
lun = self.arid + '.' + out.split()[1]
sz = int(out.split()[5])
LOG.debug("LUN %(lun)s of size %(sz)s MB is created from snapshot."
% {'lun': lun,
'sz': sz})
LOG.debug("LUN %(lun)s of size %(sz)s MB is created from snapshot.",
{'lun': lun, 'sz': sz})
return {'provider_location': lun}
@utils.synchronized('hds_hus', external=True)
@ -504,9 +494,8 @@ class HUSDriver(driver.ISCSIDriver):
'%s' % (size))
lun = self.arid + '.' + out.split()[1]
size = int(out.split()[5])
LOG.debug("LUN %(lun)s of size %(size)s MB is created as snapshot."
% {'lun': lun,
'size': size})
LOG.debug("LUN %(lun)s of size %(size)s MB is created as snapshot.",
{'lun': lun, 'size': size})
return {'provider_location': lun}
@utils.synchronized('hds_hus', external=True)
@ -523,7 +512,7 @@ class HUSDriver(driver.ISCSIDriver):
self.config['username'],
self.config['password'],
arid, lun)
LOG.debug("LUN %s is deleted." % lun)
LOG.debug("LUN %s is deleted.", lun)
return
@utils.synchronized('hds_hus', external=True)


@ -38,7 +38,8 @@ class HusBackend(object):
'--version', '1',
run_as_root=True,
check_exit_code=True)
LOG.debug('get_version: ' + out + ' -- ' + err)
LOG.debug('get_version: %(out)s -- %(err)s',
{'out': out, 'err': err})
return out
def get_iscsi_info(self, cmd, ver, ip0, ip1, user, pw):
@ -50,7 +51,8 @@ class HusBackend(object):
'--password', pw,
'--iscsi', '1',
check_exit_code=True)
LOG.debug('get_iscsi_info: ' + out + ' -- ' + err)
LOG.debug('get_iscsi_info: %(out)s -- %(err)s',
{'out': out, 'err': err})
return out
def get_hdp_info(self, cmd, ver, ip0, ip1, user, pw):
@ -62,7 +64,8 @@ class HusBackend(object):
'--password', pw,
'--hdp', '1',
check_exit_code=True)
LOG.debug('get_hdp_info: ' + out + ' -- ' + err)
LOG.debug('get_hdp_info: %(out)s -- %(err)s',
{'out': out, 'err': err})
return out
def create_lu(self, cmd, ver, ip0, ip1, user, pw, id, hdp, start,
@ -80,7 +83,8 @@ class HusBackend(object):
'--end', end,
'--size', size,
check_exit_code=True)
LOG.debug('create_lu: ' + out + ' -- ' + err)
LOG.debug('create_lu: %(out)s -- %(err)s',
{'out': out, 'err': err})
return out
def delete_lu(self, cmd, ver, ip0, ip1, user, pw, id, lun):
@ -95,7 +99,8 @@ class HusBackend(object):
'--lun', lun,
'--force', 1,
check_exit_code=True)
LOG.debug('delete_lu: ' + out + ' -- ' + err)
LOG.debug('delete_lu: %(out)s -- %(err)s',
{'out': out, 'err': err})
return out
def create_dup(self, cmd, ver, ip0, ip1, user, pw, id, src_lun,
@ -114,7 +119,8 @@ class HusBackend(object):
'--end', end,
'--size', size,
check_exit_code=True)
LOG.debug('create_dup: ' + out + ' -- ' + err)
LOG.debug('create_dup: %(out)s -- %(err)s',
{'out': out, 'err': err})
return out
def extend_vol(self, cmd, ver, ip0, ip1, user, pw, id, lun, new_size):
@ -129,7 +135,8 @@ class HusBackend(object):
'--lun', lun,
'--size', new_size,
check_exit_code=True)
LOG.debug('extend_vol: ' + out + ' -- ' + err)
LOG.debug('extend_vol: %(out)s -- %(err)s',
{'out': out, 'err': err})
return out
def add_iscsi_conn(self, cmd, ver, ip0, ip1, user, pw, id, lun, ctl, port,
@ -148,7 +155,8 @@ class HusBackend(object):
'--target', iqn,
'--initiator', initiator,
check_exit_code=True)
LOG.debug('add_iscsi_conn: ' + out + ' -- ' + err)
LOG.debug('add_iscsi_conn: %(out)s -- %(err)s',
{'out': out, 'err': err})
return out
def del_iscsi_conn(self, cmd, ver, ip0, ip1, user, pw, id, lun, ctl, port,
@ -168,5 +176,6 @@ class HusBackend(object):
'--initiator', initiator,
'--force', 1,
check_exit_code=True)
LOG.debug('del_iscsi_conn: ' + out + ' -- ' + err)
LOG.debug('del_iscsi_conn: %(out)s -- %(err)s',
{'out': out, 'err': err})
return out


@ -205,8 +205,8 @@ class HDSISCSIDriver(driver.ISCSIDriver):
conf[ip]['ctl'] = ctl
conf[ip]['port'] = port
conf[ip]['iscsi_port'] = ipp
msg = "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(pt)s"
LOG.debug(msg, {'ip': ip, 'ipp': ipp, 'ctl': ctl, 'pt': port})
LOG.debug("portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(pt)s",
{'ip': ip, 'ipp': ipp, 'ctl': ctl, 'pt': port})
return conf
@ -256,7 +256,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
# value and use a temporary dummy password.
if 'iscsi_secret' not in svc:
# Warns in the first time
LOG.info(_LE("CHAP authentication disabled"))
LOG.info(_LI("CHAP authentication disabled"))
svc['iscsi_secret'] = ""
@ -303,7 +303,8 @@ class HDSISCSIDriver(driver.ISCSIDriver):
self.config['password'],
pool['hdp'])
LOG.debug('Query for pool %s: %s', pool['pool_name'], out)
LOG.debug('Query for pool %(pool)s: %(out)s',
{'pool': pool['pool_name'], 'out': out})
(hdp, size, _ign, used) = out.split()[1:5] # in MB
pool['total_capacity_gb'] = int(size) / units.Ki
@ -566,7 +567,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
if 'tgt' in info.keys(): # spurious repeat connection
# print info.keys()
LOG.debug("initiate_conn: tgt already set %s" % info['tgt'])
LOG.debug("initiate_conn: tgt already set %s", info['tgt'])
(arid, lun) = info['id_lu']
loc = arid + '.' + lun
# sps, use target if provided
@ -612,7 +613,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
info = _loc_info(volume['provider_location'])
if 'tgt' not in info.keys(): # spurious disconnection
LOG.warn(_LW("terminate_conn: provider location empty."))
LOG.warning(_LW("terminate_conn: provider location empty."))
return
(arid, lun) = info['id_lu']
(_portal, iqn, loc, ctl, port, hlun) = info['tgt']

View File

@ -450,10 +450,10 @@ class HDSNFSDriver(nfs.NfsDriver):
conf[key]['path'] = path
conf[key]['hdp'] = hdp
conf[key]['fslabel'] = fslabel
msg = _("nfs_info: %(key)s: %(path)s, HDP: \
%(fslabel)s FSID: %(hdp)s")
LOG.info(msg, {'key': key, 'path': path, 'fslabel': fslabel,
'hdp': hdp})
LOG.info(_LI("nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s "
"FSID: %(hdp)s"),
{'key': key, 'path': path,
'fslabel': fslabel, 'hdp': hdp})
return conf
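
The two files above also correct which marker function wraps each message: LOG.info() text moves from _LE()/_() to _LI(), so the string lands in the right translation catalog. As a sketch of how those markers are usually produced with oslo.i18n (this mirrors what cinder/i18n.py is expected to do; treat the details as an assumption):

import oslo_i18n

_translators = oslo_i18n.TranslatorFactory(domain='cinder')

_ = _translators.primary           # user-facing text (exceptions, API replies)
_LI = _translators.log_info        # LOG.info
_LW = _translators.log_warning     # LOG.warning
_LE = _translators.log_error       # LOG.error / LOG.exception

Each factory attribute is a gettext-style callable, so a message marked with _LE() but logged at info level would be looked up in the wrong catalog, which is what the hds.py hunk above fixes.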

View File

@ -23,7 +23,7 @@ from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _
from cinder.i18n import _, _LE
from cinder import utils
SMPL = 1
@ -168,7 +168,7 @@ def set_msg(msg_id, **kwargs):
def output_err(msg_id, **kwargs):
msg = HBSD_ERR_MSG.get(msg_id) % kwargs
LOG.error("MSGID%04d-E: %s", msg_id, msg)
LOG.error(_LE("MSGID%(id)04d-E: %(msg)s"), {'id': msg_id, 'msg': msg})
return msg
@ -236,10 +236,10 @@ class HBSDBasicLib(object):
def exec_command(self, cmd, args=None, printflag=True):
if printflag:
if args:
LOG.debug('cmd: %(cmd)s, args: %(args)s' %
LOG.debug('cmd: %(cmd)s, args: %(args)s',
{'cmd': cmd, 'args': args})
else:
LOG.debug('cmd: %s' % cmd)
LOG.debug('cmd: %s', cmd)
cmd = [cmd]
@ -257,11 +257,11 @@ class HBSDBasicLib(object):
stdout = e.stdout
stderr = e.stderr
LOG.debug('cmd: %s' % six.text_type(cmd))
LOG.debug('from: %s' % six.text_type(inspect.stack()[2]))
LOG.debug('ret: %d' % ret)
LOG.debug('stdout: %s' % stdout.replace(os.linesep, ' '))
LOG.debug('stderr: %s' % stderr.replace(os.linesep, ' '))
LOG.debug('cmd: %s', cmd)
LOG.debug('from: %s', inspect.stack()[2])
LOG.debug('ret: %d', ret)
LOG.debug('stdout: %s', stdout.replace(os.linesep, ' '))
LOG.debug('stderr: %s', stderr.replace(os.linesep, ' '))
return ret, stdout, stderr
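
Replacing '%' with a trailing argument is not only cosmetic: Logger.debug() returns before touching its arguments when DEBUG is disabled, so conversions such as stdout.replace(os.linesep, ' ') are skipped entirely. A small demonstration (standalone, nothing here comes from hbsd_basiclib):

import logging

logging.basicConfig(level=logging.INFO)   # DEBUG records are dropped
LOG = logging.getLogger('demo')

class Expensive(object):
    def __str__(self):
        print('formatted!')               # side effect marks when __str__ runs
        return 'expensive repr'

LOG.debug('stdout: %s' % Expensive())     # '%' formats eagerly: prints 'formatted!'
LOG.debug('stdout: %s', Expensive())      # dropped before formatting: prints nothing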

View File

@ -25,7 +25,7 @@ from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _LE, _LW
from cinder.i18n import _LE, _LI, _LW
from cinder import utils
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
from cinder.volume.drivers.hitachi import hbsd_horcm as horcm
@ -253,27 +253,28 @@ class HBSDCommon(object):
essential_inherited_param = ['volume_backend_name', 'volume_driver']
conf = self.configuration
msg = basic_lib.set_msg(1, config_group=conf.config_group)
LOG.info(msg)
LOG.info(basic_lib.set_msg(1, config_group=conf.config_group))
version = self.command.get_comm_version()
if conf.hitachi_unit_name:
prefix = 'HSNM2 version'
else:
prefix = 'RAID Manager version'
LOG.info('\t%-35s%s' % (prefix + ': ', six.text_type(version)))
LOG.info(_LI('\t%(prefix)-35s : %(version)s'),
{'prefix': prefix, 'version': version})
for param in essential_inherited_param:
value = conf.safe_get(param)
LOG.info('\t%-35s%s' % (param + ': ', six.text_type(value)))
LOG.info(_LI('\t%(param)-35s : %(value)s'),
{'param': param, 'value': value})
for opt in volume_opts:
if not opt.secret:
value = getattr(conf, opt.name)
LOG.info('\t%-35s%s' % (opt.name + ': ',
six.text_type(value)))
LOG.info(_LI('\t%(name)-35s : %(value)s'),
{'name': opt.name, 'value': value})
if storage_protocol == 'iSCSI':
value = getattr(conf, 'hitachi_group_request')
LOG.info('\t%-35s%s' % ('hitachi_group_request: ',
six.text_type(value)))
LOG.info(_LI('\t%(request)-35s : %(value)s'),
{'request': 'hitachi_group_request', 'value': value})
def check_param(self):
conf = self.configuration
@ -352,7 +353,7 @@ class HBSDCommon(object):
def delete_pair(self, ldev, all_split=True, is_vvol=None):
paired_info = self.command.get_paired_info(ldev)
LOG.debug('paired_info: %s' % six.text_type(paired_info))
LOG.debug('paired_info: %s', paired_info)
pvol = paired_info['pvol']
svols = paired_info['svol']
driver = self.generated_from
@ -413,15 +414,13 @@ class HBSDCommon(object):
try:
self.command.restart_pair_horcm()
except Exception as e:
LOG.warning(_LW('Failed to restart horcm: %s') %
six.text_type(e))
LOG.warning(_LW('Failed to restart horcm: %s'), e)
else:
if (all_split or is_vvol) and restart:
try:
self.command.restart_pair_horcm()
except Exception as e:
LOG.warning(_LW('Failed to restart horcm: %s') %
six.text_type(e))
LOG.warning(_LW('Failed to restart horcm: %s'), e)
def copy_async_data(self, pvol, svol, is_vvol):
path_list = []
@ -442,9 +441,8 @@ class HBSDCommon(object):
try:
driver.pair_terminate_connection(ldev)
except Exception as ex:
msg = basic_lib.set_msg(
310, ldev=ldev, reason=six.text_type(ex))
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(310, ldev=ldev,
reason=ex))
def copy_sync_data(self, src_ldev, dest_ldev, size):
src_vol = {'provider_location': six.text_type(src_ldev),
@ -488,9 +486,8 @@ class HBSDCommon(object):
try:
self.delete_ldev(svol, is_vvol)
except Exception as ex:
msg = basic_lib.set_msg(
313, ldev=svol, reason=six.text_type(ex))
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(313, ldev=svol,
reason=ex))
return six.text_type(svol), type
@ -502,22 +499,21 @@ class HBSDCommon(object):
def create_ldev(self, size, ldev_range, pool_id, is_vvol):
LOG.debug('create start (normal)')
for i in basic_lib.DEFAULT_TRY_RANGE:
LOG.debug('Try number: %(tries)s / %(max_tries)s' %
LOG.debug('Try number: %(tries)s / %(max_tries)s',
{'tries': i + 1,
'max_tries': len(basic_lib.DEFAULT_TRY_RANGE)})
new_ldev = self._get_unused_volume_num(ldev_range)
try:
self._add_ldev(new_ldev, size, pool_id, is_vvol)
except exception.HBSDNotFound:
msg = basic_lib.set_msg(312, resource='LDEV')
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(312, resource='LDEV'))
continue
else:
break
else:
msg = basic_lib.output_err(636)
raise exception.HBSDError(message=msg)
LOG.debug('create end (normal: %s)' % six.text_type(new_ldev))
LOG.debug('create end (normal: %s)', new_ldev)
self.init_volinfo(self.volume_info, new_ldev)
return new_ldev
@ -544,8 +540,8 @@ class HBSDCommon(object):
'metadata': volume_metadata}
def delete_ldev(self, ldev, is_vvol):
LOG.debug('Call delete_ldev (LDEV: %(ldev)d is_vvol: %(vvol)s)'
% {'ldev': ldev, 'vvol': is_vvol})
LOG.debug('Call delete_ldev (LDEV: %(ldev)d is_vvol: %(vvol)s)',
{'ldev': ldev, 'vvol': is_vvol})
with self.pair_flock:
self.delete_pair(ldev)
self.command.comm_delete_ldev(ldev, is_vvol)
@ -553,15 +549,14 @@ class HBSDCommon(object):
if ldev in self.volume_info:
self.volume_info.pop(ldev)
LOG.debug('delete_ldev is finished '
'(LDEV: %(ldev)d, is_vvol: %(vvol)s)'
% {'ldev': ldev, 'vvol': is_vvol})
'(LDEV: %(ldev)d, is_vvol: %(vvol)s)',
{'ldev': ldev, 'vvol': is_vvol})
def delete_volume(self, volume):
ldev = self.get_ldev(volume)
if ldev is None:
msg = basic_lib.set_msg(
304, method='delete_volume', id=volume['id'])
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(304, method='delete_volume',
id=volume['id']))
return
self.add_volinfo(ldev, volume['id'])
if not self.volume_info[ldev]['in_use'].lock.acquire(False):
@ -576,9 +571,8 @@ class HBSDCommon(object):
with self.volinfo_lock:
if ldev in self.volume_info:
self.volume_info.pop(ldev)
msg = basic_lib.set_msg(
305, type='volume', id=volume['id'])
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(
305, type='volume', id=volume['id']))
except exception.HBSDBusy:
raise exception.VolumeIsBusy(volume_name=volume['name'])
finally:
@ -621,9 +615,8 @@ class HBSDCommon(object):
def delete_snapshot(self, snapshot):
ldev = self.get_ldev(snapshot)
if ldev is None:
msg = basic_lib.set_msg(
304, method='delete_snapshot', id=snapshot['id'])
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(
304, method='delete_snapshot', id=snapshot['id']))
return
self.add_volinfo(ldev, id=snapshot['id'], type='snapshot')
if not self.volume_info[ldev]['in_use'].lock.acquire(False):
@ -638,9 +631,8 @@ class HBSDCommon(object):
with self.volinfo_lock:
if ldev in self.volume_info:
self.volume_info.pop(ldev)
msg = basic_lib.set_msg(
305, type='snapshot', id=snapshot['id'])
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(
305, type='snapshot', id=snapshot['id']))
except exception.HBSDBusy:
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
finally:
@ -722,9 +714,8 @@ class HBSDCommon(object):
def output_backend_available_once(self):
if self.output_first:
self.output_first = False
msg = basic_lib.set_msg(
3, config_group=self.configuration.config_group)
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(
3, config_group=self.configuration.config_group))
def update_volume_stats(self, storage_protocol):
data = {}
@ -740,8 +731,7 @@ class HBSDCommon(object):
total_gb, free_gb = self.command.comm_get_dp_pool(
self.configuration.hitachi_pool_id)
except Exception as ex:
LOG.error(_LE('Failed to update volume status: %s') %
six.text_type(ex))
LOG.error(_LE('Failed to update volume status: %s'), ex)
return None
data['total_capacity_gb'] = total_gb
@ -750,7 +740,7 @@ class HBSDCommon(object):
'reserved_percentage')
data['QoS_support'] = False
LOG.debug('Updating volume status (%s)' % data)
LOG.debug('Updating volume status (%s)', data)
return data
@ -773,8 +763,7 @@ class HBSDCommon(object):
ldev = self._string2int(existing_ref.get('ldev'))
msg = basic_lib.set_msg(4, volume_id=volume['id'], ldev=ldev)
LOG.info(msg)
LOG.info(basic_lib.set_msg(4, volume_id=volume['id'], ldev=ldev))
return {'provider_location': ldev}
@ -833,8 +822,7 @@ class HBSDCommon(object):
except exception.HBSDBusy:
raise exception.HBSDVolumeIsBusy(volume_name=volume['name'])
else:
msg = basic_lib.set_msg(5, volume_id=volume['id'], ldev=ldev)
LOG.info(msg)
LOG.info(basic_lib.set_msg(5, volume_id=volume['id'], ldev=ldev))
finally:
if ldev in self.volume_info:
self.volume_info[ldev]['in_use'].lock.release()
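
A recurring simplification in this file is passing the exception object straight to the logger instead of six.text_type(ex): logging applies str() to its arguments at emit time, so the manual conversion is redundant. A minimal sketch (the message text is borrowed from the hunks above; the rest is scaffolding):

import logging

logging.basicConfig(level=logging.WARNING)
LOG = logging.getLogger(__name__)

try:
    raise RuntimeError('timeout')
except Exception as ex:
    # str(ex) happens inside the logging framework when the record is rendered.
    LOG.warning('Failed to restart horcm: %s', ex)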

View File

@ -25,7 +25,7 @@ from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _LW
from cinder.i18n import _LI, _LW
from cinder import utils
import cinder.volume.driver
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
@ -83,8 +83,8 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
for opt in volume_opts:
if not opt.secret:
value = getattr(self.configuration, opt.name)
LOG.info('\t%-35s%s' %
(opt.name + ': ', six.text_type(value)))
LOG.info(_LI('\t%(name)-35s : %(value)s'),
{'name': opt.name, 'value': value})
self.common.command.output_param_to_log(self.configuration)
def _add_wwn(self, hgs, port, gid, wwns):
@ -94,7 +94,7 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
detected = self.common.command.is_detected(port, wwn)
hgs.append({'port': port, 'gid': gid, 'initiator_wwn': wwn,
'detected': detected})
LOG.debug('Create host group for %s' % hgs)
LOG.debug('Create host group for %s', hgs)
def _add_lun(self, hostgroups, ldev):
if hostgroups is self.pair_hostgroups:
@ -107,8 +107,7 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
try:
self.common.command.comm_delete_lun(hostgroups, ldev)
except exception.HBSDNotFound:
msg = basic_lib.set_msg(301, ldev=ldev)
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(301, ldev=ldev))
def _get_hgname_gid(self, port, host_grp_name):
return self.common.command.get_hgname_gid(port, host_grp_name)
@ -127,9 +126,9 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
def _fill_group(self, hgs, port, host_grp_name, wwns):
added_hostgroup = False
LOG.debug('Create host group (hgs: %(hgs)s port: %(port)s '
'name: %(name)s wwns: %(wwns)s)'
% {'hgs': hgs, 'port': port,
'name': host_grp_name, 'wwns': wwns})
'name: %(name)s wwns: %(wwns)s)',
{'hgs': hgs, 'port': port,
'name': host_grp_name, 'wwns': wwns})
gid = self._get_hgname_gid(port, host_grp_name)
if gid is None:
for retry_cnt in basic_lib.DEFAULT_TRY_RANGE:
@ -139,13 +138,12 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
added_hostgroup = True
except exception.HBSDNotFound:
gid = None
msg = basic_lib.set_msg(312, resource='GID')
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(312, resource='GID'))
continue
else:
LOG.debug('Completed to add host target'
'(port: %(port)s gid: %(gid)d)'
% {'port': port, 'gid': gid})
'(port: %(port)s gid: %(gid)d)',
{'port': port, 'gid': gid})
break
else:
msg = basic_lib.output_err(641)
@ -180,15 +178,12 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
try:
self._fill_group(hgs, port, host_grp_name, wwns_copy)
except Exception as ex:
LOG.warning(_LW('Failed to add host group: %s') %
six.text_type(ex))
msg = basic_lib.set_msg(
308, port=port, name=host_grp_name)
LOG.warning(msg)
LOG.warning(_LW('Failed to add host group: %s'), ex)
LOG.warning(basic_lib.set_msg(
308, port=port, name=host_grp_name))
if not hgs:
msg = basic_lib.output_err(649)
raise exception.HBSDError(message=msg)
raise exception.HBSDError(message=basic_lib.output_err(649))
def add_hostgroup_pair(self, pair_hostgroups):
if self.configuration.hitachi_unit_name:
@ -232,7 +227,7 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
if 'wwpns' not in properties:
msg = basic_lib.output_err(650, resource='HBA')
raise exception.HBSDError(message=msg)
LOG.debug("wwpns: %s" % properties['wwpns'])
LOG.debug("wwpns: %s", properties['wwpns'])
hostgroups = []
security_ports = self._get_hostgroup_info(
@ -254,9 +249,8 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
self.common.command.comm_del_hostgrp(port, gid, host_grp_name)
except Exception:
with excutils.save_and_reraise_exception():
msg = basic_lib.set_msg(
306, port=port, gid=gid, name=host_grp_name)
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(
306, port=port, gid=gid, name=host_grp_name))
def _check_volume_mapping(self, hostgroup):
port = hostgroup['port']
@ -372,8 +366,8 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
def _initialize_connection(self, ldev, connector, src_hgs=None):
LOG.debug("Call _initialize_connection "
"(config_group: %(group)s ldev: %(ldev)d)"
% {'group': self.configuration.config_group, 'ldev': ldev})
"(config_group: %(group)s ldev: %(ldev)d)",
{'group': self.configuration.config_group, 'ldev': ldev})
if src_hgs is self.pair_hostgroups:
hostgroups = src_hgs
else:
@ -387,8 +381,7 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
try:
self._add_lun(hostgroups, ldev)
except exception.HBSDNotFound:
msg = basic_lib.set_msg(311, ldev=ldev)
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(311, ldev=ldev))
for i in range(self.max_hostgroups + 1):
self.pair_hostnum += 1
pair_hostgroups = []
@ -419,18 +412,18 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
self.common.volume_info[ldev]['in_use']:
hostgroups = self._initialize_connection(ldev, connector)
properties = self._get_properties(volume, hostgroups)
LOG.debug('Initialize volume_info: %s'
% self.common.volume_info)
LOG.debug('Initialize volume_info: %s',
self.common.volume_info)
LOG.debug('HFCDrv: properties=%s' % properties)
LOG.debug('HFCDrv: properties=%s', properties)
return {
'driver_volume_type': 'fibre_channel',
'data': properties
}
def _terminate_connection(self, ldev, connector, src_hgs):
LOG.debug("Call _terminate_connection(config_group: %s)"
% self.configuration.config_group)
LOG.debug("Call _terminate_connection(config_group: %s)",
self.configuration.config_group)
hostgroups = src_hgs[:]
self._delete_lun(hostgroups, ldev)
LOG.debug("*** _terminate_ ***")
@ -440,8 +433,7 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
self.do_setup_status.wait()
ldev = self.common.get_ldev(volume)
if ldev is None:
msg = basic_lib.set_msg(302, volume_id=volume['id'])
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(302, volume_id=volume['id']))
return
if 'wwpns' not in connector:
@ -461,7 +453,7 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
self._terminate_connection(ldev, connector, hostgroups)
properties = self._get_properties(volume, hostgroups,
terminate=True)
LOG.debug('Terminate volume_info: %s' % self.common.volume_info)
LOG.debug('Terminate volume_info: %s', self.common.volume_info)
return {
'driver_volume_type': 'fibre_channel',

View File

@ -27,7 +27,7 @@ from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _LE, _LW
from cinder.i18n import _LE, _LI, _LW
from cinder.openstack.common import loopingcall
from cinder import utils
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
@ -454,7 +454,7 @@ class HBSDHORCM(basic_lib.HBSDBasicLib):
continue
target_wwns[port] = line[10]
LOG.debug('target wwns: %s' % target_wwns)
LOG.debug('target wwns: %s', target_wwns)
return target_wwns
def comm_get_hbawwn(self, hostgroups, wwns, port, is_detected):
@ -584,8 +584,7 @@ class HBSDHORCM(basic_lib.HBSDBasicLib):
if (re.search('SSB=%s' % SNAP_LAST_PATH_SSB, stderr) and
not self.comm_get_snapshot(ldev) or
re.search('SSB=%s' % HOST_IO_SSB, stderr)):
msg = basic_lib.set_msg(310, ldev=ldev, reason=stderr)
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(310, ldev=ldev, reason=stderr))
if time.time() - start >= LUN_DELETE_WAITTIME:
msg = basic_lib.output_err(
@ -790,9 +789,8 @@ class HBSDHORCM(basic_lib.HBSDBasicLib):
if is_once:
break
else:
msg = basic_lib.set_msg(
314, ldev=ldev, lun=lun, port=port, id=gid)
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(
314, ldev=ldev, lun=lun, port=port, id=gid))
finally:
self.comm_unlock()
@ -885,8 +883,7 @@ class HBSDHORCM(basic_lib.HBSDBasicLib):
self.comm_lock()
ret, stdout, stderr = self.exec_raidcom('raidcom', args)
if ret:
msg = basic_lib.set_msg(315, ldev=ldev, reason=stderr)
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(315, ldev=ldev, reason=stderr))
finally:
self.comm_unlock()
@ -896,9 +893,8 @@ class HBSDHORCM(basic_lib.HBSDBasicLib):
def discard_zero_page(self, ldev):
try:
self.comm_modify_ldev(ldev)
except Exception as e:
LOG.warning(_LW('Failed to discard zero page: %s') %
six.text_type(e))
except Exception as ex:
LOG.warning(_LW('Failed to discard zero page: %s'), ex)
@storage_synchronized
def comm_add_snapshot(self, pvol, svol):
@ -1396,8 +1392,7 @@ HORCM_CMD
[basic_lib.PSUS], timeout,
interval, check_svol=True)
except Exception as ex:
LOG.warning(_LW('Failed to create pair: %s') %
six.text_type(ex))
LOG.warning(_LW('Failed to create pair: %s'), ex)
try:
self.comm_pairsplit(copy_group, ldev_name)
@ -1406,23 +1401,20 @@ HORCM_CMD
[basic_lib.SMPL], timeout,
self.conf.hitachi_async_copy_check_interval)
except Exception as ex:
LOG.warning(_LW('Failed to create pair: %s') %
six.text_type(ex))
LOG.warning(_LW('Failed to create pair: %s'), ex)
if self.is_smpl(copy_group, ldev_name):
try:
self.delete_pair_config(pvol, svol, copy_group,
ldev_name)
except Exception as ex:
LOG.warning(_LW('Failed to create pair: %s') %
six.text_type(ex))
LOG.warning(_LW('Failed to create pair: %s'), ex)
if restart:
try:
self.restart_pair_horcm()
except Exception as ex:
LOG.warning(_LW('Failed to restart horcm: %s') %
six.text_type(ex))
LOG.warning(_LW('Failed to restart horcm: %s'), ex)
else:
self.check_snap_count(pvol)
@ -1440,8 +1432,7 @@ HORCM_CMD
pvol, svol, [basic_lib.SMPL], timeout,
self.conf.hitachi_async_copy_check_interval)
except Exception as ex:
LOG.warning(_LW('Failed to create pair: %s') %
six.text_type(ex))
LOG.warning(_LW('Failed to create pair: %s'), ex)
def delete_pair(self, pvol, svol, is_vvol):
timeout = basic_lib.DEFAULT_PROCESS_WAITTIME
@ -1480,8 +1471,8 @@ HORCM_CMD
for opt in volume_opts:
if not opt.secret:
value = getattr(conf, opt.name)
LOG.info('\t%-35s%s' % (opt.name + ': ',
six.text_type(value)))
LOG.info(_LI('\t%(name)-35s : %(value)s'),
{'name': opt.name, 'value': value})
def create_lock_file(self):
inst = self.conf.hitachi_horcm_numbers[0]

View File

@ -24,7 +24,7 @@ from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _LE
from cinder.i18n import _LE, _LI
from cinder import utils
import cinder.volume.driver
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
@ -70,8 +70,8 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
self.configuration.append_config_values(volume_opts)
if (self.configuration.hitachi_auth_method and
self.configuration.hitachi_auth_method not in CHAP_METHOD):
msg = basic_lib.output_err(601, param='hitachi_auth_method')
raise exception.HBSDError(message=msg)
raise exception.HBSDError(
message=basic_lib.output_err(601, param='hitachi_auth_method'))
if self.configuration.hitachi_auth_method == 'None':
self.configuration.hitachi_auth_method = None
for opt in volume_opts:
@ -84,8 +84,8 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
except exception.HBSDError:
raise
except Exception as ex:
msg = basic_lib.output_err(601, param=six.text_type(ex))
raise exception.HBSDError(message=msg)
raise exception.HBSDError(
message=basic_lib.output_err(601, param=six.text_type(ex)))
def output_param_to_log(self):
lock = basic_lib.get_process_lock(self.common.system_lock_file)
@ -95,15 +95,14 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
for opt in volume_opts:
if not opt.secret:
value = getattr(self.configuration, opt.name)
LOG.info('\t%-35s%s' % (opt.name + ': ',
six.text_type(value)))
LOG.info(_LI('\t%(name)-35s : %(value)s'),
{'name': opt.name, 'value': value})
def _delete_lun_iscsi(self, hostgroups, ldev):
try:
self.common.command.comm_delete_lun_iscsi(hostgroups, ldev)
except exception.HBSDNotFound:
msg = basic_lib.set_msg(301, ldev=ldev)
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(301, ldev=ldev))
def _add_target(self, hostgroups, ldev):
self.common.add_lun('autargetmap', hostgroups, ldev)
@ -111,7 +110,7 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
def _add_initiator(self, hgs, port, gid, host_iqn):
self.common.command.comm_add_initiator(port, gid, host_iqn)
hgs.append({'port': port, 'gid': int(gid), 'detected': True})
LOG.debug("Create iSCSI target for %s" % hgs)
LOG.debug("Create iSCSI target for %s", hgs)
def _get_unused_gid_iscsi(self, port):
group_range = self.configuration.hitachi_group_range
@ -123,16 +122,14 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
ret, _stdout, _stderr = self.common.command.delete_iscsi_target(
port, target_no, target_alias)
if ret:
msg = basic_lib.set_msg(
307, port=port, tno=target_no, alias=target_alias)
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(
307, port=port, tno=target_no, alias=target_alias))
def _delete_chap_user(self, port):
ret, _stdout, _stderr = self.common.command.delete_chap_user(port)
if ret:
msg = basic_lib.set_msg(
303, user=self.configuration.hitachi_auth_user)
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(
303, user=self.configuration.hitachi_auth_user))
def _get_hostgroup_info_iscsi(self, hgs, host_iqn):
return self.common.command.comm_get_hostgroup_info_iscsi(
@ -147,8 +144,8 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
hostgroup['ip_addr'] = ip_addr
hostgroup['ip_port'] = ip_port
hostgroup['target_iqn'] = target_iqn
LOG.debug("ip_addr=%(addr)s ip_port=%(port)s target_iqn=%(iqn)s"
% {'addr': ip_addr, 'port': ip_port, 'iqn': target_iqn})
LOG.debug("ip_addr=%(addr)s ip_port=%(port)s target_iqn=%(iqn)s",
{'addr': ip_addr, 'port': ip_port, 'iqn': target_iqn})
def _fill_groups(self, hgs, ports, target_iqn, target_alias, add_iqn):
for port in ports:
@ -156,7 +153,7 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
added_user = False
LOG.debug('Create target (hgs: %(hgs)s port: %(port)s '
'target_iqn: %(tiqn)s target_alias: %(alias)s '
'add_iqn: %(aiqn)s)' %
'add_iqn: %(aiqn)s)',
{'hgs': hgs, 'port': port, 'tiqn': target_iqn,
'alias': target_alias, 'aiqn': add_iqn})
gid = self.common.command.get_gid_from_targetiqn(
@ -170,22 +167,20 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
port, gid, target_alias, target_iqn)
added_hostgroup = True
except exception.HBSDNotFound:
msg = basic_lib.set_msg(312, resource='GID')
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(312, resource='GID'))
continue
except Exception as ex:
msg = basic_lib.set_msg(
LOG.warning(basic_lib.set_msg(
309, port=port, alias=target_alias,
reason=six.text_type(ex))
LOG.warning(msg)
reason=ex))
break
else:
LOG.debug('Completed to add target'
'(port: %(port)s gid: %(gid)d)'
% {'port': port, 'gid': gid})
'(port: %(port)s gid: %(gid)d)',
{'port': port, 'gid': gid})
break
if gid is None:
LOG.error(_LE('Failed to add target(port: %s)') % port)
LOG.error(_LE('Failed to add target(port: %s)'), port)
continue
try:
if added_hostgroup:
@ -196,9 +191,8 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
port, target_alias)
self._add_initiator(hgs, port, gid, add_iqn)
except Exception as ex:
msg = basic_lib.set_msg(
316, port=port, reason=six.text_type(ex))
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(
316, port=port, reason=ex))
if added_hostgroup:
if added_user:
self._delete_chap_user(port)
@ -227,15 +221,14 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
self.add_hostgroup_core(hgs, diff_ports, target_iqn,
target_alias, master_iqn)
if not hgs:
msg = basic_lib.output_err(649)
raise exception.HBSDError(message=msg)
raise exception.HBSDError(message=basic_lib.output_err(649))
def add_hostgroup(self):
properties = utils.brick_get_connector_properties()
if 'initiator' not in properties:
msg = basic_lib.output_err(650, resource='HBA')
raise exception.HBSDError(message=msg)
LOG.debug("initiator: %s" % properties['initiator'])
raise exception.HBSDError(
message=basic_lib.output_err(650, resource='HBA'))
LOG.debug("initiator: %s", properties['initiator'])
hostgroups = []
security_ports = self._get_hostgroup_info_iscsi(
hostgroups, properties['initiator'])
@ -325,8 +318,8 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
def _initialize_connection(self, ldev, connector, src_hgs=None):
LOG.debug("Call _initialize_connection "
"(config_group: %(group)s ldev: %(ldev)d)"
% {'group': self.configuration.config_group, 'ldev': ldev})
"(config_group: %(group)s ldev: %(ldev)d)",
{'group': self.configuration.config_group, 'ldev': ldev})
if src_hgs:
hostgroups = src_hgs[:]
else:
@ -344,26 +337,26 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
self.do_setup_status.wait()
ldev = self.common.get_ldev(volume)
if ldev is None:
msg = basic_lib.output_err(619, volume_id=volume['id'])
raise exception.HBSDError(message=msg)
raise exception.HBSDError(
message=basic_lib.output_err(619, volume_id=volume['id']))
self.common.add_volinfo(ldev, volume['id'])
with self.common.volume_info[ldev]['lock'],\
self.common.volume_info[ldev]['in_use']:
hostgroups = self._initialize_connection(ldev, connector)
protocol = 'iscsi'
properties = self._get_properties(volume, hostgroups)
LOG.debug('Initialize volume_info: %s'
% self.common.volume_info)
LOG.debug('Initialize volume_info: %s',
self.common.volume_info)
LOG.debug('HFCDrv: properties=%s' % properties)
LOG.debug('HFCDrv: properties=%s', properties)
return {
'driver_volume_type': protocol,
'data': properties
}
def _terminate_connection(self, ldev, connector, src_hgs):
LOG.debug("Call _terminate_connection(config_group: %s)"
% self.configuration.config_group)
LOG.debug("Call _terminate_connection(config_group: %s)",
self.configuration.config_group)
hostgroups = src_hgs[:]
self._delete_lun_iscsi(hostgroups, ldev)
@ -373,20 +366,18 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
self.do_setup_status.wait()
ldev = self.common.get_ldev(volume)
if ldev is None:
msg = basic_lib.set_msg(302, volume_id=volume['id'])
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(302, volume_id=volume['id']))
return
if 'initiator' not in connector:
msg = basic_lib.output_err(650, resource='HBA')
raise exception.HBSDError(message=msg)
raise exception.HBSDError(
message=basic_lib.output_err(650, resource='HBA'))
hostgroups = []
self._get_hostgroup_info_iscsi(hostgroups,
connector['initiator'])
if not hostgroups:
msg = basic_lib.output_err(649)
raise exception.HBSDError(message=msg)
raise exception.HBSDError(message=basic_lib.output_err(649))
self.common.add_volinfo(ldev, volume['id'])
with self.common.volume_info[ldev]['lock'],\
@ -412,8 +403,8 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
self.do_setup_status.wait()
if volume['volume_attachment']:
desc = 'volume %s' % volume['id']
msg = basic_lib.output_err(660, desc=desc)
raise exception.HBSDError(message=msg)
raise exception.HBSDError(
message=basic_lib.output_err(660, desc=desc))
super(HBSDISCSIDriver, self).copy_volume_to_image(context, volume,
image_service,
image_meta)

View File

@ -143,8 +143,8 @@ class HBSDSNM2(basic_lib.HBSDBasicLib):
if int(line[3]) == ldev:
hlu = int(line[2])
LOG.warning(_LW('ldev(%(ldev)d) is already mapped '
'(hlun: %(hlu)d)')
% {'ldev': ldev, 'hlu': hlu})
'(hlun: %(hlu)d)'),
{'ldev': ldev, 'hlu': hlu})
return hlu
return None
@ -295,7 +295,7 @@ class HBSDSNM2(basic_lib.HBSDBasicLib):
else:
target_wwns[port] = line[3]
LOG.debug('target wwns: %s' % target_wwns)
LOG.debug('target wwns: %s', target_wwns)
return target_wwns
def get_hostgroup_from_wwns(self, hostgroups, port, wwns, buf, login):
@ -379,7 +379,7 @@ class HBSDSNM2(basic_lib.HBSDBasicLib):
no_lun_cnt = 0
deleted_hostgroups = []
for hostgroup in hostgroups:
LOG.debug('comm_delete_lun: hostgroup is %s' % hostgroup)
LOG.debug('comm_delete_lun: hostgroup is %s', hostgroup)
port = hostgroup['port']
gid = hostgroup['gid']
ctl_no = port[0]
@ -423,7 +423,7 @@ class HBSDSNM2(basic_lib.HBSDBasicLib):
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
deleted_hostgroups.append({'port': port, 'gid': gid})
LOG.debug('comm_delete_lun is over (%d)' % lun)
LOG.debug('comm_delete_lun is over (%d)', lun)
def comm_delete_lun(self, hostgroups, ldev):
self.comm_delete_lun_core('auhgmap', hostgroups, ldev)
@ -554,9 +554,8 @@ class HBSDSNM2(basic_lib.HBSDBasicLib):
if is_once:
break
else:
msg = basic_lib.set_msg(
314, ldev=ldev, lun=hlu, port=port, id=gid)
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(
314, ldev=ldev, lun=hlu, port=port, id=gid))
if not is_ok:
if stderr:
@ -680,8 +679,8 @@ class HBSDSNM2(basic_lib.HBSDBasicLib):
if added_flag:
_ret, _stdout, _stderr = self.delete_chap_user(port)
if _ret:
msg = basic_lib.set_msg(303, user=auth_username)
LOG.warning(msg)
LOG.warning(basic_lib.set_msg(
303, user=auth_username))
msg = basic_lib.output_err(
600, cmd='auchapuser', ret=ret, out=stdout, err=stderr)
@ -773,8 +772,8 @@ class HBSDSNM2(basic_lib.HBSDBasicLib):
gid = int(shlex.split(line)[0][0:3])
hostgroups.append(
{'port': port, 'gid': gid, 'detected': True})
LOG.debug('Find port=%(port)s gid=%(gid)d'
% {'port': port, 'gid': gid})
LOG.debug('Find port=%(port)s gid=%(gid)d',
{'port': port, 'gid': gid})
if port not in security_ports:
security_ports.append(port)

View File

@ -62,15 +62,15 @@ class HuaweiVolumeDriver(object):
conf_file = self.configuration.cinder_huawei_conf_file
(product, protocol) = self._get_conf_info(conf_file)
LOG.info(_LI(
'_instantiate_driver: Loading %(protocol)s driver for '
'Huawei OceanStor %(product)s series storage arrays.')
% {'protocol': protocol,
'product': product})
LOG.info(_LI('_instantiate_driver: Loading %(protocol)s driver for '
'Huawei OceanStor %(product)s series storage arrays.'),
{'protocol': protocol,
'product': product})
# Map HVS to 18000
if product in MAPPING:
LOG.warn(_LW("Product name %s is deprecated, update your "
"configuration to the new product name."), product)
LOG.warning(_LW("Product name %s is deprecated, update your "
"configuration to the new product name."),
product)
product = MAPPING[product]
driver_module = self._product[product]
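
Logger.warn is a deprecated alias of Logger.warning in the standard library, so the hunk above renames the call as well as deferring the interpolation. Sketch (standalone; the product value is a placeholder):

import logging

logging.basicConfig(level=logging.WARNING)
LOG = logging.getLogger(__name__)

product = 'HVS'

# LOG.warn(...) still works but is a deprecated alias; prefer:
LOG.warning('Product name %s is deprecated, update your '
            'configuration to the new product name.', product)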

View File

@ -39,8 +39,8 @@ def parse_xml_file(filepath):
root = tree.getroot()
return root
except IOError as err:
LOG.error(_LE('parse_xml_file: %s') % err)
raise err
LOG.error(_LE('parse_xml_file: %s'), err)
raise
def get_xml_item(xml_root, item):
@ -127,7 +127,7 @@ def get_conf_host_os_type(host_ip, config):
if not host_os:
host_os = os_type['Linux'] # default os type
LOG.debug('_get_host_os_type: Host %(ip)s OS type is %(os)s.'
% {'ip': host_ip, 'os': host_os})
LOG.debug('_get_host_os_type: Host %(ip)s OS type is %(os)s.',
{'ip': host_ip, 'os': host_os})
return host_os

View File

@ -85,14 +85,13 @@ class RestCommon(object):
'res': res})
except Exception as err:
LOG.error(_LE('\nBad response from server: %s.') % err)
LOG.error(_LE('\nBad response from server: %s.'), err)
raise
try:
res_json = json.loads(res)
except Exception as err:
err_msg = (_LE('JSON transfer error: %s.') % err)
LOG.error(err_msg)
LOG.error(_LE('JSON transfer error: %s.'), err)
raise
return res_json
@ -165,10 +164,8 @@ class RestCommon(object):
volume_description = volume['name']
volume_size = self._get_volume_size(volume)
LOG.info(_LI(
'Create Volume: %(volume)s Size: %(size)s.')
% {'volume': volume_name,
'size': volume_size})
LOG.info(_LI('Create Volume: %(volume)s Size: %(size)s.'),
{'volume': volume_name, 'size': volume_size})
params = self._get_lun_conf_params()
params['pool_id'] = poolinfo['ID']
@ -234,8 +231,8 @@ class RestCommon(object):
name = self._encode_name(volume['id'])
lun_id = volume.get('provider_location', None)
LOG.info(_LI('Delete Volume: %(name)s array lun id: %(lun_id)s.')
% {'name': name, 'lun_id': lun_id})
LOG.info(_LI('Delete Volume: %(name)s array lun id: %(lun_id)s.'),
{'name': name, 'lun_id': lun_id})
if lun_id:
if self._check_lun_exist(lun_id) is True:
# Get qos_id by lun_id.
@ -278,7 +275,7 @@ class RestCommon(object):
tree = ET.parse(filename)
root = tree.getroot()
except Exception as err:
LOG.error(_LE('_read_xml: %s') % err)
LOG.error(_LE('_read_xml: %s'), err)
raise
return root
@ -341,11 +338,10 @@ class RestCommon(object):
snapshot_description = snapshot['id']
volume_name = self._encode_name(snapshot['volume_id'])
LOG.info(_LI(
'_create_snapshot:snapshot name: %(snapshot)s, '
'volume name: %(volume)s.')
% {'snapshot': snapshot_name,
'volume': volume_name})
LOG.info(_LI('_create_snapshot:snapshot name: %(snapshot)s, '
'volume name: %(volume)s.'),
{'snapshot': snapshot_name,
'volume': volume_name})
lun_id = self._get_volume_by_name(volume_name)
if lun_id is None:
@ -404,11 +400,10 @@ class RestCommon(object):
snapshot_name = self._encode_name(snapshot['id'])
volume_name = self._encode_name(snapshot['volume_id'])
LOG.info(_LI(
'stop_snapshot:snapshot name: %(snapshot)s, '
'volume name: %(volume)s.')
% {'snapshot': snapshot_name,
'volume': volume_name})
LOG.info(_LI('stop_snapshot:snapshot name: %(snapshot)s, '
'volume name: %(volume)s.'),
{'snapshot': snapshot_name,
'volume': volume_name})
snapshot_id = snapshot.get('provider_location', None)
if snapshot_id is None:
@ -556,12 +551,12 @@ class RestCommon(object):
tgt_lun_id = lun_info['ID']
luncopy_name = self._encode_name(volume['id'])
LOG.info(_LI(
'create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, '
'tgt_lun_id: %(tgt_lun_id)s, copy_name: %(copy_name)s')
% {'src_lun_id': snapshot_id,
'tgt_lun_id': tgt_lun_id,
'copy_name': luncopy_name})
LOG.info(_LI('create_volume_from_snapshot: src_lun_id: '
'%(src_lun_id)s, tgt_lun_id: %(tgt_lun_id)s, '
'copy_name: %(copy_name)s'),
{'src_lun_id': snapshot_id,
'tgt_lun_id': tgt_lun_id,
'copy_name': luncopy_name})
event_type = 'LUNReadyWaitInterval'
wait_interval = self._get_wait_interval(event_type)
@ -601,11 +596,10 @@ class RestCommon(object):
# Delete snapshot.
self.delete_snapshot(snapshot)
except exception.CinderException:
LOG.warning(_LW(
'Failure deleting the snapshot %(snapshot_id)s '
'of volume %(volume_id)s.')
% {'snapshot_id': snapshot['id'],
'volume_id': src_vref['id']})
LOG.warning(_LW('Failure deleting the snapshot '
'%(snapshot_id)s of volume %(volume_id)s.'),
{'snapshot_id': snapshot['id'],
'volume_id': src_vref['id']})
return lun_info
@ -638,11 +632,10 @@ class RestCommon(object):
host_group_name = HOSTGROUP_PREFIX + host_id
hostgroup_id = self._find_hostgroup(host_group_name)
LOG.info(_LI(
'_add_host_into_hostgroup, hostgroup name: %(name)s, '
'hostgroup id: %(id)s.')
% {'name': host_group_name,
'id': hostgroup_id})
LOG.info(_LI('_add_host_into_hostgroup, hostgroup name: %(name)s, '
'hostgroup id: %(id)s.'),
{'name': host_group_name,
'id': hostgroup_id})
if hostgroup_id is None:
hostgroup_id = self._create_hostgroup(host_group_name)
@ -663,12 +656,11 @@ class RestCommon(object):
lun_id = self._get_volume_by_name(volume_name)
view_id = self._find_mapping_view(mapping_view_name)
LOG.info(_LI(
'_mapping_hostgroup_and_lungroup, lun_group: %(lun_group)s, '
'view_id: %(view_id)s, lun_id: %(lun_id)s.')
% {'lun_group': six.text_type(lungroup_id),
'view_id': six.text_type(view_id),
'lun_id': six.text_type(lun_id)})
LOG.info(_LI('_mapping_hostgroup_and_lungroup, lun_group: '
'%(lun_group)s, view_id: %(view_id)s, lun_id: '
'%(lun_id)s.'), {'lun_group': lungroup_id,
'view_id': view_id,
'lun_id': lun_id})
try:
# Create lungroup and add LUN into to lungroup.
@ -691,10 +683,9 @@ class RestCommon(object):
except Exception:
with excutils.save_and_reraise_exception():
err_msg = (_LE(
'Error occurred when adding hostgroup and lungroup to '
'view. Remove lun from lungroup now.'))
LOG.error(err_msg)
LOG.error(_LE('Error occurred when adding hostgroup and '
'lungroup to view. Remove lun from lungroup '
'now.'))
self._remove_lun_from_lungroup(lungroup_id, lun_id)
return lun_id
@ -717,18 +708,16 @@ class RestCommon(object):
initiator_name = connector['initiator']
volume_name = self._encode_name(volume['id'])
LOG.info(_LI(
'initiator name: %(initiator_name)s, '
'volume name: %(volume)s.')
% {'initiator_name': initiator_name,
'volume': volume_name})
LOG.info(_LI('initiator name: %(initiator_name)s, '
'volume name: %(volume)s.'),
{'initiator_name': initiator_name,
'volume': volume_name})
(iscsi_iqn, target_ip) = self._get_iscsi_params(connector)
LOG.info(_LI(
'initialize_connection_iscsi,iscsi_iqn: %(iscsi_iqn)s, '
'target_ip: %(target_ip)s.')
% {'iscsi_iqn': iscsi_iqn,
'target_ip': target_ip})
LOG.info(_LI('initialize_connection_iscsi,iscsi_iqn: %(iscsi_iqn)s, '
'target_ip: %(target_ip)s.'),
{'iscsi_iqn': iscsi_iqn,
'target_ip': target_ip})
# Create host_group if not exist.
host_name = connector['host']
@ -746,8 +735,8 @@ class RestCommon(object):
hostlunid = self._find_host_lun_id(hostid, lun_id)
LOG.info(_LI("initialize_connection_iscsi, host lun id is: %s.")
% hostlunid)
LOG.info(_LI("initialize_connection_iscsi, host lun id is: %s."),
hostlunid)
# Return iSCSI properties.
properties = {}
@ -757,8 +746,8 @@ class RestCommon(object):
properties['target_lun'] = int(hostlunid)
properties['volume_id'] = volume['id']
LOG.info(_LI("initialize_connection_iscsi success. Return data: %s.")
% properties)
LOG.info(_LI("initialize_connection_iscsi success. Return data: %s."),
properties)
return {'driver_volume_type': 'iscsi', 'data': properties}
@utils.synchronized('huawei', external=True)
@ -767,11 +756,10 @@ class RestCommon(object):
host_name = connector['host']
volume_name = self._encode_name(volume['id'])
LOG.info(_LI(
'initialize_connection_fc, initiator: %(initiator_name)s,'
' volume name: %(volume)s.')
% {'initiator_name': wwns,
'volume': volume_name})
LOG.info(_LI('initialize_connection_fc, initiator: %(initiator_name)s,'
' volume name: %(volume)s.'),
{'initiator_name': wwns,
'volume': volume_name})
# Create host_group if not exist.
hostid = self._find_host(host_name)
@ -782,8 +770,8 @@ class RestCommon(object):
hostgroup_id = self._add_host_into_hostgroup(hostid)
free_wwns = self._get_connected_free_wwns()
LOG.info(_LI("initialize_connection_fc, the array has free wwns: %s")
% free_wwns)
LOG.info(_LI("initialize_connection_fc, the array has free wwns: %s"),
free_wwns)
for wwn in wwns:
if wwn in free_wwns:
self._add_fc_port_to_host(hostid, wwn)
@ -810,8 +798,7 @@ class RestCommon(object):
'volume_id': volume['id'],
'initiator_target_map': init_targ_map}}
LOG.info(_LI("initialize_connection_fc, return data is: %s.")
% info)
LOG.info(_LI("initialize_connection_fc, return data is: %s."), info)
return info
@ -926,8 +913,7 @@ class RestCommon(object):
host_lun_id = hostassoinfo['HostLUNID']
break
except Exception as err:
msg = (_LE("JSON transfer data error. %s") % err)
LOG.error(msg)
LOG.error(_LE("JSON transfer data error. %s"), err)
raise
return host_lun_id
@ -1155,13 +1141,11 @@ class RestCommon(object):
initiator_name = connector['initiator']
volume_name = self._encode_name(volume['id'])
lun_id = volume.get('provider_location', None)
LOG.info(_LI(
'terminate_connection:volume name: %(volume)s, '
'initiator name: %(ini)s, '
'lun_id: %(lunid)s.')
% {'volume': volume_name,
'ini': initiator_name,
'lunid': lun_id})
LOG.info(_LI('terminate_connection:volume name: %(volume)s, '
'initiator name: %(ini)s, lun_id: %(lunid)s.'),
{'volume': volume_name,
'ini': initiator_name,
'lunid': lun_id})
if lun_id:
if self._check_lun_exist(lun_id) is True:
@ -1393,10 +1377,10 @@ class RestCommon(object):
ip_info = self._get_iscsi_port_info(iscsiip)
iqn_prefix = self._get_iscsi_tgt_port()
LOG.info(_LI('Request ip info is: %s.') % ip_info)
LOG.info(_LI('Request ip info is: %s.'), ip_info)
split_list = ip_info.split(".")
newstr = split_list[1] + split_list[2]
LOG.info(_LI('New str info is: %s.') % newstr)
LOG.info(_LI('New str info is: %s.'), newstr)
if ip_info:
if newstr[0] == 'A':
@ -1411,7 +1395,7 @@ class RestCommon(object):
iqn_suffix = iqn_suffix[i:]
break
iqn = iqn_prefix + ':' + iqn_suffix + ':' + iscsiip
LOG.info(_LI('_get_tgt_iqn: iSCSI target iqn is: %s.') % iqn)
LOG.info(_LI('_get_tgt_iqn: iSCSI target iqn is: %s.'), iqn)
return iqn
else:
return None
@ -1495,7 +1479,7 @@ class RestCommon(object):
try:
tree.write(filename, 'UTF-8')
except Exception as err:
LOG.warning(_LW('Unable to access config file. %s') % err)
LOG.warning(_LW('Unable to access config file. %s'), err)
return logininfo
@ -1589,12 +1573,11 @@ class RestCommon(object):
new_volume_size = int(new_size) * units.Gi / 512
volume_name = self._encode_name(volume['id'])
LOG.info(_LI(
'Extend Volume: %(volumename)s, oldsize:'
' %(oldsize)s newsize: %(newsize)s.')
% {'volumename': volume_name,
'oldsize': volume_size,
'newsize': new_volume_size})
LOG.info(_LI('Extend Volume: %(volumename)s, oldsize: %(oldsize)s '
'newsize: %(newsize)s.'),
{'volumename': volume_name,
'oldsize': volume_size,
'newsize': new_volume_size})
lun_id = self._get_volume_by_name(volume_name)
@ -1634,7 +1617,7 @@ class RestCommon(object):
else:
kvs = specs
LOG.info(_LI('The QoS sepcs is: %s.') % kvs)
LOG.info(_LI('The QoS specs are: %s.'), kvs)
for key, value in kvs.iteritems():
if key in huawei_valid_keys:
qos[key.upper()] = value
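
Beyond deferring interpolation, the rewritten calls in this file keep each translatable message as a single literal inside _LI()/_LW() with named placeholders, rather than applying '%' before the marker. That matters for translators: they receive the whole sentence and can reorder %(volume)s and %(size)s freely. Sketch with stdlib logging standing in for the translated call (the _LI marker itself is assumed from cinder.i18n):

import logging

logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)

volume_name, volume_size = 'vol-42', 10

# One literal, named placeholders, mapping passed as the log argument.
LOG.info('Create Volume: %(volume)s Size: %(size)s.',
         {'volume': volume_name, 'size': volume_size})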

View File

@ -406,7 +406,7 @@ class FlashSystemDriver(san.SanDriver):
out, err = self._ssh(ssh_cmd)
except processutils.ProcessExecutionError:
LOG.warning(_LW('_execute_command_and_parse_attributes: Failed to '
'run command: %s.'), six.text_type(ssh_cmd))
'run command: %s.'), ssh_cmd)
# Does not raise exception when command encounters error.
# Only return and the upper logic decides what to do.
return None
@ -1140,9 +1140,8 @@ class FlashSystemDriver(san.SanDriver):
def validate_connector(self, connector):
"""Check connector."""
if 'FC' == self._protocol and 'wwpns' not in connector:
msg = (_LE('The connector does not contain the '
'required information: wwpns is missing'))
LOG.error(msg)
LOG.error(_LE('The connector does not contain the '
'required information: wwpns is missing'))
raise exception.InvalidConnectorException(missing='wwpns')
def create_volume(self, volume):

View File

@ -136,9 +136,8 @@ class GPFSDriver(driver.VolumeDriver):
gpfs_state = lines[1].split(':')[state_token]
if gpfs_state != 'active':
LOG.error(_LE('GPFS is not active. Detailed output: %s.'), out)
exception_message = (_('GPFS is not running, state: %s.') %
gpfs_state)
raise exception.VolumeBackendAPIException(data=exception_message)
raise exception.VolumeBackendAPIException(
data=_('GPFS is not running, state: %s.') % gpfs_state)
def _get_filesystem_from_path(self, path):
"""Return filesystem for specified path."""

View File

@ -36,6 +36,7 @@ from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LI, _LW
@ -170,7 +171,8 @@ class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
self._run_ssh(ssh_cmd)
except processutils.ProcessExecutionError as e:
msg = (_('Failed in _ssh_operation while execution of ssh_cmd:'
'%(cmd)s. Error: %(error)s') % {'cmd': ssh_cmd, 'error': e})
'%(cmd)s. Error: %(error)s') %
{'cmd': ssh_cmd, 'error': six.text_type(e)})
LOG.exception(msg)
raise exception.VolumeBackendAPIException(data=msg)
@ -224,7 +226,7 @@ class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
msg = (_("Failed to resize volume "
"%(volume_id)s, error: %(error)s") %
{'volume_id': os.path.basename(path).split('-')[1],
'error': e.stderr})
'error': six.text_type(e.stderr)})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return True
@ -247,7 +249,8 @@ class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
try:
(out, _err) = self._run_ssh(ssh_cmd, check_exit_code=False)
except processutils.ProcessExecutionError as e:
msg = (_("Failed in _delete_snapfiles. Error: %s") % e.stderr)
msg = (_("Failed in _delete_snapfiles. Error: %s") %
six.text_type(e.stderr))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
fparent = None
@ -285,9 +288,9 @@ class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
def delete_volume(self, volume):
"""Deletes a logical volume."""
if not volume['provider_location']:
LOG.warn(_LW('Volume %s does not have '
'provider_location specified, '
'skipping.'), volume['name'])
LOG.warning(_LW('Volume %s does not have '
'provider_location specified, '
'skipping.'), volume['name'])
return
export_path = self._get_export_path(volume['id'])
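
The six.text_type() additions above go the other way: exception messages are consumed immediately when raised, so eager '%' interpolation is correct there, and the explicit unicode conversion keeps Python 2 and 3 behavior uniform. Sketch (stand-ins again, not the real cinder exception):

import six

_ = lambda s: s  # stand-in for the cinder.i18n user-facing marker

class VolumeBackendAPIException(Exception):
    def __init__(self, data):
        super(VolumeBackendAPIException, self).__init__(data)

def fail_delete_snapfiles(stderr):
    # Built eagerly because the string is needed right now, not at log time.
    msg = (_('Failed in _delete_snapfiles. Error: %s') %
           six.text_type(stderr))
    raise VolumeBackendAPIException(data=msg)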

View File

@ -44,7 +44,7 @@ from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import loopingcall
from cinder import utils
from cinder.volume.drivers.ibm.storwize_svc import helpers as storwize_helpers
@ -290,8 +290,8 @@ class StorwizeSVCDriver(san.SanDriver):
"""
volume_defined = self._helpers.is_vdisk_defined(volume['name'])
if not volume_defined:
LOG.error(_LE('ensure_export: Volume %s not found on storage')
% volume['name'])
LOG.error(_LE('ensure_export: Volume %s not found on storage'),
volume['name'])
def create_export(self, ctxt, volume):
model_update = None
@ -309,9 +309,8 @@ class StorwizeSVCDriver(san.SanDriver):
if 'FC' in self._state['enabled_protocols'] and 'wwpns' in connector:
valid = True
if not valid:
msg = (_LE('The connector does not contain the required '
'information.'))
LOG.error(msg)
LOG.error(_LE('The connector does not contain the required '
'information.'))
raise exception.InvalidConnectorException(
missing='initiator or wwpns')
@ -386,10 +385,10 @@ class StorwizeSVCDriver(san.SanDriver):
IO_group = volume_attributes['IO_group_id']
except KeyError as e:
LOG.error(_LE('Did not find expected column name in '
'lsvdisk: %s') % e)
msg = (_('initialize_connection: Missing volume '
'attribute for volume %s') % volume_name)
raise exception.VolumeBackendAPIException(data=msg)
'lsvdisk: %s'), e)
raise exception.VolumeBackendAPIException(
data=_('initialize_connection: Missing volume attribute for '
'volume %s') % volume_name)
try:
# Get preferred node and other nodes in I/O group
@ -413,8 +412,8 @@ class StorwizeSVCDriver(san.SanDriver):
if not preferred_node_entry and not vol_opts['multipath']:
# Get 1st node in I/O group
preferred_node_entry = io_group_nodes[0]
LOG.warn(_LW('initialize_connection: Did not find a preferred '
'node for volume %s') % volume_name)
LOG.warning(_LW('initialize_connection: Did not find a '
'preferred node for volume %s'), volume_name)
properties = {}
properties['target_discovered'] = False
@ -471,7 +470,7 @@ class StorwizeSVCDriver(san.SanDriver):
LOG.warning(_LW('Unable to find a preferred node match'
' for node %(node)s in the list of '
'available WWPNs on %(host)s. '
'Using first available.') %
'Using first available.'),
{'node': preferred_node,
'host': host_name})
properties['target_wwn'] = conn_wwpns[0]
@ -651,7 +650,7 @@ class StorwizeSVCDriver(san.SanDriver):
return replica_status
def extend_volume(self, volume, new_size):
LOG.debug('enter: extend_volume: volume %s' % volume['id'])
LOG.debug('enter: extend_volume: volume %s', volume['id'])
ret = self._helpers.ensure_vdisk_no_fc_mappings(volume['name'],
allow_snaps=False)
if not ret:
@ -662,7 +661,7 @@ class StorwizeSVCDriver(san.SanDriver):
extend_amt = int(new_size) - volume['size']
self._helpers.extend_vdisk(volume['name'], extend_amt)
LOG.debug('leave: extend_volume: volume %s' % volume['id'])
LOG.debug('leave: extend_volume: volume %s', volume['id'])
def add_vdisk_copy(self, volume, dest_pool, vol_type):
return self._helpers.add_vdisk_copy(volume, dest_pool,
@ -703,37 +702,34 @@ class StorwizeSVCDriver(san.SanDriver):
self._vdiskcopyops_loop.stop()
self._vdiskcopyops_loop = None
except KeyError:
msg = (_('_rm_vdisk_copy_op: Volume %s does not have any '
'registered vdisk copy operations.') % volume['id'])
LOG.error(msg)
LOG.error(_LE('_rm_vdisk_copy_op: Volume %s does not have any '
'registered vdisk copy operations.'), volume['id'])
return
except ValueError:
msg = (_('_rm_vdisk_copy_op: Volume %(vol)s does not have the '
'specified vdisk copy operation: orig=%(orig)s '
'new=%(new)s.')
% {'vol': volume['id'], 'orig': orig_copy_id,
'new': new_copy_id})
LOG.error(msg)
LOG.error(_LE('_rm_vdisk_copy_op: Volume %(vol)s does not have '
'the specified vdisk copy operation: orig=%(orig)s '
'new=%(new)s.'),
{'vol': volume['id'], 'orig': orig_copy_id,
'new': new_copy_id})
return
metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
volume['id'])
curr_ops = metadata.get('vdiskcopyops', None)
if not curr_ops:
msg = (_('_rm_vdisk_copy_op: Volume metadata %s does not have any '
'registered vdisk copy operations.') % volume['id'])
LOG.error(msg)
LOG.error(_LE('_rm_vdisk_copy_op: Volume metadata %s does not '
'have any registered vdisk copy operations.'),
volume['id'])
return
curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
try:
curr_ops_list.remove((orig_copy_id, new_copy_id))
except ValueError:
msg = (_('_rm_vdisk_copy_op: Volume %(vol)s metadata does not '
'have the specified vdisk copy operation: orig=%(orig)s '
'new=%(new)s.')
% {'vol': volume['id'], 'orig': orig_copy_id,
'new': new_copy_id})
LOG.error(msg)
LOG.error(_LE('_rm_vdisk_copy_op: Volume %(vol)s metadata does '
'not have the specified vdisk copy operation: '
'orig=%(orig)s new=%(new)s.'),
{'vol': volume['id'], 'orig': orig_copy_id,
'new': new_copy_id})
return
if len(curr_ops_list):
@ -775,7 +771,7 @@ class StorwizeSVCDriver(san.SanDriver):
try:
volume = self.db.volume_get(ctxt, vol_id)
except Exception:
LOG.warn(_LW('Volume %s does not exist.'), vol_id)
LOG.warning(_LW('Volume %s does not exist.'), vol_id)
del self._vdiskcopyops[vol_id]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
@ -787,12 +783,11 @@ class StorwizeSVCDriver(san.SanDriver):
synced = self._helpers.is_vdisk_copy_synced(volume['name'],
copy_op[1])
except Exception:
msg = (_('_check_volume_copy_ops: Volume %(vol)s does not '
'have the specified vdisk copy operation: '
'orig=%(orig)s new=%(new)s.')
% {'vol': volume['id'], 'orig': copy_op[0],
LOG.info(_LI('_check_volume_copy_ops: Volume %(vol)s does '
'not have the specified vdisk copy '
'operation: orig=%(orig)s new=%(new)s.'),
{'vol': volume['id'], 'orig': copy_op[0],
'new': copy_op[1]})
LOG.info(msg)
else:
if synced:
self._helpers.rm_vdisk_copy(volume['name'], copy_op[0])
@ -813,7 +808,7 @@ class StorwizeSVCDriver(san.SanDriver):
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s' %
LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s',
{'id': volume['id'], 'host': host['host']})
false_ret = (False, None)
@ -831,7 +826,7 @@ class StorwizeSVCDriver(san.SanDriver):
self._check_volume_copy_ops()
new_op = self.add_vdisk_copy(volume['name'], dest_pool, vol_type)
self._add_vdisk_copy_op(ctxt, volume, new_op)
LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s' %
LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s',
{'id': volume['id'], 'host': host['host']})
return (True, None)
@ -854,10 +849,10 @@ class StorwizeSVCDriver(san.SanDriver):
self._state, (new, old))
LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s' % {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host})
'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host})
ignore_keys = ['protocol', 'multipath']
no_copy_keys = ['warning', 'autoexpand', 'easytier']
@ -944,10 +939,10 @@ class StorwizeSVCDriver(san.SanDriver):
new_type)
LOG.debug('exit: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s' % {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host['host']})
'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host['host']})
return True, model_update
def manage_existing(self, volume, ref):

View File

@ -156,8 +156,8 @@ class StorwizeHelpers(object):
if 'active' == s:
wwpns.add(i)
node['WWPN'] = list(wwpns)
LOG.info(_LI('WWPN on node %(node)s: %(wwpn)s')
% {'node': node['id'], 'wwpn': node['WWPN']})
LOG.info(_LI('WWPN on node %(node)s: %(wwpn)s'),
{'node': node['id'], 'wwpn': node['WWPN']})
def add_chap_secret_to_host(self, host_name):
"""Generate and store a randomly-generated CHAP secret for the host."""
@ -192,7 +192,7 @@ class StorwizeHelpers(object):
def get_host_from_connector(self, connector):
"""Return the Storwize host described by the connector."""
LOG.debug('enter: get_host_from_connector: %s' % connector)
LOG.debug('enter: get_host_from_connector: %s', connector)
# If we have FC information, we have a faster lookup option
host_name = None
@ -210,7 +210,7 @@ class StorwizeHelpers(object):
self.handle_keyerror('lsfabric', wwpn_info)
if host_name:
LOG.debug('leave: get_host_from_connector: host %s' % host_name)
LOG.debug('leave: get_host_from_connector: host %s', host_name)
return host_name
# That didn't work, so try exhaustive search
@ -234,7 +234,7 @@ class StorwizeHelpers(object):
if found:
break
LOG.debug('leave: get_host_from_connector: host %s' % host_name)
LOG.debug('leave: get_host_from_connector: host %s', host_name)
return host_name
def create_host(self, connector):
@ -245,7 +245,7 @@ class StorwizeHelpers(object):
host name (at most 55 characters), plus a random 8-character suffix to
avoid collisions. The total length should be at most 63 characters.
"""
LOG.debug('enter: create_host: host %s' % connector['host'])
LOG.debug('enter: create_host: host %s', connector['host'])
# Before we start, make sure host name is a string and that we have at
# least one port.
@ -292,7 +292,7 @@ class StorwizeHelpers(object):
for port in ports:
self.ssh.addhostport(host_name, port[0], port[1])
LOG.debug('leave: create_host: host %(host)s - %(host_name)s' %
LOG.debug('leave: create_host: host %(host)s - %(host_name)s',
{'host': connector['host'], 'host_name': host_name})
return host_name
@ -303,8 +303,8 @@ class StorwizeHelpers(object):
"""Create a mapping between a volume to a host."""
LOG.debug('enter: map_vol_to_host: volume %(volume_name)s to '
'host %(host_name)s'
% {'volume_name': volume_name, 'host_name': host_name})
'host %(host_name)s',
{'volume_name': volume_name, 'host_name': host_name})
# Check if this volume is already mapped to this host
mapped = False
@ -329,7 +329,7 @@ class StorwizeHelpers(object):
multihostmap)
LOG.debug('leave: map_vol_to_host: LUN %(result_lun)s, volume '
'%(volume_name)s, host %(host_name)s' %
'%(volume_name)s, host %(host_name)s',
{'result_lun': result_lun,
'volume_name': volume_name,
'host_name': host_name})
@ -339,21 +339,21 @@ class StorwizeHelpers(object):
"""Unmap the volume and delete the host if it has no more mappings."""
LOG.debug('enter: unmap_vol_from_host: volume %(volume_name)s from '
'host %(host_name)s'
% {'volume_name': volume_name, 'host_name': host_name})
'host %(host_name)s',
{'volume_name': volume_name, 'host_name': host_name})
# Check if the mapping exists
resp = self.ssh.lsvdiskhostmap(volume_name)
if not len(resp):
LOG.warning(_LW('unmap_vol_from_host: No mapping of volume '
'%(vol_name)s to any host found.') %
'%(vol_name)s to any host found.'),
{'vol_name': volume_name})
return
if host_name is None:
if len(resp) > 1:
LOG.warning(_LW('unmap_vol_from_host: Multiple mappings of '
'volume %(vol_name)s found, no host '
'specified.') % {'vol_name': volume_name})
'specified.'), {'vol_name': volume_name})
return
else:
host_name = resp[0]['host_name']
@ -364,7 +364,7 @@ class StorwizeHelpers(object):
found = True
if not found:
LOG.warning(_LW('unmap_vol_from_host: No mapping of volume '
'%(vol_name)s to host %(host)s found.') %
'%(vol_name)s to host %(host)s found.'),
{'vol_name': volume_name, 'host': host_name})
# We now know that the mapping exists
@ -376,8 +376,8 @@ class StorwizeHelpers(object):
self.delete_host(host_name)
LOG.debug('leave: unmap_vol_from_host: volume %(volume_name)s from '
'host %(host_name)s'
% {'volume_name': volume_name, 'host_name': host_name})
'host %(host_name)s',
{'volume_name': volume_name, 'host_name': host_name})
@staticmethod
def build_default_opts(config):
@ -627,10 +627,10 @@ class StorwizeHelpers(object):
return params
def create_vdisk(self, name, size, units, pool, opts):
LOG.debug('enter: create_vdisk: vdisk %s ' % name)
LOG.debug('enter: create_vdisk: vdisk %s ', name)
params = self._get_vdisk_create_params(opts)
self.ssh.mkvdisk(name, size, units, pool, opts, params)
LOG.debug('leave: _create_vdisk: volume %s ' % name)
LOG.debug('leave: _create_vdisk: volume %s ', name)
def get_vdisk_attributes(self, vdisk):
attrs = self.ssh.lsvdisk(vdisk)
@ -877,7 +877,7 @@ class StorwizeHelpers(object):
def _check_vdisk_fc_mappings(self, name, allow_snaps=True):
"""FlashCopy mapping check helper."""
LOG.debug('Loopcall: _check_vdisk_fc_mappings(), vdisk %s' % name)
LOG.debug('Loopcall: _check_vdisk_fc_mappings(), vdisk %s', name)
mapping_ids = self._get_vdisk_fc_mappings(name)
wait_for_copy = False
for map_id in mapping_ids:
@ -936,26 +936,26 @@ class StorwizeHelpers(object):
# before it finishes. Don't set the sleep interval shorter
# than the heartbeat. Otherwise volume service heartbeat
# will not be serviced.
LOG.debug('Calling _ensure_vdisk_no_fc_mappings: vdisk %s'
% name)
LOG.debug('Calling _ensure_vdisk_no_fc_mappings: vdisk %s',
name)
ret = timer.start(interval=self.check_fcmapping_interval).wait()
timer.stop()
return ret
def delete_vdisk(self, vdisk, force):
"""Ensures that vdisk is not part of FC mapping and deletes it."""
LOG.debug('enter: delete_vdisk: vdisk %s' % vdisk)
LOG.debug('enter: delete_vdisk: vdisk %s', vdisk)
if not self.is_vdisk_defined(vdisk):
LOG.info(_LI('Tried to delete non-existent vdisk %s.') % vdisk)
LOG.info(_LI('Tried to delete non-existent vdisk %s.'), vdisk)
return
self.ensure_vdisk_no_fc_mappings(vdisk)
self.ssh.rmvdisk(vdisk, force=force)
LOG.debug('leave: delete_vdisk: vdisk %s' % vdisk)
LOG.debug('leave: delete_vdisk: vdisk %s', vdisk)
def create_copy(self, src, tgt, src_id, config, opts,
full_copy, pool=None):
"""Create a new snapshot using FlashCopy."""
LOG.debug('enter: create_copy: snapshot %(src)s to %(tgt)s' %
LOG.debug('enter: create_copy: snapshot %(src)s to %(tgt)s',
{'tgt': tgt, 'src': src})
src_attrs = self.get_vdisk_attributes(src)
@ -978,7 +978,7 @@ class StorwizeHelpers(object):
self.delete_vdisk(tgt, True)
LOG.debug('leave: _create_copy: snapshot %(tgt)s from '
'vdisk %(src)s' %
'vdisk %(src)s',
{'tgt': tgt, 'src': src})
def extend_vdisk(self, vdisk, amount):
@ -1080,7 +1080,7 @@ class StorwizeHelpers(object):
def change_vdisk_iogrp(self, vdisk, state, iogrp):
if state['code_level'] < (6, 4, 0, 0):
LOG.debug('Ignore change IO group as storage code level is '
'%(code_level)s, below the required 6.4.0.0' %
'%(code_level)s, below the required 6.4.0.0',
{'code_level': state['code_level']})
else:
self.ssh.movevdisk(vdisk, str(iogrp[0]))
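
The hunks above all make the same change: format parameters move out of a `%` interpolation and into the logging call itself, so formatting is deferred until the record is actually emitted and is skipped entirely when the level is disabled. A minimal sketch of the two shapes, with an illustrative `node` dict standing in for the driver's data:

    from oslo_log import log as logging

    from cinder.i18n import _LI

    LOG = logging.getLogger(__name__)

    node = {'id': 'node1', 'WWPN': ['2100000000000001']}  # illustrative

    # Eager: the string is built at call time, whether or not INFO
    # is enabled.
    LOG.info(_LI('WWPN on node %(node)s: %(wwpn)s')
             % {'node': node['id'], 'wwpn': node['WWPN']})

    # Lazy: the logger interpolates only when the record is emitted.
    LOG.info(_LI('WWPN on node %(node)s: %(wwpn)s'),
             {'node': node['id'], 'wwpn': node['WWPN']})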

View File

@ -17,7 +17,7 @@
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _
from cinder.i18n import _, _LI
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
@ -89,8 +89,8 @@ class StorwizeSVCReplicationStretchedCluster(StorwizeSVCReplication):
self.driver._helpers.rm_vdisk_copy(volume['name'],
secondary['copy_id'])
else:
LOG.info(('Could not find replica to delete of'
' volume %(vol)s.') % {'vol': vdisk})
LOG.info(_LI('Could not find replica to delete of'
' volume %(vol)s.'), {'vol': vdisk})
def test_replica(self, tgt_volume, src_volume):
vdisk = src_volume['name']

View File

@ -167,10 +167,10 @@ class StorwizeSSH(object):
if not multihostmap:
LOG.error(_LE('storwize_svc_multihostmap_enabled is set '
'to False, not allowing multi host mapping.'))
msg = 'CMMVC6071E The VDisk-to-host mapping '\
'was not created because the VDisk is '\
'already mapped to a host.\n"'
raise exception.VolumeDriverException(message=msg)
raise exception.VolumeDriverException(
message=_('CMMVC6071E The VDisk-to-host mapping was not '
'created because the VDisk is already mapped '
'to a host.\n"'))
ssh_cmd.insert(ssh_cmd.index('mkvdiskhostmap') + 1, '-force')
return self.run_ssh_check_created(ssh_cmd)
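
The mkvdiskhostmap hunk above also shows the split between log markers and exception text: `_LE` stays on the `LOG.error()` line, while the message handed to the exception is wrapped with plain `_` so translation is deferred until it reaches the user. A hedged sketch of that split, with a shortened illustrative message:

    from oslo_log import log as logging

    from cinder import exception
    from cinder.i18n import _, _LE

    LOG = logging.getLogger(__name__)


    def refuse_multihost_mapping():
        # _LE marks the operator-facing log line.
        LOG.error(_LE('storwize_svc_multihostmap_enabled is set '
                      'to False, not allowing multi host mapping.'))
        # _ marks the user-facing exception message (illustrative text).
        raise exception.VolumeDriverException(
            message=_('The VDisk-to-host mapping was not created '
                      'because the VDisk is already mapped to a host.'))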

View File

@ -24,6 +24,7 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import units
import six
from cinder.brick import exception as brick_exception
from cinder.brick.local_dev import lvm as lvm
@ -132,15 +133,15 @@ class LVMVolumeDriver(driver.VolumeDriver):
# the cow table and only overwriting what's necessary?
# for now we're still skipping on snaps due to hang issue
if not os.path.exists(dev_path):
msg = (_LE('Volume device file path %s does not exist.')
msg = (_('Volume device file path %s does not exist.')
% dev_path)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
size_in_g = volume.get('volume_size') or volume.get('size')
if size_in_g is None:
msg = (_LE("Size for volume: %s not found, "
"cannot secure delete.") % volume['id'])
msg = (_("Size for volume: %s not found, cannot secure delete.")
% volume['id'])
LOG.error(msg)
raise exception.InvalidParameterValue(msg)
@ -170,7 +171,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug(("Updating volume stats"))
LOG.debug("Updating volume stats")
if self.vg is None:
LOG.warning(_LW('Unable to update stats on non-initialized '
'Volume Group: %s'),
@ -290,7 +291,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
except processutils.ProcessExecutionError as exc:
exception_message = (_("Failed to create thin pool, "
"error message was: %s")
% exc.stderr)
% six.text_type(exc.stderr))
raise exception.VolumeBackendAPIException(
data=exception_message)
@ -336,8 +337,8 @@ class LVMVolumeDriver(driver.VolumeDriver):
return True
if self.vg.lv_has_snapshot(volume['name']):
LOG.error(_LE('Unabled to delete due to existing snapshot '
'for volume: %s') % volume['name'])
LOG.error(_LE('Unable to delete due to existing snapshot '
'for volume: %s'), volume['name'])
raise exception.VolumeIsBusy(volume_name=volume['name'])
self._delete_volume(volume)
@ -355,7 +356,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
# If the snapshot isn't present, then don't attempt to delete
LOG.warning(_LW("snapshot: %s not found, "
"skipping delete operations") % snapshot['name'])
"skipping delete operations"), snapshot['name'])
LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id'])
return True
@ -393,7 +394,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
LOG.info(_LI('Creating clone of volume: %s') % src_vref['id'])
LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
volume_name = src_vref['name']
temp_id = 'tmp-snap-%s' % volume['id']
temp_snapshot = {'volume_name': volume_name,
@ -541,9 +542,8 @@ class LVMVolumeDriver(driver.VolumeDriver):
try:
(vg for vg in vg_list if vg['name'] == dest_vg).next()
except StopIteration:
message = (_LE("Destination Volume Group %s does not exist") %
dest_vg)
LOG.error(message)
LOG.error(_LE("Destination Volume Group %s does not exist"),
dest_vg)
return false_ret
helper = utils.get_root_helper()
@ -574,7 +574,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
else:
message = (_("Refusing to migrate volume ID: %(id)s. Please "
"check your configuration because source and "
"destination are the same Volume Group: %(name)s."),
"destination are the same Volume Group: %(name)s.") %
{'id': volume['id'], 'name': self.vg.vg_name})
LOG.exception(message)
raise exception.VolumeBackendAPIException(data=message)
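
Two details in the LVM hunks are easy to miss: exception payloads switch from `_LE` to plain `_` (they are user-facing messages, not log lines), and `six.text_type()` is wrapped around `exc.stderr` before interpolation so the value is coerced to text on both Python 2 and 3. A sketch under the assumption that `exc` is a caught `ProcessExecutionError`:

    import six

    from cinder import exception
    from cinder.i18n import _


    def raise_thin_pool_error(exc):
        # six.text_type() coerces exc.stderr (which may be bytes)
        # before it is spliced into the translated message.
        exception_message = (_("Failed to create thin pool, "
                               "error message was: %s")
                             % six.text_type(exc.stderr))
        raise exception.VolumeBackendAPIException(data=exception_message)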

View File

@ -73,8 +73,8 @@ class NetAppDriver(driver.ProxyVD):
na_utils.check_flags(NetAppDriver.REQUIRED_FLAGS, config)
app_version = na_utils.OpenStackInfo().info()
LOG.info(_LI('OpenStack OS Version Info: %(info)s') % {
'info': app_version})
LOG.info(_LI('OpenStack OS Version Info: %(info)s'),
{'info': app_version})
kwargs['app_version'] = app_version
return NetAppDriver.create_driver(config.netapp_storage_family,
@ -91,7 +91,7 @@ class NetAppDriver(driver.ProxyVD):
fmt = {'storage_family': storage_family,
'storage_protocol': storage_protocol}
LOG.info(_LI('Requested unified config: %(storage_family)s and '
'%(storage_protocol)s.') % fmt)
'%(storage_protocol)s.'), fmt)
family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family)
if family_meta is None:
@ -109,5 +109,5 @@ class NetAppDriver(driver.ProxyVD):
kwargs['netapp_mode'] = 'proxy'
driver = importutils.import_object(driver_loc, *args, **kwargs)
LOG.info(_LI('NetApp driver of family %(storage_family)s and protocol '
'%(storage_protocol)s loaded.') % fmt)
'%(storage_protocol)s loaded.'), fmt)
return driver

View File

@ -24,7 +24,6 @@ Volume driver library for NetApp 7-mode block storage systems.
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LW
@ -125,7 +124,7 @@ class NetAppBlockStorage7modeLibrary(block_base.
if self._get_vol_option(volume_name, 'root') == 'true':
return volume_name
LOG.warning(_LW('Could not determine root volume name '
'on %s.') % self._get_owner())
'on %s.'), self._get_owner())
return None
def _get_owner(self):
@ -314,7 +313,7 @@ class NetAppBlockStorage7modeLibrary(block_base.
self.vol_refresh_time = timeutils.utcnow()
except Exception as e:
LOG.warning(_LW("Error refreshing volume info. Message: %s"),
six.text_type(e))
e)
finally:
na_utils.set_safe_attr(self, 'vol_refresh_running', False)
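
The inverse cleanup appears in this file: `six.text_type(e)` is dropped where the exception is passed as a logging argument, because `%s` formatting stringifies its argument itself when the record is emitted. A self-contained sketch, with `do_refresh()` as a stand-in for any failing backend call:

    from oslo_log import log as logging

    from cinder.i18n import _LW

    LOG = logging.getLogger(__name__)


    def do_refresh():
        raise RuntimeError('backend unreachable')  # stand-in failure


    def refresh():
        try:
            do_refresh()
        except Exception as e:
            # No manual str() conversion needed; %s handles it at
            # emit time.
            LOG.warning(_LW("Error refreshing volume info. Message: %s"), e)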

View File

@ -56,9 +56,8 @@ class NetAppLun(object):
if prop in self.metadata:
return self.metadata[prop]
name = self.name
msg = _("No metadata property %(prop)s defined for the LUN %(name)s")
msg_fmt = {'prop': prop, 'name': name}
LOG.debug(msg % msg_fmt)
LOG.debug("No metadata property %(prop)s defined for the LUN %(name)s",
{'prop': prop, 'name': name})
def __str__(self, *args, **kwargs):
return 'NetApp Lun[handle:%s, name:%s, size:%s, metadata:%s]'\
@ -144,7 +143,7 @@ class NetAppBlockStorageLibrary(object):
def create_volume(self, volume):
"""Driver entry point for creating a new volume (Data ONTAP LUN)."""
LOG.debug('create_volume on %s' % volume['host'])
LOG.debug('create_volume on %s', volume['host'])
# get Data ONTAP volume name as pool name
ontap_volume_name = volume_utils.extract_host(volume['host'],
@ -174,7 +173,7 @@ class NetAppBlockStorageLibrary(object):
self._create_lun(ontap_volume_name, lun_name, size,
metadata, qos_policy_group)
LOG.debug('Created LUN with name %s' % lun_name)
LOG.debug('Created LUN with name %s', lun_name)
metadata['Path'] = '/vol/%s/%s' % (ontap_volume_name, lun_name)
metadata['Volume'] = ontap_volume_name
@ -188,9 +187,8 @@ class NetAppBlockStorageLibrary(object):
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata')
if not metadata:
msg = _LW("No entry in LUN table for volume/snapshot %(name)s.")
msg_fmt = {'name': name}
LOG.warning(msg % msg_fmt)
LOG.warning(_LW("No entry in LUN table for volume/snapshot"
" %(name)s."), {'name': name})
return
self.zapi_client.destroy_lun(metadata['Path'])
self.lun_table.pop(name)
@ -229,7 +227,7 @@ class NetAppBlockStorageLibrary(object):
def delete_snapshot(self, snapshot):
"""Driver entry point for deleting a snapshot."""
self.delete_volume(snapshot)
LOG.debug("Snapshot %s deletion successful" % snapshot['name'])
LOG.debug("Snapshot %s deletion successful", snapshot['name'])
def create_volume_from_snapshot(self, volume, snapshot):
"""Driver entry point for creating a new volume from a snapshot.
@ -381,8 +379,7 @@ class NetAppBlockStorageLibrary(object):
except exception.VolumeNotFound as e:
LOG.error(_LE("Message: %s"), e.msg)
except Exception as e:
LOG.error(_LE("Error getting LUN attribute. Exception: %s"),
e.__str__())
LOG.error(_LE("Error getting LUN attribute. Exception: %s"), e)
return None
def _create_lun_meta(self, lun):
@ -518,7 +515,7 @@ class NetAppBlockStorageLibrary(object):
else:
LOG.error(_LE("Unknown exception in"
" post clone resize LUN %s."), seg[-1])
LOG.error(_LE("Exception details: %s") % (e.__str__()))
LOG.error(_LE("Exception details: %s"), e)
def _get_lun_block_count(self, path):
"""Gets block counts for the LUN."""
@ -633,19 +630,17 @@ class NetAppBlockStorageLibrary(object):
name = volume['name']
lun_id = self._map_lun(name, [initiator_name], 'iscsi', None)
msg = "Mapped LUN %(name)s to the initiator %(initiator_name)s"
msg_fmt = {'name': name, 'initiator_name': initiator_name}
LOG.debug(msg % msg_fmt)
LOG.debug("Mapped LUN %(name)s to the initiator %(initiator_name)s",
{'name': name, 'initiator_name': initiator_name})
target_list = self.zapi_client.get_iscsi_target_details()
if not target_list:
msg = _('Failed to get LUN target list for the LUN %s')
raise exception.VolumeBackendAPIException(data=msg % name)
raise exception.VolumeBackendAPIException(
data=_('Failed to get LUN target list for the LUN %s') % name)
msg = ("Successfully fetched target list for LUN %(name)s and "
"initiator %(initiator_name)s")
msg_fmt = {'name': name, 'initiator_name': initiator_name}
LOG.debug(msg % msg_fmt)
LOG.debug("Successfully fetched target list for LUN %(name)s and "
"initiator %(initiator_name)s",
{'name': name, 'initiator_name': initiator_name})
preferred_target = self._get_preferred_target_from_list(
target_list)
@ -690,9 +685,9 @@ class NetAppBlockStorageLibrary(object):
metadata = self._get_lun_attr(name, 'metadata')
path = metadata['Path']
self._unmap_lun(path, [initiator_name])
msg = _("Unmapped LUN %(name)s from the initiator %(initiator_name)s")
msg_fmt = {'name': name, 'initiator_name': initiator_name}
LOG.debug(msg % msg_fmt)
LOG.debug("Unmapped LUN %(name)s from the initiator "
"%(initiator_name)s",
{'name': name, 'initiator_name': initiator_name})
def initialize_connection_fc(self, volume, connector):
"""Initializes the connection and returns connection info.
@ -744,21 +739,20 @@ class NetAppBlockStorageLibrary(object):
lun_id = self._map_lun(volume_name, initiators, 'fcp', None)
msg = _("Mapped LUN %(name)s to the initiator(s) %(initiators)s")
msg_fmt = {'name': volume_name, 'initiators': initiators}
LOG.debug(msg % msg_fmt)
LOG.debug("Mapped LUN %(name)s to the initiator(s) %(initiators)s",
{'name': volume_name, 'initiators': initiators})
target_wwpns, initiator_target_map, num_paths = \
self._build_initiator_target_map(connector)
if target_wwpns:
msg = _("Successfully fetched target details for LUN %(name)s "
"and initiator(s) %(initiators)s")
msg_fmt = {'name': volume_name, 'initiators': initiators}
LOG.debug(msg % msg_fmt)
LOG.debug("Successfully fetched target details for LUN %(name)s "
"and initiator(s) %(initiators)s",
{'name': volume_name, 'initiators': initiators})
else:
msg = _('Failed to get LUN target details for the LUN %s')
raise exception.VolumeBackendAPIException(data=msg % volume_name)
raise exception.VolumeBackendAPIException(
data=_('Failed to get LUN target details for '
'the LUN %s') % volume_name)
target_info = {'driver_volume_type': 'fibre_channel',
'data': {'target_discovered': True,
@ -790,9 +784,8 @@ class NetAppBlockStorageLibrary(object):
self._unmap_lun(path, initiators)
msg = _("Unmapped LUN %(name)s from the initiator %(initiators)s")
msg_fmt = {'name': name, 'initiators': initiators}
LOG.debug(msg % msg_fmt)
LOG.debug("Unmapped LUN %(name)s from the initiator %(initiators)s",
{'name': name, 'initiators': initiators})
info = {'driver_volume_type': 'fibre_channel',
'data': {}}

View File

@ -111,7 +111,7 @@ class NetAppBlockStorageCmodeLibrary(block_base.
volume = metadata['Volume']
self.zapi_client.clone_lun(volume, name, new_name, space_reserved,
src_block=0, dest_block=0, block_count=0)
LOG.debug("Cloned LUN with new name %s" % new_name)
LOG.debug("Cloned LUN with new name %s", new_name)
lun = self.zapi_client.get_lun_by_args(vserver=self.vserver,
path='/vol/%s/%s'
% (volume, new_name))

View File

@ -62,7 +62,7 @@ class NaServer(object):
self._password = password
self._refresh_conn = True
LOG.debug('Using NetApp controller: %s' % self._host)
LOG.debug('Using NetApp controller: %s', self._host)
def get_transport_type(self):
"""Get the transport type protocol."""

View File

@ -127,7 +127,7 @@ class Client(client_base.Client):
lun_list.extend(luns)
except netapp_api.NaApiError:
LOG.warning(_LW("Error finding LUNs for volume %s."
" Verify volume exists.") % vol)
" Verify volume exists."), vol)
else:
luns = self._get_vol_luns(None)
lun_list.extend(luns)
@ -262,10 +262,10 @@ class Client(client_base.Client):
if clone_ops_info.get_child_content('clone-state')\
== 'completed':
LOG.debug("Clone operation with src %(name)s"
" and dest %(new_name)s completed" % fmt)
" and dest %(new_name)s completed", fmt)
else:
LOG.debug("Clone operation with src %(name)s"
" and dest %(new_name)s failed" % fmt)
" and dest %(new_name)s failed", fmt)
raise netapp_api.NaApiError(
clone_ops_info.get_child_content('error'),
clone_ops_info.get_child_content('reason'))
@ -312,9 +312,8 @@ class Client(client_base.Client):
% (export_path))
def clone_file(self, src_path, dest_path):
msg_fmt = {'src_path': src_path, 'dest_path': dest_path}
LOG.debug("""Cloning with src %(src_path)s, dest %(dest_path)s"""
% msg_fmt)
LOG.debug("Cloning with src %(src_path)s, dest %(dest_path)s",
{'src_path': src_path, 'dest_path': dest_path})
clone_start = netapp_api.NaElement.create_node_with_children(
'clone-start',
**{'source-path': src_path,
@ -392,8 +391,8 @@ class Client(client_base.Client):
'file-usage-get', **{'path': path})
res = self.connection.invoke_successfully(file_use)
bytes = res.get_child_content('unique-bytes')
LOG.debug('file-usage for path %(path)s is %(bytes)s'
% {'path': path, 'bytes': bytes})
LOG.debug('file-usage for path %(path)s is %(bytes)s',
{'path': path, 'bytes': bytes})
return bytes
def get_ifconfig(self):

View File

@ -83,12 +83,11 @@ class Client(object):
self.connection.invoke_successfully(lun_create, True)
except netapp_api.NaApiError as ex:
with excutils.save_and_reraise_exception():
msg = _LE("Error provisioning volume %(lun_name)s on "
"%(volume_name)s. Details: %(ex)s")
msg_args = {'lun_name': lun_name,
'volume_name': volume_name,
'ex': six.text_type(ex)}
LOG.error(msg % msg_args)
LOG.error(_LE("Error provisioning volume %(lun_name)s on "
"%(volume_name)s. Details: %(ex)s"),
{'lun_name': lun_name,
'volume_name': volume_name,
'ex': ex})
def destroy_lun(self, path, force=True):
"""Destroys the LUN at the path."""
@ -99,7 +98,7 @@ class Client(object):
lun_destroy.add_new_child('force', 'true')
self.connection.invoke_successfully(lun_destroy, True)
seg = path.split("/")
LOG.debug("Destroyed LUN %s" % seg[-1])
LOG.debug("Destroyed LUN %s", seg[-1])
def map_lun(self, path, igroup_name, lun_id=None):
"""Maps LUN to the initiator and returns LUN id assigned."""
@ -114,9 +113,8 @@ class Client(object):
except netapp_api.NaApiError as e:
code = e.code
message = e.message
msg = _LW('Error mapping LUN. Code :%(code)s, Message:%(message)s')
msg_fmt = {'code': code, 'message': message}
LOG.warning(msg % msg_fmt)
LOG.warning(_LW('Error mapping LUN. Code :%(code)s, Message: '
'%(message)s'), {'code': code, 'message': message})
raise
def unmap_lun(self, path, igroup_name):
@ -127,11 +125,10 @@ class Client(object):
try:
self.connection.invoke_successfully(lun_unmap, True)
except netapp_api.NaApiError as e:
msg = _LW("Error unmapping LUN. Code :%(code)s,"
" Message:%(message)s")
msg_fmt = {'code': e.code, 'message': e.message}
exc_info = sys.exc_info()
LOG.warning(msg % msg_fmt)
LOG.warning(_LW("Error unmapping LUN. Code :%(code)s, Message: "
"%(message)s"), {'code': e.code,
'message': e.message})
# if the LUN is already unmapped
if e.code == '13115' or e.code == '9016':
pass
@ -186,8 +183,8 @@ class Client(object):
geometry['max_resize'] =\
result.get_child_content("max-resize-size")
except Exception as e:
LOG.error(_LE("LUN %(path)s geometry failed. Message - %(msg)s")
% {'path': path, 'msg': e.message})
LOG.error(_LE("LUN %(path)s geometry failed. Message - %(msg)s"),
{'path': path, 'msg': e.message})
return geometry
def get_volume_options(self, volume_name):
@ -205,8 +202,8 @@ class Client(object):
"""Moves the LUN at path to new path."""
seg = path.split("/")
new_seg = new_path.split("/")
LOG.debug("Moving LUN %(name)s to %(new_name)s."
% {'name': seg[-1], 'new_name': new_seg[-1]})
LOG.debug("Moving LUN %(name)s to %(new_name)s.",
{'name': seg[-1], 'new_name': new_seg[-1]})
lun_move = netapp_api.NaElement("lun-move")
lun_move.add_new_child("path", path)
lun_move.add_new_child("new-path", new_path)
@ -337,6 +334,6 @@ class Client(object):
na_server.invoke_successfully(ems, True)
LOG.debug("ems executed successfully.")
except netapp_api.NaApiError as e:
LOG.warning(_LW("Failed to invoke ems. Message : %s") % e)
LOG.warning(_LW("Failed to invoke ems. Message : %s"), e)
finally:
requester.last_ems = timeutils.utcnow()
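
The provisioning hunk in this file logs inside `excutils.save_and_reraise_exception()`, which records context without swallowing the original traceback. A sketch of the pattern, with `create_lun` as a hypothetical failing callable:

    from oslo_log import log as logging
    from oslo_utils import excutils

    from cinder.i18n import _LE

    LOG = logging.getLogger(__name__)


    def provision(create_lun):
        try:
            create_lun()
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                # Log with context, then let the original exception
                # propagate unchanged.
                LOG.error(_LE("Error provisioning volume. Details: %s"), ex)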

View File

@ -98,7 +98,7 @@ class Client(client_base.Client):
attr_list = result.get_child_by_name('attributes-list')
iscsi_service = attr_list.get_child_by_name('iscsi-service-info')
return iscsi_service.get_child_content('node-name')
LOG.debug('No iSCSI service found for vserver %s' % (self.vserver))
LOG.debug('No iSCSI service found for vserver %s', self.vserver)
return None
def get_lun_list(self):
@ -360,11 +360,10 @@ class Client(client_base.Client):
def clone_file(self, flex_vol, src_path, dest_path, vserver,
dest_exists=False):
"""Clones file on vserver."""
msg = ("Cloning with params volume %(volume)s, src %(src_path)s,"
"dest %(dest_path)s, vserver %(vserver)s")
msg_fmt = {'volume': flex_vol, 'src_path': src_path,
'dest_path': dest_path, 'vserver': vserver}
LOG.debug(msg % msg_fmt)
LOG.debug("Cloning with params volume %(volume)s, src %(src_path)s, "
"dest %(dest_path)s, vserver %(vserver)s",
{'volume': flex_vol, 'src_path': src_path,
'dest_path': dest_path, 'vserver': vserver})
clone_create = netapp_api.NaElement.create_node_with_children(
'clone-create',
**{'volume': flex_vol, 'source-path': src_path,
@ -381,8 +380,8 @@ class Client(client_base.Client):
'file-usage-get', **{'path': path})
res = self._invoke_vserver_api(file_use, vserver)
unique_bytes = res.get_child_content('unique-bytes')
LOG.debug('file-usage for path %(path)s is %(bytes)s'
% {'path': path, 'bytes': unique_bytes})
LOG.debug('file-usage for path %(path)s is %(bytes)s',
{'path': path, 'bytes': unique_bytes})
return unique_bytes
def get_vserver_ips(self, vserver):

View File

@ -22,7 +22,6 @@ Volume driver for NetApp NFS storage.
from oslo_log import log as logging
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI
@ -74,7 +73,7 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
:param volume: volume reference
"""
LOG.debug('create_volume on %s' % volume['host'])
LOG.debug('create_volume on %s', volume['host'])
self._ensure_shares_mounted()
# get share as pool name
@ -85,17 +84,17 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
raise exception.InvalidHost(reason=msg)
volume['provider_location'] = share
LOG.info(_LI('Creating volume at location %s')
% volume['provider_location'])
LOG.info(_LI('Creating volume at location %s'),
volume['provider_location'])
try:
self._do_create_volume(volume)
except Exception as ex:
LOG.error(_LE("Exception creating vol %(name)s on "
"share %(share)s. Details: %(ex)s")
% {'name': volume['name'],
'share': volume['provider_location'],
'ex': six.text_type(ex)})
"share %(share)s. Details: %(ex)s"),
{'name': volume['name'],
'share': volume['provider_location'],
'ex': ex})
msg = _("Volume %s could not be created on shares.")
raise exception.VolumeBackendAPIException(
data=msg % (volume['name']))

View File

@ -241,8 +241,8 @@ class NetAppNfsDriver(nfs.NfsDriver):
volume['provider_location'], file_name)
except Exception as e:
LOG.warning(_LW('Exception while registering image %(image_id)s'
' in cache. Exception: %(exc)s')
% {'image_id': image_id, 'exc': e.__str__()})
' in cache. Exception: %(exc)s'),
{'image_id': image_id, 'exc': e})
def _find_image_in_cache(self, image_id):
"""Finds image in cache and returns list of shares with file name."""
@ -254,8 +254,8 @@ class NetAppNfsDriver(nfs.NfsDriver):
file_path = '%s/%s' % (dir, file_name)
if os.path.exists(file_path):
LOG.debug('Found cache file for image %(image_id)s'
' on share %(share)s'
% {'image_id': image_id, 'share': share})
' on share %(share)s',
{'image_id': image_id, 'share': share})
result.append((share, file_name))
return result
@ -309,8 +309,8 @@ class NetAppNfsDriver(nfs.NfsDriver):
continue
except Exception as e:
LOG.warning(_LW('Exception during cache cleaning'
' %(share)s. Message - %(ex)s')
% {'share': share, 'ex': e.__str__()})
' %(share)s. Message - %(ex)s'),
{'share': share, 'ex': e})
continue
finally:
LOG.debug('Image cache cleaning done.')
@ -366,7 +366,7 @@ class NetAppNfsDriver(nfs.NfsDriver):
self._execute(*cmd, run_as_root=self._execute_as_root)
return True
except Exception as ex:
LOG.warning(_LW('Exception during deleting %s'), ex.__str__())
LOG.warning(_LW('Exception during deleting %s'), ex)
return False
def clone_image(self, context, volume,
@ -394,10 +394,10 @@ class NetAppNfsDriver(nfs.NfsDriver):
if cloned:
post_clone = self._post_clone_image(volume)
except Exception as e:
msg = e.msg if getattr(e, 'msg', None) else e.__str__()
msg = e.msg if getattr(e, 'msg', None) else e
LOG.info(_LI('Image cloning unsuccessful for image'
' %(image_id)s. Message: %(msg)s')
% {'image_id': image_id, 'msg': msg})
' %(image_id)s. Message: %(msg)s'),
{'image_id': image_id, 'msg': msg})
vol_path = self.local_path(volume)
volume['provider_location'] = None
if os.path.exists(vol_path):
@ -645,8 +645,8 @@ class NetAppNfsDriver(nfs.NfsDriver):
try:
return _move_file(source_path, dest_path)
except Exception as e:
LOG.warning(_LW('Exception moving file %(src)s. Message - %(e)s')
% {'src': source_path, 'e': e})
LOG.warning(_LW('Exception moving file %(src)s. Message - %(e)s'),
{'src': source_path, 'e': e})
return False
def _get_export_ip_path(self, volume_id=None, share=None):

View File

@ -81,7 +81,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
:param volume: volume reference
"""
LOG.debug('create_volume on %s' % volume['host'])
LOG.debug('create_volume on %s', volume['host'])
self._ensure_shares_mounted()
# get share as pool name
@ -100,18 +100,18 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
try:
volume['provider_location'] = share
LOG.info(_LI('casted to %s') % volume['provider_location'])
LOG.info(_LI('casted to %s'), volume['provider_location'])
self._do_create_volume(volume)
if qos_policy_group:
self._set_qos_policy_group_on_volume(volume, share,
qos_policy_group)
return {'provider_location': volume['provider_location']}
except Exception as ex:
LOG.error(_LW("Exception creating vol %(name)s on "
"share %(share)s. Details: %(ex)s")
% {'name': volume['name'],
'share': volume['provider_location'],
'ex': ex})
LOG.error(_LE("Exception creating vol %(name)s on "
"share %(share)s. Details: %(ex)s"),
{'name': volume['name'],
'share': volume['provider_location'],
'ex': ex})
volume['provider_location'] = None
finally:
if self.ssc_enabled:
@ -349,8 +349,8 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
def _is_share_vol_type_match(self, volume, share):
"""Checks if share matches volume type."""
netapp_vol = self._get_vol_for_share(share)
LOG.debug("Found volume %(vol)s for share %(share)s."
% {'vol': netapp_vol, 'share': share})
LOG.debug("Found volume %(vol)s for share %(share)s.",
{'vol': netapp_vol, 'share': share})
extra_specs = na_utils.get_volume_extra_specs(volume)
vols = ssc_cmode.get_volumes_for_specs(self.ssc_vols, extra_specs)
return netapp_vol in vols
@ -383,8 +383,8 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
self._try_copyoffload(context, volume, image_service, image_id)
copy_success = True
LOG.info(_LI('Copied image %(img)s to volume %(vol)s using '
'copy offload workflow.')
% {'img': image_id, 'vol': volume['id']})
'copy offload workflow.'),
{'img': image_id, 'vol': volume['id']})
else:
LOG.debug("Copy offload either not configured or"
" unsupported.")
@ -498,8 +498,8 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
else:
self._clone_file_dst_exists(dst_share, img_file, tmp_img_file)
self._discover_file_till_timeout(dst_img_local, timeout=120)
LOG.debug('Copied image %(img)s to tmp file %(tmp)s.'
% {'img': image_id, 'tmp': tmp_img_file})
LOG.debug('Copied image %(img)s to tmp file %(tmp)s.',
{'img': image_id, 'tmp': tmp_img_file})
dst_img_cache_local = os.path.join(dst_dir,
'img-cache-%s' % image_id)
if img_info['disk_format'] == 'raw':
@ -507,8 +507,8 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
self._clone_file_dst_exists(dst_share, tmp_img_file,
volume['name'], dest_exists=True)
self._move_nfs_file(dst_img_local, dst_img_cache_local)
LOG.debug('Copied raw image %(img)s to volume %(vol)s.'
% {'img': image_id, 'vol': volume['id']})
LOG.debug('Copied raw image %(img)s to volume %(vol)s.',
{'img': image_id, 'vol': volume['id']})
else:
LOG.debug('Image will be converted to raw %s.', image_id)
img_conv = six.text_type(uuid.uuid4())
@ -533,8 +533,8 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
self._move_nfs_file(dst_img_conv_local,
dst_img_cache_local)
LOG.debug('Copied locally converted raw image'
' %(img)s to volume %(vol)s.'
% {'img': image_id, 'vol': volume['id']})
' %(img)s to volume %(vol)s.',
{'img': image_id, 'vol': volume['id']})
finally:
if os.path.exists(dst_img_conv_local):
self._delete_file(dst_img_conv_local)

View File

@ -242,7 +242,7 @@ def create_vol_list(vol_attrs):
vols.add(vol)
except KeyError as e:
LOG.debug('Unexpected error while creating'
' ssc vol list. Message - %s' % six.text_type(e))
' ssc vol list. Message - %s', e)
continue
return vols
@ -422,8 +422,8 @@ def refresh_cluster_stale_ssc(*args, **kwargs):
def refresh_stale_ssc():
stale_vols = backend._update_stale_vols(reset=True)
LOG.info(_LI('Running stale ssc refresh job for %(server)s'
' and vserver %(vs)s')
% {'server': na_server, 'vs': vserver})
' and vserver %(vs)s'),
{'server': na_server, 'vs': vserver})
# refreshing single volumes can create inconsistency
# hence doing manipulations on copy
ssc_vols_copy = copy.deepcopy(backend.ssc_vols)
@ -456,8 +456,8 @@ def refresh_cluster_stale_ssc(*args, **kwargs):
vol_set.discard(vol)
backend.refresh_ssc_vols(ssc_vols_copy)
LOG.info(_LI('Successfully completed stale refresh job for'
' %(server)s and vserver %(vs)s')
% {'server': na_server, 'vs': vserver})
' %(server)s and vserver %(vs)s'),
{'server': na_server, 'vs': vserver})
refresh_stale_ssc()
finally:
@ -483,14 +483,14 @@ def get_cluster_latest_ssc(*args, **kwargs):
@utils.synchronized(lock_pr)
def get_latest_ssc():
LOG.info(_LI('Running cluster latest ssc job for %(server)s'
' and vserver %(vs)s')
% {'server': na_server, 'vs': vserver})
' and vserver %(vs)s'),
{'server': na_server, 'vs': vserver})
ssc_vols = get_cluster_ssc(na_server, vserver)
backend.refresh_ssc_vols(ssc_vols)
backend.ssc_run_time = timeutils.utcnow()
LOG.info(_LI('Successfully completed ssc job for %(server)s'
' and vserver %(vs)s')
% {'server': na_server, 'vs': vserver})
' and vserver %(vs)s'),
{'server': na_server, 'vs': vserver})
get_latest_ssc()
finally:

View File

@ -128,11 +128,11 @@ class RestClient(WebserviceClient):
if 'storedPassword' in scrubbed_data:
scrubbed_data['storedPassword'] = "****"
params = {'m': method, 'p': path, 'd': scrubbed_data,
'sys': use_system, 't': timeout, 'v': verify, 'k': kwargs}
LOG.debug("Invoking rest with method: %(m)s, path: %(p)s,"
" data: %(d)s, use_system: %(sys)s, timeout: %(t)s,"
" verify: %(v)s, kwargs: %(k)s." % (params))
" verify: %(v)s, kwargs: %(k)s.",
{'m': method, 'p': path, 'd': scrubbed_data,
'sys': use_system, 't': timeout, 'v': verify, 'k': kwargs})
url = self._get_resource_url(path, use_system, **kwargs)
if self._content_type == 'json':
headers = {'Accept': 'application/json',
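
One line in this hunk deserves its own note: `storedPassword` is masked before the request is logged. Lazy formatting does not remove the need for this, since the mapping is still rendered whenever debug logging is on, so secrets must be scrubbed from the data itself. A hedged sketch of the same idea:

    import copy

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)


    def log_request(method, path, data):
        # Deep-copy first so the real payload is left untouched.
        scrubbed_data = copy.deepcopy(data)
        if 'storedPassword' in scrubbed_data:
            scrubbed_data['storedPassword'] = "****"
        LOG.debug("Invoking rest with method: %(m)s, path: %(p)s, "
                  "data: %(d)s.",
                  {'m': method, 'p': path, 'd': scrubbed_data})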

View File

@ -33,8 +33,7 @@ LOG = logging.getLogger(__name__)
def map_volume_to_single_host(client, volume, eseries_vol, host,
vol_map):
"""Maps the e-series volume to host with initiator."""
msg = "Attempting to map volume %s to single host."
LOG.debug(msg % volume['id'])
LOG.debug("Attempting to map volume %s to single host.", volume['id'])
# If volume is not mapped on the backend, map directly to host
if not vol_map:
@ -63,10 +62,9 @@ def map_volume_to_single_host(client, volume, eseries_vol, host,
# If volume is not currently attached according to Cinder, it is
# safe to delete the mapping
if not (volume['attach_status'] == 'attached'):
msg = (_("Volume %(vol)s is not currently attached, "
"moving existing mapping to host %(host)s.")
% {'vol': volume['id'], 'host': host['label']})
LOG.debug(msg)
LOG.debug("Volume %(vol)s is not currently attached, moving "
"existing mapping to host %(host)s.",
{'vol': volume['id'], 'host': host['label']})
mappings = _get_vol_mapping_for_host_frm_array(
client, host['hostRef'])
lun = _get_free_lun(client, host, mappings)
@ -86,8 +84,7 @@ def map_volume_to_multiple_hosts(client, volume, eseries_vol, target_host,
mapping):
"""Maps the e-series volume to multiattach host group."""
msg = "Attempting to map volume %s to multiple hosts."
LOG.debug(msg % volume['id'])
LOG.debug("Attempting to map volume %s to multiple hosts.", volume['id'])
# If volume is already mapped to desired host, return the mapping
if mapping['mapRef'] == target_host['hostRef']:
@ -143,8 +140,8 @@ def map_volume_to_multiple_hosts(client, volume, eseries_vol, target_host,
# Once both existing and target hosts are in the multiattach host group,
# move the volume mapping to said group.
if not mapped_host_group:
msg = "Moving mapping for volume %s to multiattach host group."
LOG.debug(msg % volume['id'])
LOG.debug("Moving mapping for volume %s to multiattach host group.",
volume['id'])
return client.move_volume_mapping_via_symbol(
mapping.get('lunMappingRef'),
multiattach_host_group['clusterRef'],
@ -187,9 +184,9 @@ def _get_vol_mapping_for_host_group_frm_array(client, hg_ref):
def unmap_volume_from_host(client, volume, host, mapping):
# Volume is mapped directly to host, so delete the mapping
if mapping.get('mapRef') == host['hostRef']:
msg = ("Volume %(vol)s is mapped directly to host %(host)s; removing "
"mapping.")
LOG.debug(msg % {'vol': volume['id'], 'host': host['label']})
LOG.debug("Volume %(vol)s is mapped directly to host %(host)s; "
"removing mapping.", {'vol': volume['id'],
'host': host['label']})
client.delete_volume_mapping(mapping['lunMappingRef'])
return
@ -212,9 +209,8 @@ def unmap_volume_from_host(client, volume, host, mapping):
# Remove mapping if volume should no longer be attached after this
# operation.
if volume['status'] == 'detaching':
msg = ("Volume %s is mapped directly to multiattach host group "
"but is not currently attached; removing mapping.")
LOG.debug(msg % volume['id'])
LOG.debug("Volume %s is mapped directly to multiattach host group but "
"is not currently attached; removing mapping.", volume['id'])
client.delete_volume_mapping(mapping['lunMappingRef'])

View File

@ -162,26 +162,25 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
def _check_multipath(self):
if not self.configuration.use_multipath_for_image_xfer:
msg = _LW('Production use of "%(backend)s" backend requires the '
'Cinder controller to have multipathing properly set up '
'and the configuration option "%(mpflag)s" to be set to '
'"True".') % {'backend': self._backend_name,
'mpflag': 'use_multipath_for_image_xfer'}
LOG.warning(msg)
LOG.warning(_LW('Production use of "%(backend)s" backend requires '
'the Cinder controller to have multipathing '
'properly set up and the configuration option '
'"%(mpflag)s" to be set to "True".'),
{'backend': self._backend_name,
'mpflag': 'use_multipath_for_image_xfer'})
def _ensure_multi_attach_host_group_exists(self):
try:
host_group = self._client.get_host_group_by_name(
utils.MULTI_ATTACH_HOST_GROUP_NAME)
msg = _LI("The multi-attach E-Series host group '%(label)s' "
"already exists with clusterRef %(clusterRef)s")
LOG.info(msg % host_group)
LOG.info(_LI("The multi-attach E-Series host group '%(label)s' "
"already exists with clusterRef %(clusterRef)s"),
host_group)
except exception.NotFound:
host_group = self._client.create_host_group(
utils.MULTI_ATTACH_HOST_GROUP_NAME)
msg = _LI("Created multi-attach E-Series host group '%(label)s' "
"with clusterRef %(clusterRef)s")
LOG.info(msg % host_group)
LOG.info(_LI("Created multi-attach E-Series host group %(label)s "
"with clusterRef %(clusterRef)s"), host_group)
def _check_mode_get_or_register_storage_system(self):
"""Does validity checks for storage system registry and health."""
@ -190,11 +189,11 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
ip = na_utils.resolve_hostname(host)
return ip
except socket.gaierror as e:
LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.')
% {'host': host, 'e': e})
LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.'),
{'host': host, 'e': e})
raise exception.NoValidHost(
_("Controller IP '%(host)s' could not be resolved: %(e)s.")
% {'host': host, 'e': e})
% {'host': host, 'e': six.text_type(e)})
ips = self.configuration.netapp_controller_ips
ips = [i.strip() for i in ips.split(",")]
@ -216,9 +215,9 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
system = self._client.list_storage_system()
except exception.NetAppDriverException:
with excutils.save_and_reraise_exception():
msg = _LI("System with controller addresses [%s] is not"
" registered with web service.")
LOG.info(msg % self.configuration.netapp_controller_ips)
LOG.info(_LI("System with controller addresses [%s] is not "
"registered with web service."),
self.configuration.netapp_controller_ips)
password_not_in_sync = False
if system.get('status', '').lower() == 'passwordoutofsync':
password_not_in_sync = True
@ -248,9 +247,10 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
msg_dict = {'id': system.get('id'), 'status': status}
if (status == 'passwordoutofsync' or status == 'notsupported' or
status == 'offline'):
msg = _("System %(id)s found with bad status - %(status)s.")
raise exception.NetAppDriverException(msg % msg_dict)
LOG.info(_LI("System %(id)s has %(status)s status.") % msg_dict)
raise exception.NetAppDriverException(
_("System %(id)s found with bad status - "
"%(status)s.") % msg_dict)
LOG.info(_LI("System %(id)s has %(status)s status."), msg_dict)
return True
def _populate_system_objects(self):
@ -384,7 +384,7 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
def create_volume(self, volume):
"""Creates a volume."""
LOG.debug('create_volume on %s' % volume['host'])
LOG.debug('create_volume on %s', volume['host'])
# get E-series pool label as pool name
eseries_pool_label = volume_utils.extract_host(volume['host'],
@ -436,8 +436,7 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
"label %s."), eseries_volume_label)
except exception.NetAppDriverException as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error creating volume. Msg - %s."),
six.text_type(e))
LOG.error(_LE("Error creating volume. Msg - %s."), e)
return vol
@ -492,8 +491,8 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
def _copy_volume_high_prior_readonly(self, src_vol, dst_vol):
"""Copies src volume to dest volume."""
LOG.info(_LI("Copying src vol %(src)s to dest vol %(dst)s.")
% {'src': src_vol['label'], 'dst': dst_vol['label']})
LOG.info(_LI("Copying src vol %(src)s to dest vol %(dst)s."),
{'src': src_vol['label'], 'dst': dst_vol['label']})
try:
job = None
job = self._client.create_volume_copy_job(src_vol['id'],
@ -504,13 +503,13 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
'pending' or j_st['status'] == 'unknown'):
time.sleep(self.SLEEP_SECS)
continue
if (j_st['status'] == 'failed' or j_st['status'] == 'halted'):
if j_st['status'] == 'failed' or j_st['status'] == 'halted':
LOG.error(_LE("Vol copy job status %s."), j_st['status'])
msg = _("Vol copy job for dest %s failed.")\
% dst_vol['label']
raise exception.NetAppDriverException(msg)
LOG.info(_LI("Vol copy job completed for dest %s.")
% dst_vol['label'])
raise exception.NetAppDriverException(
_("Vol copy job for dest %s failed.") %
dst_vol['label'])
LOG.info(_LI("Vol copy job completed for dest %s."),
dst_vol['label'])
break
finally:
if job:
@ -579,7 +578,7 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
try:
snap_grp = self._get_cached_snapshot_grp(snapshot['id'])
except KeyError:
LOG.warning(_LW("Snapshot %s already deleted.") % snapshot['id'])
LOG.warning(_LW("Snapshot %s already deleted."), snapshot['id'])
return
self._client.delete_snapshot_group(snap_grp['pitGroupRef'])
snapshot_name = snap_grp['label']
@ -622,16 +621,15 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
current_map)
lun_id = mapping['lun']
msg = _("Mapped volume %(id)s to the initiator %(initiator_name)s.")
msg_fmt = {'id': volume['id'], 'initiator_name': initiator_name}
LOG.debug(msg % msg_fmt)
LOG.debug("Mapped volume %(id)s to the initiator %(initiator_name)s.",
msg_fmt)
iscsi_details = self._get_iscsi_service_details()
iscsi_portal = self._get_iscsi_portal_for_vol(eseries_vol,
iscsi_details)
msg = _("Successfully fetched target details for volume %(id)s and "
"initiator %(initiator_name)s.")
LOG.debug(msg % msg_fmt)
LOG.debug("Successfully fetched target details for volume %(id)s and "
"initiator %(initiator_name)s.", msg_fmt)
iqn = iscsi_portal['iqn']
address = iscsi_portal['ip']
port = iscsi_portal['tcp_port']
@ -688,9 +686,9 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
host = self._client.update_host_type(
host['hostRef'], ht_def)
except exception.NetAppDriverException as e:
msg = _LW("Unable to update host type for host with "
"label %(l)s. %(e)s")
LOG.warning(msg % {'l': host['label'], 'e': e.msg})
LOG.warning(_LW("Unable to update host type for host with "
"label %(l)s. %(e)s"),
{'l': host['label'], 'e': e.msg})
return host
except exception.NotFound as e:
LOG.warning(_LW("Message - %s."), e.msg)
@ -791,7 +789,7 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
{<volume_group_ref> : {<ssc_key>: <ssc_value>}}
"""
LOG.info(_LI("Updating storage service catalog information for "
"backend '%s'") % self._backend_name)
"backend '%s'"), self._backend_name)
self._ssc_stats = \
self._update_ssc_disk_encryption(self._objects["disk_pool_refs"])
self._ssc_stats = \
@ -852,8 +850,8 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
(int(x.get('totalRaidedSpace', 0)) -
int(x.get('usedSpace', 0) >= size))]
if not avl_pools:
msg = _LW("No storage pool found with available capacity %s.")
LOG.warning(msg % size_gb)
LOG.warning(_LW("No storage pool found with available capacity "
"%s."), size_gb)
return avl_pools
def extend_volume(self, volume, new_size):

View File

@ -128,15 +128,14 @@ def round_down(value, precision):
def log_extra_spec_warnings(extra_specs):
for spec in (set(extra_specs.keys() if extra_specs else []) &
set(OBSOLETE_SSC_SPECS.keys())):
msg = _LW('Extra spec %(old)s is obsolete. Use %(new)s instead.')
args = {'old': spec, 'new': OBSOLETE_SSC_SPECS[spec]}
LOG.warning(msg % args)
LOG.warning(_LW('Extra spec %(old)s is obsolete. Use %(new)s '
'instead.'), {'old': spec,
'new': OBSOLETE_SSC_SPECS[spec]})
for spec in (set(extra_specs.keys() if extra_specs else []) &
set(DEPRECATED_SSC_SPECS.keys())):
msg = _LW('Extra spec %(old)s is deprecated. Use %(new)s '
'instead.')
args = {'old': spec, 'new': DEPRECATED_SSC_SPECS[spec]}
LOG.warning(msg % args)
LOG.warning(_LW('Extra spec %(old)s is deprecated. Use %(new)s '
'instead.'), {'old': spec,
'new': DEPRECATED_SSC_SPECS[spec]})
def get_iscsi_connection_properties(lun_id, volume, iqn,
@ -228,7 +227,7 @@ class OpenStackInfo(object):
"'%{version}\t%{release}\t%{vendor}'",
self.PACKAGE_NAME)
if not out:
LOG.info(_LI('No rpm info found for %(pkg)s package.') % {
LOG.info(_LI('No rpm info found for %(pkg)s package.'), {
'pkg': self.PACKAGE_NAME})
return False
parts = out.split()
@ -237,7 +236,7 @@ class OpenStackInfo(object):
self._vendor = ' '.join(parts[2::])
return True
except Exception as e:
LOG.info(_LI('Could not run rpm command: %(msg)s.') % {'msg': e})
LOG.info(_LI('Could not run rpm command: %(msg)s.'), {'msg': e})
return False
# ubuntu, mirantis on ubuntu
@ -248,8 +247,8 @@ class OpenStackInfo(object):
out, err = putils.execute("dpkg-query", "-W", "-f='${Version}'",
self.PACKAGE_NAME)
if not out:
LOG.info(_LI('No dpkg-query info found for %(pkg)s package.')
% {'pkg': self.PACKAGE_NAME})
LOG.info(_LI('No dpkg-query info found for %(pkg)s package.'),
{'pkg': self.PACKAGE_NAME})
return False
# debian format: [epoch:]upstream_version[-debian_revision]
deb_version = out
@ -266,7 +265,7 @@ class OpenStackInfo(object):
self._vendor = _vendor
return True
except Exception as e:
LOG.info(_LI('Could not run dpkg-query command: %(msg)s.') % {
LOG.info(_LI('Could not run dpkg-query command: %(msg)s.'), {
'msg': e})
return False
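
As `log_extra_spec_warnings()` above shows, a format string with named placeholders takes a single dict as the logging argument, while a positional `%s` placeholder takes its value as a separate argument. A small sketch of both forms:

    from oslo_log import log as logging

    from cinder.i18n import _LI, _LW

    LOG = logging.getLogger(__name__)

    # Named placeholders: pass one mapping.
    LOG.warning(_LW('Extra spec %(old)s is obsolete. Use %(new)s instead.'),
                {'old': 'netapp:raid_type', 'new': 'netapp_raid_type'})

    # A single positional placeholder: pass the value directly.
    LOG.info(_LI('No rpm info found for %s package.'), 'cinder')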

View File

@ -121,12 +121,12 @@ class NfsDriver(remotefs.RemoteFSDriver):
if not config:
msg = (_("There's no NFS config file configured (%s)") %
'nfs_shares_config')
LOG.warn(msg)
LOG.warning(msg)
raise exception.NfsException(msg)
if not os.path.exists(config):
msg = (_("NFS config file at %(config)s doesn't exist") %
{'config': config})
LOG.warn(msg)
LOG.warning(msg)
raise exception.NfsException(msg)
if not self.configuration.nfs_oversub_ratio > 0:
msg = _("NFS config 'nfs_oversub_ratio' invalid. Must be > 0: "
@ -172,12 +172,13 @@ class NfsDriver(remotefs.RemoteFSDriver):
except Exception as e:
if attempt == (num_attempts - 1):
LOG.error(_LE('Mount failure for %(share)s after '
'%(count)d attempts.') % {
'%(count)d attempts.'), {
'share': nfs_share,
'count': num_attempts})
raise exception.NfsException(e)
LOG.debug('Mount attempt %d failed: %s.\nRetrying mount ...' %
(attempt, six.text_type(e)))
raise exception.NfsException(six.text_type(e))
LOG.debug('Mount attempt %(attempt)d failed: %(exc)s.\n'
'Retrying mount ...',
{'attempt': attempt, 'exc': e})
time.sleep(1)
def _find_share(self, volume_size_in_gib):
@ -332,16 +333,16 @@ class NfsDriver(remotefs.RemoteFSDriver):
self.configuration.nas_secure_file_permissions,
nfs_mount, is_new_cinder_install)
LOG.debug('NAS variable secure_file_permissions setting is: %s' %
LOG.debug('NAS variable secure_file_permissions setting is: %s',
self.configuration.nas_secure_file_permissions)
if self.configuration.nas_secure_file_permissions == 'false':
LOG.warn(_LW("The NAS file permissions mode will be 666 (allowing "
"other/world read & write access). "
"This is considered an insecure NAS environment. "
"Please see %s for information on a secure "
"NFS configuration.") %
doc_html)
LOG.warning(_LW("The NAS file permissions mode will be 666 "
"(allowing other/world read & write access). "
"This is considered an insecure NAS environment. "
"Please see %s for information on a secure "
"NFS configuration."),
doc_html)
self.configuration.nas_secure_file_operations = \
self._determine_nas_security_option_setting(
@ -353,13 +354,13 @@ class NfsDriver(remotefs.RemoteFSDriver):
if self.configuration.nas_secure_file_operations == 'true':
self._execute_as_root = False
LOG.debug('NAS variable secure_file_operations setting is: %s' %
LOG.debug('NAS variable secure_file_operations setting is: %s',
self.configuration.nas_secure_file_operations)
if self.configuration.nas_secure_file_operations == 'false':
LOG.warn(_LW("The NAS file operations will be run as "
"root: allowing root level access at the storage "
"backend. This is considered an insecure NAS "
"environment. Please see %s "
"for information on a secure NAS configuration.") %
doc_html)
LOG.warning(_LW("The NAS file operations will be run as "
"root: allowing root level access at the storage "
"backend. This is considered an insecure NAS "
"environment. Please see %s "
"for information on a secure NAS configuration."),
doc_html)
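
This file also converts every `LOG.warn()` to `LOG.warning()`: `warn()` is a deprecated alias in the standard library logging module, and `warning()` is the supported spelling. Note that eager `%` interpolation is kept here on purpose, because the formatted message is reused in the raised exception; a sketch assuming `exception.NfsException` as in the hunk above:

    from oslo_log import log as logging

    from cinder import exception
    from cinder.i18n import _

    LOG = logging.getLogger(__name__)


    def check_shares_config(config):
        if not config:
            # Built eagerly because the same text feeds the exception.
            msg = (_("There's no NFS config file configured (%s)") %
                   'nfs_shares_config')
            LOG.warning(msg)  # warning(), not the deprecated warn()
            raise exception.NfsException(msg)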

View File

@ -94,36 +94,36 @@ class NimbleISCSIDriver(san.SanISCSIDriver):
def _get_discovery_ip(self, netconfig):
"""Get discovery ip."""
subnet_label = self.configuration.nimble_subnet_label
LOG.debug('subnet_label used %(netlabel)s, netconfig %(netconf)s'
% {'netlabel': subnet_label, 'netconf': netconfig})
LOG.debug('subnet_label used %(netlabel)s, netconfig %(netconf)s',
{'netlabel': subnet_label, 'netconf': netconfig})
ret_discovery_ip = ''
for subnet in netconfig['subnet-list']:
LOG.info(_LI('Exploring array subnet label %s') % subnet['label'])
LOG.info(_LI('Exploring array subnet label %s'), subnet['label'])
if subnet_label == '*':
# Use the first data subnet, save mgmt+data for later
if (subnet['subnet-id']['type'] == SM_SUBNET_DATA):
if subnet['subnet-id']['type'] == SM_SUBNET_DATA:
LOG.info(_LI('Discovery ip %(disc_ip)s is used '
'on data subnet %(net_label)s')
% {'disc_ip': subnet['discovery-ip'],
'net_label': subnet['label']})
'on data subnet %(net_label)s'),
{'disc_ip': subnet['discovery-ip'],
'net_label': subnet['label']})
return subnet['discovery-ip']
elif (subnet['subnet-id']['type'] ==
SM_SUBNET_MGMT_PLUS_DATA):
LOG.info(_LI('Discovery ip %(disc_ip)s is found'
' on mgmt+data subnet %(net_label)s')
% {'disc_ip': subnet['discovery-ip'],
'net_label': subnet['label']})
' on mgmt+data subnet %(net_label)s'),
{'disc_ip': subnet['discovery-ip'],
'net_label': subnet['label']})
ret_discovery_ip = subnet['discovery-ip']
# If subnet is specified and found, use the subnet
elif subnet_label == subnet['label']:
LOG.info(_LI('Discovery ip %(disc_ip)s is used'
' on subnet %(net_label)s')
% {'disc_ip': subnet['discovery-ip'],
'net_label': subnet['label']})
' on subnet %(net_label)s'),
{'disc_ip': subnet['discovery-ip'],
'net_label': subnet['label']})
return subnet['discovery-ip']
if ret_discovery_ip:
LOG.info(_LI('Discovery ip %s is used on mgmt+data subnet')
% ret_discovery_ip)
LOG.info(_LI('Discovery ip %s is used on mgmt+data subnet'),
ret_discovery_ip)
return ret_discovery_ip
else:
raise NimbleDriverException(_('No suitable discovery ip found'))
@ -151,8 +151,8 @@ class NimbleISCSIDriver(san.SanISCSIDriver):
target_ipaddr = self._get_discovery_ip(netconfig)
iscsi_portal = target_ipaddr + ':3260'
provider_location = '%s %s %s' % (iscsi_portal, iqn, LUN_ID)
LOG.info(_LI('vol_name=%(name)s provider_location=%(loc)s')
% {'name': volume_name, 'loc': provider_location})
LOG.info(_LI('vol_name=%(name)s provider_location=%(loc)s'),
{'name': volume_name, 'loc': provider_location})
return provider_location
def _get_model_info(self, volume_name):
@ -258,10 +258,10 @@ class NimbleISCSIDriver(san.SanISCSIDriver):
float(units.Gi))
free_space = total_capacity - used_space
LOG.debug('total_capacity=%(capacity)f '
'used_space=%(used)f free_space=%(free)f'
% {'capacity': total_capacity,
'used': used_space,
'free': free_space})
'used_space=%(used)f free_space=%(free)f',
{'capacity': total_capacity,
'used': used_space,
'free': free_space})
backend_name = self.configuration.safe_get(
'volume_backend_name') or self.__class__.__name__
self.group_stats = {'volume_backend_name': backend_name,
@ -277,8 +277,9 @@ class NimbleISCSIDriver(san.SanISCSIDriver):
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
volume_name = volume['name']
LOG.info(_LI('Entering extend_volume volume=%(vol)s new_size=%(size)s')
% {'vol': volume_name, 'size': new_size})
LOG.info(_LI('Entering extend_volume volume=%(vol)s '
'new_size=%(size)s'),
{'vol': volume_name, 'size': new_size})
vol_size = int(new_size) * units.Gi
reserve = not self.configuration.san_thin_provision
reserve_size = vol_size if reserve else 0
@ -295,8 +296,8 @@ class NimbleISCSIDriver(san.SanISCSIDriver):
"""Creates igroup for an initiator and returns the igroup name."""
igrp_name = 'openstack-' + self._generate_random_string(12)
LOG.info(_LI('Creating initiator group %(grp)s '
'with initiator %(iname)s')
% {'grp': igrp_name, 'iname': initiator_name})
'with initiator %(iname)s'),
{'grp': igrp_name, 'iname': initiator_name})
self.APIExecutor.create_initiator_group(igrp_name, initiator_name)
return igrp_name
@ -308,28 +309,29 @@ class NimbleISCSIDriver(san.SanISCSIDriver):
initiator_group['initiator-list'][0]['name'] ==
initiator_name):
LOG.info(_LI('igroup %(grp)s found for '
'initiator %(iname)s')
% {'grp': initiator_group['name'],
'iname': initiator_name})
'initiator %(iname)s'),
{'grp': initiator_group['name'],
'iname': initiator_name})
return initiator_group['name']
LOG.info(_LI('No igroup found for initiator %s') % initiator_name)
LOG.info(_LI('No igroup found for initiator %s'), initiator_name)
return ''
def initialize_connection(self, volume, connector):
"""Driver entry point to attach a volume to an instance."""
LOG.info(_LI('Entering initialize_connection volume=%(vol)s'
' connector=%(conn)s location=%(loc)s')
% {'vol': volume,
'conn': connector,
'loc': volume['provider_location']})
' connector=%(conn)s location=%(loc)s'),
{'vol': volume,
'conn': connector,
'loc': volume['provider_location']})
initiator_name = connector['initiator']
initiator_group_name = self._get_igroupname_for_initiator(
initiator_name)
if not initiator_group_name:
initiator_group_name = self._create_igroup_for_initiator(
initiator_name)
LOG.info(_LI('Initiator group name is %(grp)s for initiator %(iname)s')
% {'grp': initiator_group_name, 'iname': initiator_name})
LOG.info(_LI('Initiator group name is %(grp)s for initiator '
'%(iname)s'),
{'grp': initiator_group_name, 'iname': initiator_name})
self.APIExecutor.add_acl(volume, initiator_group_name)
(iscsi_portal, iqn, lun_num) = volume['provider_location'].split()
properties = {}
@ -346,10 +348,10 @@ class NimbleISCSIDriver(san.SanISCSIDriver):
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to unattach a volume from an instance."""
LOG.info(_LI('Entering terminate_connection volume=%(vol)s'
' connector=%(conn)s location=%(loc)s.')
% {'vol': volume,
'conn': connector,
'loc': volume['provider_location']})
' connector=%(conn)s location=%(loc)s.'),
{'vol': volume,
'conn': connector,
'loc': volume['provider_location']})
initiator_name = connector['initiator']
initiator_group_name = self._get_igroupname_for_initiator(
initiator_name)
@ -397,7 +399,7 @@ def _connection_checker(func):
self.login()
continue
else:
LOG.error(_LE('Re-throwing Exception %s') % e)
LOG.error(_LE('Re-throwing Exception %s'), e)
raise
return inner_connection_checker
@ -411,14 +413,14 @@ class NimbleAPIExecutor(object):
self.username = kwargs['username']
self.password = kwargs['password']
wsdl_url = 'https://%s/wsdl/NsGroupManagement.wsdl' % (kwargs['ip'])
LOG.debug('Using Nimble wsdl_url: %s' % wsdl_url)
LOG.debug('Using Nimble wsdl_url: %s', wsdl_url)
self.err_string_dict = self._create_err_code_to_str_mapper(wsdl_url)
self.client = client.Client(wsdl_url,
username=self.username,
password=self.password)
soap_url = ('https://%(ip)s:%(port)s/soap' % {'ip': kwargs['ip'],
'port': SOAP_PORT})
LOG.debug('Using Nimble soap_url: %s' % soap_url)
LOG.debug('Using Nimble soap_url: %s', soap_url)
self.client.set_options(location=soap_url)
self.login()
@ -453,7 +455,7 @@ class NimbleAPIExecutor(object):
def login(self):
"""Execute Https Login API."""
response = self._execute_login()
LOG.info(_LI('Successful login by user %s') % self.username)
LOG.info(_LI('Successful login by user %s'), self.username)
self.sid = response['authInfo']['sid']
@_connection_checker
@ -482,12 +484,12 @@ class NimbleAPIExecutor(object):
LOG.info(_LI('Creating a new volume=%(vol)s size=%(size)s'
' reserve=%(reserve)s in pool=%(pool)s'
' description=%(description)s')
% {'vol': volume['name'],
'size': volume_size,
'reserve': reserve,
'pool': pool_name,
'description': description})
' description=%(description)s'),
{'vol': volume['name'],
'size': volume_size,
'reserve': reserve,
'pool': pool_name,
'description': description})
return self.client.service.createVol(
request={'sid': self.sid,
'attr': {'name': volume['name'],
@ -504,7 +506,7 @@ class NimbleAPIExecutor(object):
def create_vol(self, volume, pool_name, reserve):
"""Execute createVol API."""
response = self._execute_create_vol(volume, pool_name, reserve)
LOG.info(_LI('Successfully create volume %s') % response['name'])
LOG.info(_LI('Successfully created volume %s'), response['name'])
return response['name']
@_connection_checker
@ -524,9 +526,9 @@ class NimbleAPIExecutor(object):
def add_acl(self, volume, initiator_group_name):
"""Execute addAcl API."""
LOG.info(_LI('Adding ACL to volume=%(vol)s with'
' initiator group name %(igrp)s')
% {'vol': volume['name'],
'igrp': initiator_group_name})
' initiator group name %(igrp)s'),
{'vol': volume['name'],
'igrp': initiator_group_name})
return self.client.service.addVolAcl(
request={'sid': self.sid,
'volname': volume['name'],
@ -539,9 +541,9 @@ class NimbleAPIExecutor(object):
def remove_acl(self, volume, initiator_group_name):
"""Execute removeVolAcl API."""
LOG.info(_LI('Removing ACL from volume=%(vol)s'
' for initiator group %(igrp)s')
% {'vol': volume['name'],
'igrp': initiator_group_name})
' for initiator group %(igrp)s'),
{'vol': volume['name'],
'igrp': initiator_group_name})
return self.client.service.removeVolAcl(
request={'sid': self.sid,
'volname': volume['name'],
@ -553,23 +555,23 @@ class NimbleAPIExecutor(object):
@_response_checker
def _execute_get_vol_info(self, vol_name):
LOG.info(_LI('Getting volume information '
'for vol_name=%s') % (vol_name))
'for vol_name=%s'), vol_name)
return self.client.service.getVolInfo(request={'sid': self.sid,
'name': vol_name})
def get_vol_info(self, vol_name):
"""Execute getVolInfo API."""
response = self._execute_get_vol_info(vol_name)
LOG.info(_LI('Successfully got volume information for volume %s')
% vol_name)
LOG.info(_LI('Successfully got volume information for volume %s'),
vol_name)
return response['vol']
@_connection_checker
@_response_checker
def online_vol(self, vol_name, online_flag, *args, **kwargs):
"""Execute onlineVol API."""
LOG.info(_LI('Setting volume %(vol)s to online_flag %(flag)s')
% {'vol': vol_name, 'flag': online_flag})
LOG.info(_LI('Setting volume %(vol)s to online_flag %(flag)s'),
{'vol': vol_name, 'flag': online_flag})
return self.client.service.onlineVol(request={'sid': self.sid,
'name': vol_name,
'online': online_flag})
@ -578,8 +580,8 @@ class NimbleAPIExecutor(object):
@_response_checker
def online_snap(self, vol_name, online_flag, snap_name, *args, **kwargs):
"""Execute onlineSnap API."""
LOG.info(_LI('Setting snapshot %(snap)s to online_flag %(flag)s')
% {'snap': snap_name, 'flag': online_flag})
LOG.info(_LI('Setting snapshot %(snap)s to online_flag %(flag)s'),
{'snap': snap_name, 'flag': online_flag})
return self.client.service.onlineSnap(request={'sid': self.sid,
'vol': vol_name,
'name': snap_name,
@ -589,7 +591,7 @@ class NimbleAPIExecutor(object):
@_response_checker
def dissociate_volcoll(self, vol_name, *args, **kwargs):
"""Execute dissocProtPol API."""
LOG.info(_LI('Dissociating volume %s ') % vol_name)
LOG.info(_LI('Dissociating volume %s '), vol_name)
return self.client.service.dissocProtPol(
request={'sid': self.sid,
'vol-name': vol_name})
@ -598,7 +600,7 @@ class NimbleAPIExecutor(object):
@_response_checker
def delete_vol(self, vol_name, *args, **kwargs):
"""Execute deleteVol API."""
LOG.info(_LI('Deleting volume %s ') % vol_name)
LOG.info(_LI('Deleting volume %s '), vol_name)
return self.client.service.deleteVol(request={'sid': self.sid,
'name': vol_name})
@ -615,10 +617,10 @@ class NimbleAPIExecutor(object):
# Limit to 254 characters
snap_description = snap_description[:254]
LOG.info(_LI('Creating snapshot for volume_name=%(vol)s'
' snap_name=%(name)s snap_description=%(desc)s')
% {'vol': volume_name,
'name': snap_name,
'desc': snap_description})
' snap_name=%(name)s snap_description=%(desc)s'),
{'vol': volume_name,
'name': snap_name,
'desc': snap_description})
return self.client.service.snapVol(
request={'sid': self.sid,
'vol': volume_name,
@ -629,7 +631,7 @@ class NimbleAPIExecutor(object):
@_response_checker
def delete_snap(self, vol_name, snap_name, *args, **kwargs):
"""Execute deleteSnap API."""
LOG.info(_LI('Deleting snapshot %s ') % snap_name)
LOG.info(_LI('Deleting snapshot %s '), snap_name)
return self.client.service.deleteSnap(request={'sid': self.sid,
'vol': vol_name,
'name': snap_name})
@ -645,12 +647,12 @@ class NimbleAPIExecutor(object):
reserve_size = snap_size * units.Gi if reserve else 0
LOG.info(_LI('Cloning volume from snapshot volume=%(vol)s '
'snapshot=%(snap)s clone=%(clone)s snap_size=%(size)s'
'reserve=%(reserve)s')
% {'vol': volume_name,
'snap': snap_name,
'clone': clone_name,
'size': snap_size,
'reserve': reserve})
'reserve=%(reserve)s'),
{'vol': volume_name,
'snap': snap_name,
'clone': clone_name,
'size': snap_size,
'reserve': reserve})
clone_size = snap_size * units.Gi
return self.client.service.cloneVol(
request={'sid': self.sid,
@ -668,8 +670,8 @@ class NimbleAPIExecutor(object):
@_response_checker
def edit_vol(self, vol_name, mask, attr):
"""Execute editVol API."""
LOG.info(_LI('Editing Volume %(vol)s with mask %(mask)s')
% {'vol': vol_name, 'mask': str(mask)})
LOG.info(_LI('Editing Volume %(vol)s with mask %(mask)s'),
{'vol': vol_name, 'mask': str(mask)})
return self.client.service.editVol(request={'sid': self.sid,
'name': vol_name,
'mask': mask,
@ -694,8 +696,8 @@ class NimbleAPIExecutor(object):
def create_initiator_group(self, initiator_group_name, initiator_name):
"""Execute createInitiatorGrp API."""
LOG.info(_LI('Creating initiator group %(igrp)s'
' with one initiator %(iname)s')
% {'igrp': initiator_group_name, 'iname': initiator_name})
' with one initiator %(iname)s'),
{'igrp': initiator_group_name, 'iname': initiator_name})
return self.client.service.createInitiatorGrp(
request={'sid': self.sid,
'attr': {'name': initiator_group_name,
@ -706,7 +708,7 @@ class NimbleAPIExecutor(object):
@_response_checker
def delete_initiator_group(self, initiator_group_name, *args, **kwargs):
"""Execute deleteInitiatorGrp API."""
LOG.info(_LI('Deleting deleteInitiatorGrp %s ') % initiator_group_name)
LOG.info(_LI('Deleting deleteInitiatorGrp %s '), initiator_group_name)
return self.client.service.deleteInitiatorGrp(
request={'sid': self.sid,
'name': initiator_group_name})
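
As context for the pattern these Nimble hunks apply: with stdlib logging (which oslo.log wraps), passing the format arguments separately defers interpolation until a handler actually emits the record, and it hands the raw format string to any translation hook. A minimal standard-library sketch, with an illustrative volume dict; in Cinder the format string would additionally be wrapped in a marker function such as _LI, _LW, or _LE from cinder.i18n:

import logging

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)

vol = {'name': 'volume-1', 'size': 10}

# Eager: the message is rendered before the call, even if DEBUG is filtered.
LOG.debug('Creating %(name)s of size %(size)s' % vol)

# Lazy: interpolation happens only when the record is emitted, and i18n
# hooks receive the untranslated format string.
LOG.debug('Creating %(name)s of size %(size)s', vol)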

View File

@ -66,8 +66,9 @@ class OVSVolumeDriver(driver.VolumeDriver):
Options come from CONF
"""
super(OVSVolumeDriver, self).__init__(*args, **kwargs)
LOG.debug('INIT %s %s %s ', CONF.vpool_name, str(args),
str(kwargs))
LOG.debug('INIT %(pool_name)s %(arg)s %(kwarg)s ',
{'pool_name': CONF.vpool_name, 'arg': args,
'kwarg': kwargs})
self.configuration.append_config_values(OPTS)
self._vpool_name = self.configuration.vpool_name
if vpoollist is not None:
@ -102,7 +103,8 @@ class OVSVolumeDriver(driver.VolumeDriver):
location = '{}/{}.raw'.format(mountpoint, name)
size = volume.size
LOG.debug('DO_CREATE_VOLUME %s %s', location, size)
LOG.debug('DO_CREATE_VOLUME %(location)s %(size)s',
{'location': location, 'size': size})
vdisklib.VDiskController.create_volume(location = location,
size = size)
volume['provider_location'] = location
@ -136,7 +138,8 @@ class OVSVolumeDriver(driver.VolumeDriver):
Downloads image from glance server into local .raw
:param volume: volume reference (sqlalchemy Model)
"""
LOG.debug("CP_IMG_TO_VOL %s %s", image_service, image_id)
LOG.debug("CP_IMG_TO_VOL %(image_service)s %(image_id)s",
{'image_service': image_service, 'image_id': image_id})
name = volume.display_name
if not name:
@ -172,7 +175,8 @@ class OVSVolumeDriver(driver.VolumeDriver):
Called on "cinder upload-to-image ...volume... ...image-name..."
:param volume: volume reference (sqlalchemy Model)
"""
LOG.debug("CP_VOL_TO_IMG %s %s", image_service, image_meta)
LOG.debug("CP_VOL_TO_IMG %(image_service)s %(image_meta)s",
{'image_service': image_service, 'image_meta': image_meta})
super(OVSVolumeDriver, self).copy_volume_to_image(
context, volume, image_service, image_meta)
@ -222,8 +226,8 @@ class OVSVolumeDriver(driver.VolumeDriver):
vdisk = vdiskhybrid.VDisk(disk_meta['diskguid'])
vdisk.cinder_id = volume.id
vdisk.name = name
LOG.debug('[CREATE FROM TEMPLATE] Updating meta %s %s',
volume.id, name)
LOG.debug('[CREATE FROM TEMPLATE] Updating meta %(volume_id)s '
'%(name)s', {'volume_id': volume.id, 'name': name})
vdisk.save()
else:
LOG.debug('[THIN CLONE] VDisk %s is not a template',
@ -239,7 +243,8 @@ class OVSVolumeDriver(driver.VolumeDriver):
'machineguid': source_ovs_disk.vmachine_guid,
'is_automatic': False}
LOG.debug('CREATE_SNAP %s %s', name, str(metadata))
LOG.debug('CREATE_SNAP %(name)s %(metadata)s',
{'name': name, 'metadata': metadata})
snapshotid = vdisklib.VDiskController.create_snapshot(
diskguid = source_ovs_disk.guid,
metadata = metadata,
@ -306,8 +311,8 @@ class OVSVolumeDriver(driver.VolumeDriver):
'machineguid': ovs_disk.vmachine_guid,
'is_automatic': False}
LOG.debug('CREATE_SNAP %s %s', snapshot.display_name,
str(metadata))
LOG.debug('CREATE_SNAP %(name)s %(metadata)s',
{'name': snapshot.display_name, 'metadata': metadata})
vdisklib.VDiskController.create_snapshot(diskguid = ovs_disk.guid,
metadata = metadata,
snapshotid =
@ -352,9 +357,10 @@ class OVSVolumeDriver(driver.VolumeDriver):
pmachineguid = self._find_ovs_model_pmachine_guid_by_hostname(
six.text_type(volume.host))
LOG.debug('[CLONE FROM SNAP] %s %s %s %s',
ovs_snap_disk.guid, snapshot.id, devicename,
pmachineguid)
LOG.debug('[CLONE FROM SNAP] %(disk)s %(snapshot)s %(device)s '
'%(machine)s',
{'disk': ovs_snap_disk.guid, 'snapshot': snapshot.id,
'device': devicename, 'machine': pmachineguid})
disk_meta = vdisklib.VDiskController.clone(
diskguid = ovs_snap_disk.guid,
snapshotid = snapshot.id,
@ -427,7 +433,8 @@ class OVSVolumeDriver(driver.VolumeDriver):
The volume is a .raw file on a virtual filesystem.
Connection is always allowed based on POSIX permissions.
"""
LOG.debug('TERM_CONN %s %s ', six.text_type(connector), force)
LOG.debug('TERM_CONN %(connector)s %(force)s ',
{'connector': six.text_type(connector), 'force': force})
def check_for_setup_error(self):
"""Validate driver setup"""
@ -493,12 +500,15 @@ class OVSVolumeDriver(driver.VolumeDriver):
_location = "{0}/{1}".format(vsr.mountpoint,
vd.devicename)
if _location == location:
LOG.debug('Location %s Disk found %s',
(location, vd.guid))
LOG.debug('Location %(location)s Disk '
'found %(id)s',
{'location': location,
'id': vd.guid})
disk = vdiskhybrid.VDisk(vd.guid)
return disk
msg = 'NO RESULT Attempt %s timeout %s max attempts %s'
LOG.debug(msg, attempt, timeout, retry)
LOG.debug('NO RESULT Attempt %(attempt)s timeout %(timeout)s max '
'attempts %(retry)s',
{'attempt': attempt, 'timeout': timeout, 'retry': retry})
if timeout:
time.sleep(timeout)
attempt += 1
@ -511,16 +521,16 @@ class OVSVolumeDriver(driver.VolumeDriver):
:return guid: GUID
"""
hostname = self._get_real_hostname(hostname)
LOG.debug('[_FIND OVS PMACHINE] Hostname %s' % (hostname))
LOG.debug('[_FIND OVS PMACHINE] Hostname %s', hostname)
mapping = [(pm.guid, six.text_type(sr.name))
for pm in pmachinelist.PMachineList.get_pmachines()
for sr in pm.storagerouters]
for item in mapping:
if item[1] == str(hostname):
msg = 'Found pmachineguid %s for Hostname %s'
LOG.debug(msg, item[0], hostname)
LOG.debug('Found pmachineguid %(item)s for Hostname %(host)s',
{'item': item[0], 'host': hostname})
return item[0]
msg = (_('No PMachine guid found for Hostname %s'), hostname)
msg = (_('No PMachine guid found for Hostname %s') % hostname)
LOG.exception(msg)
raise exception.VolumeBackendAPIException(data=msg)
@ -528,13 +538,14 @@ class OVSVolumeDriver(driver.VolumeDriver):
"""Find OVS disk object based on snapshot id
:return VDisk: OVS DAL model object
"""
LOG.debug('[_FIND OVS DISK] Snapshotid %s' % snapshotid)
LOG.debug('[_FIND OVS DISK] Snapshotid %s', snapshotid)
for disk in vdisklist.VDiskList.get_vdisks():
snaps_guid = [s['guid'] for s in disk.snapshots]
if str(snapshotid) in snaps_guid:
LOG.debug('[_FIND OVS DISK] Snapshot id %s Disk found %s',
(snapshotid, disk))
LOG.debug('[_FIND OVS DISK] Snapshot id %(snapshot)s Disk '
'found %(disk)s',
{'snapshot': snapshotid, 'disk': disk})
return disk
msg = (_('No disk found for snapshotid %s'), snapshotid)
msg = (_('No disk found for snapshotid %s') % snapshotid)
LOG.exception(msg)
raise exception.VolumeBackendAPIException(data=msg)
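
The OVS hunks above also move multi-argument messages from positional %s placeholders to a single named mapping. Named placeholders matter for translation because a translated template may need to reorder its arguments, which positional %s cannot express. An illustrative stdlib sketch (the snapshot values are made up):

import logging

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)

# Positional: argument order is fixed by the call site.
LOG.debug('CREATE_SNAP %s %s', 'snap-1', {'label': 'nightly'})

# Named: each placeholder is addressable, so a translated template can
# reorder them without touching the call site.
LOG.debug('CREATE_SNAP %(name)s %(metadata)s',
          {'name': 'snap-1', 'metadata': {'label': 'nightly'}})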

View File

@ -135,10 +135,9 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
def _export_fc(self, volumeid, targetwwpns, initiatorwwpns, volumename):
ret = 0
output = ''
msg = _('Export fc: %(volume)s, %(wwpns)s, %(iqn)s, %(volumename)s') \
% {'volume': volumeid, 'wwpns': targetwwpns,
'iqn': initiatorwwpns, 'volumename': volumename}
LOG.debug(msg)
LOG.debug('Export fc: %(volume)s, %(wwpns)s, %(iqn)s, %(volumename)s',
{'volume': volumeid, 'wwpns': targetwwpns,
'iqn': initiatorwwpns, 'volumename': volumename})
try:
ret, output = self.dpl.assign_vdev_fc(
self._conver_uuid2hex(volumeid), targetwwpns,

View File

@ -213,8 +213,8 @@ class PureISCSIDriver(san.SanISCSIDriver):
ERR_MSG_NOT_EXIST in err.text:
# Happens if the volume does not exist.
ctxt.reraise = False
LOG.warn(_LW("Volume deletion failed with message: %s"),
err.text)
LOG.warning(_LW("Volume deletion failed with message: %s"),
err.text)
LOG.debug("Leave PureISCSIDriver.delete_volume.")
def create_snapshot(self, snapshot):
@ -279,11 +279,11 @@ class PureISCSIDriver(san.SanISCSIDriver):
self._run_iscsiadm_bare(["-m", "discovery", "-t", "sendtargets",
"-p", self._iscsi_port["portal"]])
except processutils.ProcessExecutionError as err:
LOG.warn(_LW("iSCSI discovery of port %(port_name)s at "
"%(port_portal)s failed with error: %(err_msg)s"),
{"port_name": self._iscsi_port["name"],
"port_portal": self._iscsi_port["portal"],
"err_msg": err.stderr})
LOG.warning(_LW("iSCSI discovery of port %(port_name)s at "
"%(port_portal)s failed with error: %(err_msg)s"),
{"port_name": self._iscsi_port["name"],
"port_portal": self._iscsi_port["portal"],
"err_msg": err.stderr})
self._iscsi_port = self._choose_target_iscsi_port()
return self._iscsi_port
@ -384,8 +384,8 @@ class PureISCSIDriver(san.SanISCSIDriver):
"Connection already exists" in err.text):
# Happens if the volume is already connected to the host.
ctxt.reraise = False
LOG.warn(_LW("Volume connection already exists with "
"message: %s"), err.text)
LOG.warning(_LW("Volume connection already exists with "
"message: %s"), err.text)
# Get the info for the existing connection
connected_hosts = \
self._array.list_volume_private_connections(vol_name)
@ -724,5 +724,5 @@ class PureISCSIDriver(san.SanISCSIDriver):
if (err.code == 400 and
ERR_MSG_NOT_EXIST in err.text):
ctxt.reraise = False
LOG.warn(_LW("Volume unmanage was unable to rename "
"the volume, error message: %s"), err.text)
LOG.warning(_LW("Volume unmanage was unable to rename "
"the volume, error message: %s"), err.text)

View File

@ -102,10 +102,10 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver):
def check_for_setup_error(self):
if not self.configuration.quobyte_volume_url:
msg = (_LW("There's no Quobyte volume configured (%s). Example:"
" quobyte://<DIR host>/<volume name>") %
msg = (_("There's no Quobyte volume configured (%s). Example:"
" quobyte://<DIR host>/<volume name>") %
'quobyte_volume_url')
LOG.warn(msg)
LOG.warning(msg)
raise exception.VolumeDriverException(msg)
# Check if mount.quobyte is installed
@ -148,11 +148,10 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver):
qcow2.
"""
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
"volume_size: %(size)s"
% {'snap': snapshot['id'],
'vol': volume['id'],
'size': volume_size})
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, ",
{'snap': snapshot['id'],
'vol': volume['id'],
'size': volume_size})
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path)
@ -168,7 +167,7 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver):
path_to_new_vol = self._local_path_volume(volume)
LOG.debug("will copy from snapshot at %s" % path_to_snap_img)
LOG.debug("will copy from snapshot at %s", path_to_snap_img)
if self.configuration.quobyte_qcow2_volumes:
out_format = 'qcow2'
@ -187,8 +186,8 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver):
"""Deletes a logical volume."""
if not volume['provider_location']:
LOG.warn(_LW('Volume %s does not have provider_location '
'specified, skipping'), volume['name'])
LOG.warning(_LW('Volume %s does not have provider_location '
'specified, skipping'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
@ -355,7 +354,7 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver):
' one Quobyte volume.'
target_volume = self._mounted_shares[0]
LOG.debug('Selected %s as target Quobyte volume.' % target_volume)
LOG.debug('Selected %s as target Quobyte volume.', target_volume)
return target_volume
@ -387,16 +386,17 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver):
mounted = False
try:
LOG.info(_LI('Fixing previous mount %s which was not'
' unmounted correctly.') % mount_path)
' unmounted correctly.'), mount_path)
self._execute('umount.quobyte', mount_path,
run_as_root=False)
except processutils.ProcessExecutionError as exc:
LOG.warn(_LW("Failed to unmount previous mount: %s"),
exc)
LOG.warning(_LW("Failed to unmount previous mount: "
"%s"), exc)
else:
# TODO(quobyte): Extend exc analysis in here?
LOG.warn(_LW("Unknown error occurred while checking mount"
" point: %s Trying to continue."), exc)
LOG.warning(_LW("Unknown error occurred while checking "
"mount point: %s Trying to continue."),
exc)
if not mounted:
if not os.path.isdir(mount_path):
@ -407,13 +407,13 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver):
command.extend(['-c', self.configuration.quobyte_client_cfg])
try:
LOG.info(_LI('Mounting volume: %s ...') % quobyte_volume)
LOG.info(_LI('Mounting volume: %s ...'), quobyte_volume)
self._execute(*command, run_as_root=False)
LOG.info(_LI('Mounting volume: %s succeeded') % quobyte_volume)
LOG.info(_LI('Mounting volume: %s succeeded'), quobyte_volume)
mounted = True
except processutils.ProcessExecutionError as exc:
if ensure and 'already mounted' in exc.stderr:
LOG.warn(_LW("%s is already mounted"), quobyte_volume)
LOG.warning(_LW("%s is already mounted"), quobyte_volume)
else:
raise
@ -432,5 +432,5 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver):
raise exception.VolumeDriverException(msg)
if not os.access(mount_path, os.W_OK | os.X_OK):
LOG.warn(_LW("Volume is not writable. Please broaden the file"
" permissions. Mount: %s"), mount_path)
LOG.warning(_LW("Volume is not writable. Please broaden the file"
" permissions. Mount: %s"), mount_path)

View File

@ -308,8 +308,8 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
return args
def _connect_to_rados(self, pool=None):
LOG.debug("opening connection to ceph cluster (timeout=%s)." %
(self.configuration.rados_connect_timeout))
LOG.debug("opening connection to ceph cluster (timeout=%s).",
self.configuration.rados_connect_timeout)
# NOTE (e0ne): rados is binding to C library librados.
# It blocks eventlet loop so we need to run it in a native
@ -468,8 +468,8 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
# infinite is allowed.
if depth == CONF.rbd_max_clone_depth:
LOG.debug("maximum clone depth (%d) has been reached - "
"flattening source volume" %
(CONF.rbd_max_clone_depth))
"flattening source volume",
CONF.rbd_max_clone_depth)
flatten_parent = True
src_volume = self.rbd.Image(client.ioctx, src_name)
@ -479,7 +479,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
_pool, parent, snap = self._get_clone_info(src_volume,
src_name)
# Flatten source volume
LOG.debug("flattening source volume %s" % (src_name))
LOG.debug("flattening source volume %s", src_name)
src_volume.flatten()
# Delete parent clone snap
parent_volume = self.rbd.Image(client.ioctx, parent)
@ -491,7 +491,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
# Create new snapshot of source volume
clone_snap = "%s.clone_snap" % dest_name
LOG.debug("creating snapshot='%s'" % (clone_snap))
LOG.debug("creating snapshot='%s'", clone_snap)
src_volume.create_snap(clone_snap)
src_volume.protect_snap(clone_snap)
except Exception as exc:
@ -502,7 +502,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
# Now clone source volume snapshot
try:
LOG.debug("cloning '%(src_vol)s@%(src_snap)s' to "
"'%(dest)s'" %
"'%(dest)s'",
{'src_vol': src_name, 'src_snap': clone_snap,
'dest': dest_name})
self.RBDProxy().clone(client.ioctx, src_name, clone_snap,
@ -517,7 +517,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
if volume['size'] != src_vref['size']:
LOG.debug("resize volume '%(dst_vol)s' from %(src_size)d to "
"%(dst_size)d" %
"%(dst_size)d",
{'dst_vol': volume['name'], 'src_size': src_vref['size'],
'dst_size': volume['size']})
self._resize(volume)
@ -528,7 +528,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
"""Creates a logical volume."""
size = int(volume['size']) * units.Gi
LOG.debug("creating volume '%s'" % (volume['name']))
LOG.debug("creating volume '%s'", volume['name'])
chunk_size = CONF.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
@ -542,13 +542,13 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
features=client.features)
def _flatten(self, pool, volume_name):
LOG.debug('flattening %(pool)s/%(img)s' %
LOG.debug('flattening %(pool)s/%(img)s',
dict(pool=pool, img=volume_name))
with RBDVolumeProxy(self, volume_name, pool) as vol:
vol.flatten()
def _clone(self, volume, src_pool, src_image, src_snap):
LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s' %
LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s',
dict(pool=src_pool, img=src_image, snap=src_snap,
dst=volume['name']))
with RADOSClient(self, src_pool) as src_client:
@ -604,7 +604,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
if parent_snap == "%s.clone_snap" % volume_name:
return pool, parent, parent_snap
except self.rbd.ImageNotFound:
LOG.debug("volume %s is not a clone" % volume_name)
LOG.debug("volume %s is not a clone", volume_name)
volume.set_snap(None)
return (None, None, None)
@ -622,7 +622,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
parent_name,
parent_snap)
LOG.debug("deleting parent snapshot %s" % (parent_snap))
LOG.debug("deleting parent snapshot %s", parent_snap)
parent_rbd.unprotect_snap(parent_snap)
parent_rbd.remove_snap(parent_snap)
@ -633,7 +633,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
# If parent has been deleted in Cinder, delete the silent reference and
# keep walking up the chain if it is itself a clone.
if (not parent_has_snaps) and parent_name.endswith('.deleted'):
LOG.debug("deleting parent %s" % (parent_name))
LOG.debug("deleting parent %s", parent_name)
self.RBDProxy().remove(client.ioctx, parent_name)
# Now move up to grandparent if there is one
@ -649,8 +649,8 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
try:
rbd_image = self.rbd.Image(client.ioctx, volume_name)
except self.rbd.ImageNotFound:
LOG.info(_LI("volume %s no longer exists in backend")
% (volume_name))
LOG.info(_LI("volume %s no longer exists in backend"),
volume_name)
return
clone_snap = None
@ -681,7 +681,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
rbd_image.close()
if clone_snap is None:
LOG.debug("deleting rbd volume %s" % (volume_name))
LOG.debug("deleting rbd volume %s", volume_name)
try:
self.RBDProxy().remove(client.ioctx, volume_name)
except self.rbd.ImageBusy:
@ -690,14 +690,13 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
"connection from a client that has crashed and, "
"if so, may be resolved by retrying the delete "
"after 30 seconds has elapsed."))
LOG.warn(msg)
LOG.warning(msg)
# Now raise this so that volume stays available so that we
# delete can be retried.
raise exception.VolumeIsBusy(msg, volume_name=volume_name)
except self.rbd.ImageNotFound:
msg = (_LI("RBD volume %s not found, allowing delete "
"operation to proceed.") % volume_name)
LOG.info(msg)
LOG.info(_LI("RBD volume %s not found, allowing delete "
"operation to proceed."), volume_name)
return
# If it is a clone, walk back up the parent chain deleting
@ -815,15 +814,13 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
return False
if self._get_fsid() != fsid:
reason = ('%s is in a different ceph cluster') % image_location
LOG.debug(reason)
LOG.debug('%s is in a different ceph cluster', image_location)
return False
if image_meta['disk_format'] != 'raw':
reason = ("rbd image clone requires image format to be "
"'raw' but image {0} is '{1}'").format(
image_location, image_meta['disk_format'])
LOG.debug(reason)
LOG.debug(("rbd image clone requires image format to be "
"'raw' but image {0} is '{1}'").format(
image_location, image_meta['disk_format']))
return False
# check that we can read the image
@ -834,7 +831,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
read_only=True):
return True
except self.rbd.Error as e:
LOG.debug('Unable to open image %(loc)s: %(err)s' %
LOG.debug('Unable to open image %(loc)s: %(err)s',
dict(loc=image_location, err=e))
return False
@ -855,9 +852,9 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
CONF.image_conversion_dir or
tempfile.gettempdir())
if (tmpdir == self.configuration.volume_tmp_dir):
LOG.warn(_LW('volume_tmp_dir is now deprecated, please use '
'image_conversion_dir'))
if tmpdir == self.configuration.volume_tmp_dir:
LOG.warning(_LW('volume_tmp_dir is now deprecated, please use '
'image_conversion_dir'))
# ensure temporary directory exists
if not os.path.exists(tmpdir):

View File

@ -205,7 +205,7 @@ class RemoteFSDriver(driver.VolumeDriver):
volume['provider_location'] = self._find_share(volume['size'])
LOG.info(_LI('casted to %s') % volume['provider_location'])
LOG.info(_LI('casted to %s'), volume['provider_location'])
self._do_create_volume(volume)
@ -242,11 +242,11 @@ class RemoteFSDriver(driver.VolumeDriver):
self._ensure_share_mounted(share)
mounted_shares.append(share)
except Exception as exc:
LOG.error(_LE('Exception during mounting %s') % (exc,))
LOG.error(_LE('Exception during mounting %s'), exc)
self._mounted_shares = mounted_shares
LOG.debug('Available shares %s' % self._mounted_shares)
LOG.debug('Available shares %s', self._mounted_shares)
def create_cloned_volume(self, volume, src_vref):
raise NotImplementedError()
@ -257,9 +257,9 @@ class RemoteFSDriver(driver.VolumeDriver):
:param volume: volume reference
"""
if not volume['provider_location']:
LOG.warn(_LW('Volume %s does not have '
'provider_location specified, '
'skipping'), volume['name'])
LOG.warning(_LW('Volume %s does not have '
'provider_location specified, '
'skipping'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
@ -330,13 +330,13 @@ class RemoteFSDriver(driver.VolumeDriver):
"""
if self.configuration.nas_secure_file_permissions == 'true':
permissions = '660'
LOG.debug('File path %s is being set with permissions: %s' %
(path, permissions))
LOG.debug('File path %(path)s is being set with permissions: '
'%(permissions)s',
{'path': path, 'permissions': permissions})
else:
permissions = 'ugo+rw'
parms = {'path': path, 'perm': permissions}
LOG.warn(_LW('%(path)s is being set with open permissions: '
'%(perm)s') % parms)
LOG.warning(_LW('%(path)s is being set with open permissions: '
'%(perm)s'), {'path': path, 'perm': permissions})
self._execute('chmod', permissions, path,
run_as_root=self._execute_as_root)
@ -424,7 +424,7 @@ class RemoteFSDriver(driver.VolumeDriver):
self.shares[share_address] = self.configuration.nas_mount_options
elif share_file is not None:
LOG.debug('Loading shares from %s.' % share_file)
LOG.debug('Loading shares from %s.', share_file)
for share in self._read_config_file(share_file):
# A configuration line may be either:
@ -527,18 +527,18 @@ class RemoteFSDriver(driver.VolumeDriver):
doc_html = "http://docs.openstack.org/admin-guide-cloud/content" \
"/nfs_backend.html"
self.configuration.nas_secure_file_operations = 'false'
LOG.warn(_LW("The NAS file operations will be run as root: allowing "
"root level access at the storage backend. This is "
"considered an insecure NAS environment. "
"Please see %s for information on a secure NAS "
"configuration.") %
doc_html)
LOG.warning(_LW("The NAS file operations will be run as root: "
"allowing root level access at the storage backend. "
"This is considered an insecure NAS environment. "
"Please see %s for information on a secure NAS "
"configuration."),
doc_html)
self.configuration.nas_secure_file_permissions = 'false'
LOG.warn(_LW("The NAS file permissions mode will be 666 (allowing "
"other/world read & write access). This is considered an "
"insecure NAS environment. Please see %s for information "
"on a secure NFS configuration.") %
doc_html)
LOG.warning(_LW("The NAS file permissions mode will be 666 (allowing "
"other/world read & write access). This is considered "
"an insecure NAS environment. Please see %s for "
"information on a secure NFS configuration."),
doc_html)
def _determine_nas_security_option_setting(self, nas_option, mount_point,
is_new_cinder_install):
@ -579,11 +579,11 @@ class RemoteFSDriver(driver.VolumeDriver):
self._execute('chmod', '640', file_path,
run_as_root=False)
LOG.info(_LI('New Cinder secure environment indicator'
' file created at path %s.') % file_path)
' file created at path %s.'), file_path)
except IOError as err:
LOG.error(_LE('Failed to create Cinder secure '
'environment indicator file: %s') %
format(err))
'environment indicator file: %s'),
err)
else:
# For existing installs, we default to 'false'. The
# admin can always set the option at the driver config.
@ -837,7 +837,7 @@ class RemoteFSSnapDriver(RemoteFSDriver):
return snap_info['active']
def _create_cloned_volume(self, volume, src_vref):
LOG.info(_LI('Cloning volume %(src)s to volume %(dst)s') %
LOG.info(_LI('Cloning volume %(src)s to volume %(dst)s'),
{'src': src_vref['id'],
'dst': volume['id']})
@ -881,7 +881,7 @@ class RemoteFSSnapDriver(RemoteFSDriver):
if (snapshot_file == active_file):
return
LOG.info(_LI('Deleting stale snapshot: %s') % snapshot['id'])
LOG.info(_LI('Deleting stale snapshot: %s'), snapshot['id'])
self._delete(snapshot_path)
del(snap_info[snapshot['id']])
self._write_info_file(info_path, snap_info)
@ -901,7 +901,7 @@ class RemoteFSSnapDriver(RemoteFSDriver):
"""
LOG.debug('Deleting snapshot %s:' % snapshot['id'])
LOG.debug('Deleting snapshot %s:', snapshot['id'])
volume_status = snapshot['volume']['status']
if volume_status not in ['available', 'in-use']:
@ -922,11 +922,11 @@ class RemoteFSSnapDriver(RemoteFSDriver):
# (This happens, for example, if snapshot_create failed due to lack
# of permission to write to the share.)
LOG.info(_LI('Snapshot record for %s is not present, allowing '
'snapshot_delete to proceed.') % snapshot['id'])
'snapshot_delete to proceed.'), snapshot['id'])
return
snapshot_file = snap_info[snapshot['id']]
LOG.debug('snapshot_file for this snap is: %s' % snapshot_file)
LOG.debug('snapshot_file for this snap is: %s', snapshot_file)
snapshot_path = os.path.join(
self._local_volume_dir(snapshot['volume']),
snapshot_file)
@ -949,9 +949,8 @@ class RemoteFSSnapDriver(RemoteFSDriver):
if base_file is None:
# There should always be at least the original volume
# file as base.
msg = _('No backing file found for %s, allowing snapshot '
'to be deleted.') % snapshot_path
LOG.warn(msg)
LOG.warning(_LW('No backing file found for %s, allowing '
'snapshot to be deleted.'), snapshot_path)
# Snapshot may be stale, so just delete it and update the
# info file instead of blocking
@ -971,10 +970,8 @@ class RemoteFSSnapDriver(RemoteFSDriver):
break
if base_id is None:
# This means we are deleting the oldest snapshot
msg = 'No %(base_id)s found for %(file)s' % {
'base_id': 'base_id',
'file': snapshot_file}
LOG.debug(msg)
LOG.debug('No %(base_id)s found for %(file)s',
{'base_id': 'base_id', 'file': snapshot_file})
online_delete_info = {
'active_file': active_file,
@ -1042,8 +1039,7 @@ class RemoteFSSnapDriver(RemoteFSDriver):
higher_file),
None)
if highest_file is None:
msg = 'No file depends on %s.' % higher_file
LOG.debug(msg)
LOG.debug('No file depends on %s.', higher_file)
# Committing higher_file into snapshot_file
# And update pointer in highest_file
@ -1248,11 +1244,10 @@ class RemoteFSSnapDriver(RemoteFSDriver):
context,
snapshot['volume_id'],
connection_info)
LOG.debug('nova call result: %s' % result)
LOG.debug('nova call result: %s', result)
except Exception as e:
LOG.error(_LE('Call to Nova to create snapshot failed'))
LOG.exception(e)
raise e
LOG.error(_LE('Call to Nova to create snapshot failed %s'), e)
raise
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
@ -1276,10 +1271,9 @@ class RemoteFSSnapDriver(RemoteFSDriver):
'while creating snapshot.')
raise exception.RemoteFSException(msg)
LOG.debug('Status of snapshot %(id)s is now %(status)s' % {
'id': snapshot['id'],
'status': s['status']
})
LOG.debug('Status of snapshot %(id)s is now %(status)s',
{'id': snapshot['id'],
'status': s['status']})
if 10 < seconds_elapsed <= 20:
increment = 2
@ -1337,9 +1331,8 @@ class RemoteFSSnapDriver(RemoteFSDriver):
snapshot['id'],
delete_info)
except Exception as e:
LOG.error(_LE('Call to Nova delete snapshot failed'))
LOG.exception(e)
raise e
LOG.error(_LE('Call to Nova delete snapshot failed %s'), e)
raise
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
@ -1355,9 +1348,8 @@ class RemoteFSSnapDriver(RemoteFSDriver):
# Nova tasks completed successfully
break
else:
msg = ('status of snapshot %s is '
'still "deleting"... waiting') % snapshot['id']
LOG.debug(msg)
LOG.debug('status of snapshot %s is still "deleting"... '
'waiting', snapshot['id'])
time.sleep(increment)
seconds_elapsed += increment
else:

View File

@ -772,9 +772,8 @@ class HP3PARCommon(object):
break
if found_vlun is None:
msg = (_("3PAR vlun %(name)s not found on host %(host)s") %
{'name': volume_name, 'host': hostname})
LOG.info(msg)
LOG.info(_LI("3PAR vlun %(name)s not found on host %(host)s"),
{'name': volume_name, 'host': hostname})
return found_vlun
def create_vlun(self, volume, host, nsp=None):
@ -794,10 +793,8 @@ class HP3PARCommon(object):
if volume_name in vlun['volumeName']:
break
else:
msg = (
_("3PAR vlun for volume %(name)s not found on host %(host)s") %
{'name': volume_name, 'host': hostname})
LOG.info(msg)
LOG.info(_LI("3PAR vlun for volume %(name)s not found on host "
"%(host)s"), {'name': volume_name, 'host': hostname})
return
# VLUN Type of MATCHED_SET 4 requires the port to be provided
@ -833,13 +830,11 @@ class HP3PARCommon(object):
# for future needs (e.g. export volume to host set).
# The log info explains why the host was left alone.
msg = (_("3PAR vlun for volume '%(name)s' was deleted, "
"but the host '%(host)s' was not deleted because: "
"%(reason)s") %
{'name': volume_name,
'host': hostname,
'reason': ex.get_description()})
LOG.info(msg)
LOG.info(_LI("3PAR vlun for volume '%(name)s' was deleted, "
"but the host '%(host)s' was not deleted "
"because: %(reason)s"),
{'name': volume_name, 'host': hostname,
'reason': ex.get_description()})
def _get_volume_type(self, type_id):
ctxt = context.get_admin_context()
@ -1215,23 +1210,23 @@ class HP3PARCommon(object):
except exception.InvalidInput as ex:
# Delete the volume if unable to add it to the volume set
self.client.deleteVolume(volume_name)
LOG.error(ex)
LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
except hpexceptions.HTTPConflict:
msg = _("Volume (%s) already exists on array") % volume_name
LOG.error(msg)
raise exception.Duplicate(msg)
except hpexceptions.HTTPBadRequest as ex:
LOG.error(ex)
LOG.error(_LE("Exception: %s"), ex)
raise exception.Invalid(ex.get_description())
except exception.InvalidInput as ex:
LOG.error(ex)
raise ex
LOG.error(_LE("Exception: %s"), ex)
raise
except exception.CinderException as ex:
LOG.error(ex)
raise ex
LOG.error(_LE("Exception: %s"), ex)
raise
except Exception as ex:
LOG.error(ex)
LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
return self._get_model_update(volume['host'], cpg)
@ -1315,7 +1310,7 @@ class HP3PARCommon(object):
except hpexceptions.HTTPNotFound:
raise exception.NotFound()
except Exception as ex:
LOG.error(ex)
LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
def delete_volume(self, volume):
@ -1336,11 +1331,11 @@ class HP3PARCommon(object):
# the volume once it stops the copy.
self.client.stopOnlinePhysicalCopy(volume_name)
else:
LOG.error(ex)
raise ex
LOG.error(_LE("Exception: %s"), ex)
raise
else:
LOG.error(ex)
raise ex
LOG.error(_LE("Exception: %s"), ex)
raise
except hpexceptions.HTTPConflict as ex:
if ex.get_code() == 34:
# This is a special case which means the
@ -1370,23 +1365,23 @@ class HP3PARCommon(object):
LOG.error(msg)
raise exception.VolumeIsBusy(message=msg)
else:
LOG.error(ex)
LOG.error(_LE("Exception: %s"), ex)
raise exception.VolumeIsBusy(message=ex.get_description())
except hpexceptions.HTTPNotFound as ex:
# We'll let this act as if it worked
# it helps clean up the cinder entries.
msg = _("Delete volume id not found. Removing from cinder: "
"%(id)s Ex: %(msg)s") % {'id': volume['id'], 'msg': ex}
LOG.warning(msg)
LOG.warning(_LW("Delete volume id not found. Removing from "
"cinder: %(id)s Ex: %(msg)s"),
{'id': volume['id'], 'msg': ex})
except hpexceptions.HTTPForbidden as ex:
LOG.error(ex)
LOG.error(_LE("Exception: %s"), ex)
raise exception.NotAuthorized(ex.get_description())
except hpexceptions.HTTPConflict as ex:
LOG.error(ex)
LOG.error(_LE("Exception: %s"), ex)
raise exception.VolumeIsBusy(message=ex.get_description())
except Exception as ex:
LOG.error(ex)
LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
def create_volume_from_snapshot(self, volume, snapshot):
@ -1461,16 +1456,16 @@ class HP3PARCommon(object):
except Exception as ex:
# Delete the volume if unable to add it to the volume set
self.client.deleteVolume(volume_name)
LOG.error(ex)
LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
except hpexceptions.HTTPForbidden as ex:
LOG.error(ex)
LOG.error(_LE("Exception: %s"), ex)
raise exception.NotAuthorized()
except hpexceptions.HTTPNotFound as ex:
LOG.error(ex)
LOG.error(_LE("Exception: %s"), ex)
raise exception.NotFound()
except Exception as ex:
LOG.error(ex)
LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
return model_update
@ -1508,10 +1503,10 @@ class HP3PARCommon(object):
self.client.createSnapshot(snap_name, vol_name, optional)
except hpexceptions.HTTPForbidden as ex:
LOG.error(ex)
LOG.error(_LE("Exception: %s"), ex)
raise exception.NotAuthorized()
except hpexceptions.HTTPNotFound as ex:
LOG.error(ex)
LOG.error(_LE("Exception: %s"), ex)
raise exception.NotFound()
def update_volume_key_value_pair(self, volume, key, value):
@ -1549,7 +1544,8 @@ class HP3PARCommon(object):
volume_name = self._get_3par_vol_name(volume['id'])
self.client.removeVolumeMetaData(volume_name, key)
except Exception as ex:
msg = _('Failure in clear_volume_key_value_pair:%s') % ex
msg = _('Failure in clear_volume_key_value_pair: '
'%s') % six.text_type(ex)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
@ -1685,16 +1681,16 @@ class HP3PARCommon(object):
LOG.error(msg)
raise exception.Duplicate(msg)
except hpexceptions.HTTPBadRequest as ex:
LOG.error(ex)
LOG.error(_LE("Exception: %s"), ex)
raise exception.Invalid(ex.get_description())
except exception.InvalidInput as ex:
LOG.error(ex)
raise ex
LOG.error(_LE("Exception: %s"), ex)
raise
except exception.CinderException as ex:
LOG.error(ex)
raise ex
LOG.error(_LE("Exception: %s"), ex)
raise
except Exception as ex:
LOG.error(ex)
LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
return self._get_model_update(volume['host'], cpg)
@ -1707,16 +1703,16 @@ class HP3PARCommon(object):
snap_name = self._get_3par_snap_name(snapshot['id'])
self.client.deleteVolume(snap_name)
except hpexceptions.HTTPForbidden as ex:
LOG.error(ex)
LOG.error(_LE("Exception: %s"), ex)
raise exception.NotAuthorized()
except hpexceptions.HTTPNotFound as ex:
# We'll let this act as if it worked
# it helps clean up the cinder entries.
msg = _("Delete Snapshot id not found. Removing from cinder: "
"%(id)s Ex: %(msg)s") % {'id': snapshot['id'], 'msg': ex}
LOG.warning(msg)
LOG.warning(_LW("Delete Snapshot id not found. Removing from "
"cinder: %(id)s Ex: %(msg)s"),
{'id': snapshot['id'], 'msg': ex})
except hpexceptions.HTTPConflict as ex:
LOG.error(ex)
LOG.error(_LE("Exception: %s"), ex)
raise exception.SnapshotIsBusy(snapshot_name=snapshot['id'])
def _get_3par_hostname_from_wwn_iqn(self, wwns, iqns):
@ -1762,12 +1758,12 @@ class HP3PARCommon(object):
# use the wwn to see if we can find the hostname
hostname = self._get_3par_hostname_from_wwn_iqn(wwn, iqn)
# no 3par host, re-throw
if (hostname is None):
LOG.error(e)
if hostname is None:
LOG.error(_LE("Exception: %s"), e)
raise
else:
# not a 'host does not exist' HTTPNotFound exception, re-throw
LOG.error(e)
LOG.error(_LE("Exception: %s"), e)
raise
# try again with name retrieved from 3par
@ -1822,17 +1818,17 @@ class HP3PARCommon(object):
if new_tpvv:
cop = self.CONVERT_TO_THIN
LOG.info(_LI("Converting %(volume_name)s to thin provisioning "
"with userCPG=%(new_cpg)s") %
"with userCPG=%(new_cpg)s"),
{'volume_name': volume_name, 'new_cpg': new_cpg})
elif new_tdvv:
cop = self.CONVERT_TO_DEDUP
LOG.info(_LI("Converting %(volume_name)s to thin dedup "
"provisioning with userCPG=%(new_cpg)s") %
"provisioning with userCPG=%(new_cpg)s"),
{'volume_name': volume_name, 'new_cpg': new_cpg})
else:
cop = self.CONVERT_TO_FULL
LOG.info(_LI("Converting %(volume_name)s to full provisioning "
"with userCPG=%(new_cpg)s") %
"with userCPG=%(new_cpg)s"),
{'volume_name': volume_name, 'new_cpg': new_cpg})
try:
@ -1849,7 +1845,7 @@ class HP3PARCommon(object):
# info and then raise.
LOG.info(_LI("tunevv failed because the volume '%s' "
"has snapshots."), volume_name)
raise ex
raise
task_id = body['taskid']
status = self.TaskWaiter(self.client, task_id).wait_for_task()
@ -2241,7 +2237,7 @@ class ModifySpecsTask(flow_utils.CinderTask):
if ex.get_code() != 102:
LOG.error(_LE("Unexpected error when retype() tried to "
"deleteVolumeSet(%s)"), vvs_name)
raise ex
raise
if new_vvs or new_qos or new_flash_cache:
common._add_volume_to_volume_set(
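
Several 3PAR hunks above also replace "raise ex" with a bare raise inside except blocks. On Python 2, which this code targets, re-raising the bound name starts a fresh traceback at the re-raise site, whereas a bare raise propagates the active exception with its original traceback. A small self-contained sketch:

import traceback

def risky():
    raise ValueError('boom')

try:
    try:
        risky()
    except ValueError:
        # A bare raise keeps the original traceback; 'raise ex' would make
        # this line look like the origin of the error on Python 2.
        raise
except ValueError:
    traceback.print_exc()  # the trace still points into risky()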

View File

@ -467,7 +467,7 @@ class HP3PARFCDriver(cinder.volume.driver.FibreChannelDriver):
protocol = host['capabilities']['storage_protocol']
if protocol != 'FC':
LOG.debug("3PAR FC driver cannot migrate in-use volume "
"to a host with storage_protocol=%s." % protocol)
"to a host with storage_protocol=%s.", protocol)
return False, None
common = self._login()

View File

@ -159,8 +159,7 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
elif len(ip) == 2:
temp_iscsi_ip[ip[0]] = {'ip_port': ip[1]}
else:
msg = _("Invalid IP address format '%s'") % ip_addr
LOG.warn(msg)
LOG.warning(_LW("Invalid IP address format '%s'"), ip_addr)
# add the single value iscsi_ip_address option to the IP dictionary.
# This way we can see if it's a valid iSCSI IP. If it's not valid,
@ -193,15 +192,15 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
# lets see if there are invalid iSCSI IPs left in the temp dict
if len(temp_iscsi_ip) > 0:
msg = (_("Found invalid iSCSI IP address(s) in configuration "
"option(s) hp3par_iscsi_ips or iscsi_ip_address '%s.'") %
(", ".join(temp_iscsi_ip)))
LOG.warn(msg)
LOG.warning(_LW("Found invalid iSCSI IP address(s) in "
"configuration option(s) hp3par_iscsi_ips or "
"iscsi_ip_address '%s.'"),
(", ".join(temp_iscsi_ip)))
if not len(self.iscsi_ips) > 0:
msg = _('At least one valid iSCSI IP address must be set.')
LOG.error(msg)
raise exception.InvalidInput(reason=(msg))
raise exception.InvalidInput(reason=msg)
def check_for_setup_error(self):
"""Setup errors are already checked for in do_setup so return pass."""
@ -312,9 +311,8 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
vlun = common.create_vlun(volume, host, least_used_nsp)
if least_used_nsp is None:
msg = _("Least busy iSCSI port not found, "
"using first iSCSI port in list.")
LOG.warn(msg)
LOG.warning(_LW("Least busy iSCSI port not found, "
"using first iSCSI port in list."))
iscsi_ip = self.iscsi_ips.keys()[0]
else:
iscsi_ip = self._get_ip_using_nsp(least_used_nsp)
@ -466,10 +464,9 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
host = common._get_3par_host(hostname)
elif (not host['initiatorChapEnabled'] and
self.configuration.hp3par_iscsi_chap_enabled):
LOG.warn(_LW("Host exists without CHAP credentials set "
"and has iSCSI attachments but CHAP is "
"enabled. Updating host with new CHAP "
"credentials."))
LOG.warning(_LW("Host exists without CHAP credentials set and "
"has iSCSI attachments but CHAP is enabled. "
"Updating host with new CHAP credentials."))
self._set_3par_chaps(
common,
hostname,
@ -499,11 +496,12 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
host_info = common.client.getHost(chap_username)
if not host_info['initiatorChapEnabled']:
LOG.warn(_LW("Host has no CHAP key, but CHAP is enabled."))
LOG.warning(_LW("Host has no CHAP key, but CHAP is enabled."))
except hpexceptions.HTTPNotFound:
chap_password = volume_utils.generate_password(16)
LOG.warn(_LW("No host or VLUNs exist. Generating new CHAP key."))
LOG.warning(_LW("No host or VLUNs exist. Generating new "
"CHAP key."))
else:
# Get a list of all iSCSI VLUNs and see if there is already a CHAP
# key assigned to one of them. Use that CHAP key if present,
@ -528,15 +526,15 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
break
except hpexceptions.HTTPNotFound:
LOG.debug("The VLUN %s is missing CHAP credentials "
"but CHAP is enabled. Skipping." %
"but CHAP is enabled. Skipping.",
vlun['remoteName'])
else:
LOG.warn(_LW("Non-iSCSI VLUN detected."))
LOG.warning(_LW("Non-iSCSI VLUN detected."))
if not chap_exists:
chap_password = volume_utils.generate_password(16)
LOG.warn(_LW("No VLUN contained CHAP credentials. "
"Generating new CHAP key."))
LOG.warning(_LW("No VLUN contained CHAP credentials. "
"Generating new CHAP key."))
# Add CHAP credentials to the volume metadata
vol_name = common._get_3par_vol_name(volume['id'])
@ -718,7 +716,7 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
protocol = host['capabilities']['storage_protocol']
if protocol != 'iSCSI':
LOG.debug("3PAR ISCSI driver cannot migrate in-use volume "
"to a host with storage_protocol=%s." % protocol)
"to a host with storage_protocol=%s.", protocol)
return False, None
common = self._login()

View File

@ -203,7 +203,7 @@ class HPLeftHandCLIQProxy(san.SanISCSIDriver):
for k, v in status_node.attrib.items():
volume_attributes["permission." + k] = v
LOG.debug("Volume info: %(volume_name)s => %(volume_attributes)s" %
LOG.debug("Volume info: %(volume_name)s => %(volume_attributes)s",
{'volume_name': volume_name,
'volume_attributes': volume_attributes})
return volume_attributes
@ -259,7 +259,7 @@ class HPLeftHandCLIQProxy(san.SanISCSIDriver):
for k, v in status_node.attrib.items():
snapshot_attributes["permission." + k] = v
LOG.debug("Snapshot info: %(name)s => %(attributes)s" %
LOG.debug("Snapshot info: %(name)s => %(attributes)s",
{'name': snapshot_name, 'attributes': snapshot_attributes})
return snapshot_attributes

View File

@ -35,7 +35,7 @@ LeftHand array.
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _LE, _LI
from cinder.i18n import _, _LI
from cinder.volume import driver
from cinder.volume.drivers.san.hp import hp_lefthand_cliq_proxy as cliq_proxy
from cinder.volume.drivers.san.hp import hp_lefthand_rest_proxy as rest_proxy
@ -81,7 +81,7 @@ class HPLeftHandISCSIDriver(driver.VolumeDriver):
self.proxy = self._create_proxy(*self.args, **self.kwargs)
LOG.info(_LI("HPLeftHand driver %(driver_ver)s, "
"proxy %(proxy_ver)s") % {
"proxy %(proxy_ver)s"), {
"driver_ver": self.VERSION,
"proxy_ver": self.proxy.get_version_string()})
@ -91,10 +91,10 @@ class HPLeftHandISCSIDriver(driver.VolumeDriver):
# Check minimum client version for REST proxy
client_version = rest_proxy.hplefthandclient.version
if (client_version < MIN_CLIENT_VERSION):
ex_msg = (_LE("Invalid hplefthandclient version found ("
"%(found)s). Version %(minimum)s or greater "
"required.")
if client_version < MIN_CLIENT_VERSION:
ex_msg = (_("Invalid hplefthandclient version found ("
"%(found)s). Version %(minimum)s or greater "
"required.")
% {'found': client_version,
'minimum': MIN_CLIENT_VERSION})
LOG.error(ex_msg)

View File

@ -457,7 +457,7 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
client_options[client_key] = client_value
except KeyError:
LOG.error(_LE("'%(value)s' is an invalid value "
"for extra spec '%(key)s'") %
"for extra spec '%(key)s'"),
{'value': value, 'key': key})
return client_options
@ -477,10 +477,10 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
chap_secret = server_info['chapTargetSecret']
if not chap_enabled and chap_secret:
LOG.warning(_LW('CHAP secret exists for host %s but CHAP is '
'disabled') % connector['host'])
'disabled'), connector['host'])
if chap_enabled and chap_secret is None:
LOG.warning(_LW('CHAP is enabled, but server secret not '
'configured on server %s') % connector['host'])
'configured on server %s'), connector['host'])
return server_info
except hpexceptions.HTTPNotFound:
# server does not exist, so create one
@ -522,10 +522,10 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
dictionary of its reported capabilities.
"""
LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s' % {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host})
'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host})
client = self._login()
try:
volume_info = client.getVolumeByName(volume['name'])
@ -536,7 +536,7 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
new_extra_specs,
extra_specs_key_map.keys())
LOG.debug('LH specs=%(specs)s' % {'specs': lh_extra_specs})
LOG.debug('LH specs=%(specs)s', {'specs': lh_extra_specs})
# only set the ones that have changed
changed_extra_specs = {}
@ -553,7 +553,7 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
except hpexceptions.HTTPNotFound:
raise exception.VolumeNotFound(volume_id=volume['id'])
except Exception as ex:
LOG.warning("%s" % ex)
LOG.warning(_LW("%s"), ex)
finally:
self._logout(client)
@ -581,7 +581,7 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
dictionary of its reported capabilities.
"""
LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s, '
'cluster=%(cluster)s' % {
'cluster=%(cluster)s', {
'id': volume['id'],
'host': host,
'cluster': self.configuration.hplefthand_clustername})
@ -596,24 +596,24 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
try:
# get the cluster info, if it exists and compare
cluster_info = client.getClusterByName(cluster)
LOG.debug('Cluster info: %s' % cluster_info)
LOG.debug('Cluster info: %s', cluster_info)
virtual_ips = cluster_info['virtualIPAddresses']
if driver != self.__class__.__name__:
LOG.info(_LI("Cannot provide backend assisted migration for "
"volume: %s because volume is from a different "
"backend.") % volume['name'])
"backend."), volume['name'])
return false_ret
if vip != virtual_ips[0]['ipV4Address']:
LOG.info(_LI("Cannot provide backend assisted migration for "
"volume: %s because cluster exists in different "
"management group.") % volume['name'])
"management group."), volume['name'])
return false_ret
except hpexceptions.HTTPNotFound:
LOG.info(_LI("Cannot provide backend assisted migration for "
"volume: %s because cluster exists in different "
"management group.") % volume['name'])
"management group."), volume['name'])
return false_ret
finally:
self._logout(client)
@ -621,24 +621,24 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
client = self._login()
try:
volume_info = client.getVolumeByName(volume['name'])
LOG.debug('Volume info: %s' % volume_info)
LOG.debug('Volume info: %s', volume_info)
# can't migrate if server is attached
if volume_info['iscsiSessions'] is not None:
LOG.info(_LI("Cannot provide backend assisted migration "
"for volume: %s because the volume has been "
"exported.") % volume['name'])
"exported."), volume['name'])
return false_ret
# can't migrate if volume has snapshots
snap_info = client.getVolume(
volume_info['id'],
'fields=snapshots,snapshots[resource[members[name]]]')
LOG.debug('Snapshot info: %s' % snap_info)
LOG.debug('Snapshot info: %s', snap_info)
if snap_info['snapshots']['resource'] is not None:
LOG.info(_LI("Cannot provide backend assisted migration "
"for volume: %s because the volume has "
"snapshots.") % volume['name'])
"snapshots."), volume['name'])
return false_ret
options = {'clusterName': cluster}
@ -646,10 +646,10 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
except hpexceptions.HTTPNotFound:
LOG.info(_LI("Cannot provide backend assisted migration for "
"volume: %s because volume does not exist in this "
"management group.") % volume['name'])
"management group."), volume['name'])
return false_ret
except hpexceptions.HTTPServerError as ex:
LOG.error(ex)
LOG.error(_LE("Exception: %s"), ex)
return false_ret
finally:
self._logout(client)

View File

@ -147,7 +147,7 @@ class SanDriver(driver.VolumeDriver):
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error running SSH command: %s") % command)
LOG.error(_LE("Error running SSH command: %s"), command)
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a logical volume."""

View File

@ -72,7 +72,7 @@ class ScalityDriver(driver.VolumeDriver):
config = self.configuration.scality_sofs_config
if not config:
msg = _("Value required for 'scality_sofs_config'")
LOG.warn(msg)
LOG.warning(msg)
raise exception.VolumeBackendAPIException(data=msg)
# config can be a file path or a URL, check it
@ -83,13 +83,13 @@ class ScalityDriver(driver.VolumeDriver):
urllib2.urlopen(config, timeout=5).close()
except urllib2.URLError as e:
msg = _("Cannot access 'scality_sofs_config': %s") % e
LOG.warn(msg)
LOG.warning(msg)
raise exception.VolumeBackendAPIException(data=msg)
# mount.sofs must be installed
if not os.access('/sbin/mount.sofs', os.X_OK):
msg = _("Cannot execute /sbin/mount.sofs")
LOG.warn(msg)
LOG.warning(msg)
raise exception.VolumeBackendAPIException(data=msg)
@lockutils.synchronized('mount-sofs', 'cinder-sofs', external=True)
@ -104,7 +104,7 @@ class ScalityDriver(driver.VolumeDriver):
run_as_root=True)
if not os.path.isdir(sysdir):
msg = _("Cannot mount Scality SOFS, check syslog for errors")
LOG.warn(msg)
LOG.warning(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _size_bytes(self, size_in_g):
@ -135,7 +135,7 @@ class ScalityDriver(driver.VolumeDriver):
self.configuration.scality_sofs_volume_dir)
if not os.path.isdir(voldir):
msg = _("Cannot find volume dir for Scality SOFS at '%s'") % voldir
LOG.warn(msg)
LOG.warning(msg)
raise exception.VolumeBackendAPIException(data=msg)
def create_volume(self, volume):
@ -284,7 +284,7 @@ class ScalityDriver(driver.VolumeDriver):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup['volume_id'])
volume_local_path = self.local_path(volume)
LOG.info(_LI('Begin backup of volume %s.') % volume['name'])
LOG.info(_LI('Begin backup of volume %s.'), volume['name'])
qemu_img_info = image_utils.qemu_img_info(volume_local_path)
if qemu_img_info.file_format != 'raw':
@ -303,7 +303,7 @@ class ScalityDriver(driver.VolumeDriver):
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
LOG.info(_LI('Restoring backup %(backup)s to volume %(volume)s.') %
LOG.info(_LI('Restoring backup %(backup)s to volume %(volume)s.'),
{'backup': backup['id'], 'volume': volume['name']})
volume_local_path = self.local_path(volume)
with utils.temporary_chown(volume_local_path):

View File

@ -228,8 +228,8 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
def delete_volume(self, volume):
"""Deletes a logical volume."""
if not volume['provider_location']:
LOG.warn(_LW('Volume %s does not have provider_location '
'specified, skipping.'), volume['name'])
LOG.warning(_LW('Volume %s does not have provider_location '
'specified, skipping.'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
@ -239,7 +239,7 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
if os.path.exists(mounted_path):
self._delete(mounted_path)
else:
LOG.debug("Skipping deletion of volume %s as it does not exist." %
LOG.debug("Skipping deletion of volume %s as it does not exist.",
mounted_path)
info_path = self._local_path_volume_info(volume)
@ -264,7 +264,7 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
volume_path = self.local_path(volume)
volume_size = volume['size']
LOG.debug("Creating new volume at %s." % volume_path)
LOG.debug("Creating new volume at %s.", volume_path)
if os.path.exists(volume_path):
msg = _('File already exists at %s.') % volume_path
@ -335,7 +335,7 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
raise exception.SmbfsNoSuitableShareFound(
volume_size=volume_size_in_gib)
LOG.debug('Selected %s as target smbfs share.' % target_share)
LOG.debug('Selected %s as target smbfs share.', target_share)
return target_share
@ -365,13 +365,13 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
used = (total_size - total_available) / total_size
if used > used_ratio:
LOG.debug('%s is above smbfs_used_ratio.' % smbfs_share)
LOG.debug('%s is above smbfs_used_ratio.', smbfs_share)
return False
if apparent_available <= requested_volume_size:
LOG.debug('%s is above smbfs_oversub_ratio.' % smbfs_share)
LOG.debug('%s is above smbfs_oversub_ratio.', smbfs_share)
return False
if total_allocated / total_size >= oversub_ratio:
LOG.debug('%s reserved space is above smbfs_oversub_ratio.' %
LOG.debug('%s reserved space is above smbfs_oversub_ratio.',
smbfs_share)
return False
return True
@ -407,7 +407,7 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
volume_path = self.local_path(volume)
self._check_extend_volume_support(volume, size_gb)
LOG.info(_LI('Resizing file to %sG...') % size_gb)
LOG.info(_LI('Resizing file to %sG...'), size_gb)
self._do_extend_volume(volume_path, size_gb, volume['name'])
@ -458,7 +458,7 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
"""
LOG.debug("Snapshot: %(snap)s, volume: %(vol)s, "
"volume_size: %(size)s" %
"volume_size: %(size)s",
{'snap': snapshot['id'],
'vol': volume['id'],
'size': volume_size})
@ -477,7 +477,7 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
snapshot['volume']['name'])
path_to_snap_img = os.path.join(vol_dir, img_info.backing_file)
LOG.debug("Will copy from snapshot at %s" % path_to_snap_img)
LOG.debug("Will copy from snapshot at %s", path_to_snap_img)
image_utils.convert_image(path_to_snap_img,
self.local_path(volume),


@ -86,8 +86,9 @@ def retry(exc_tuple, tries=5, delay=1, backoff=2):
time.sleep(_delay)
_tries -= 1
_delay *= backoff
LOG.debug('Retrying %s, (%s attempts remaining)...' %
(args, _tries))
LOG.debug('Retrying %(args)s, %(tries)s attempts '
'remaining...',
{'args': args, 'tries': _tries})
# NOTE(jdg): Don't log the params passed here
# some cmds like createAccount will have sensitive
# info in the params, grab only the second tuple
@ -327,7 +328,7 @@ class SolidFireDriver(san.SanISCSIDriver):
if not found_volume:
LOG.error(_LE('Failed to retrieve volume SolidFire-'
'ID: %s in get_by_account!') % sf_volume_id)
'ID: %s in get_by_account!'), sf_volume_id)
raise exception.VolumeNotFound(volume_id=sf_volume_id)
model_update = {}
@ -447,7 +448,7 @@ class SolidFireDriver(san.SanISCSIDriver):
if len(presets) > 0:
if len(presets) > 1:
LOG.warning(_LW('More than one valid preset was '
'detected, using %s') % presets[0])
'detected, using %s'), presets[0])
qos = self.sf_qos_dict[presets[0]]
else:
# look for explicit settings
@ -499,9 +500,9 @@ class SolidFireDriver(san.SanISCSIDriver):
if uuid in v['name'] or uuid in alt_id:
found_count += 1
sf_volref = v
LOG.debug("Mapped SolidFire volumeID %s "
"to cinder ID %s.",
v['volumeID'], uuid)
LOG.debug("Mapped SolidFire volumeID %(volume_id)s "
"to cinder ID %(uuid)s.",
{'volume_id': v['volumeID'], 'uuid': uuid})
if found_count == 0:
# NOTE(jdg): Previously we would raise here, but there are cases
@ -510,7 +511,7 @@ class SolidFireDriver(san.SanISCSIDriver):
LOG.error(_LE("Volume %s, not found on SF Cluster."), uuid)
if found_count > 1:
LOG.error(_LE("Found %(count)s volumes mapped to id: %(uuid)s.") %
LOG.error(_LE("Found %(count)s volumes mapped to id: %(uuid)s."),
{'count': found_count,
'uuid': uuid})
raise exception.DuplicateSfVolumeNames(vol_name=uuid)
@ -749,7 +750,7 @@ class SolidFireDriver(san.SanISCSIDriver):
if sfaccount is None:
LOG.error(_LE("Account for Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"delete_volume operation!") % volume['id'])
"delete_volume operation!"), volume['id'])
LOG.error(_LE("This usually means the volume was never "
"successfully created."))
return
@ -1030,8 +1031,8 @@ class SolidFireDriver(san.SanISCSIDriver):
sfid = external_ref.get('source-id', None)
sfname = external_ref.get('name', None)
if sfid is None:
raise exception.SolidFireAPIException("Manage existing volume "
"requires 'source-id'.")
raise exception.SolidFireAPIException(_("Manage existing volume "
"requires 'source-id'."))
# First get the volume on the SF cluster (MUST be active)
params = {'startVolumeID': sfid,
@ -1086,8 +1087,8 @@ class SolidFireDriver(san.SanISCSIDriver):
sfid = external_ref.get('source-id', None)
if sfid is None:
raise exception.SolidFireAPIException("Manage existing get size "
"requires 'id'.")
raise exception.SolidFireAPIException(_("Manage existing get size "
"requires 'id'."))
params = {'startVolumeID': int(sfid),
'limit': 1}
@ -1105,9 +1106,9 @@ class SolidFireDriver(san.SanISCSIDriver):
if sfaccount is None:
LOG.error(_LE("Account for Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"unmanage operation!") % volume['id'])
raise exception.SolidFireAPIException("Failed to find account "
"for volume.")
"unmanage operation!"), volume['id'])
raise exception.SolidFireAPIException(_("Failed to find account "
"for volume."))
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
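
The SolidFire hunks also wrap bare exception strings with _(): text raised to the API caller is user-facing even though it never passes through a logger. A sketch with the exception class and marker stubbed:

    def _(msg):
        return msg  # stand-in for cinder.i18n._

    class SolidFireAPIException(Exception):
        """Stand-in for cinder.exception.SolidFireAPIException."""

    def manage_existing(external_ref):
        sfid = external_ref.get('source-id')
        if sfid is None:
            # Marked with _() so the message can be translated before
            # it reaches the API caller.
            raise SolidFireAPIException(_("Manage existing volume "
                                          "requires 'source-id'."))
        return sfid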


@ -87,9 +87,9 @@ class retry(object):
for attempt in xrange(self._count):
if attempt != 0:
LOG.warning(_LW('Retrying failed call to %(func)s, '
'attempt %(attempt)i.')
% {'func': func_name,
'attempt': attempt})
'attempt %(attempt)i.'),
{'func': func_name,
'attempt': attempt})
try:
return fun(*args, **kwargs)
except self._exceptions:
@ -127,9 +127,9 @@ class LVM(lvm.LVM):
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error activating Volume Group'))
LOG.error(_LE('Cmd :%s') % err.cmd)
LOG.error(_LE('StdOut :%s') % err.stdout)
LOG.error(_LE('StdErr :%s') % err.stderr)
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def deactivate_vg(self):
@ -147,9 +147,9 @@ class LVM(lvm.LVM):
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error deactivating Volume Group'))
LOG.error(_LE('Cmd :%s') % err.cmd)
LOG.error(_LE('StdOut :%s') % err.stdout)
LOG.error(_LE('StdErr :%s') % err.stderr)
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def destroy_vg(self):
@ -165,9 +165,9 @@ class LVM(lvm.LVM):
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error destroying Volume Group'))
LOG.error(_LE('Cmd :%s') % err.cmd)
LOG.error(_LE('StdOut :%s') % err.stdout)
LOG.error(_LE('StdErr :%s') % err.stderr)
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def pv_resize(self, pv_name, new_size_str):
@ -183,9 +183,9 @@ class LVM(lvm.LVM):
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error resizing Physical Volume'))
LOG.error(_LE('Cmd :%s') % err.cmd)
LOG.error(_LE('StdOut :%s') % err.stdout)
LOG.error(_LE('StdErr :%s') % err.stderr)
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def extend_thin_pool(self):
@ -209,9 +209,9 @@ class LVM(lvm.LVM):
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error extending thin provisioning pool'))
LOG.error(_LE('Cmd :%s') % err.cmd)
LOG.error(_LE('StdOut :%s') % err.stdout)
LOG.error(_LE('StdErr :%s') % err.stderr)
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
@ -577,7 +577,8 @@ class SRBDriver(driver.VolumeDriver):
def _attach_file(self, volume):
name = self._get_volname(volume)
devname = self._device_name(volume)
LOG.debug('Attaching volume %s as %s', name, devname)
LOG.debug('Attaching volume %(name)s as %(devname)s',
{'name': name, 'devname': devname})
count = self._get_attached_count(volume)
if count == 0:
@ -621,8 +622,8 @@ class SRBDriver(driver.VolumeDriver):
self._do_deactivate(volume, vg)
except putils.ProcessExecutionError:
LOG.warning(_LW('All attempts to recover failed detach '
'of %(volume)s failed.')
% {'volume': volname})
'of %(volume)s failed.'),
{'volume': volname})
@lockutils.synchronized('devices', 'cinder-srb-')
def _detach_file(self, volume):
@ -634,9 +635,8 @@ class SRBDriver(driver.VolumeDriver):
count = self._get_attached_count(volume)
if count > 1:
LOG.info(_LI('Reference count of %(volume)s is %(count)d, '
'not detaching.')
% {'volume': volume['name'],
'count': count})
'not detaching.'),
{'volume': volume['name'], 'count': count})
return
message = (_('Could not detach volume %(vol)s from device %(dev)s.')
@ -649,18 +649,15 @@ class SRBDriver(driver.VolumeDriver):
if vg is not None:
self._do_deactivate(volume, vg)
except putils.ProcessExecutionError:
msg = _LE('Could not deactivate volume groupe %s')\
% (self._get_volname(volume))
LOG.error(msg)
LOG.error(_LE('Could not deactivate volume group %s'),
self._get_volname(volume))
raise
try:
self._do_detach(volume, vg=vg)
except putils.ProcessExecutionError:
msg = _LE('Could not detach volume '
'%(vol)s from device %(dev)s.') \
% {'vol': name, 'dev': devname}
LOG.error(msg)
LOG.error(_LE('Could not detach volume %(vol)s from device '
'%(dev)s.'), {'vol': name, 'dev': devname})
raise
self._decrement_attached_count(volume)
@ -748,8 +745,8 @@ class SRBDriver(driver.VolumeDriver):
self._destroy_lvm(volume)
self._detach_file(volume)
LOG.debug('Deleting volume %s, attached=%s',
volume['name'], attached)
LOG.debug('Deleting volume %(volume_name)s, attached=%(attached)s',
{'volume_name': volume['name'], 'attached': attached})
self._destroy_file(volume)
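
The SRB hunks convert multi-argument log calls to named %(key)s placeholders fed from a dict, the form used whenever a message carries more than one replacement. A standalone comparison (values are illustrative):

    import logging

    logging.basicConfig(level=logging.DEBUG)
    LOG = logging.getLogger(__name__)

    name, devname = 'volume-0001', '/dev/srb0'  # illustrative values

    # Positional placeholders: order is fixed and easy to transpose.
    LOG.debug('Attaching volume %s as %s', name, devname)

    # Named placeholders: each value is keyed, so the mapping is
    # explicit and a translated string may reorder them freely.
    LOG.debug('Attaching volume %(name)s as %(devname)s',
              {'name': name, 'devname': devname})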


@ -168,7 +168,7 @@ class V6000Common(object):
"""
lun_type = '0'
LOG.debug("Creating LUN %(name)s, %(size)s GB." %
LOG.debug("Creating LUN %(name)s, %(size)s GB.",
{'name': volume['name'], 'size': volume['size']})
if self.config.san_thin_provision:
@ -188,7 +188,7 @@ class V6000Common(object):
LOG.debug("Lun %s already exists, continuing.", volume['id'])
except Exception:
LOG.warn(_LW("Lun create for %s failed!"), volume['id'])
LOG.warning(_LW("Lun create for %s failed!"), volume['id'])
raise
@utils.synchronized('vmem-lun')
@ -213,8 +213,8 @@ class V6000Common(object):
LOG.debug("Lun %s already deleted, continuing.", volume['id'])
except exception.ViolinBackendErrExists:
LOG.warn(_LW("Lun %s has dependent snapshots, skipping."),
volume['id'])
LOG.warning(_LW("Lun %s has dependent snapshots, skipping."),
volume['id'])
raise exception.VolumeIsBusy(volume_name=volume['id'])
except Exception:
@ -232,7 +232,7 @@ class V6000Common(object):
volume -- volume object provided by the Manager
new_size -- new (increased) size in GB to be applied
"""
LOG.debug("Extending lun %(id)s, from %(size)s to %(new_size)s GB." %
LOG.debug("Extending lun %(id)s, from %(size)s to %(new_size)s GB.",
{'id': volume['id'], 'size': volume['size'],
'new_size': new_size})


@ -70,7 +70,7 @@ class V6000FCDriver(driver.FibreChannelDriver):
self.common = v6000_common.V6000Common(self.configuration)
self.lookup_service = fczm_utils.create_lookup_service()
LOG.info(_LI("Initialized driver %(name)s version: %(vers)s.") %
LOG.info(_LI("Initialized driver %(name)s version: %(vers)s."),
{'name': self.__class__.__name__, 'vers': self.VERSION})
def do_setup(self, context):
@ -169,8 +169,8 @@ class V6000FCDriver(driver.FibreChannelDriver):
properties['target_lun'] = lun_id
properties['initiator_target_map'] = init_targ_map
LOG.debug("Return FC data for zone addition: %(properties)s."
% {'properties': properties})
LOG.debug("Return FC data for zone addition: %(properties)s.",
{'properties': properties})
return {'driver_volume_type': 'fibre_channel', 'data': properties}
@ -193,8 +193,8 @@ class V6000FCDriver(driver.FibreChannelDriver):
properties['target_wwn'] = target_wwns
properties['initiator_target_map'] = init_targ_map
LOG.debug("Return FC data for zone deletion: %(properties)s."
% {'properties': properties})
LOG.debug("Return FC data for zone deletion: %(properties)s.",
{'properties': properties})
return {'driver_volume_type': 'fibre_channel', 'data': properties}
@ -231,7 +231,7 @@ class V6000FCDriver(driver.FibreChannelDriver):
else:
raise exception.Error(_("No initiators found, cannot proceed"))
LOG.debug("Exporting lun %s." % volume['id'])
LOG.debug("Exporting lun %s.", volume['id'])
try:
self.common._send_cmd_and_verify(
@ -361,7 +361,7 @@ class V6000FCDriver(driver.FibreChannelDriver):
v = self.common.vip
wwpns = self._convert_wwns_openstack_to_vmem(connector['wwpns'])
LOG.debug("Adding initiators %(wwpns)s to igroup %(igroup)s." %
LOG.debug("Adding initiators %(wwpns)s to igroup %(igroup)s.",
{'wwpns': wwpns, 'igroup': igroup})
resp = v.igroup.add_initiators(igroup, wwpns)
@ -439,14 +439,14 @@ class V6000FCDriver(driver.FibreChannelDriver):
if bn1 in resp:
total_gb = resp[bn1] / units.Gi
else:
LOG.warn(_LW("Failed to receive update for total_gb stat!"))
LOG.warning(_LW("Failed to receive update for total_gb stat!"))
if 'total_capacity_gb' in self.stats:
total_gb = self.stats['total_capacity_gb']
if bn2 in resp:
free_gb = resp[bn2] / units.Gi
else:
LOG.warn(_LW("Failed to receive update for free_gb stat!"))
LOG.warning(_LW("Failed to receive update for free_gb stat!"))
if 'free_capacity_gb' in self.stats:
free_gb = self.stats['free_capacity_gb']
@ -461,7 +461,7 @@ class V6000FCDriver(driver.FibreChannelDriver):
data['free_capacity_gb'] = free_gb
for i in data:
LOG.debug("stat update: %(name)s=%(data)s." %
LOG.debug("stat update: %(name)s=%(data)s.",
{'name': i, 'data': data[i]})
self.stats = data


@ -71,7 +71,7 @@ class V6000ISCSIDriver(driver.ISCSIDriver):
self.configuration.append_config_values(san.san_opts)
self.common = v6000_common.V6000Common(self.configuration)
LOG.info(_LI("Initialized driver %(name)s version: %(vers)s.") %
LOG.info(_LI("Initialized driver %(name)s version: %(vers)s."),
{'name': self.__class__.__name__, 'vers': self.VERSION})
def do_setup(self, context):
@ -309,7 +309,7 @@ class V6000ISCSIDriver(driver.ISCSIDriver):
target_name = self._get_short_name(volume['id'])
LOG.debug("Exporting lun %s." % volume['id'])
LOG.debug("Exporting lun %s.", volume['id'])
try:
self.common._send_cmd_and_verify(
@ -468,14 +468,14 @@ class V6000ISCSIDriver(driver.ISCSIDriver):
if bn1 in resp:
total_gb = resp[bn1] / units.Gi
else:
LOG.warn(_LW("Failed to receive update for total_gb stat!"))
LOG.warning(_LW("Failed to receive update for total_gb stat!"))
if 'total_capacity_gb' in self.stats:
total_gb = self.stats['total_capacity_gb']
if bn2 in resp:
free_gb = resp[bn2] / units.Gi
else:
LOG.warn(_LW("Failed to receive update for free_gb stat!"))
LOG.warning(_LW("Failed to receive update for free_gb stat!"))
if 'free_capacity_gb' in self.stats:
free_gb = self.stats['free_capacity_gb']
@ -490,7 +490,7 @@ class V6000ISCSIDriver(driver.ISCSIDriver):
data['free_capacity_gb'] = free_gb
for i in data:
LOG.debug("stat update: %(name)s=%(data)s." %
LOG.debug("stat update: %(name)s=%(data)s.",
{'name': i, 'data': data[i]})
self.stats = data
@ -563,7 +563,7 @@ class V6000ISCSIDriver(driver.ISCSIDriver):
if ret_dict:
hostname = ret_dict.items()[0][1]
else:
LOG.debug("Unable to fetch gateway hostname for %s." % mg_to_query)
LOG.debug("Unable to fetch gateway hostname for %s.", mg_to_query)
return hostname


@ -223,8 +223,8 @@ class DatastoreSelector(object):
except exceptions.VimException:
# TODO(vbala) volumeops.get_dss_rp shouldn't throw VimException
# for empty datastore list.
LOG.warn(_LW("Unable to fetch datastores connected "
"to host %s."), host_ref, exc_info=True)
LOG.warning(_LW("Unable to fetch datastores connected "
"to host %s."), host_ref, exc_info=True)
continue
if not datastores:


@ -201,10 +201,10 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
VERSION = '1.4.0'
def _do_deprecation_warning(self):
LOG.warn(_LW('The VMware ESX VMDK driver is now deprecated '
'and will be removed in the Juno release. The VMware '
'vCenter VMDK driver will remain and continue to be '
'supported.'))
LOG.warning(_LW('The VMware ESX VMDK driver is now deprecated '
'and will be removed in the Juno release. The VMware '
'vCenter VMDK driver will remain and continue to be '
'supported.'))
def __init__(self, *args, **kwargs):
super(VMwareEsxVmdkDriver, self).__init__(*args, **kwargs)
@ -477,9 +477,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
LOG.error(msg, storage_profile)
raise exceptions.VimException(msg % storage_profile)
elif storage_profile:
LOG.warn(_LW("Ignoring storage profile %s requirement for this "
"volume since policy based placement is "
"disabled."), storage_profile)
LOG.warning(_LW("Ignoring storage profile %s requirement for this "
"volume since policy based placement is "
"disabled."), storage_profile)
size_bytes = volume['size'] * units.Gi
datastore_summary = self._select_datastore_summary(size_bytes,
@ -639,8 +639,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
if not backing:
# Create a backing in case it does not exist. It is a bad use
# case to boot from an empty volume.
LOG.warn(_LW("Trying to boot from an empty volume: %s."),
volume['name'])
LOG.warning(_LW("Trying to boot from an empty volume: %s."),
volume['name'])
# Create backing
backing = self._create_backing(volume)
@ -912,10 +912,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
self.volumeops.delete_vmdk_file(
descriptor_ds_file_path, dc_ref)
except exceptions.VimException:
LOG.warn(_LW("Error occurred while deleting temporary "
"disk: %s."),
descriptor_ds_file_path,
exc_info=True)
LOG.warning(_LW("Error occurred while deleting temporary "
"disk: %s."),
descriptor_ds_file_path, exc_info=True)
def _copy_temp_virtual_disk(self, src_dc_ref, src_path, dest_dc_ref,
dest_path):
@ -1044,10 +1043,10 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
self.volumeops.delete_file(
path.get_descriptor_ds_file_path(), dc_ref)
except exceptions.VimException:
LOG.warn(_LW("Error occurred while deleting "
"descriptor: %s."),
path.get_descriptor_ds_file_path(),
exc_info=True)
LOG.warning(_LW("Error occurred while deleting "
"descriptor: %s."),
path.get_descriptor_ds_file_path(),
exc_info=True)
if dest_path != path:
# Copy temporary disk to given destination.
@ -1077,9 +1076,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
try:
self.volumeops.delete_backing(backing)
except exceptions.VimException:
LOG.warn(_LW("Error occurred while deleting backing: %s."),
backing,
exc_info=True)
LOG.warning(_LW("Error occurred while deleting backing: %s."),
backing, exc_info=True)
def _create_volume_from_non_stream_optimized_image(
self, context, volume, image_service, image_id,
@ -1438,8 +1436,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
"""
# Can't attempt retype if the volume is in use.
if self._in_use(volume):
LOG.warn(_LW("Volume: %s is in use, can't retype."),
volume['name'])
LOG.warning(_LW("Volume: %s is in use, can't retype."),
volume['name'])
return False
# If the backing doesn't exist, retype is NOP.
@ -1507,9 +1505,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
best_candidate = self.ds_sel.select_datastore(req)
if not best_candidate:
# No candidate datastores; can't retype.
LOG.warn(_LW("There are no datastores matching new "
"requirements; can't retype volume: %s."),
volume['name'])
LOG.warning(_LW("There are no datastores matching new "
"requirements; can't retype volume: %s."),
volume['name'])
return False
(host, rp, summary) = best_candidate
@ -1559,12 +1557,13 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
self.volumeops.rename_backing(backing,
volume['name'])
except exceptions.VimException:
LOG.warn(_LW("Changing backing: %(backing)s "
"name from %(new_name)s to "
"%(old_name)s failed."),
{'backing': backing,
'new_name': tmp_name,
'old_name': volume['name']})
LOG.warning(_LW("Changing backing: "
"%(backing)s name from "
"%(new_name)s to %(old_name)s "
"failed."),
{'backing': backing,
'new_name': tmp_name,
'old_name': volume['name']})
# Update the backing's storage profile if needed.
if need_profile_change:
@ -1802,12 +1801,12 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
self.volumeops.rename_backing(backing,
volume['name'])
except exceptions.VimException:
LOG.warn(_LW("Cannot undo volume rename; old name "
"was %(old_name)s and new name is "
"%(new_name)s."),
{'old_name': volume['name'],
'new_name': tmp_backing_name},
exc_info=True)
LOG.warning(_LW("Cannot undo volume rename; old "
"name was %(old_name)s and new "
"name is %(new_name)s."),
{'old_name': volume['name'],
'new_name': tmp_backing_name},
exc_info=True)
finally:
# Delete the temporary backing.
self._delete_temp_backing(src)
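
Like the rest of the change, the VMDK hunks also rename LOG.warn to LOG.warning; Logger.warn is only a deprecated alias for Logger.warning in the standard library. A two-line illustration:

    import logging

    LOG = logging.getLogger(__name__)

    LOG.warn("old spelling")     # deprecated alias, avoid
    LOG.warning("new spelling")  # supported method used by this change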


@ -93,11 +93,10 @@ class WindowsRemoteFsClient(remotefs.RemoteFsClient):
if len(mappings) > 0:
if os.path.exists(smbfs_share):
LOG.debug('Share already mounted: %s' % smbfs_share)
LOG.debug('Share already mounted: %s', smbfs_share)
return True
else:
LOG.debug('Share exists but is unavailable: %s '
% smbfs_share)
LOG.debug('Share exists but is unavailable: %s ', smbfs_share)
for mapping in mappings:
# Due to a bug in the WMI module, getting the output of
# methods returning None will raise an AttributeError
@ -115,7 +114,7 @@ class WindowsRemoteFsClient(remotefs.RemoteFsClient):
options.get('pass'))
try:
LOG.info(_LI('Mounting share: %s') % smbfs_share)
LOG.info(_LI('Mounting share: %s'), smbfs_share)
self.smb_conn.Msft_SmbMapping.Create(**smb_opts)
except wmi.x_wmi as exc:
err_msg = (_(
@ -123,7 +122,7 @@ class WindowsRemoteFsClient(remotefs.RemoteFsClient):
'WMI exception: %(wmi_exc)s'
'Options: %(options)s') % {'smbfs_share': smbfs_share,
'options': smb_opts,
'wmi_exc': exc})
'wmi_exc': six.text_type(exc)})
raise exception.VolumeBackendAPIException(data=err_msg)
def get_capacity_info(self, smbfs_share):
@ -137,7 +136,7 @@ class WindowsRemoteFsClient(remotefs.RemoteFsClient):
ctypes.pointer(total_bytes),
ctypes.pointer(free_bytes))
if retcode == 0:
LOG.error(_LE("Could not get share %s capacity info.") %
LOG.error(_LE("Could not get share %s capacity info."),
smbfs_share)
return 0, 0
return total_bytes.value, free_bytes.value


@ -23,7 +23,7 @@ from oslo_log import log as logging
from oslo_utils import units
from cinder import exception
from cinder.i18n import _
from cinder.i18n import _, _LI
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder import utils
@ -103,8 +103,10 @@ class WindowsSmbfsDriver(smbfs.SmbfsDriver):
smbfs_share)
total_allocated = self._get_total_allocated(smbfs_share)
return_value = [total_size, total_available, total_allocated]
LOG.info('Smb share %s Total size %s Total allocated %s'
% (smbfs_share, total_size, total_allocated))
LOG.info(_LI('Smb share %(share)s Total size %(size)s '
'Total allocated %(allocated)s'),
{'share': smbfs_share, 'size': total_size,
'allocated': total_allocated})
return [float(x) for x in return_value]
def _get_total_allocated(self, smbfs_share):
@ -223,7 +225,7 @@ class WindowsSmbfsDriver(smbfs.SmbfsDriver):
"""Copy data from snapshot to destination volume."""
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
"volume_size: %(size)s" %
"volume_size: %(size)s",
{'snap': snapshot['id'],
'vol': volume['id'],
'size': snapshot['volume_size']})


@ -21,6 +21,7 @@ import os
from oslo_config import cfg
from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _, _LI
@ -54,9 +55,10 @@ class WindowsUtils(object):
listen = wt_portal.Listen
except wmi.x_wmi as exc:
err_msg = (_('check_for_setup_error: the state of the WT Portal '
'could not be verified. WMI exception: %s'))
LOG.error(err_msg % exc)
raise exception.VolumeBackendAPIException(data=err_msg % exc)
'could not be verified. WMI exception: %s')
% six.text_type(exc))
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
if not listen:
err_msg = (_('check_for_setup_error: there is no ISCSI traffic '
@ -70,9 +72,10 @@ class WindowsUtils(object):
wt_portal = self._conn_wmi.WT_Portal()[0]
except wmi.x_wmi as exc:
err_msg = (_('get_host_information: the state of the WT Portal '
'could not be verified. WMI exception: %s'))
LOG.error(err_msg % exc)
raise exception.VolumeBackendAPIException(data=err_msg % exc)
'could not be verified. WMI exception: %s')
% six.text_type(exc))
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
(address, port) = (wt_portal.Address, wt_portal.Port)
# Getting the host information
try:
@ -80,8 +83,9 @@ class WindowsUtils(object):
host = hosts[0]
except wmi.x_wmi as exc:
err_msg = (_('get_host_information: the ISCSI target information '
'could not be retrieved. WMI exception: %s'))
LOG.error(err_msg % exc)
'could not be retrieved. WMI exception: %s')
% six.text_type(exc))
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
properties = {}
@ -118,7 +122,7 @@ class WindowsUtils(object):
'target name: %(target)s could not be established. '
'WMI exception: %(wmi_exc)s') %
{'init': initiator_name, 'target': target_name,
'wmi_exc': exc})
'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
@ -133,9 +137,9 @@ class WindowsUtils(object):
except wmi.x_wmi as exc:
err_msg = (_(
'delete_iscsi_target: error when deleting the iscsi target '
'associated with target name: %(target)s . '
'WMI exception: %(wmi_exc)s') % {'target': target_name,
'wmi_exc': exc})
'associated with target name: %(target)s . WMI '
'exception: %(wmi_exc)s') % {'target': target_name,
'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
@ -154,7 +158,8 @@ class WindowsUtils(object):
err_msg = (_(
'create_volume: error when creating the volume name: '
'%(vol_name)s . WMI exception: '
'%(wmi_exc)s') % {'vol_name': vol_name, 'wmi_exc': exc})
'%(wmi_exc)s') % {'vol_name': vol_name,
'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
@ -167,7 +172,7 @@ class WindowsUtils(object):
err_msg = (_("Failed to import disk: %(vhd_path)s. "
"WMI exception: %(exc)s") %
{'vhd_path': vhd_path,
'exc': exc})
'exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
@ -180,7 +185,8 @@ class WindowsUtils(object):
err_msg = (_(
'Error changing disk status: '
'%(vol_name)s . WMI exception: '
'%(wmi_exc)s') % {'vol_name': vol_name, 'wmi_exc': exc})
'%(wmi_exc)s') % {'vol_name': vol_name,
'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
@ -190,7 +196,7 @@ class WindowsUtils(object):
disk = self._conn_wmi.WT_Disk(Description=vol_name)
if not disk:
LOG.debug('Skipping deleting disk %s as it does not '
'exist.' % vol_name)
'exist.', vol_name)
return
wt_disk = disk[0]
wt_disk.Delete_()
@ -203,7 +209,8 @@ class WindowsUtils(object):
err_msg = (_(
'delete_volume: error when deleting the volume name: '
'%(vol_name)s . WMI exception: '
'%(wmi_exc)s') % {'vol_name': vol_name, 'wmi_exc': exc})
'%(wmi_exc)s') % {'vol_name': vol_name,
'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
@ -223,7 +230,8 @@ class WindowsUtils(object):
err_msg = (_(
'create_snapshot: error when creating the snapshot name: '
'%(vol_name)s . WMI exception: '
'%(wmi_exc)s') % {'vol_name': snapshot_name, 'wmi_exc': exc})
'%(wmi_exc)s') % {'vol_name': snapshot_name,
'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
@ -248,10 +256,10 @@ class WindowsUtils(object):
except wmi.x_wmi as exc:
err_msg = (_(
'create_volume_from_snapshot: error when creating the volume '
'name: %(vol_name)s from snapshot name: %(snap_name)s. '
'WMI exception: %(wmi_exc)s') % {'vol_name': vol_name,
'snap_name': snap_name,
'wmi_exc': exc})
'name: %(vol_name)s from snapshot name: %(snap_name)s. WMI '
'exception: %(wmi_exc)s') % {'vol_name': vol_name,
'snap_name': snap_name,
'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
@ -264,7 +272,8 @@ class WindowsUtils(object):
err_msg = (_(
'delete_snapshot: error when deleting the snapshot name: '
'%(snap_name)s . WMI exception: '
'%(wmi_exc)s') % {'snap_name': snap_name, 'wmi_exc': exc})
'%(wmi_exc)s') % {'snap_name': snap_name,
'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
@ -278,7 +287,8 @@ class WindowsUtils(object):
err_msg = (_(
'create_iscsi_target: error when creating iscsi target: '
'%(tar_name)s . WMI exception: '
'%(wmi_exc)s') % {'tar_name': target_name, 'wmi_exc': exc})
'%(wmi_exc)s') % {'tar_name': target_name,
'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
else:
@ -291,7 +301,7 @@ class WindowsUtils(object):
host = self._conn_wmi.WT_Host(HostName=target_name)
if not host:
LOG.debug('Skipping removing target %s as it does not '
'exist.' % target_name)
'exist.', target_name)
return
wt_host = host[0]
wt_host.RemoveAllWTDisks()
@ -300,7 +310,8 @@ class WindowsUtils(object):
err_msg = (_(
'remove_iscsi_target: error when deleting iscsi target: '
'%(tar_name)s . WMI exception: '
'%(wmi_exc)s') % {'tar_name': target_name, 'wmi_exc': exc})
'%(wmi_exc)s') % {'tar_name': target_name,
'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
@ -314,10 +325,10 @@ class WindowsUtils(object):
except wmi.x_wmi as exc:
err_msg = (_(
'add_disk_to_target: error adding disk associated to volume : '
'%(vol_name)s to the target name: %(tar_name)s '
'. WMI exception: %(wmi_exc)s') % {'tar_name': target_name,
'vol_name': vol_name,
'wmi_exc': exc})
'%(vol_name)s to the target name: %(tar_name)s . WMI '
'exception: %(wmi_exc)s') % {'tar_name': target_name,
'vol_name': vol_name,
'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
@ -370,16 +381,16 @@ class WindowsUtils(object):
wt_disk.Extend(additional_size)
except wmi.x_wmi as exc:
err_msg = (_(
'extend: error when extending the volume: %(vol_name)s '
'.WMI exception: %(wmi_exc)s') % {'vol_name': vol_name,
'wmi_exc': exc})
'extend: error when extending the volume: %(vol_name)s .WMI '
'exception: %(wmi_exc)s') % {'vol_name': vol_name,
'wmi_exc': six.text_type(exc)})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
def local_path(self, volume, format=None):
base_vhd_folder = CONF.windows_iscsi_lun_path
if not os.path.exists(base_vhd_folder):
LOG.debug('Creating folder: %s' % base_vhd_folder)
LOG.debug('Creating folder: %s', base_vhd_folder)
os.makedirs(base_vhd_folder)
if not format:
format = self.get_supported_format()
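
The WindowsUtils hunks pass WMI exceptions through six.text_type() before interpolating them into messages. A sketch of the pattern; the explicit conversion (unicode() on Python 2, str() on Python 3) avoids relying on implicit coercion, which on Python 2 can fail for exceptions carrying non-ASCII data:

    import six

    def format_wmi_error(target_name, exc):
        # Explicit text conversion of the exception object.
        return ('remove_iscsi_target: error when deleting iscsi target: '
                '%(tar_name)s . WMI exception: '
                '%(wmi_exc)s' % {'tar_name': target_name,
                                 'wmi_exc': six.text_type(exc)})

    print(format_wmi_error('iqn.example:target0', ValueError('boom')))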


@ -90,18 +90,15 @@ class XIOISEDriver(object):
LOG.debug("XIOISEDriver check_for_setup_error called.")
# The san_ip must always be set
if self.configuration.san_ip == "":
msg = _LE("san ip must be configured!")
LOG.error(msg)
LOG.error(_LE("san ip must be configured!"))
RaiseXIODriverException()
# The san_login must always be set
if self.configuration.san_login == "":
msg = _LE("san_login must be configured!")
LOG.error(msg)
LOG.error(_LE("san_login must be configured!"))
RaiseXIODriverException()
# The san_password must always be set
if self.configuration.san_password == "":
msg = _LE("san_password must be configured!")
LOG.error(msg)
LOG.error(_LE("san_password must be configured!"))
RaiseXIODriverException()
return
@ -118,8 +115,7 @@ class XIOISEDriver(object):
if status != 200:
# unsuccessful - this is fatal as we need the global id
# to build REST requests.
msg = _LE("Array query failed - No response (%d)!") % status
LOG.error(msg)
LOG.error(_LE("Array query failed - No response (%d)!"), status)
RaiseXIODriverException()
# Successfully fetched QUERY info. Parse out globalid along with
# ipaddress for Controller 1 and Controller 2. We assign primary
@ -134,8 +130,7 @@ class XIOISEDriver(object):
self.configuration.ise_qos = False
capabilities = xml_tree.find('capabilities')
if capabilities is None:
msg = _LE("Array query failed. No capabilities in response!")
LOG.error(msg)
LOG.error(_LE("Array query failed. No capabilities in response!"))
RaiseXIODriverException()
for node in capabilities:
if node.tag != 'capability':
@ -153,22 +148,19 @@ class XIOISEDriver(object):
support['thin-clones'] = True
# Make sure ISE support necessary features
if not support['clones']:
msg = _LE("ISE FW version is not compatible with Openstack!")
LOG.error(msg)
LOG.error(_LE("ISE FW version is not compatible with Openstack!"))
RaiseXIODriverException()
# set up thin provisioning support
self.configuration.san_thin_provision = support['thin-clones']
# Fill in global id, primary and secondary ip addresses
globalid = xml_tree.find('globalid')
if globalid is None:
msg = _LE("Array query failed. No global id in XML response!")
LOG.error(msg)
LOG.error(_LE("Array query failed. No global id in XML response!"))
RaiseXIODriverException()
self.ise_globalid = globalid.text
controllers = xml_tree.find('controllers')
if controllers is None:
msg = _LE("Array query failed. No controllers in response!")
LOG.error(msg)
LOG.error(_LE("Array query failed. No controllers in response!"))
RaiseXIODriverException()
for node in controllers:
if node.tag != 'controller':
@ -207,8 +199,7 @@ class XIOISEDriver(object):
# this call will populate globalid
self._send_query()
if self.ise_globalid is None:
msg = _LE("ISE globalid not set!")
LOG.error(msg)
LOG.error(_LE("ISE globalid not set!"))
RaiseXIODriverException()
return self.ise_globalid
@ -219,8 +210,7 @@ class XIOISEDriver(object):
self.ise_primary_ip = self.configuration.san_ip
if self.ise_primary_ip == '':
# No IP - fatal.
msg = _LE("Primary IP must be set!")
LOG.error(msg)
LOG.error(_LE("Primary IP must be set!"))
RaiseXIODriverException()
return self.ise_primary_ip
@ -346,7 +336,8 @@ class XIOISEDriver(object):
def _call_loop(loop_args):
remaining = loop_args['retries']
args = loop_args['args']
LOG.debug("In call loop (%d) %s", remaining, args)
LOG.debug("In call loop (%(remaining)d) %(args)s",
{'remaining': remaining, 'args': args})
(remaining, response) = loop_args['func'](args, remaining)
if remaining == 0:
# We are done - let our caller handle response
@ -383,7 +374,8 @@ class XIOISEDriver(object):
# successful, the request flag for a new QUERY will be set. The QUERY
# will be sent on next connection attempt to figure out which
# controller is primary in case it has changed.
LOG.debug("Connect: %s %s %s", method, url, body)
LOG.debug("Connect: %(method)s %(url)s %(body)s",
{'method': method, 'url': url, 'body': body})
using_secondary = 0
response = {}
response['status'] = 0
@ -414,9 +406,8 @@ class XIOISEDriver(object):
if secondary_ip is '':
# if secondary is not setup yet, then assert
# connection on primary and secondary ip failed
msg = (_LE("Connection to %s failed and no secondary!") %
primary_ip)
LOG.error(msg)
LOG.error(_LE("Connection to %s failed and no secondary!"),
primary_ip)
RaiseXIODriverException()
# swap primary for secondary ip in URL
url = string.replace(url, primary_ip, secondary_ip)
@ -426,9 +417,8 @@ class XIOISEDriver(object):
# connection failed on both IPs - break out of the loop
break
# connection on primary and secondary ip failed
msg = (_LE("Could not connect to %(primary)s or %(secondary)s!") %
{'primary': primary_ip, 'secondary': secondary_ip})
LOG.error(msg)
LOG.error(_LE("Could not connect to %(primary)s or %(secondary)s!"),
{'primary': primary_ip, 'secondary': secondary_ip})
RaiseXIODriverException()
def _param_string(self, params):
@ -470,8 +460,7 @@ class XIOISEDriver(object):
resp = self._send_cmd('GET', url, {})
status = resp['status']
if status != 200:
msg = _LW("IOnetworks GET failed (%d)") % status
LOG.warning(msg)
LOG.warning(_LW("IOnetworks GET failed (%d)"), status)
return chap
# Got a good response. Parse out CHAP info. First check if CHAP is
# enabled and if so parse out username and password.
@ -501,8 +490,7 @@ class XIOISEDriver(object):
status = resp['status']
if status != 200:
# Not good. Throw an exception.
msg = _LE("Controller GET failed (%d)") % status
LOG.error(msg)
LOG.error(_LE("Controller GET failed (%d)"), status)
RaiseXIODriverException()
# Good response. Parse out IQN that matches iscsi_ip_address
# passed in from cinder.conf. IQN is 'hidden' in globalid field.
@ -527,8 +515,7 @@ class XIOISEDriver(object):
if target_iqn != '':
return target_iqn
# Did not find a matching IQN. Upsetting.
msg = _LE("Failed to get IQN!")
LOG.error(msg)
LOG.error(_LE("Failed to get IQN!"))
RaiseXIODriverException()
def find_target_wwns(self):
@ -541,8 +528,7 @@ class XIOISEDriver(object):
status = resp['status']
if status != 200:
# Not good. Throw an exception.
msg = _LE("Controller GET failed (%d)") % status
LOG.error(msg)
LOG.error(_LE("Controller GET failed (%d)"), status)
RaiseXIODriverException()
# Good response. Parse out globalid (WWN) of endpoint that matches
# protocol and type (array).
@ -569,8 +555,8 @@ class XIOISEDriver(object):
status = resp['status']
if status != 200:
# Not good. Throw an exception.
msg = _LE("Failed to get allocation information (%d)!") % status
LOG.error(msg)
LOG.error(_LE("Failed to get allocation information (%d)!"),
status)
RaiseXIODriverException()
# Good response. Parse out LUN.
xml_tree = etree.fromstring(resp['content'])
@ -580,8 +566,7 @@ class XIOISEDriver(object):
if luntag is not None:
return luntag.text
# Did not find LUN. Throw an exception.
msg = _LE("Failed to get LUN information!")
LOG.error(msg)
LOG.error(_LE("Failed to get LUN information!"))
RaiseXIODriverException()
def _get_volume_info(self, vol_name):
@ -600,24 +585,21 @@ class XIOISEDriver(object):
url = '/storage/arrays/%s/volumes' % (self._get_ise_globalid())
resp = self._send_cmd('GET', url, {'name': vol_name})
if resp['status'] != 200:
msg = (_LW("Could not get status for %(name)s (%(status)d).") %
{'name': vol_name, 'status': resp['status']})
LOG.warning(msg)
LOG.warning(_LW("Could not get status for %(name)s (%(status)d)."),
{'name': vol_name, 'status': resp['status']})
return vol_info
# Good response. Parse down to Volume tag in list of one.
root = etree.fromstring(resp['content'])
volume_node = root.find('volume')
if volume_node is None:
msg = _LW("No volume node in XML content.")
LOG.warning(msg)
LOG.warning(_LW("No volume node in XML content."))
return vol_info
# Location can be found as an attribute in the volume node tag.
vol_info['location'] = volume_node.attrib['self']
# Find status tag
status = volume_node.find('status')
if status is None:
msg = _LW("No status payload for volume %s.") % vol_name
LOG.warning(msg)
LOG.warning(_LW("No status payload for volume %s."), vol_name)
return vol_info
# Fill in value and string from status tag attributes.
vol_info['value'] = status.attrib['value']
@ -642,9 +624,8 @@ class XIOISEDriver(object):
resp = self._send_cmd('GET', url, {'name': volume['name'],
'hostname': hostname})
if resp['status'] != 200:
msg = (_LE("Could not GET allocation information (%d)!") %
resp['status'])
LOG.error(msg)
LOG.error(_LE("Could not GET allocation information (%d)!"),
resp['status'])
RaiseXIODriverException()
# Good response. Find the allocation based on volume name.
allocation_tree = etree.fromstring(resp['content'])
@ -706,13 +687,11 @@ class XIOISEDriver(object):
if status == 201:
LOG.info(_LI("Volume %s presented."), volume['name'])
elif status == 409:
msg = (_LW("Volume %(name)s already presented (%(status)d)!") %
{'name': volume['name'], 'status': status})
LOG.warning(msg)
LOG.warning(_LW("Volume %(name)s already presented (%(status)d)!"),
{'name': volume['name'], 'status': status})
else:
msg = (_LE("Failed to present volume %(name)s (%(status)d)!") %
{'name': volume['name'], 'status': status})
LOG.error(msg)
LOG.error(_LE("Failed to present volume %(name)s (%(status)d)!"),
{'name': volume['name'], 'status': status})
RaiseXIODriverException()
# Fetch LUN. In theory the LUN should be what caller requested.
# We try to use shortcut as location comes back in Location header.
@ -725,8 +704,9 @@ class XIOISEDriver(object):
if location != '':
target_lun = self._find_target_lun(location)
# Success. Return target LUN.
LOG.debug("Volume %s presented: %s %s",
volume['name'], hostname, target_lun)
LOG.debug("Volume %(volume)s presented: %(host)s %(lun)s",
{'volume': volume['name'], 'host': hostname,
'lun': target_lun})
return target_lun
def find_allocations(self, hostname):
@ -736,10 +716,9 @@ class XIOISEDriver(object):
resp = self._send_cmd('GET', url, {'hostname': hostname})
status = resp['status']
if status != 200:
msg = (_LE("Failed to get allocation information: "
"%(host)s (%(status)d)!") %
{'host': hostname, 'status': status})
LOG.error(msg)
LOG.error(_LE("Failed to get allocation information: "
"%(host)s (%(status)d)!"),
{'host': hostname, 'status': status})
RaiseXIODriverException()
# Good response. Count the number of allocations.
allocation_tree = etree.fromstring(resp['content'])
@ -771,8 +750,7 @@ class XIOISEDriver(object):
resp = self._send_cmd('GET', url, params)
status = resp['status']
if resp['status'] != 200:
msg = _LE("Could not find any hosts (%s)") % status
LOG.error(msg)
LOG.error(_LE("Could not find any hosts (%s)"), status)
RaiseXIODriverException()
# Good response. Try to match up a host based on end point string.
host_tree = etree.fromstring(resp['content'])
@ -820,7 +798,8 @@ class XIOISEDriver(object):
else:
endpoint_str = endpoints
# Log host creation.
LOG.debug("Create host %s; %s", hostname, endpoint_str)
LOG.debug("Create host %(host)s; %(endpoint)s",
{'host': hostname, 'endpoint': endpoint_str})
# Issue REST call to create host entry of Openstack type.
params = {}
params = {'name': hostname, 'endpoint': endpoint_str,
@ -829,8 +808,7 @@ class XIOISEDriver(object):
resp = self._send_cmd('POST', url, params)
status = resp['status']
if status != 201 and status != 409:
msg = _LE("POST for host create failed (%s)!") % status
LOG.error(msg)
LOG.error(_LE("POST for host create failed (%s)!"), status)
RaiseXIODriverException()
# Successfully created host entry. Return host name.
return hostname
@ -857,8 +835,7 @@ class XIOISEDriver(object):
if vol_info['value'] == '0':
LOG.debug('Source volume %s ready.', volume_name)
else:
msg = _LE("Source volume %s not ready!") % volume_name
LOG.error(msg)
LOG.error(_LE("Source volume %s not ready!"), volume_name)
RaiseXIODriverException()
# Prepare snapshot
# get extra_specs and qos specs from source volume
@ -866,7 +843,8 @@ class XIOISEDriver(object):
ctxt = context.get_admin_context()
type_id = volume['volume_type_id']
extra_specs = self._get_extra_specs(ctxt, type_id)
LOG.debug("Volume %s extra_specs %s", volume['name'], extra_specs)
LOG.debug("Volume %(volume_name)s extra_specs %(extra_specs)s",
{'volume_name': volume['name'], 'extra_specs': extra_specs})
qos = self._get_qos_specs(ctxt, type_id)
# Wait until snapshot/clone is prepared.
args['method'] = 'POST'
@ -883,8 +861,7 @@ class XIOISEDriver(object):
args, retries)
if resp['status'] != 202:
# clone prepare failed - bummer
msg = _LE("Prepare clone failed for %s.") % clone['name']
LOG.error(msg)
LOG.error(_LE("Prepare clone failed for %s."), clone['name'])
RaiseXIODriverException()
# clone prepare request accepted
# make sure not to continue until clone prepared
@ -896,16 +873,14 @@ class XIOISEDriver(object):
if PREPARED_STATUS in clone_info['details']:
LOG.debug('Clone %s prepared.', clone['name'])
else:
msg = (_LE("Clone %s not in prepared state!") % clone['name'])
LOG.error(msg)
LOG.error(_LE("Clone %s not in prepared state!"), clone['name'])
RaiseXIODriverException()
# Clone prepared, now commit the create
resp = self._send_cmd('PUT', clone_info['location'],
{clone_type: 'true'})
if resp['status'] != 201:
msg = (_LE("Commit clone failed: %(name)s (%(status)d)!") %
{'name': clone['name'], 'status': resp['status']})
LOG.error(msg)
LOG.error(_LE("Commit clone failed: %(name)s (%(status)d)!"),
{'name': clone['name'], 'status': resp['status']})
RaiseXIODriverException()
# Clone create request accepted. Make sure not to return until clone
# operational.
@ -915,11 +890,9 @@ class XIOISEDriver(object):
clone_info = self._wait_for_completion(self._help_wait_for_status,
args, retries)
if OPERATIONAL_STATUS in clone_info['string']:
msg = _LI("Clone %s created."), clone['name']
LOG.info(msg)
LOG.info(_LI("Clone %s created."), clone['name'])
else:
msg = _LE("Commit failed for %s!") % clone['name']
LOG.error(msg)
LOG.error(_LE("Commit failed for %s!"), clone['name'])
RaiseXIODriverException()
return
@ -983,8 +956,7 @@ class XIOISEDriver(object):
status = resp['status']
if status != 200:
# Request failed. Return what we have, which isn't much.
msg = _LW("Could not get pool information (%s)!") % status
LOG.warning(msg)
LOG.warning(_LW("Could not get pool information (%s)!"), status)
return (pools, vol_cnt)
# Parse out available (free) and used. Add them up to get total.
xml_tree = etree.fromstring(resp['content'])
@ -1085,9 +1057,9 @@ class XIOISEDriver(object):
"""Get volume stats."""
if refresh:
self._vol_stats = self._update_volume_stats()
LOG.debug("ISE get_volume_stats (total, free): %s, %s",
self._vol_stats['total_capacity_gb'],
self._vol_stats['free_capacity_gb'])
LOG.debug("ISE get_volume_stats (total, free): %(total)s, %(free)s",
{'total': self._vol_stats['total_capacity_gb'],
'free': self._vol_stats['free_capacity_gb']})
return self._vol_stats
def _get_extra_specs(self, ctxt, type_id):
@ -1158,7 +1130,8 @@ class XIOISEDriver(object):
ctxt = context.get_admin_context()
type_id = volume['volume_type_id']
extra_specs = self._get_extra_specs(ctxt, type_id)
LOG.debug("Volume %s extra_specs %s", volume['name'], extra_specs)
LOG.debug("Volume %(volume_name)s extra_specs %(extra_specs)s",
{'volume_name': volume['name'], 'extra_specs': extra_specs})
qos = self._get_qos_specs(ctxt, type_id)
# Make create call
url = '/storage/arrays/%s/volumes' % (self._get_ise_globalid())
@ -1173,9 +1146,8 @@ class XIOISEDriver(object):
'IOPSmax': qos['maxIOPS'],
'IOPSburst': qos['burstIOPS']})
if resp['status'] != 201:
msg = (_LE("Failed to create volume: %(name)s (%(status)s)") %
{'name': volume['name'], 'status': resp['status']})
LOG.error(msg)
LOG.error(_LE("Failed to create volume: %(name)s (%(status)s)"),
{'name': volume['name'], 'status': resp['status']})
RaiseXIODriverException()
# Good response. Make sure volume is in operational state before
# returning. Volume creation completes asynchronously.
@ -1187,11 +1159,9 @@ class XIOISEDriver(object):
args, retries)
if OPERATIONAL_STATUS in vol_info['string']:
# Ready.
msg = _LI("Volume %s created"), volume['name']
LOG.info(msg)
LOG.info(_LI("Volume %s created"), volume['name'])
else:
msg = _LE("Failed to create volume %s.") % volume['name']
LOG.error(msg)
LOG.error(_LE("Failed to create volume %s."), volume['name'])
RaiseXIODriverException()
return
@ -1223,8 +1193,7 @@ class XIOISEDriver(object):
# in response. Used for DELETE call below.
vol_info = self._get_volume_info(volume['name'])
if vol_info['location'] == '':
msg = _LW("Delete volume: %s not found!") % volume['name']
LOG.warning(msg)
LOG.warning(_LW("Delete volume: %s not found!"), volume['name'])
return
# Make DELETE call.
args = {}
@ -1235,8 +1204,7 @@ class XIOISEDriver(object):
retries = self.configuration.ise_completion_retries
resp = self._wait_for_completion(self._help_call_method, args, retries)
if resp['status'] == 204:
msg = (_LI("Volume %s deleted."), volume['name'])
LOG.info(msg)
LOG.info(_LI("Volume %s deleted."), volume['name'])
return
def delete_volume(self, volume):
@ -1255,8 +1223,7 @@ class XIOISEDriver(object):
# in response. Used for PUT call below.
vol_info = self._get_volume_info(volume['name'])
if vol_info['location'] == '':
msg = _LE("modify volume: %s does not exist!") % volume['name']
LOG.error(msg)
LOG.error(_LE("modify volume: %s does not exist!"), volume['name'])
RaiseXIODriverException()
# Make modify volume REST call using PUT.
# Location from above is used as identifier.
@ -1265,9 +1232,8 @@ class XIOISEDriver(object):
if status == 201:
LOG.debug("Volume %s modified.", volume['name'])
return True
msg = (_LE("Modify volume PUT failed: %(name)s (%(status)d).") %
{'name': volume['name'], 'status': status})
LOG.error(msg)
LOG.error(_LE("Modify volume PUT failed: %(name)s (%(status)d)."),
{'name': volume['name'], 'status': status})
RaiseXIODriverException()
def extend_volume(self, volume, new_size):
@ -1275,9 +1241,8 @@ class XIOISEDriver(object):
LOG.debug("extend_volume called")
ret = self._modify_volume(volume, {'size': new_size})
if ret is True:
msg = (_LI("volume %(name)s extended to %(size)d."),
{'name': volume['name'], 'size': new_size})
LOG.info(msg)
LOG.info(_LI("volume %(name)s extended to %(size)d."),
{'name': volume['name'], 'size': new_size})
return
def retype(self, ctxt, volume, new_type, diff, host):
@ -1288,16 +1253,14 @@ class XIOISEDriver(object):
'IOPSmax': qos['maxIOPS'],
'IOPSburst': qos['burstIOPS']})
if ret is True:
msg = _LI("Volume %s retyped."), volume['name']
LOG.info(msg)
LOG.info(_LI("Volume %s retyped."), volume['name'])
return True
def manage_existing(self, volume, ise_volume_ref):
"""Convert an existing ISE volume to a Cinder volume."""
LOG.debug("X-IO manage_existing called")
if 'source-name' not in ise_volume_ref:
msg = _LE("manage_existing: No source-name in ref!")
LOG.error(msg)
LOG.error(_LE("manage_existing: No source-name in ref!"))
RaiseXIODriverException()
# copy the source-name to 'name' for modify volume use
ise_volume_ref['name'] = ise_volume_ref['source-name']
@ -1309,24 +1272,21 @@ class XIOISEDriver(object):
'IOPSmax': qos['maxIOPS'],
'IOPSburst': qos['burstIOPS']})
if ret is True:
msg = _LI("Volume %s converted."), ise_volume_ref['name']
LOG.info(msg)
LOG.info(_LI("Volume %s converted."), ise_volume_ref['name'])
return ret
def manage_existing_get_size(self, volume, ise_volume_ref):
"""Get size of an existing ISE volume."""
LOG.debug("X-IO manage_existing_get_size called")
if 'source-name' not in ise_volume_ref:
msg = _LE("manage_existing_get_size: No source-name in ref!")
LOG.error(msg)
LOG.error(_LE("manage_existing_get_size: No source-name in ref!"))
RaiseXIODriverException()
ref_name = ise_volume_ref['source-name']
# get volume status including size
vol_info = self._get_volume_info(ref_name)
if vol_info['location'] == '':
msg = (_LE("manage_existing_get_size: %s does not exist!") %
ref_name)
LOG.error(msg)
LOG.error(_LE("manage_existing_get_size: %s does not exist!"),
ref_name)
RaiseXIODriverException()
return int(vol_info['size'])
@ -1335,8 +1295,8 @@ class XIOISEDriver(object):
LOG.debug("X-IO unmanage called")
vol_info = self._get_volume_info(volume['name'])
if vol_info['location'] == '':
msg = _LE("unmanage: Volume %s does not exist!") % volume['name']
LOG.error(msg)
LOG.error(_LE("unmanage: Volume %s does not exist!"),
volume['name'])
RaiseXIODriverException()
# This is a noop. ISE does not store any Cinder specific information.
@ -1354,8 +1314,7 @@ class XIOISEDriver(object):
host = self._find_host(endpoints)
if host['name'] == '':
# host still not found, this is fatal.
msg = _LE("Host could not be found!")
LOG.error(msg)
LOG.error(_LE("Host could not be found!"))
RaiseXIODriverException()
elif string.upper(host['type']) != 'OPENSTACK':
# Make sure host type is marked as Openstack host
@ -1363,8 +1322,7 @@ class XIOISEDriver(object):
resp = self._send_cmd('PUT', host['locator'], params)
status = resp['status']
if status != 201 and status != 409:
msg = _LE("Host PUT failed (%s).") % status
LOG.error(msg)
LOG.error(_LE("Host PUT failed (%s)."), status)
RaiseXIODriverException()
# We have a host object.
target_lun = ''
@ -1422,8 +1380,7 @@ class XIOISEISCSIDriver(driver.ISCSIDriver):
# The iscsi_ip_address must always be set.
if self.configuration.iscsi_ip_address == '':
err_msg = _LE("iscsi_ip_address must be set!")
LOG.error(err_msg)
LOG.error(_LE("iscsi_ip_address must be set!"))
RaiseXIODriverException()
# Setup common driver
self.driver = XIOISEDriver(configuration=self.configuration)


@ -92,8 +92,8 @@ class RestResult(object):
self.status = self.error.code
self.data = httplib.responses[self.status]
LOG.debug('Response code: %s' % self.status)
LOG.debug('Response data: %s' % self.data)
LOG.debug('Response code: %s', self.status)
LOG.debug('Response data: %s', self.data)
def get_header(self, name):
"""Get an HTTP header with the given name from the results
@ -177,7 +177,7 @@ class RestClientURL(object):
self.headers['x-auth-session'] = \
result.get_header('x-auth-session')
self.do_logout = True
LOG.info(_LI('ZFSSA version: %s') %
LOG.info(_LI('ZFSSA version: %s'),
result.get_header('x-zfssa-version'))
elif result.status == httplib.NOT_FOUND:
@ -268,35 +268,33 @@ class RestClientURL(object):
retry = 0
response = None
LOG.debug('Request: %s %s' % (request, zfssaurl))
LOG.debug('Out headers: %s' % out_hdrs)
LOG.debug('Request: %s %s', request, zfssaurl)
LOG.debug('Out headers: %s', out_hdrs)
if body and body != '':
LOG.debug('Body: %s' % body)
LOG.debug('Body: %s', body)
while retry < maxreqretries:
try:
response = urllib2.urlopen(req, timeout=self.timeout)
except urllib2.HTTPError as err:
if err.code == httplib.NOT_FOUND:
LOG.debug('REST Not Found: %s' % err.code)
LOG.debug('REST Not Found: %s', err.code)
else:
LOG.error(_LE('REST Not Available: %s') % err.code)
LOG.error(_LE('REST Not Available: %s'), err.code)
if err.code == httplib.SERVICE_UNAVAILABLE and \
retry < maxreqretries:
retry += 1
time.sleep(1)
LOG.error(_LE('Server Busy retry request: %s') % retry)
LOG.error(_LE('Server Busy retry request: %s'), retry)
continue
if (err.code == httplib.UNAUTHORIZED or
err.code == httplib.INTERNAL_SERVER_ERROR) and \
'/access/v1' not in zfssaurl:
try:
LOG.error(_LE('Authorizing request: '
'%(zfssaurl)s'
'retry: %(retry)d .')
% {'zfssaurl': zfssaurl,
'retry': retry})
LOG.error(_LE('Authorizing request: %(zfssaurl)s '
'retry: %(retry)d .'),
{'zfssaurl': zfssaurl, 'retry': retry})
self._authorize()
req.add_header('x-auth-session',
self.headers['x-auth-session'])
@ -309,7 +307,7 @@ class RestClientURL(object):
return RestResult(err=err)
except urllib2.URLError as err:
LOG.error(_LE('URLError: %s') % err.reason)
LOG.error(_LE('URLError: %s'), err.reason)
raise RestClientError(-1, name="ERR_URLError",
message=err.reason)


@ -81,27 +81,24 @@ class ZFSSAWebDAVClient(object):
request.get_method = lambda: method
LOG.debug('Sending WebDAV request:%s %s %s' % (method, src_url,
dst_url))
LOG.debug('Sending WebDAV request:%(method)s %(src)s %(des)s',
{'method': method, 'src': src_url, 'des': dst_url})
while retry < maxretries:
try:
response = urllib2.urlopen(request, timeout=None)
except urllib2.HTTPError as err:
LOG.error(_LE('WebDAV returned with %(code)s error during '
'%(method)s call.')
% {'code': err.code,
'method': method})
'%(method)s call.'),
{'code': err.code, 'method': method})
if err.code == httplib.INTERNAL_SERVER_ERROR:
exception_msg = (_('WebDAV operation failed with '
'error code: %(code)s '
'reason: %(reason)s '
'Retry attempt %(retry)s in progress.')
% {'code': err.code,
'reason': err.reason,
'retry': retry})
LOG.error(exception_msg)
LOG.error(_LE('WebDAV operation failed with error code: '
'%(code)s reason: %(reason)s Retry attempt '
'%(retry)s in progress.'),
{'code': err.code,
'reason': err.reason,
'retry': retry})
if retry < maxretries:
retry += 1
time.sleep(1)


@ -146,8 +146,8 @@ class ZFSSANFSDriver(nfs.NfsDriver):
https_path = 'https://' + lcfg.zfssa_data_ip + ':' + https_port + \
'/shares' + mountpoint
LOG.debug('NFS mount path: %s' % self.mount_path)
LOG.debug('WebDAV path to the share: %s' % https_path)
LOG.debug('NFS mount path: %s', self.mount_path)
LOG.debug('WebDAV path to the share: %s', https_path)
self.shares = {}
mnt_opts = self.configuration.zfssa_nfs_mount_options
@ -167,10 +167,10 @@ class ZFSSANFSDriver(nfs.NfsDriver):
try:
self._ensure_share_mounted(self.mount_path)
except Exception as exc:
LOG.error(_LE('Exception during mounting %s.') % exc)
LOG.error(_LE('Exception during mounting %s.'), exc)
self._mounted_shares = [self.mount_path]
LOG.debug('Available shares %s' % self._mounted_shares)
LOG.debug('Available shares %s', self._mounted_shares)
def check_for_setup_error(self):
"""Check that driver can login.
@ -203,7 +203,7 @@ class ZFSSANFSDriver(nfs.NfsDriver):
snapshot['name'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug('Error thrown during snapshot: %s creation' %
LOG.debug('Error thrown during snapshot: %s creation',
snapshot['name'])
finally:
self.zfssa.delete_snapshot(lcfg.zfssa_nfs_pool,
@ -232,17 +232,15 @@ class ZFSSANFSDriver(nfs.NfsDriver):
self.extend_volume(volume, volume['size'])
except Exception:
vol_path = self.local_path(volume)
exception_msg = (_('Error in extending volume size: '
'Volume: %(volume)s '
'Vol_Size: %(vol_size)d with '
'Snapshot: %(snapshot)s '
'Snap_Size: %(snap_size)d')
% {'volume': volume['name'],
'vol_size': volume['size'],
'snapshot': snapshot['name'],
'snap_size': snapshot['volume_size']})
with excutils.save_and_reraise_exception():
LOG.error(exception_msg)
LOG.error(_LE('Error in extending volume size: Volume: '
'%(volume)s Vol_Size: %(vol_size)d with '
'Snapshot: %(snapshot)s Snap_Size: '
'%(snap_size)d'),
{'volume': volume['name'],
'vol_size': volume['size'],
'snapshot': snapshot['name'],
'snap_size': snapshot['volume_size']})
self._execute('rm', '-f', vol_path, run_as_root=True)
return {'provider_location': volume['provider_location']}
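
The rewritten error call above sits inside excutils.save_and_reraise_exception, which lets the driver log and clean up while re-raising the original exception with its traceback intact. A condensed sketch of the pattern, assuming oslo.utils is installed (the function body is a stand-in for the real extend call):

import logging

from oslo_utils import excutils

logging.basicConfig(level=logging.ERROR)
LOG = logging.getLogger('demo')


def extend_volume(vol_path):
    try:
        raise RuntimeError('resize failed')  # stand-in for the driver call
    except Exception:
        with excutils.save_and_reraise_exception():
            # Log (and remove any half-written file) here; the context
            # manager re-raises the original exception on exit.
            LOG.error('Error in extending volume size: %s', vol_path)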

View File

@ -89,12 +89,9 @@ class ZFSSAApi(object):
val = json.loads(ret.data)
if not self._is_pool_owned(val):
exception_msg = (_('Error Pool ownership: '
'Pool %(pool)s is not owned '
'by %(host)s.')
% {'pool': pool,
'host': self.host})
LOG.error(exception_msg)
LOG.error(_LE('Error Pool ownership: Pool %(pool)s is not owned '
'by %(host)s.'),
{'pool': pool, 'host': self.host})
raise exception.InvalidInput(reason=pool)
avail = val['pool']['usage']['available']
@ -464,20 +461,16 @@ class ZFSSAApi(object):
ret = self.rclient.put(svc, arg)
if ret.status != restclient.Status.ACCEPTED:
exception_msg = (_('Error Setting '
'Volume: %(lun)s to '
'InitiatorGroup: %(initiatorgroup)s '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'lun': lun,
'initiatorgroup': initiatorgroup,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
LOG.error(_LE('Error Setting Volume: %(lun)s to InitiatorGroup: '
'%(initiatorgroup)s Pool: %(pool)s Project: '
'%(project)s Return code: %(ret.status)d Message: '
'%(ret.data)s.'),
{'lun': lun,
'initiatorgroup': initiatorgroup,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
def delete_lun(self, pool, project, lun):
"""delete iscsi lun."""
@ -486,18 +479,14 @@ class ZFSSAApi(object):
ret = self.rclient.delete(svc)
if ret.status != restclient.Status.NO_CONTENT:
exception_msg = (_('Error Deleting '
'Volume: %(lun)s to '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'lun': lun,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
LOG.error(_LE('Error Deleting Volume: %(lun)s to Pool: %(pool)s '
'Project: %(project)s Return code: %(ret.status)d '
'Message: %(ret.data)s.'),
{'lun': lun,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
def create_snapshot(self, pool, project, lun, snapshot):
"""create snapshot."""
@ -633,9 +622,9 @@ class ZFSSAApi(object):
svc = "/api/san/v1/iscsi/initiator-groups"
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
LOG.error(_LE('Error getting initiator groups.'))
exception_msg = (_('Error getting initiator groups.'))
raise exception.VolumeBackendAPIException(data=exception_msg)
msg = _('Error getting initiator groups.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
val = json.loads(ret.data)
for initiator_group in val['groups']:
if initiator in initiator_group['initiators']:
@ -762,7 +751,8 @@ class ZFSSANfsApi(ZFSSAApi):
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
data = json.loads(ret.data)['service']
LOG.debug('%s service state: %s' % (service, data))
LOG.debug('%(service)s service state: %(data)s',
{'service': service, 'data': data})
status = 'online' if state == 'enable' else 'disabled'
@ -833,9 +823,9 @@ class ZFSSANfsApi(ZFSSAApi):
raise exception.VolumeBackendAPIException(data=exception_msg)
data = json.loads(ret.data)['service']
LOG.debug('Modify %(service)s service '
'return data: %(data)s'
% {'service': service,
'data': data})
'return data: %(data)s',
{'service': service,
'data': data})
def create_share(self, pool, project, share, args):
"""Create a share in the specified pool and project"""

View File

@ -209,7 +209,7 @@ class VolumeManager(manager.SchedulerDependentManager):
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
LOG.debug("Cinder Volume DB check: vol_db_empty=%s" % vol_db_empty)
LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)
self.driver = importutils.import_object(
volume_driver,
@ -226,7 +226,7 @@ class VolumeManager(manager.SchedulerDependentManager):
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Invalid JSON: %s" %
LOG.error(_LE("Invalid JSON: %s"),
self.driver.configuration.extra_capabilities)
def _add_to_threadpool(self, func, *args, **kwargs):
@ -241,10 +241,9 @@ class VolumeManager(manager.SchedulerDependentManager):
# knowledge and update the DB.
try:
pool = self.driver.get_pool(volume)
except Exception as err:
LOG.error(_LE('Fetch volume pool name failed.'),
resource=volume)
LOG.exception(err)
except Exception:
LOG.exception(_LE('Fetch volume pool name failed.'),
resource=volume)
return
if pool:
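
The change above collapses LOG.error(...) followed by LOG.exception(err) into a single LOG.exception call: it logs at ERROR level and appends the active traceback itself, so the caught exception object never needs to be passed along. A minimal sketch:

import logging

logging.basicConfig()
LOG = logging.getLogger('demo')

try:
    {}['missing']  # stand-in for self.driver.get_pool(volume)
except KeyError:
    # Inside an except block, LOG.exception logs at ERROR level and
    # attaches the current traceback automatically.
    LOG.exception('Fetch volume pool name failed.')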
@ -295,17 +294,16 @@ class VolumeManager(manager.SchedulerDependentManager):
ctxt = context.get_admin_context()
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)") %
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception as ex:
LOG.error(_LE("Failed to initialize driver."),
resource={'type': 'driver',
'id': self.__class__.__name__})
LOG.exception(ex)
except Exception:
LOG.exception(_LE("Failed to initialize driver."),
resource={'type': 'driver',
'id': self.__class__.__name__})
# we don't want to continue since we failed
# to initialize the driver correctly.
return
@ -325,11 +323,10 @@ class VolumeManager(manager.SchedulerDependentManager):
try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
except Exception as export_ex:
LOG.error(_LE("Failed to re-export volume, "
"setting to ERROR."),
resource=volume)
LOG.exception(export_ex)
except Exception:
LOG.exception(_LE("Failed to re-export volume, "
"setting to ERROR."),
resource=volume)
self.db.volume_update(ctxt,
volume['id'],
{'status': 'error'})
@ -357,10 +354,9 @@ class VolumeManager(manager.SchedulerDependentManager):
self.db.snapshot_update(ctxt,
snapshot['id'],
{'status': 'error'})
except Exception as ex:
LOG.error(_LE("Error during re-export on driver init."),
resource=volume)
LOG.exception(ex)
except Exception:
LOG.exception(_LE("Error during re-export on driver init."),
resource=volume)
return
self.driver.set_throttle()
@ -431,9 +427,7 @@ class VolumeManager(manager.SchedulerDependentManager):
cgsnapshot_id=cgsnapshot_id)
except Exception:
msg = _("Create manager volume flow failed.")
LOG.exception((msg),
resource={'type': 'volume',
'id': volume_id})
LOG.exception(msg, resource={'type': 'volume', 'id': volume_id})
raise exception.CinderException(msg)
if snapshot_id is not None:
@ -525,7 +519,7 @@ class VolumeManager(manager.SchedulerDependentManager):
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if (vol_utils.extract_host(volume_ref['host']) != self.host):
if vol_utils.extract_host(volume_ref['host']) != self.host:
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
@ -673,7 +667,7 @@ class VolumeManager(manager.SchedulerDependentManager):
{'volume_id': volume_id}, resource=snapshot)
snapshot.status = 'error'
snapshot.save(context)
raise exception.MetadataCopyFailure(reason=ex)
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
snapshot.status = 'available'
snapshot.progress = '100%'
@ -759,13 +753,13 @@ class VolumeManager(manager.SchedulerDependentManager):
if volume['status'] == 'attaching':
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
msg = _("being attached by different mode")
raise exception.InvalidVolume(reason=msg)
raise exception.InvalidVolume(
reason=_("being attached by different mode"))
if (volume['status'] == 'in-use' and not volume['multiattach']
and not volume['migration_status']):
msg = _("volume is already attached")
raise exception.InvalidVolume(reason=msg)
raise exception.InvalidVolume(
reason=_("volume is already attached"))
attachment = None
host_name_sanitized = utils.sanitize_hostname(
@ -915,7 +909,8 @@ class VolumeManager(manager.SchedulerDependentManager):
LOG.exception(_LE("Detach volume failed, due to "
"remove-export failure."),
resource=volume)
raise exception.RemoveExportException(volume=volume_id, reason=ex)
raise exception.RemoveExportException(volume=volume_id,
reason=six.text_type(ex))
self._notify_about_volume_usage(context, volume, "detach.end")
LOG.info(_LI("Detach volume completed successfully."), resource=volume)
@ -1063,10 +1058,10 @@ class VolumeManager(manager.SchedulerDependentManager):
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=err)
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err))."), {'err': six.text_type(err)})
"(error: %(err)).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
@ -1085,7 +1080,7 @@ class VolumeManager(manager.SchedulerDependentManager):
model_update)
except exception.CinderException as ex:
LOG.exception(_LE("Model update failed."), resource=volume)
raise exception.ExportFailure(reason=ex)
raise exception.ExportFailure(reason=six.text_type(ex))
initiator_data = self._get_driver_initiator_data(context, connector)
try:
@ -1098,7 +1093,7 @@ class VolumeManager(manager.SchedulerDependentManager):
connector)
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s)."), {'err': six.text_type(err)})
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
self.driver.remove_export(context.elevated(), volume)
@ -1271,9 +1266,8 @@ class VolumeManager(manager.SchedulerDependentManager):
new_volume['id'])
except Exception:
with excutils.save_and_reraise_exception():
msg = _LE("Failed to copy volume %(vol1)s to %(vol2)s")
LOG.error(msg, {'vol1': volume['id'],
'vol2': new_volume['id']})
LOG.error(_LE("Failed to copy volume %(vol1)s to %(vol2)s"),
{'vol1': volume['id'], 'vol2': new_volume['id']})
self._clean_temporary_volume(ctxt, volume['id'],
new_volume['id'])
@ -1337,9 +1331,9 @@ class VolumeManager(manager.SchedulerDependentManager):
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'error'})
msg = _("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s")
LOG.debug(msg % {'vol1': volume_id, 'vol2': new_volume_id})
LOG.debug("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s",
{'vol1': volume_id, 'vol2': new_volume_id})
volume = self.db.volume_get(ctxt, volume_id)
new_volume = self.db.volume_get(ctxt, new_volume_id)
rpcapi = volume_rpcapi.VolumeAPI()
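
The hunk above also drops the _() wrapper from a debug message, while other hunks add _LI and _LE to info and error calls: per the guidelines this commit follows, debug output is never translated, and each higher level gets its own marker so messages land in the right translation catalog. A compact sketch with stand-in markers (the real ones live in cinder.i18n):

import logging

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger('demo')

# Stand-ins for the cinder.i18n marker functions; in the real tree
# each performs a gettext lookup against its own catalog.
_ = _LI = _LE = lambda s: s

LOG.debug('completing migration for volume %s', 'vol-1')     # no marker
LOG.info(_LI('migration finished for volume %s'), 'vol-1')   # _LI
LOG.error(_LE('migration failed for volume %s'), 'vol-1')    # _LE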
@ -1347,10 +1341,9 @@ class VolumeManager(manager.SchedulerDependentManager):
orig_volume_status = self._get_original_status(volume)
if error:
msg = _("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s")
LOG.info(msg % {'vol1': volume['id'],
'vol2': new_volume['id']})
LOG.info(_LI("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s"),
{'vol1': volume['id'], 'vol2': new_volume['id']})
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': None, 'status': orig_volume_status}
self.db.volume_update(ctxt, volume_id, updates)
@ -1367,8 +1360,8 @@ class VolumeManager(manager.SchedulerDependentManager):
self.detach_volume(ctxt, volume_id, attachment['id'])
self.delete_volume(ctxt, volume_id)
except Exception as ex:
msg = _LE("Delete migration source volume failed: %(err)s")
LOG.error(msg, {'err': six.text_type(ex)}, resource=volume)
LOG.error(_LE("Delete migration source volume failed: %(err)s"),
{'err': ex}, resource=volume)
# Give driver (new_volume) a chance to update things as needed
# Note this needs to go through rpc to the host of the new volume
@ -1688,12 +1681,11 @@ class VolumeManager(manager.SchedulerDependentManager):
if retyped:
LOG.info(_LI("Volume %s: retyped successfully"), volume_id)
except Exception as ex:
except Exception:
retyped = False
LOG.error(_LE("Volume %s: driver error when trying to retype, "
"falling back to generic mechanism."),
volume_ref['id'])
LOG.exception(ex)
LOG.exception(_LE("Volume %s: driver error when trying to "
"retype, falling back to generic "
"mechanism."), volume_ref['id'])
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
@ -1758,11 +1750,9 @@ class VolumeManager(manager.SchedulerDependentManager):
volume_id,
ref)
except Exception:
LOG.exception(_LE("Failed to create manage_existing flow."),
resource={'type': 'volume',
'id': volume_id})
raise exception.CinderException(
_("Failed to create manage existing flow."))
msg = _("Failed to create manage_existing flow.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume_id})
raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
@ -2072,7 +2062,7 @@ class VolumeManager(manager.SchedulerDependentManager):
except exception.CinderException as ex:
LOG.error(_LE("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
" %(snapshot_id)s metadata.") %
" %(snapshot_id)s metadata."),
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
@ -2080,7 +2070,7 @@ class VolumeManager(manager.SchedulerDependentManager):
if group_id:
self.db.consistencygroup_update(
context, group_id, {'status': 'error'})
raise exception.MetadataCopyFailure(reason=ex)
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
self.db.volume_update(context, vol['id'], update)
@ -2423,13 +2413,14 @@ class VolumeManager(manager.SchedulerDependentManager):
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata") %
" %(volume_id)s metadata"),
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
self.db.snapshot_update(context,
snapshot['id'],
{'status': 'error'})
raise exception.MetadataCopyFailure(reason=ex)
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
self.db.snapshot_update(context,
snapshot['id'], {'status': 'available',