Fix missing translations for log messages
Fixed log translations in the volume.drivers and volume.manager modules for error, info and warning messages, using the appropriate marker function according to the logging standards [1]. As LOG.warn has been deprecated [2], I have changed LOG.warn to LOG.warning. [1] http://docs.openstack.org/developer/oslo.i18n/guidelines.html [2] http://bugs.python.org/issue13235 Note: Any new occurrences added in master during the review can be handled separately if they are not caught by hacking checks. Partial-Bug: 1431256 Change-Id: I9b7b89097b296ea62b43f1d948dcf59e2e5a40c4
This commit is contained in:
parent
86b384ca2e
commit
a09c4e1747
|
@ -214,9 +214,7 @@ def validate_log_translations(logical_line, filename):
|
||||||
# TODO(smcginnis): The following is temporary as a series
|
# TODO(smcginnis): The following is temporary as a series
|
||||||
# of patches are done to address these issues. It should be
|
# of patches are done to address these issues. It should be
|
||||||
# removed completely when bug 1433216 is closed.
|
# removed completely when bug 1433216 is closed.
|
||||||
ignore_dirs = [
|
ignore_dirs = ["cinder/openstack"]
|
||||||
"cinder/openstack",
|
|
||||||
"cinder/volume"]
|
|
||||||
for directory in ignore_dirs:
|
for directory in ignore_dirs:
|
||||||
if directory in filename:
|
if directory in filename:
|
||||||
return
|
return
|
||||||
|
|
|
@ -632,13 +632,13 @@ class GlusterFsDriverTestCase(test.TestCase):
|
||||||
self._driver._refresh_mounts()
|
self._driver._refresh_mounts()
|
||||||
|
|
||||||
self.assertTrue(mock_unmount_shares.called)
|
self.assertTrue(mock_unmount_shares.called)
|
||||||
self.assertTrue(mock_logger.warn.called)
|
self.assertTrue(mock_logger.warning.called)
|
||||||
self.assertTrue(mock_ensure_shares_mounted.called)
|
self.assertTrue(mock_ensure_shares_mounted.called)
|
||||||
|
|
||||||
mock_unmount_shares.reset_mock()
|
mock_unmount_shares.reset_mock()
|
||||||
mock_ensure_shares_mounted.reset_mock()
|
mock_ensure_shares_mounted.reset_mock()
|
||||||
mock_logger.reset_mock()
|
mock_logger.reset_mock()
|
||||||
mock_logger.warn.reset_mock()
|
mock_logger.warning.reset_mock()
|
||||||
|
|
||||||
mock_stderr = _("umount: <mnt_path>: some other error")
|
mock_stderr = _("umount: <mnt_path>: some other error")
|
||||||
mock_unmount_shares.side_effect = \
|
mock_unmount_shares.side_effect = \
|
||||||
|
|
|
@ -1201,9 +1201,11 @@ class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase):
|
||||||
with mock.patch.object(drv, '_ensure_shares_mounted'):
|
with mock.patch.object(drv, '_ensure_shares_mounted'):
|
||||||
with mock.patch.object(drv, '_do_create_volume'):
|
with mock.patch.object(drv, '_do_create_volume'):
|
||||||
self._driver.create_volume(FakeVolume(host, 1))
|
self._driver.create_volume(FakeVolume(host, 1))
|
||||||
warn_msg = 'Extra spec netapp:raid_type is obsolete. ' \
|
warn_msg = ('Extra spec %(old)s is obsolete. Use %(new)s '
|
||||||
'Use netapp_raid_type instead.'
|
'instead.')
|
||||||
utils.LOG.warning.assert_called_once_with(warn_msg)
|
utils.LOG.warning.assert_called_once_with(
|
||||||
|
warn_msg, {'new': 'netapp_raid_type',
|
||||||
|
'old': 'netapp:raid_type'})
|
||||||
|
|
||||||
@mock.patch.object(utils, 'LOG', mock.Mock())
|
@mock.patch.object(utils, 'LOG', mock.Mock())
|
||||||
def test_create_volume_deprecated_extra_spec(self):
|
def test_create_volume_deprecated_extra_spec(self):
|
||||||
|
@ -1220,9 +1222,11 @@ class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase):
|
||||||
with mock.patch.object(drv, '_ensure_shares_mounted'):
|
with mock.patch.object(drv, '_ensure_shares_mounted'):
|
||||||
with mock.patch.object(drv, '_do_create_volume'):
|
with mock.patch.object(drv, '_do_create_volume'):
|
||||||
self._driver.create_volume(FakeVolume(host, 1))
|
self._driver.create_volume(FakeVolume(host, 1))
|
||||||
warn_msg = 'Extra spec netapp_thick_provisioned is ' \
|
warn_msg = ('Extra spec %(old)s is deprecated. Use %(new)s '
|
||||||
'deprecated. Use netapp_thin_provisioned instead.'
|
'instead.')
|
||||||
utils.LOG.warning.assert_called_once_with(warn_msg)
|
utils.LOG.warning.assert_called_once_with(
|
||||||
|
warn_msg, {'new': 'netapp_thin_provisioned',
|
||||||
|
'old': 'netapp_thick_provisioned'})
|
||||||
|
|
||||||
def test_create_volume_no_pool_specified(self):
|
def test_create_volume_no_pool_specified(self):
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
|
@ -119,7 +119,7 @@ class RemoteFsDriverTestCase(test.TestCase):
|
||||||
|
|
||||||
drv._set_rw_permissions(self.TEST_FILE_NAME)
|
drv._set_rw_permissions(self.TEST_FILE_NAME)
|
||||||
|
|
||||||
self.assertFalse(LOG.warn.called)
|
self.assertFalse(LOG.warning.called)
|
||||||
|
|
||||||
@mock.patch.object(remotefs, 'LOG')
|
@mock.patch.object(remotefs, 'LOG')
|
||||||
def test_set_rw_permissions_without_secure_file_permissions(self, LOG):
|
def test_set_rw_permissions_without_secure_file_permissions(self, LOG):
|
||||||
|
@ -129,10 +129,10 @@ class RemoteFsDriverTestCase(test.TestCase):
|
||||||
|
|
||||||
drv._set_rw_permissions(self.TEST_FILE_NAME)
|
drv._set_rw_permissions(self.TEST_FILE_NAME)
|
||||||
|
|
||||||
self.assertTrue(LOG.warn.called)
|
self.assertTrue(LOG.warning.called)
|
||||||
warn_msg = "%s is being set with open permissions: ugo+rw" % \
|
warn_msg = "%(path)s is being set with open permissions: %(perm)s"
|
||||||
self.TEST_FILE_NAME
|
LOG.warning.assert_called_once_with(
|
||||||
LOG.warn.assert_called_once_with(warn_msg)
|
warn_msg, {'path': self.TEST_FILE_NAME, 'perm': 'ugo+rw'})
|
||||||
|
|
||||||
@mock.patch('os.path.join')
|
@mock.patch('os.path.join')
|
||||||
@mock.patch('os.path.isfile', return_value=False)
|
@mock.patch('os.path.isfile', return_value=False)
|
||||||
|
@ -309,7 +309,7 @@ class RemoteFsDriverTestCase(test.TestCase):
|
||||||
self.assertEqual('false', drv.configuration.nas_secure_file_operations)
|
self.assertEqual('false', drv.configuration.nas_secure_file_operations)
|
||||||
self.assertEqual('false',
|
self.assertEqual('false',
|
||||||
drv.configuration.nas_secure_file_permissions)
|
drv.configuration.nas_secure_file_permissions)
|
||||||
self.assertTrue(LOG.warn.called)
|
self.assertTrue(LOG.warning.called)
|
||||||
|
|
||||||
def test_secure_file_operations_enabled_true(self):
|
def test_secure_file_operations_enabled_true(self):
|
||||||
"""Test nas_secure_file_operations = 'true'
|
"""Test nas_secure_file_operations = 'true'
|
||||||
|
@ -1003,7 +1003,7 @@ class NfsDriverTestCase(test.TestCase):
|
||||||
|
|
||||||
self.assertEqual('true', drv.configuration.nas_secure_file_operations)
|
self.assertEqual('true', drv.configuration.nas_secure_file_operations)
|
||||||
self.assertEqual('true', drv.configuration.nas_secure_file_permissions)
|
self.assertEqual('true', drv.configuration.nas_secure_file_permissions)
|
||||||
self.assertFalse(LOG.warn.called)
|
self.assertFalse(LOG.warning.called)
|
||||||
|
|
||||||
@mock.patch.object(nfs, 'LOG')
|
@mock.patch.object(nfs, 'LOG')
|
||||||
def test_set_nas_security_options_when_false(self, LOG):
|
def test_set_nas_security_options_when_false(self, LOG):
|
||||||
|
@ -1027,7 +1027,7 @@ class NfsDriverTestCase(test.TestCase):
|
||||||
self.assertEqual('false', drv.configuration.nas_secure_file_operations)
|
self.assertEqual('false', drv.configuration.nas_secure_file_operations)
|
||||||
self.assertEqual('false',
|
self.assertEqual('false',
|
||||||
drv.configuration.nas_secure_file_permissions)
|
drv.configuration.nas_secure_file_permissions)
|
||||||
self.assertTrue(LOG.warn.called)
|
self.assertTrue(LOG.warning.called)
|
||||||
|
|
||||||
def test_set_nas_security_options_exception_if_no_mounted_shares(self):
|
def test_set_nas_security_options_exception_if_no_mounted_shares(self):
|
||||||
"""Ensure proper exception is raised if there are no mounted shares."""
|
"""Ensure proper exception is raised if there are no mounted shares."""
|
||||||
|
|
|
@ -195,8 +195,8 @@ class QuobyteDriverTestCase(test.TestCase):
|
||||||
mock_execute.assert_has_calls([mkdir_call, mount_call],
|
mock_execute.assert_has_calls([mkdir_call, mount_call],
|
||||||
any_order=False)
|
any_order=False)
|
||||||
|
|
||||||
mock_LOG.warn.assert_called_once_with('%s is already mounted',
|
mock_LOG.warning.assert_called_once_with('%s is already mounted',
|
||||||
self.TEST_QUOBYTE_VOLUME)
|
self.TEST_QUOBYTE_VOLUME)
|
||||||
|
|
||||||
def test_mount_quobyte_should_reraise_already_mounted_error(self):
|
def test_mount_quobyte_should_reraise_already_mounted_error(self):
|
||||||
"""Same as
|
"""Same as
|
||||||
|
|
|
@ -362,9 +362,9 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
|
||||||
'id': uuid.uuid4(),
|
'id': uuid.uuid4(),
|
||||||
'host': 'hostname@backend#vol1'})
|
'host': 'hostname@backend#vol1'})
|
||||||
|
|
||||||
warn_msg = 'Extra spec netapp:raid_type is obsolete. ' \
|
warn_msg = 'Extra spec %(old)s is obsolete. Use %(new)s instead.'
|
||||||
'Use netapp_raid_type instead.'
|
na_utils.LOG.warning.assert_called_once_with(
|
||||||
na_utils.LOG.warning.assert_called_once_with(warn_msg)
|
warn_msg, {'new': 'netapp_raid_type', 'old': 'netapp:raid_type'})
|
||||||
|
|
||||||
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
|
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
|
||||||
'_create_lun', mock.Mock())
|
'_create_lun', mock.Mock())
|
||||||
|
@ -383,9 +383,10 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
|
||||||
'id': uuid.uuid4(),
|
'id': uuid.uuid4(),
|
||||||
'host': 'hostname@backend#vol1'})
|
'host': 'hostname@backend#vol1'})
|
||||||
|
|
||||||
warn_msg = 'Extra spec netapp_thick_provisioned is deprecated. ' \
|
warn_msg = "Extra spec %(old)s is deprecated. Use %(new)s instead."
|
||||||
'Use netapp_thin_provisioned instead.'
|
na_utils.LOG.warning.assert_called_once_with(
|
||||||
na_utils.LOG.warning.assert_called_once_with(warn_msg)
|
warn_msg, {'new': 'netapp_thin_provisioned',
|
||||||
|
'old': 'netapp_thick_provisioned'})
|
||||||
|
|
||||||
@mock.patch.object(na_utils, 'check_flags')
|
@mock.patch.object(na_utils, 'check_flags')
|
||||||
def test_do_setup_san_configured(self, mock_check_flags):
|
def test_do_setup_san_configured(self, mock_check_flags):
|
||||||
|
|
|
@ -351,12 +351,12 @@ class BaseVD(object):
|
||||||
self.terminate_connection(volume, properties, force=force)
|
self.terminate_connection(volume, properties, force=force)
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
err_msg = (_('Unable to terminate volume connection: %(err)s')
|
err_msg = (_('Unable to terminate volume connection: %(err)s')
|
||||||
% {'err': err})
|
% {'err': six.text_type(err)})
|
||||||
LOG.error(err_msg)
|
LOG.error(err_msg)
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg)
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.debug(("volume %s: removing export"), volume['id'])
|
LOG.debug("volume %s: removing export", volume['id'])
|
||||||
self.remove_export(context, volume)
|
self.remove_export(context, volume)
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.exception(_LE("Error detaching volume %(volume)s, "
|
LOG.exception(_LE("Error detaching volume %(volume)s, "
|
||||||
|
@ -390,7 +390,7 @@ class BaseVD(object):
|
||||||
cgroup_name)
|
cgroup_name)
|
||||||
except processutils.ProcessExecutionError as err:
|
except processutils.ProcessExecutionError as err:
|
||||||
LOG.warning(_LW('Failed to activate volume copy throttling: '
|
LOG.warning(_LW('Failed to activate volume copy throttling: '
|
||||||
'%(err)s'), {'err': six.text_type(err)})
|
'%(err)s'), {'err': err})
|
||||||
throttling.Throttle.set_default(self._throttle)
|
throttling.Throttle.set_default(self._throttle)
|
||||||
|
|
||||||
def get_version(self):
|
def get_version(self):
|
||||||
|
@ -482,8 +482,8 @@ class BaseVD(object):
|
||||||
|
|
||||||
def copy_volume_data(self, context, src_vol, dest_vol, remote=None):
|
def copy_volume_data(self, context, src_vol, dest_vol, remote=None):
|
||||||
"""Copy data from src_vol to dest_vol."""
|
"""Copy data from src_vol to dest_vol."""
|
||||||
LOG.debug(('copy_data_between_volumes %(src)s -> %(dest)s.')
|
LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.', {
|
||||||
% {'src': src_vol['name'], 'dest': dest_vol['name']})
|
'src': src_vol['name'], 'dest': dest_vol['name']})
|
||||||
|
|
||||||
use_multipath = self.configuration.use_multipath_for_image_xfer
|
use_multipath = self.configuration.use_multipath_for_image_xfer
|
||||||
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
|
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
|
||||||
|
@ -499,8 +499,8 @@ class BaseVD(object):
|
||||||
remote=dest_remote)
|
remote=dest_remote)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
msg = _("Failed to attach volume %(vol)s")
|
LOG.error(_LE("Failed to attach volume %(vol)s"),
|
||||||
LOG.error(msg % {'vol': dest_vol['id']})
|
{'vol': dest_vol['id']})
|
||||||
self.db.volume_update(context, dest_vol['id'],
|
self.db.volume_update(context, dest_vol['id'],
|
||||||
{'status': dest_orig_status})
|
{'status': dest_orig_status})
|
||||||
|
|
||||||
|
@ -513,8 +513,8 @@ class BaseVD(object):
|
||||||
remote=src_remote)
|
remote=src_remote)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
msg = _("Failed to attach volume %(vol)s")
|
LOG.error(_LE("Failed to attach volume %(vol)s"),
|
||||||
LOG.error(msg % {'vol': src_vol['id']})
|
{'vol': src_vol['id']})
|
||||||
self.db.volume_update(context, src_vol['id'],
|
self.db.volume_update(context, src_vol['id'],
|
||||||
{'status': src_orig_status})
|
{'status': src_orig_status})
|
||||||
self._detach_volume(context, dest_attach_info, dest_vol,
|
self._detach_volume(context, dest_attach_info, dest_vol,
|
||||||
|
@ -532,8 +532,8 @@ class BaseVD(object):
|
||||||
copy_error = False
|
copy_error = False
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
msg = _("Failed to copy volume %(src)s to %(dest)s.")
|
LOG.error(_LE("Failed to copy volume %(src)s to %(dest)s."),
|
||||||
LOG.error(msg % {'src': src_vol['id'], 'dest': dest_vol['id']})
|
{'src': src_vol['id'], 'dest': dest_vol['id']})
|
||||||
finally:
|
finally:
|
||||||
self._detach_volume(context, dest_attach_info, dest_vol,
|
self._detach_volume(context, dest_attach_info, dest_vol,
|
||||||
properties, force=copy_error,
|
properties, force=copy_error,
|
||||||
|
@ -544,7 +544,7 @@ class BaseVD(object):
|
||||||
|
|
||||||
def copy_image_to_volume(self, context, volume, image_service, image_id):
|
def copy_image_to_volume(self, context, volume, image_service, image_id):
|
||||||
"""Fetch the image from image_service and write it to the volume."""
|
"""Fetch the image from image_service and write it to the volume."""
|
||||||
LOG.debug(('copy_image_to_volume %s.') % volume['name'])
|
LOG.debug('copy_image_to_volume %s.', volume['name'])
|
||||||
|
|
||||||
use_multipath = self.configuration.use_multipath_for_image_xfer
|
use_multipath = self.configuration.use_multipath_for_image_xfer
|
||||||
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
|
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
|
||||||
|
@ -564,7 +564,7 @@ class BaseVD(object):
|
||||||
|
|
||||||
def copy_volume_to_image(self, context, volume, image_service, image_meta):
|
def copy_volume_to_image(self, context, volume, image_service, image_meta):
|
||||||
"""Copy the volume to the specified image."""
|
"""Copy the volume to the specified image."""
|
||||||
LOG.debug(('copy_volume_to_image %s.') % volume['name'])
|
LOG.debug('copy_volume_to_image %s.', volume['name'])
|
||||||
|
|
||||||
use_multipath = self.configuration.use_multipath_for_image_xfer
|
use_multipath = self.configuration.use_multipath_for_image_xfer
|
||||||
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
|
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
|
||||||
|
@ -663,7 +663,7 @@ class BaseVD(object):
|
||||||
# clean this up in the future.
|
# clean this up in the future.
|
||||||
model_update = None
|
model_update = None
|
||||||
try:
|
try:
|
||||||
LOG.debug(("Volume %s: creating export"), volume['id'])
|
LOG.debug("Volume %s: creating export", volume['id'])
|
||||||
model_update = self.create_export(context, volume)
|
model_update = self.create_export(context, volume)
|
||||||
if model_update:
|
if model_update:
|
||||||
volume = self.db.volume_update(context, volume['id'],
|
volume = self.db.volume_update(context, volume['id'],
|
||||||
|
@ -672,7 +672,7 @@ class BaseVD(object):
|
||||||
if model_update:
|
if model_update:
|
||||||
LOG.exception(_LE("Failed updating model of volume "
|
LOG.exception(_LE("Failed updating model of volume "
|
||||||
"%(volume_id)s with driver provided "
|
"%(volume_id)s with driver provided "
|
||||||
"model %(model)s") %
|
"model %(model)s"),
|
||||||
{'volume_id': volume['id'],
|
{'volume_id': volume['id'],
|
||||||
'model': model_update})
|
'model': model_update})
|
||||||
raise exception.ExportFailure(reason=ex)
|
raise exception.ExportFailure(reason=ex)
|
||||||
|
@ -682,13 +682,15 @@ class BaseVD(object):
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
try:
|
try:
|
||||||
err_msg = (_('Unable to fetch connection information from '
|
err_msg = (_('Unable to fetch connection information from '
|
||||||
'backend: %(err)s') % {'err': err})
|
'backend: %(err)s') %
|
||||||
|
{'err': six.text_type(err)})
|
||||||
LOG.error(err_msg)
|
LOG.error(err_msg)
|
||||||
LOG.debug("Cleaning up failed connect initialization.")
|
LOG.debug("Cleaning up failed connect initialization.")
|
||||||
self.remove_export(context, volume)
|
self.remove_export(context, volume)
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
ex_msg = (_('Error encountered during cleanup '
|
ex_msg = (_('Error encountered during cleanup '
|
||||||
'of a failed attach: %(ex)s') % {'ex': ex})
|
'of a failed attach: %(ex)s') %
|
||||||
|
{'ex': six.text_type(ex)})
|
||||||
LOG.error(err_msg)
|
LOG.error(err_msg)
|
||||||
raise exception.VolumeBackendAPIException(data=ex_msg)
|
raise exception.VolumeBackendAPIException(data=ex_msg)
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg)
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
|
@ -728,8 +730,7 @@ class BaseVD(object):
|
||||||
"""Create a new backup from an existing volume."""
|
"""Create a new backup from an existing volume."""
|
||||||
volume = self.db.volume_get(context, backup['volume_id'])
|
volume = self.db.volume_get(context, backup['volume_id'])
|
||||||
|
|
||||||
LOG.debug(('Creating a new backup for volume %s.') %
|
LOG.debug('Creating a new backup for volume %s.', volume['name'])
|
||||||
volume['name'])
|
|
||||||
|
|
||||||
use_multipath = self.configuration.use_multipath_for_image_xfer
|
use_multipath = self.configuration.use_multipath_for_image_xfer
|
||||||
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
|
enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
|
||||||
|
@ -755,7 +756,7 @@ class BaseVD(object):
|
||||||
def restore_backup(self, context, backup, volume, backup_service):
|
def restore_backup(self, context, backup, volume, backup_service):
|
||||||
"""Restore an existing backup to a new or existing volume."""
|
"""Restore an existing backup to a new or existing volume."""
|
||||||
LOG.debug(('Restoring backup %(backup)s to '
|
LOG.debug(('Restoring backup %(backup)s to '
|
||||||
'volume %(volume)s.') %
|
'volume %(volume)s.'),
|
||||||
{'backup': backup['id'],
|
{'backup': backup['id'],
|
||||||
'volume': volume['name']})
|
'volume': volume['name']})
|
||||||
|
|
||||||
|
@ -1391,8 +1392,8 @@ class ISCSIDriver(VolumeDriver):
|
||||||
def _do_iscsi_discovery(self, volume):
|
def _do_iscsi_discovery(self, volume):
|
||||||
# TODO(justinsb): Deprecate discovery and use stored info
|
# TODO(justinsb): Deprecate discovery and use stored info
|
||||||
# NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
|
# NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
|
||||||
LOG.warn(_LW("ISCSI provider_location not "
|
LOG.warning(_LW("ISCSI provider_location not "
|
||||||
"stored, using discovery"))
|
"stored, using discovery"))
|
||||||
|
|
||||||
volume_name = volume['name']
|
volume_name = volume['name']
|
||||||
|
|
||||||
|
@ -1405,9 +1406,9 @@ class ISCSIDriver(VolumeDriver):
|
||||||
volume['host'].split('@')[0],
|
volume['host'].split('@')[0],
|
||||||
run_as_root=True)
|
run_as_root=True)
|
||||||
except processutils.ProcessExecutionError as ex:
|
except processutils.ProcessExecutionError as ex:
|
||||||
LOG.error(_LE("ISCSI discovery attempt failed for:%s") %
|
LOG.error(_LE("ISCSI discovery attempt failed for:%s"),
|
||||||
volume['host'].split('@')[0])
|
volume['host'].split('@')[0])
|
||||||
LOG.debug("Error from iscsiadm -m discovery: %s" % ex.stderr)
|
LOG.debug("Error from iscsiadm -m discovery: %s", ex.stderr)
|
||||||
return None
|
return None
|
||||||
|
|
||||||
for target in out.splitlines():
|
for target in out.splitlines():
|
||||||
|
@ -1468,7 +1469,7 @@ class ISCSIDriver(VolumeDriver):
|
||||||
(volume['name']))
|
(volume['name']))
|
||||||
raise exception.InvalidVolume(reason=msg)
|
raise exception.InvalidVolume(reason=msg)
|
||||||
|
|
||||||
LOG.debug("ISCSI Discovery: Found %s" % (location))
|
LOG.debug("ISCSI Discovery: Found %s", location)
|
||||||
properties['target_discovered'] = True
|
properties['target_discovered'] = True
|
||||||
|
|
||||||
results = location.split(" ")
|
results = location.split(" ")
|
||||||
|
@ -1524,8 +1525,8 @@ class ISCSIDriver(VolumeDriver):
|
||||||
'-p', iscsi_properties['target_portal'],
|
'-p', iscsi_properties['target_portal'],
|
||||||
*iscsi_command, run_as_root=True,
|
*iscsi_command, run_as_root=True,
|
||||||
check_exit_code=check_exit_code)
|
check_exit_code=check_exit_code)
|
||||||
LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
|
LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s",
|
||||||
(iscsi_command, out, err))
|
{'command': iscsi_command, 'out': out, 'err': err})
|
||||||
return (out, err)
|
return (out, err)
|
||||||
|
|
||||||
def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
|
def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
|
||||||
|
@ -1534,8 +1535,8 @@ class ISCSIDriver(VolumeDriver):
|
||||||
*iscsi_command,
|
*iscsi_command,
|
||||||
run_as_root=True,
|
run_as_root=True,
|
||||||
check_exit_code=check_exit_code)
|
check_exit_code=check_exit_code)
|
||||||
LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
|
LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s",
|
||||||
(iscsi_command, out, err))
|
{'command': iscsi_command, 'out': out, 'err': err})
|
||||||
return (out, err)
|
return (out, err)
|
||||||
|
|
||||||
def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
|
def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
|
||||||
|
@ -1597,9 +1598,8 @@ class ISCSIDriver(VolumeDriver):
|
||||||
# iSCSI drivers require the initiator information
|
# iSCSI drivers require the initiator information
|
||||||
required = 'initiator'
|
required = 'initiator'
|
||||||
if required not in connector:
|
if required not in connector:
|
||||||
err_msg = (_LE('The volume driver requires %(data)s '
|
LOG.error(_LE('The volume driver requires %(data)s '
|
||||||
'in the connector.'), {'data': required})
|
'in the connector.'), {'data': required})
|
||||||
LOG.error(*err_msg)
|
|
||||||
raise exception.InvalidConnectorException(missing=required)
|
raise exception.InvalidConnectorException(missing=required)
|
||||||
|
|
||||||
def terminate_connection(self, volume, connector, **kwargs):
|
def terminate_connection(self, volume, connector, **kwargs):
|
||||||
|
@ -1847,11 +1847,10 @@ class FibreChannelDriver(VolumeDriver):
|
||||||
def validate_connector_has_setting(connector, setting):
|
def validate_connector_has_setting(connector, setting):
|
||||||
"""Test for non-empty setting in connector."""
|
"""Test for non-empty setting in connector."""
|
||||||
if setting not in connector or not connector[setting]:
|
if setting not in connector or not connector[setting]:
|
||||||
msg = (_LE(
|
LOG.error(_LE(
|
||||||
"FibreChannelDriver validate_connector failed. "
|
"FibreChannelDriver validate_connector failed. "
|
||||||
"No '%(setting)s'. Make sure HBA state is Online."),
|
"No '%(setting)s'. Make sure HBA state is Online."),
|
||||||
{'setting': setting})
|
{'setting': setting})
|
||||||
LOG.error(*msg)
|
|
||||||
raise exception.InvalidConnectorException(missing=setting)
|
raise exception.InvalidConnectorException(missing=setting)
|
||||||
|
|
||||||
def get_volume_stats(self, refresh=False):
|
def get_volume_stats(self, refresh=False):
|
||||||
|
|
|
@ -61,7 +61,8 @@ class BlockDeviceDriver(driver.VolumeDriver):
|
||||||
|
|
||||||
def create_volume(self, volume):
|
def create_volume(self, volume):
|
||||||
device = self.find_appropriate_size_device(volume['size'])
|
device = self.find_appropriate_size_device(volume['size'])
|
||||||
LOG.info("Create %s on %s" % (volume['name'], device))
|
LOG.info(_LI("Create %(volume)s on %(device)s"),
|
||||||
|
{"volume": volume['name'], "device": device})
|
||||||
return {
|
return {
|
||||||
'provider_location': device,
|
'provider_location': device,
|
||||||
}
|
}
|
||||||
|
@ -103,7 +104,7 @@ class BlockDeviceDriver(driver.VolumeDriver):
|
||||||
self.local_path(volume))
|
self.local_path(volume))
|
||||||
|
|
||||||
def create_cloned_volume(self, volume, src_vref):
|
def create_cloned_volume(self, volume, src_vref):
|
||||||
LOG.info(_LI('Creating clone of volume: %s') % src_vref['id'])
|
LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
|
||||||
device = self.find_appropriate_size_device(src_vref['size'])
|
device = self.find_appropriate_size_device(src_vref['size'])
|
||||||
volutils.copy_volume(
|
volutils.copy_volume(
|
||||||
self.local_path(src_vref), device,
|
self.local_path(src_vref), device,
|
||||||
|
|
|
@ -20,9 +20,10 @@ from oslo_log import log as logging
|
||||||
from oslo_utils import excutils
|
from oslo_utils import excutils
|
||||||
from oslo_utils import units
|
from oslo_utils import units
|
||||||
import requests
|
import requests
|
||||||
|
import six
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _, _LE, _LW
|
from cinder.i18n import _, _LE, _LI, _LW
|
||||||
from cinder.openstack.common import versionutils
|
from cinder.openstack.common import versionutils
|
||||||
from cinder import utils
|
from cinder import utils
|
||||||
from cinder.volume.drivers.san import san
|
from cinder.volume.drivers.san import san
|
||||||
|
@ -160,9 +161,9 @@ class DateraDriver(san.SanISCSIDriver):
|
||||||
try:
|
try:
|
||||||
self._issue_api_request('volumes', 'delete', volume['id'])
|
self._issue_api_request('volumes', 'delete', volume['id'])
|
||||||
except exception.NotFound:
|
except exception.NotFound:
|
||||||
msg = _("Tried to delete volume %s, but it was not found in the "
|
LOG.info(_LI("Tried to delete volume %s, but it was not found in "
|
||||||
"Datera cluster. Continuing with delete.")
|
"the Datera cluster. Continuing with delete."),
|
||||||
LOG.info(msg, volume['id'])
|
volume['id'])
|
||||||
|
|
||||||
def _do_export(self, context, volume):
|
def _do_export(self, context, volume):
|
||||||
"""Gets the associated account, retrieves CHAP info and updates."""
|
"""Gets the associated account, retrieves CHAP info and updates."""
|
||||||
|
@ -203,18 +204,17 @@ class DateraDriver(san.SanISCSIDriver):
|
||||||
self._issue_api_request('volumes', 'delete', resource=volume['id'],
|
self._issue_api_request('volumes', 'delete', resource=volume['id'],
|
||||||
action='export')
|
action='export')
|
||||||
except exception.NotFound:
|
except exception.NotFound:
|
||||||
msg = _("Tried to delete export for volume %s, but it was not "
|
LOG.info(_LI("Tried to delete export for volume %s, but it was "
|
||||||
"found in the Datera cluster. Continuing with volume "
|
"not found in the Datera cluster. Continuing with "
|
||||||
"detach")
|
"volume detach"), volume['id'])
|
||||||
LOG.info(msg, volume['id'])
|
|
||||||
|
|
||||||
def delete_snapshot(self, snapshot):
|
def delete_snapshot(self, snapshot):
|
||||||
try:
|
try:
|
||||||
self._issue_api_request('snapshots', 'delete', snapshot['id'])
|
self._issue_api_request('snapshots', 'delete', snapshot['id'])
|
||||||
except exception.NotFound:
|
except exception.NotFound:
|
||||||
msg = _("Tried to delete snapshot %s, but was not found in Datera "
|
LOG.info(_LI("Tried to delete snapshot %s, but was not found in "
|
||||||
"cluster. Continuing with delete.")
|
"Datera cluster. Continuing with delete."),
|
||||||
LOG.info(msg, snapshot['id'])
|
snapshot['id'])
|
||||||
|
|
||||||
def create_snapshot(self, snapshot):
|
def create_snapshot(self, snapshot):
|
||||||
body = {
|
body = {
|
||||||
|
@ -244,7 +244,8 @@ class DateraDriver(san.SanISCSIDriver):
|
||||||
try:
|
try:
|
||||||
self._update_cluster_stats()
|
self._update_cluster_stats()
|
||||||
except exception.DateraAPIException:
|
except exception.DateraAPIException:
|
||||||
LOG.error('Failed to get updated stats from Datera cluster.')
|
LOG.error(_LE('Failed to get updated stats from Datera '
|
||||||
|
'cluster.'))
|
||||||
pass
|
pass
|
||||||
|
|
||||||
return self.cluster_stats
|
return self.cluster_stats
|
||||||
|
@ -360,7 +361,7 @@ class DateraDriver(san.SanISCSIDriver):
|
||||||
verify=False, cert=cert_data)
|
verify=False, cert=cert_data)
|
||||||
except requests.exceptions.RequestException as ex:
|
except requests.exceptions.RequestException as ex:
|
||||||
msg = _('Failed to make a request to Datera cluster endpoint due '
|
msg = _('Failed to make a request to Datera cluster endpoint due '
|
||||||
'to the following reason: %s') % ex.message
|
'to the following reason: %s') % six.text_type(ex.message)
|
||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
raise exception.DateraAPIException(msg)
|
raise exception.DateraAPIException(msg)
|
||||||
|
|
||||||
|
|
|
@ -166,8 +166,8 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
|
||||||
conn_info = self.cli.initialize_connection(volume,
|
conn_info = self.cli.initialize_connection(volume,
|
||||||
connector)
|
connector)
|
||||||
LOG.debug("Exit initialize_connection"
|
LOG.debug("Exit initialize_connection"
|
||||||
" - Returning FC connection info: %(conn_info)s."
|
" - Returning FC connection info: %(conn_info)s.",
|
||||||
% {'conn_info': conn_info})
|
{'conn_info': conn_info})
|
||||||
return conn_info
|
return conn_info
|
||||||
|
|
||||||
@zm_utils.RemoveFCZone
|
@zm_utils.RemoveFCZone
|
||||||
|
@ -175,8 +175,8 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
|
||||||
"""Disallow connection from connector."""
|
"""Disallow connection from connector."""
|
||||||
conn_info = self.cli.terminate_connection(volume, connector)
|
conn_info = self.cli.terminate_connection(volume, connector)
|
||||||
LOG.debug("Exit terminate_connection"
|
LOG.debug("Exit terminate_connection"
|
||||||
" - Returning FC connection info: %(conn_info)s."
|
" - Returning FC connection info: %(conn_info)s.",
|
||||||
% {'conn_info': conn_info})
|
{'conn_info': conn_info})
|
||||||
return conn_info
|
return conn_info
|
||||||
|
|
||||||
def get_volume_stats(self, refresh=False):
|
def get_volume_stats(self, refresh=False):
|
||||||
|
@ -211,7 +211,7 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
|
||||||
'id':lun_id
|
'id':lun_id
|
||||||
}
|
}
|
||||||
"""
|
"""
|
||||||
LOG.debug("Reference lun id %s." % existing_ref['id'])
|
LOG.debug("Reference lun id %s.", existing_ref['id'])
|
||||||
self.cli.manage_existing(volume, existing_ref)
|
self.cli.manage_existing(volume, existing_ref)
|
||||||
|
|
||||||
def manage_existing_get_size(self, volume, existing_ref):
|
def manage_existing_get_size(self, volume, existing_ref):
|
||||||
|
|
|
@ -190,7 +190,7 @@ class EMCCLIISCSIDriver(driver.ISCSIDriver):
|
||||||
'id':lun_id
|
'id':lun_id
|
||||||
}
|
}
|
||||||
"""
|
"""
|
||||||
LOG.debug("Reference lun id %s." % existing_ref['id'])
|
LOG.debug("Reference lun id %s.", existing_ref['id'])
|
||||||
self.cli.manage_existing(volume, existing_ref)
|
self.cli.manage_existing(volume, existing_ref)
|
||||||
|
|
||||||
def manage_existing_get_size(self, volume, existing_ref):
|
def manage_existing_get_size(self, volume, existing_ref):
|
||||||
|
|
|
@ -334,7 +334,7 @@ class EMCVMAXCommon(object):
|
||||||
device_number = device_info['hostlunid']
|
device_number = device_info['hostlunid']
|
||||||
if device_number is None:
|
if device_number is None:
|
||||||
LOG.info(_LI("Volume %s is not mapped. No volume to unmap."),
|
LOG.info(_LI("Volume %s is not mapped. No volume to unmap."),
|
||||||
(volumename))
|
volumename)
|
||||||
return
|
return
|
||||||
|
|
||||||
vol_instance = self._find_lun(volume)
|
vol_instance = self._find_lun(volume)
|
||||||
|
@ -444,7 +444,7 @@ class EMCVMAXCommon(object):
|
||||||
(self.masking
|
(self.masking
|
||||||
._check_if_rollback_action_for_masking_required(
|
._check_if_rollback_action_for_masking_required(
|
||||||
self.conn, rollbackDict))
|
self.conn, rollbackDict))
|
||||||
exception_message = ("Error Attaching volume %(vol)s."
|
exception_message = (_("Error Attaching volume %(vol)s.")
|
||||||
% {'vol': volumeName})
|
% {'vol': volumeName})
|
||||||
raise exception.VolumeBackendAPIException(
|
raise exception.VolumeBackendAPIException(
|
||||||
data=exception_message)
|
data=exception_message)
|
||||||
|
@ -673,12 +673,12 @@ class EMCVMAXCommon(object):
|
||||||
:returns: boolean -- Always returns True
|
:returns: boolean -- Always returns True
|
||||||
:returns: dict -- Empty dict {}
|
:returns: dict -- Empty dict {}
|
||||||
"""
|
"""
|
||||||
LOG.warn(_LW("The VMAX plugin only supports Retype. "
|
LOG.warning(_LW("The VMAX plugin only supports Retype. "
|
||||||
"If a pool based migration is necessary "
|
"If a pool based migration is necessary "
|
||||||
"this will happen on a Retype "
|
"this will happen on a Retype "
|
||||||
"From the command line: "
|
"From the command line: "
|
||||||
"cinder --os-volume-api-version 2 retype "
|
"cinder --os-volume-api-version 2 retype <volumeId> "
|
||||||
"<volumeId> <volumeType> --migration-policy on-demand"))
|
"<volumeType> --migration-policy on-demand"))
|
||||||
return True, {}
|
return True, {}
|
||||||
|
|
||||||
def _migrate_volume(
|
def _migrate_volume(
|
||||||
|
@ -710,7 +710,7 @@ class EMCVMAXCommon(object):
|
||||||
if moved is False and sourceFastPolicyName is not None:
|
if moved is False and sourceFastPolicyName is not None:
|
||||||
# Return the volume to the default source fast policy storage
|
# Return the volume to the default source fast policy storage
|
||||||
# group because the migrate was unsuccessful.
|
# group because the migrate was unsuccessful.
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
"Failed to migrate: %(volumeName)s from "
|
"Failed to migrate: %(volumeName)s from "
|
||||||
"default source storage group "
|
"default source storage group "
|
||||||
"for FAST policy: %(sourceFastPolicyName)s. "
|
"for FAST policy: %(sourceFastPolicyName)s. "
|
||||||
|
@ -738,7 +738,7 @@ class EMCVMAXCommon(object):
|
||||||
if not self._migrate_volume_fast_target(
|
if not self._migrate_volume_fast_target(
|
||||||
volumeInstance, storageSystemName,
|
volumeInstance, storageSystemName,
|
||||||
targetFastPolicyName, volumeName, extraSpecs):
|
targetFastPolicyName, volumeName, extraSpecs):
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
"Attempting a rollback of: %(volumeName)s to "
|
"Attempting a rollback of: %(volumeName)s to "
|
||||||
"original pool %(sourcePoolInstanceName)s."),
|
"original pool %(sourcePoolInstanceName)s."),
|
||||||
{'volumeName': volumeName,
|
{'volumeName': volumeName,
|
||||||
|
@ -770,8 +770,8 @@ class EMCVMAXCommon(object):
|
||||||
:param extraSpecs: extra specifications
|
:param extraSpecs: extra specifications
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warn(_LW("_migrate_rollback on : %(volumeName)s."),
|
LOG.warning(_LW("_migrate_rollback on : %(volumeName)s."),
|
||||||
{'volumeName': volumeName})
|
{'volumeName': volumeName})
|
||||||
|
|
||||||
storageRelocationService = self.utils.find_storage_relocation_service(
|
storageRelocationService = self.utils.find_storage_relocation_service(
|
||||||
conn, storageSystemName)
|
conn, storageSystemName)
|
||||||
|
@ -805,8 +805,8 @@ class EMCVMAXCommon(object):
|
||||||
:param extraSpecs: extra specifications
|
:param extraSpecs: extra specifications
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.warn(_LW("_migrate_cleanup on : %(volumeName)s."),
|
LOG.warning(_LW("_migrate_cleanup on : %(volumeName)s."),
|
||||||
{'volumeName': volumeName})
|
{'volumeName': volumeName})
|
||||||
|
|
||||||
controllerConfigurationService = (
|
controllerConfigurationService = (
|
||||||
self.utils.find_controller_configuration_service(
|
self.utils.find_controller_configuration_service(
|
||||||
|
@ -934,11 +934,10 @@ class EMCVMAXCommon(object):
|
||||||
rc = self.provision.migrate_volume_to_storage_pool(
|
rc = self.provision.migrate_volume_to_storage_pool(
|
||||||
self.conn, storageRelocationService, volumeInstance.path,
|
self.conn, storageRelocationService, volumeInstance.path,
|
||||||
targetPoolInstanceName, extraSpecs)
|
targetPoolInstanceName, extraSpecs)
|
||||||
except Exception as e:
|
except Exception:
|
||||||
# Rollback by deleting the volume if adding the volume to the
|
# Rollback by deleting the volume if adding the volume to the
|
||||||
# default storage group were to fail.
|
# default storage group were to fail.
|
||||||
LOG.error(_LE("Exception: %s"), e)
|
LOG.exception(_LE(
|
||||||
LOG.error(_LE(
|
|
||||||
"Error migrating volume: %(volumename)s. "
|
"Error migrating volume: %(volumename)s. "
|
||||||
"to target pool %(targetPoolName)s."),
|
"to target pool %(targetPoolName)s."),
|
||||||
{'volumename': volumeName,
|
{'volumename': volumeName,
|
||||||
|
@ -993,8 +992,7 @@ class EMCVMAXCommon(object):
|
||||||
conn, controllerConfigurationService,
|
conn, controllerConfigurationService,
|
||||||
volumeInstance.path, volumeName, sourceFastPolicyName,
|
volumeInstance.path, volumeName, sourceFastPolicyName,
|
||||||
extraSpecs))
|
extraSpecs))
|
||||||
except Exception as ex:
|
except Exception:
|
||||||
LOG.error(_LE("Exception: %s"), ex)
|
|
||||||
exceptionMessage = (_(
|
exceptionMessage = (_(
|
||||||
"Failed to remove: %(volumename)s. "
|
"Failed to remove: %(volumename)s. "
|
||||||
"from the default storage group for "
|
"from the default storage group for "
|
||||||
|
@ -1002,11 +1000,11 @@ class EMCVMAXCommon(object):
|
||||||
% {'volumename': volumeName,
|
% {'volumename': volumeName,
|
||||||
'fastPolicyName': sourceFastPolicyName})
|
'fastPolicyName': sourceFastPolicyName})
|
||||||
|
|
||||||
LOG.error(exceptionMessage)
|
LOG.exception(exceptionMessage)
|
||||||
raise exception.VolumeBackendAPIException(data=exceptionMessage)
|
raise exception.VolumeBackendAPIException(data=exceptionMessage)
|
||||||
|
|
||||||
if defaultStorageGroupInstanceName is None:
|
if defaultStorageGroupInstanceName is None:
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
"The volume: %(volumename)s "
|
"The volume: %(volumename)s "
|
||||||
"was not first part of the default storage "
|
"was not first part of the default storage "
|
||||||
"group for FAST policy %(fastPolicyName)s."),
|
"group for FAST policy %(fastPolicyName)s."),
|
||||||
|
@ -1098,7 +1096,7 @@ class EMCVMAXCommon(object):
|
||||||
self.utils.get_storage_group_from_volume(
|
self.utils.get_storage_group_from_volume(
|
||||||
self.conn, volumeInstanceName))
|
self.conn, volumeInstanceName))
|
||||||
if foundStorageGroupInstanceName is None:
|
if foundStorageGroupInstanceName is None:
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
"Volume: %(volumeName)s is not currently "
|
"Volume: %(volumeName)s is not currently "
|
||||||
"belonging to any storage group."),
|
"belonging to any storage group."),
|
||||||
{'volumeName': volumeName})
|
{'volumeName': volumeName})
|
||||||
|
@ -1484,13 +1482,12 @@ class EMCVMAXCommon(object):
|
||||||
_rc, targetEndpoints = (
|
_rc, targetEndpoints = (
|
||||||
self.provision.get_target_endpoints(
|
self.provision.get_target_endpoints(
|
||||||
self.conn, storageHardwareService, hardwareIdInstance))
|
self.conn, storageHardwareService, hardwareIdInstance))
|
||||||
except Exception as ex:
|
except Exception:
|
||||||
LOG.error(_LE("Exception: %s"), ex)
|
|
||||||
errorMessage = (_(
|
errorMessage = (_(
|
||||||
"Unable to get target endpoints for hardwareId "
|
"Unable to get target endpoints for hardwareId "
|
||||||
"%(hardwareIdInstance)s.")
|
"%(hardwareIdInstance)s.")
|
||||||
% {'hardwareIdInstance': hardwareIdInstance})
|
% {'hardwareIdInstance': hardwareIdInstance})
|
||||||
LOG.error(errorMessage)
|
LOG.exception(errorMessage)
|
||||||
raise exception.VolumeBackendAPIException(data=errorMessage)
|
raise exception.VolumeBackendAPIException(data=errorMessage)
|
||||||
|
|
||||||
if targetEndpoints:
|
if targetEndpoints:
|
||||||
|
@ -1795,14 +1792,13 @@ class EMCVMAXCommon(object):
|
||||||
LOG.error(exceptionMessage)
|
LOG.error(exceptionMessage)
|
||||||
raise exception.VolumeBackendAPIException(
|
raise exception.VolumeBackendAPIException(
|
||||||
data=exceptionMessage)
|
data=exceptionMessage)
|
||||||
except Exception as e:
|
except Exception:
|
||||||
# Rollback by deleting the volume if adding the volume to the
|
# Rollback by deleting the volume if adding the volume to the
|
||||||
# default storage group were to fail.
|
# default storage group were to fail.
|
||||||
LOG.error(_LE("Exception: %s"), e)
|
|
||||||
errorMessage = (_(
|
errorMessage = (_(
|
||||||
"Rolling back %(volumeName)s by deleting it.")
|
"Rolling back %(volumeName)s by deleting it.")
|
||||||
% {'volumeName': volumeName})
|
% {'volumeName': volumeName})
|
||||||
LOG.error(errorMessage)
|
LOG.exception(errorMessage)
|
||||||
self.provision.delete_volume_from_pool(
|
self.provision.delete_volume_from_pool(
|
||||||
self.conn, storageConfigService, volumeInstance.path,
|
self.conn, storageConfigService, volumeInstance.path,
|
||||||
volumeName, extraSpecs)
|
volumeName, extraSpecs)
|
||||||
|
@ -2126,7 +2122,7 @@ class EMCVMAXCommon(object):
|
||||||
self.masking.get_associated_masking_groups_from_device(
|
self.masking.get_associated_masking_groups_from_device(
|
||||||
self.conn, volumeInstanceName))
|
self.conn, volumeInstanceName))
|
||||||
if storageGroupInstanceNames:
|
if storageGroupInstanceNames:
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
"Pre check for deletion. "
|
"Pre check for deletion. "
|
||||||
"Volume: %(volumeName)s is part of a storage group. "
|
"Volume: %(volumeName)s is part of a storage group. "
|
||||||
"Attempting removal from %(storageGroupInstanceNames)s."),
|
"Attempting removal from %(storageGroupInstanceNames)s."),
|
||||||
|
@ -2289,10 +2285,9 @@ class EMCVMAXCommon(object):
|
||||||
repservice = self.utils.find_replication_service(self.conn,
|
repservice = self.utils.find_replication_service(self.conn,
|
||||||
storageSystem)
|
storageSystem)
|
||||||
if repservice is None:
|
if repservice is None:
|
||||||
exception_message = (_LE(
|
exception_message = _(
|
||||||
"Cannot find Replication Service to"
|
"Cannot find Replication Service to"
|
||||||
" delete snapshot %s.") %
|
" delete snapshot %s.") % snapshotname
|
||||||
snapshotname)
|
|
||||||
raise exception.VolumeBackendAPIException(
|
raise exception.VolumeBackendAPIException(
|
||||||
data=exception_message)
|
data=exception_message)
|
||||||
# Break the replication relationship
|
# Break the replication relationship
|
||||||
|
@ -2339,12 +2334,11 @@ class EMCVMAXCommon(object):
|
||||||
self.conn, storageSystem)
|
self.conn, storageSystem)
|
||||||
self.provision.create_consistency_group(
|
self.provision.create_consistency_group(
|
||||||
self.conn, replicationService, cgName, extraSpecs)
|
self.conn, replicationService, cgName, extraSpecs)
|
||||||
except Exception as ex:
|
except Exception:
|
||||||
LOG.error(_LE("Exception: %(ex)s"), {'ex': ex})
|
|
||||||
exceptionMessage = (_("Failed to create consistency group:"
|
exceptionMessage = (_("Failed to create consistency group:"
|
||||||
" %(cgName)s.")
|
" %(cgName)s.")
|
||||||
% {'cgName': cgName})
|
% {'cgName': cgName})
|
||||||
LOG.error(exceptionMessage)
|
LOG.exception(exceptionMessage)
|
||||||
raise exception.VolumeBackendAPIException(data=exceptionMessage)
|
raise exception.VolumeBackendAPIException(data=exceptionMessage)
|
||||||
|
|
||||||
return modelUpdate
|
return modelUpdate
|
||||||
|
@ -2402,12 +2396,11 @@ class EMCVMAXCommon(object):
|
||||||
storageSystem, memberInstanceNames, storageConfigservice,
|
storageSystem, memberInstanceNames, storageConfigservice,
|
||||||
volumes, modelUpdate, extraSpecs[ISV3], extraSpecs)
|
volumes, modelUpdate, extraSpecs[ISV3], extraSpecs)
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception:
|
||||||
LOG.error(_LE("Exception: %s"), ex)
|
|
||||||
exceptionMessage = (_(
|
exceptionMessage = (_(
|
||||||
"Failed to delete consistency group: %(cgName)s.")
|
"Failed to delete consistency group: %(cgName)s.")
|
||||||
% {'cgName': cgName})
|
% {'cgName': cgName})
|
||||||
LOG.error(exceptionMessage)
|
LOG.exception(exceptionMessage)
|
||||||
raise exception.VolumeBackendAPIException(data=exceptionMessage)
|
raise exception.VolumeBackendAPIException(data=exceptionMessage)
|
||||||
|
|
||||||
return modelUpdate, volumes
|
return modelUpdate, volumes
|
||||||
|
@ -2574,15 +2567,14 @@ class EMCVMAXCommon(object):
|
||||||
rgSyncInstanceName,
|
rgSyncInstanceName,
|
||||||
extraSpecs)
|
extraSpecs)
|
||||||
|
|
||||||
except Exception as ex:
|
except Exception:
|
||||||
modelUpdate['status'] = 'error'
|
modelUpdate['status'] = 'error'
|
||||||
self.utils.populate_cgsnapshot_status(
|
self.utils.populate_cgsnapshot_status(
|
||||||
context, db, cgsnapshot['id'], modelUpdate['status'])
|
context, db, cgsnapshot['id'], modelUpdate['status'])
|
||||||
LOG.error(_LE("Exception: %(ex)s"), {'ex': ex})
|
|
||||||
exceptionMessage = (_("Failed to create snapshot for cg:"
|
exceptionMessage = (_("Failed to create snapshot for cg:"
|
||||||
" %(cgName)s.")
|
" %(cgName)s.")
|
||||||
% {'cgName': cgName})
|
% {'cgName': cgName})
|
||||||
LOG.error(exceptionMessage)
|
LOG.exception(exceptionMessage)
|
||||||
raise exception.VolumeBackendAPIException(data=exceptionMessage)
|
raise exception.VolumeBackendAPIException(data=exceptionMessage)
|
||||||
|
|
||||||
snapshots = self.utils.populate_cgsnapshot_status(
|
snapshots = self.utils.populate_cgsnapshot_status(
|
||||||
|
@ -2623,15 +2615,14 @@ class EMCVMAXCommon(object):
|
||||||
modelUpdate, snapshots = self._delete_cg_and_members(
|
modelUpdate, snapshots = self._delete_cg_and_members(
|
||||||
storageSystem, targetCgName, modelUpdate,
|
storageSystem, targetCgName, modelUpdate,
|
||||||
snapshots, extraSpecs)
|
snapshots, extraSpecs)
|
||||||
except Exception as ex:
|
except Exception:
|
||||||
modelUpdate['status'] = 'error_deleting'
|
modelUpdate['status'] = 'error_deleting'
|
||||||
self.utils.populate_cgsnapshot_status(
|
self.utils.populate_cgsnapshot_status(
|
||||||
context, db, cgsnapshot['id'], modelUpdate['status'])
|
context, db, cgsnapshot['id'], modelUpdate['status'])
|
||||||
LOG.error(_LE("Exception: %(ex)s"), {'ex': ex})
|
|
||||||
exceptionMessage = (_("Failed to delete snapshot for cg: "
|
exceptionMessage = (_("Failed to delete snapshot for cg: "
|
||||||
"%(cgId)s.")
|
"%(cgId)s.")
|
||||||
% {'cgId': cgsnapshot['consistencygroup_id']})
|
% {'cgId': cgsnapshot['consistencygroup_id']})
|
||||||
LOG.error(exceptionMessage)
|
LOG.exception(exceptionMessage)
|
||||||
raise exception.VolumeBackendAPIException(data=exceptionMessage)
|
raise exception.VolumeBackendAPIException(data=exceptionMessage)
|
||||||
|
|
||||||
snapshots = self.utils.populate_cgsnapshot_status(
|
snapshots = self.utils.populate_cgsnapshot_status(
|
||||||
|
@ -2819,7 +2810,7 @@ class EMCVMAXCommon(object):
|
||||||
extraSpecs))
|
extraSpecs))
|
||||||
if not self.utils.is_in_range(
|
if not self.utils.is_in_range(
|
||||||
volumeSize, maximumVolumeSize, minimumVolumeSize):
|
volumeSize, maximumVolumeSize, minimumVolumeSize):
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
"Volume: %(volume)s with size: %(volumeSize)s bits "
|
"Volume: %(volume)s with size: %(volumeSize)s bits "
|
||||||
"is not in the Performance Capacity range: "
|
"is not in the Performance Capacity range: "
|
||||||
"%(minimumVolumeSize)s-%(maximumVolumeSize)s bits. "
|
"%(minimumVolumeSize)s-%(maximumVolumeSize)s bits. "
|
||||||
|
@ -3012,7 +3003,7 @@ class EMCVMAXCommon(object):
|
||||||
self.utils.get_storage_group_from_volume(
|
self.utils.get_storage_group_from_volume(
|
||||||
self.conn, volumeInstance.path))
|
self.conn, volumeInstance.path))
|
||||||
if foundStorageGroupInstanceName is None:
|
if foundStorageGroupInstanceName is None:
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
"Volume : %(volumeName)s is not currently "
|
"Volume : %(volumeName)s is not currently "
|
||||||
"belonging to any storage group."),
|
"belonging to any storage group."),
|
||||||
{'volumeName': volumeName})
|
{'volumeName': volumeName})
|
||||||
|
@ -3314,7 +3305,7 @@ class EMCVMAXCommon(object):
|
||||||
volumeInstance.path, volumeName, fastPolicyName,
|
volumeInstance.path, volumeName, fastPolicyName,
|
||||||
extraSpecs))
|
extraSpecs))
|
||||||
if defaultStorageGroupInstanceName is None:
|
if defaultStorageGroupInstanceName is None:
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
"The volume: %(volumename)s. was not first part of the "
|
"The volume: %(volumename)s. was not first part of the "
|
||||||
"default storage group for FAST policy %(fastPolicyName)s"
|
"default storage group for FAST policy %(fastPolicyName)s"
|
||||||
"."),
|
"."),
|
||||||
|
@ -3343,7 +3334,7 @@ class EMCVMAXCommon(object):
|
||||||
self.conn, storageConfigService, volumeInstance.path,
|
self.conn, storageConfigService, volumeInstance.path,
|
||||||
volumeName, extraSpecs)
|
volumeName, extraSpecs)
|
||||||
|
|
||||||
except Exception as e:
|
except Exception:
|
||||||
# If we cannot successfully delete the volume then we want to
|
# If we cannot successfully delete the volume then we want to
|
||||||
# return the volume to the default storage group.
|
# return the volume to the default storage group.
|
||||||
if (fastPolicyName is not None and
|
if (fastPolicyName is not None and
|
||||||
|
@ -3365,10 +3356,9 @@ class EMCVMAXCommon(object):
|
||||||
{'volumeName': volumeName,
|
{'volumeName': volumeName,
|
||||||
'fastPolicyName': fastPolicyName})
|
'fastPolicyName': fastPolicyName})
|
||||||
|
|
||||||
LOG.error(_LE("Exception: %s."), e)
|
|
||||||
errorMessage = (_("Failed to delete volume %(volumeName)s.") %
|
errorMessage = (_("Failed to delete volume %(volumeName)s.") %
|
||||||
{'volumeName': volumeName})
|
{'volumeName': volumeName})
|
||||||
LOG.error(errorMessage)
|
LOG.exception(errorMessage)
|
||||||
raise exception.VolumeBackendAPIException(data=errorMessage)
|
raise exception.VolumeBackendAPIException(data=errorMessage)
|
||||||
|
|
||||||
return rc
|
return rc
|
||||||
|
@ -3410,7 +3400,7 @@ class EMCVMAXCommon(object):
|
||||||
self.conn, storageConfigService, volumeInstance.path,
|
self.conn, storageConfigService, volumeInstance.path,
|
||||||
volumeName, extraSpecs)
|
volumeName, extraSpecs)
|
||||||
|
|
||||||
except Exception as e:
|
except Exception:
|
||||||
# If we cannot successfully delete the volume, then we want to
|
# If we cannot successfully delete the volume, then we want to
|
||||||
# return the volume to the default storage group,
|
# return the volume to the default storage group,
|
||||||
# which should be the SG it previously belonged to.
|
# which should be the SG it previously belonged to.
|
||||||
|
@ -3432,10 +3422,9 @@ class EMCVMAXCommon(object):
|
||||||
storageGroupInstanceName, volumeInstance, volumeName,
|
storageGroupInstanceName, volumeInstance, volumeName,
|
||||||
storageGroupName, extraSpecs)
|
storageGroupName, extraSpecs)
|
||||||
|
|
||||||
LOG.error(_LE("Exception: %s."), e)
|
|
||||||
errorMessage = (_("Failed to delete volume %(volumeName)s.") %
|
errorMessage = (_("Failed to delete volume %(volumeName)s.") %
|
||||||
{'volumeName': volumeName})
|
{'volumeName': volumeName})
|
||||||
LOG.error(errorMessage)
|
LOG.exception(errorMessage)
|
||||||
raise exception.VolumeBackendAPIException(data=errorMessage)
|
raise exception.VolumeBackendAPIException(data=errorMessage)
|
||||||
|
|
||||||
return rc
|
return rc
|
||||||
|
|
|
@ -135,7 +135,7 @@ class EMCVMAXFast(object):
|
||||||
foundDefaultStorageGroupInstanceName = (
|
foundDefaultStorageGroupInstanceName = (
|
||||||
assocStorageGroupInstanceName)
|
assocStorageGroupInstanceName)
|
||||||
else:
|
else:
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
"Volume: %(volumeName)s Does not belong "
|
"Volume: %(volumeName)s Does not belong "
|
||||||
"to storage storage group %(defaultSgGroupName)s."),
|
"to storage storage group %(defaultSgGroupName)s."),
|
||||||
{'volumeName': volumeName,
|
{'volumeName': volumeName,
|
||||||
|
@ -406,7 +406,7 @@ class EMCVMAXFast(object):
|
||||||
|
|
||||||
if len(storageTierInstanceNames) == 0:
|
if len(storageTierInstanceNames) == 0:
|
||||||
storageTierInstanceNames = None
|
storageTierInstanceNames = None
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
"Unable to get storage tiers from tier policy rule."))
|
"Unable to get storage tiers from tier policy rule."))
|
||||||
|
|
||||||
return storageTierInstanceNames
|
return storageTierInstanceNames
|
||||||
|
@ -519,9 +519,8 @@ class EMCVMAXFast(object):
|
||||||
conn, tierPolicyServiceInstanceName,
|
conn, tierPolicyServiceInstanceName,
|
||||||
storageGroupInstanceName, tierPolicyRuleInstanceName,
|
storageGroupInstanceName, tierPolicyRuleInstanceName,
|
||||||
storageGroupName, fastPolicyName, extraSpecs)
|
storageGroupName, fastPolicyName, extraSpecs)
|
||||||
except Exception as ex:
|
except Exception:
|
||||||
LOG.error(_LE("Exception: %s"), ex)
|
LOG.exception(_LE(
|
||||||
LOG.error(_LE(
|
|
||||||
"Failed to add storage group %(storageGroupInstanceName)s "
|
"Failed to add storage group %(storageGroupInstanceName)s "
|
||||||
"to tier policy rule %(tierPolicyRuleInstanceName)s."),
|
"to tier policy rule %(tierPolicyRuleInstanceName)s."),
|
||||||
{'storageGroupInstanceName': storageGroupInstanceName,
|
{'storageGroupInstanceName': storageGroupInstanceName,
|
||||||
|
|
|
@ -224,8 +224,8 @@ class EMCVMAXFCDriver(driver.FibreChannelDriver):
|
||||||
LOG.debug("Return FC data for zone removal: %(data)s.",
|
LOG.debug("Return FC data for zone removal: %(data)s.",
|
||||||
{'data': data})
|
{'data': data})
|
||||||
else:
|
else:
|
||||||
LOG.warn(_LW("Volume %(volume)s is not in any masking view."),
|
LOG.warning(_LW("Volume %(volume)s is not in any masking view."),
|
||||||
{'volume': volume['name']})
|
{'volume': volume['name']})
|
||||||
return data
|
return data
|
||||||
|
|
||||||
def _build_initiator_target_map(self, storage_system, volume, connector):
|
def _build_initiator_target_map(self, storage_system, volume, connector):
|
||||||
|
|
|
@ -151,7 +151,7 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
|
||||||
iscsi_properties = self.smis_get_iscsi_properties(
|
iscsi_properties = self.smis_get_iscsi_properties(
|
||||||
volume, connector)
|
volume, connector)
|
||||||
|
|
||||||
LOG.info(_LI("Leaving initialize_connection: %s"), (iscsi_properties))
|
LOG.info(_LI("Leaving initialize_connection: %s"), iscsi_properties)
|
||||||
return {
|
return {
|
||||||
'driver_volume_type': 'iscsi',
|
'driver_volume_type': 'iscsi',
|
||||||
'data': iscsi_properties
|
'data': iscsi_properties
|
||||||
|
@ -201,7 +201,7 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
|
||||||
" for volume %(volumeName)s.")
|
" for volume %(volumeName)s.")
|
||||||
% {'volumeName': volume['name']})
|
% {'volumeName': volume['name']})
|
||||||
|
|
||||||
LOG.debug("ISCSI Discovery: Found %s", (location))
|
LOG.debug("ISCSI Discovery: Found %s", location)
|
||||||
properties['target_discovered'] = True
|
properties['target_discovered'] = True
|
||||||
|
|
||||||
device_info = self.common.find_device_number(volume)
|
device_info = self.common.find_device_number(volume)
|
||||||
|
@ -243,7 +243,7 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
|
||||||
properties['auth_username'] = auth_username
|
properties['auth_username'] = auth_username
|
||||||
properties['auth_password'] = auth_secret
|
properties['auth_password'] = auth_secret
|
||||||
|
|
||||||
LOG.info(_LI("AUTH properties: %s."), (properties))
|
LOG.info(_LI("AUTH properties: %s."), properties)
|
||||||
|
|
||||||
return properties
|
return properties
|
||||||
|
|
||||||
|
|
|
@ -103,7 +103,7 @@ class EMCVMAXMasking(object):
|
||||||
maskingViewDict['workload'])
|
maskingViewDict['workload'])
|
||||||
|
|
||||||
if assocStorageGroupName != defaultSgGroupName:
|
if assocStorageGroupName != defaultSgGroupName:
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
"Volume: %(volumeName)s Does not belong "
|
"Volume: %(volumeName)s Does not belong "
|
||||||
"to storage storage group %(defaultSgGroupName)s."),
|
"to storage storage group %(defaultSgGroupName)s."),
|
||||||
{'volumeName': volumeName,
|
{'volumeName': volumeName,
|
||||||
|
@ -472,7 +472,7 @@ class EMCVMAXMasking(object):
|
||||||
if self._is_volume_in_storage_group(
|
if self._is_volume_in_storage_group(
|
||||||
conn, storageGroupInstanceName,
|
conn, storageGroupInstanceName,
|
||||||
volumeInstance):
|
volumeInstance):
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
"Volume: %(volumeName)s is already part "
|
"Volume: %(volumeName)s is already part "
|
||||||
"of storage group %(sgGroupName)s."),
|
"of storage group %(sgGroupName)s."),
|
||||||
{'volumeName': volumeName,
|
{'volumeName': volumeName,
|
||||||
|
@ -1049,8 +1049,8 @@ class EMCVMAXMasking(object):
|
||||||
{'view': maskingViewName,
|
{'view': maskingViewName,
|
||||||
'masking': foundStorageGroupInstanceName})
|
'masking': foundStorageGroupInstanceName})
|
||||||
else:
|
else:
|
||||||
LOG.warn(_LW("Unable to find Masking view: %(view)s."),
|
LOG.warning(_LW("Unable to find Masking view: %(view)s."),
|
||||||
{'view': maskingViewName})
|
{'view': maskingViewName})
|
||||||
|
|
||||||
return foundStorageGroupInstanceName
|
return foundStorageGroupInstanceName
|
||||||
|
|
||||||
|
@ -1212,7 +1212,7 @@ class EMCVMAXMasking(object):
|
||||||
# Volume is not associated with any storage group so add
|
# Volume is not associated with any storage group so add
|
||||||
# it back to the default.
|
# it back to the default.
|
||||||
if len(foundStorageGroupInstanceName) == 0:
|
if len(foundStorageGroupInstanceName) == 0:
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
"No storage group found. "
|
"No storage group found. "
|
||||||
"Performing rollback on Volume: %(volumeName)s "
|
"Performing rollback on Volume: %(volumeName)s "
|
||||||
"To return it to the default storage group for FAST "
|
"To return it to the default storage group for FAST "
|
||||||
|
@ -1257,8 +1257,7 @@ class EMCVMAXMasking(object):
|
||||||
rollbackDict['fastPolicyName'],
|
rollbackDict['fastPolicyName'],
|
||||||
rollbackDict['volumeName'], rollbackDict['extraSpecs'],
|
rollbackDict['volumeName'], rollbackDict['extraSpecs'],
|
||||||
False)
|
False)
|
||||||
except Exception as e:
|
except Exception:
|
||||||
LOG.error(_LE("Exception: %s."), e)
|
|
||||||
errorMessage = (_(
|
errorMessage = (_(
|
||||||
"Rollback for Volume: %(volumeName)s has failed. "
|
"Rollback for Volume: %(volumeName)s has failed. "
|
||||||
"Please contact your system administrator to manually return "
|
"Please contact your system administrator to manually return "
|
||||||
|
@ -1266,7 +1265,7 @@ class EMCVMAXMasking(object):
|
||||||
"%(fastPolicyName)s failed.")
|
"%(fastPolicyName)s failed.")
|
||||||
% {'volumeName': rollbackDict['volumeName'],
|
% {'volumeName': rollbackDict['volumeName'],
|
||||||
'fastPolicyName': rollbackDict['fastPolicyName']})
|
'fastPolicyName': rollbackDict['fastPolicyName']})
|
||||||
LOG.error(errorMessage)
|
LOG.exception(errorMessage)
|
||||||
raise exception.VolumeBackendAPIException(data=errorMessage)
|
raise exception.VolumeBackendAPIException(data=errorMessage)
|
||||||
|
|
||||||
def _find_new_initiator_group(self, conn, maskingGroupDict):
|
def _find_new_initiator_group(self, conn, maskingGroupDict):
|
||||||
|
@ -1307,8 +1306,8 @@ class EMCVMAXMasking(object):
|
||||||
{'view': maskingViewName,
|
{'view': maskingViewName,
|
||||||
'masking': foundInitiatorMaskingGroupInstanceName})
|
'masking': foundInitiatorMaskingGroupInstanceName})
|
||||||
else:
|
else:
|
||||||
LOG.warn(_LW("Unable to find Masking view: %(view)s."),
|
LOG.warning(_LW("Unable to find Masking view: %(view)s."),
|
||||||
{'view': maskingViewName})
|
{'view': maskingViewName})
|
||||||
|
|
||||||
return foundInitiatorMaskingGroupInstanceName
|
return foundInitiatorMaskingGroupInstanceName
|
||||||
|
|
||||||
|
@ -1582,7 +1581,7 @@ class EMCVMAXMasking(object):
|
||||||
volumeName, fastPolicyName))
|
volumeName, fastPolicyName))
|
||||||
|
|
||||||
if defaultStorageGroupInstanceName is None:
|
if defaultStorageGroupInstanceName is None:
|
||||||
LOG.warn(_LW(
|
LOG.warning(_LW(
|
||||||
"Volume %(volumeName)s was not first part of the default "
|
"Volume %(volumeName)s was not first part of the default "
|
||||||
"storage group for the FAST Policy."),
|
"storage group for the FAST Policy."),
|
||||||
{'volumeName': volumeName})
|
{'volumeName': volumeName})
|
||||||
|
@ -1733,15 +1732,15 @@ class EMCVMAXMasking(object):
|
||||||
|
|
||||||
if numVolInMaskingView == 1:
|
if numVolInMaskingView == 1:
|
||||||
# Last volume in the storage group.
|
# Last volume in the storage group.
|
||||||
LOG.warn(_LW("Only one volume remains in storage group "
|
LOG.warning(_LW("Only one volume remains in storage group "
|
||||||
"%(sgname)s. Driver will attempt cleanup."),
|
"%(sgname)s. Driver will attempt cleanup."),
|
||||||
{'sgname': storageGroupName})
|
{'sgname': storageGroupName})
|
||||||
mvInstanceName = self.get_masking_view_from_storage_group(
|
mvInstanceName = self.get_masking_view_from_storage_group(
|
||||||
conn, storageGroupInstanceName)
|
conn, storageGroupInstanceName)
|
||||||
if mvInstanceName is None:
|
if mvInstanceName is None:
|
||||||
LOG.warn(_LW("Unable to get masking view %(maskingView)s "
|
LOG.warning(_LW("Unable to get masking view %(maskingView)s "
|
||||||
"from storage group."),
|
"from storage group."),
|
||||||
{'maskingView': mvInstanceName})
|
{'maskingView': mvInstanceName})
|
||||||
else:
|
else:
|
||||||
maskingViewInstance = conn.GetInstance(
|
maskingViewInstance = conn.GetInstance(
|
||||||
mvInstanceName, LocalOnly=False)
|
mvInstanceName, LocalOnly=False)
|
||||||
|
@ -2053,10 +2052,10 @@ class EMCVMAXMasking(object):
|
||||||
ResultClass='Symm_FCSCSIProtocolEndpoint')
|
ResultClass='Symm_FCSCSIProtocolEndpoint')
|
||||||
numberOfPorts = len(targetPortInstanceNames)
|
numberOfPorts = len(targetPortInstanceNames)
|
||||||
if numberOfPorts <= 0:
|
if numberOfPorts <= 0:
|
||||||
LOG.warn(_LW("No target ports found in "
|
LOG.warning(_LW("No target ports found in "
|
||||||
"masking view %(maskingView)s."),
|
"masking view %(maskingView)s."),
|
||||||
{'numPorts': len(targetPortInstanceNames),
|
{'numPorts': len(targetPortInstanceNames),
|
||||||
'maskingView': mvInstanceName})
|
'maskingView': mvInstanceName})
|
||||||
for targetPortInstanceName in targetPortInstanceNames:
|
for targetPortInstanceName in targetPortInstanceNames:
|
||||||
targetWwns.append(targetPortInstanceName['Name'])
|
targetWwns.append(targetPortInstanceName['Name'])
|
||||||
return targetWwns
|
return targetWwns
|
||||||
|
@ -2107,8 +2106,8 @@ class EMCVMAXMasking(object):
|
||||||
'mv': maskingViewInstanceName})
|
'mv': maskingViewInstanceName})
|
||||||
return portGroupInstanceNames[0]
|
return portGroupInstanceNames[0]
|
||||||
else:
|
else:
|
||||||
LOG.warn(_LW("No port group found in masking view %(mv)s."),
|
LOG.warning(_LW("No port group found in masking view %(mv)s."),
|
||||||
{'mv': maskingViewInstanceName})
|
{'mv': maskingViewInstanceName})
|
||||||
|
|
||||||
def get_initiator_group_from_masking_view(
|
def get_initiator_group_from_masking_view(
|
||||||
self, conn, maskingViewInstanceName):
|
self, conn, maskingViewInstanceName):
|
||||||
|
@ -2126,8 +2125,8 @@ class EMCVMAXMasking(object):
|
||||||
'mv': maskingViewInstanceName})
|
'mv': maskingViewInstanceName})
|
||||||
return initiatorGroupInstanceNames[0]
|
return initiatorGroupInstanceNames[0]
|
||||||
else:
|
else:
|
||||||
LOG.warn(_LW("No port group found in masking view %(mv)s."),
|
LOG.warning(_LW("No port group found in masking view %(mv)s."),
|
||||||
{'mv': maskingViewInstanceName})
|
{'mv': maskingViewInstanceName})
|
||||||
|
|
||||||
def _get_sg_or_mv_associated_with_initiator(
|
def _get_sg_or_mv_associated_with_initiator(
|
||||||
self, conn, controllerConfigService, volumeInstanceName,
|
self, conn, controllerConfigService, volumeInstanceName,
|
||||||
|
|
|
@ -18,7 +18,7 @@ from oslo_log import log as logging
|
||||||
import six
|
import six
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _, _LE
|
from cinder.i18n import _
|
||||||
from cinder.volume.drivers.emc import emc_vmax_utils
|
from cinder.volume.drivers.emc import emc_vmax_utils
|
||||||
|
|
||||||
|
|
||||||
|
@ -613,11 +613,10 @@ class EMCVMAXProvision(object):
|
||||||
try:
|
try:
|
||||||
rc = self._terminate_migrate_session(
|
rc = self._terminate_migrate_session(
|
||||||
conn, volumeInstanceName, extraSpecs)
|
conn, volumeInstanceName, extraSpecs)
|
||||||
except Exception as ex:
|
except Exception:
|
||||||
LOG.error(_LE('Exception: %s.'), ex)
|
|
||||||
exceptionMessage = (_(
|
exceptionMessage = (_(
|
||||||
"Failed to terminate migrate session."))
|
"Failed to terminate migrate session."))
|
||||||
LOG.error(exceptionMessage)
|
LOG.exception(exceptionMessage)
|
||||||
raise exception.VolumeBackendAPIException(
|
raise exception.VolumeBackendAPIException(
|
||||||
data=exceptionMessage)
|
data=exceptionMessage)
|
||||||
try:
|
try:
|
||||||
|
@ -625,19 +624,17 @@ class EMCVMAXProvision(object):
|
||||||
conn, storageRelocationServiceInstanceName,
|
conn, storageRelocationServiceInstanceName,
|
||||||
volumeInstanceName, targetPoolInstanceName,
|
volumeInstanceName, targetPoolInstanceName,
|
||||||
extraSpecs)
|
extraSpecs)
|
||||||
except Exception as ex:
|
except Exception:
|
||||||
LOG.error(_LE('Exception: %s'), ex)
|
|
||||||
exceptionMessage = (_(
|
exceptionMessage = (_(
|
||||||
"Failed to migrate volume for the second time."))
|
"Failed to migrate volume for the second time."))
|
||||||
LOG.error(exceptionMessage)
|
LOG.exception(exceptionMessage)
|
||||||
raise exception.VolumeBackendAPIException(
|
raise exception.VolumeBackendAPIException(
|
||||||
data=exceptionMessage)
|
data=exceptionMessage)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
LOG.error(_LE('Exception: %s'), ex)
|
|
||||||
exceptionMessage = (_(
|
exceptionMessage = (_(
|
||||||
"Failed to migrate volume for the first time."))
|
"Failed to migrate volume for the first time."))
|
||||||
LOG.error(exceptionMessage)
|
LOG.exception(exceptionMessage)
|
||||||
raise exception.VolumeBackendAPIException(
|
raise exception.VolumeBackendAPIException(
|
||||||
data=exceptionMessage)
|
data=exceptionMessage)
|
||||||
|
|
||||||
|
|
|
@ -141,10 +141,11 @@ class EMCVMAXUtils(object):
|
||||||
for elementCompositionService in elementCompositionServices:
|
for elementCompositionService in elementCompositionServices:
|
||||||
if storageSystemName == elementCompositionService['SystemName']:
|
if storageSystemName == elementCompositionService['SystemName']:
|
||||||
foundElementCompositionService = elementCompositionService
|
foundElementCompositionService = elementCompositionService
|
||||||
LOG.debug("Found Element Composition Service:"
|
LOG.debug(
|
||||||
"%(elementCompositionService)s."
|
"Found Element Composition Service: "
|
||||||
% {'elementCompositionService':
|
"%(elementCompositionService)s.", {
|
||||||
elementCompositionService})
|
'elementCompositionService':
|
||||||
|
elementCompositionService})
|
||||||
break
|
break
|
||||||
if foundElementCompositionService is None:
|
if foundElementCompositionService is None:
|
||||||
exceptionMessage = (_("Element Composition Service not found "
|
exceptionMessage = (_("Element Composition Service not found "
|
||||||
|
@ -324,10 +325,9 @@ class EMCVMAXUtils(object):
|
||||||
if not wait_for_job_called:
|
if not wait_for_job_called:
|
||||||
if self._is_job_finished(conn, job):
|
if self._is_job_finished(conn, job):
|
||||||
kwargs['wait_for_job_called'] = True
|
kwargs['wait_for_job_called'] = True
|
||||||
except Exception as e:
|
except Exception:
|
||||||
LOG.error(_LE("Exception: %s.") % six.text_type(e))
|
|
||||||
exceptionMessage = (_("Issue encountered waiting for job."))
|
exceptionMessage = (_("Issue encountered waiting for job."))
|
||||||
LOG.error(exceptionMessage)
|
LOG.exception(exceptionMessage)
|
||||||
raise exception.VolumeBackendAPIException(exceptionMessage)
|
raise exception.VolumeBackendAPIException(exceptionMessage)
|
||||||
|
|
||||||
kwargs = {'retries': 0,
|
kwargs = {'retries': 0,
|
||||||
|
@ -415,11 +415,10 @@ class EMCVMAXUtils(object):
|
||||||
if not wait_for_sync_called:
|
if not wait_for_sync_called:
|
||||||
if self._is_sync_complete(conn, syncName):
|
if self._is_sync_complete(conn, syncName):
|
||||||
kwargs['wait_for_sync_called'] = True
|
kwargs['wait_for_sync_called'] = True
|
||||||
except Exception as e:
|
except Exception:
|
||||||
LOG.error(_LE("Exception: %s") % six.text_type(e))
|
|
||||||
exceptionMessage = (_("Issue encountered waiting for "
|
exceptionMessage = (_("Issue encountered waiting for "
|
||||||
"synchronization."))
|
"synchronization."))
|
||||||
LOG.error(exceptionMessage)
|
LOG.exception(exceptionMessage)
|
||||||
raise exception.VolumeBackendAPIException(exceptionMessage)
|
raise exception.VolumeBackendAPIException(exceptionMessage)
|
||||||
|
|
||||||
kwargs = {'retries': 0,
|
kwargs = {'retries': 0,
|
||||||
|
|
|
@ -286,7 +286,7 @@ class CommandLineHelper(object):
|
||||||
self.primary_storage_ip = self.active_storage_ip
|
self.primary_storage_ip = self.active_storage_ip
|
||||||
self.secondary_storage_ip = configuration.san_secondary_ip
|
self.secondary_storage_ip = configuration.san_secondary_ip
|
||||||
if self.secondary_storage_ip == self.primary_storage_ip:
|
if self.secondary_storage_ip == self.primary_storage_ip:
|
||||||
LOG.warning(_LE("san_secondary_ip is configured as "
|
LOG.warning(_LW("san_secondary_ip is configured as "
|
||||||
"the same value as san_ip."))
|
"the same value as san_ip."))
|
||||||
self.secondary_storage_ip = None
|
self.secondary_storage_ip = None
|
||||||
if not configuration.san_ip:
|
if not configuration.san_ip:
|
||||||
|
@ -394,7 +394,7 @@ class CommandLineHelper(object):
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
self.delete_lun(name)
|
self.delete_lun(name)
|
||||||
LOG.error(_LE("Error on enable compression on lun %s."),
|
LOG.error(_LE("Error on enable compression on lun %s."),
|
||||||
six.text_type(ex))
|
ex)
|
||||||
|
|
||||||
# handle consistency group
|
# handle consistency group
|
||||||
try:
|
try:
|
||||||
|
@ -405,7 +405,7 @@ class CommandLineHelper(object):
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
self.delete_lun(name)
|
self.delete_lun(name)
|
||||||
LOG.error(_LE("Error on adding lun to consistency"
|
LOG.error(_LE("Error on adding lun to consistency"
|
||||||
" group. %s"), six.text_type(ex))
|
" group. %s"), ex)
|
||||||
return data
|
return data
|
||||||
|
|
||||||
def create_lun_by_cmd(self, cmd, name):
|
def create_lun_by_cmd(self, cmd, name):
|
||||||
|
@ -514,7 +514,7 @@ class CommandLineHelper(object):
|
||||||
'_wait_for_a_condition: %(method_name)s '
|
'_wait_for_a_condition: %(method_name)s '
|
||||||
'execution failed for %(exception)s',
|
'execution failed for %(exception)s',
|
||||||
{'method_name': testmethod.__name__,
|
{'method_name': testmethod.__name__,
|
||||||
'exception': six.text_type(ex)})
|
'exception': ex})
|
||||||
if test_value:
|
if test_value:
|
||||||
raise loopingcall.LoopingCallDone()
|
raise loopingcall.LoopingCallDone()
|
||||||
|
|
||||||
|
@ -2260,9 +2260,7 @@ class EMCVnxCliBase(object):
|
||||||
self._client.delete_consistencygroup(cg_name)
|
self._client.delete_consistencygroup(cg_name)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
msg = (_('Delete consistency group %s failed.')
|
LOG.error(_LE('Delete consistency group %s failed.'), cg_name)
|
||||||
% cg_name)
|
|
||||||
LOG.error(msg)
|
|
||||||
|
|
||||||
for volume_ref in volumes:
|
for volume_ref in volumes:
|
||||||
try:
|
try:
|
||||||
|
@ -3114,8 +3112,8 @@ class MigrateLunTask(task.Task):
|
||||||
dest_vol_lun_id,
|
dest_vol_lun_id,
|
||||||
None)
|
None)
|
||||||
if not migrated:
|
if not migrated:
|
||||||
msg = (_LE("Migrate volume failed between source vol %(src)s"
|
msg = (_("Migrate volume failed between source vol %(src)s"
|
||||||
" and dest vol %(dst)s."),
|
" and dest vol %(dst)s.") %
|
||||||
{'src': new_vol_name, 'dst': dest_vol_name})
|
{'src': new_vol_name, 'dst': dest_vol_name})
|
||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
raise exception.VolumeBackendAPIException(data=msg)
|
raise exception.VolumeBackendAPIException(data=msg)
|
||||||
|
|
|
@ -208,7 +208,7 @@ class DellEQLSanISCSIDriver(san.SanISCSIDriver):
|
||||||
if any(ln.startswith(('% Error', 'Error:')) for ln in out):
|
if any(ln.startswith(('% Error', 'Error:')) for ln in out):
|
||||||
desc = _("Error executing EQL command")
|
desc = _("Error executing EQL command")
|
||||||
cmdout = '\n'.join(out)
|
cmdout = '\n'.join(out)
|
||||||
LOG.error(cmdout)
|
LOG.error(_LE("%s"), cmdout)
|
||||||
raise processutils.ProcessExecutionError(
|
raise processutils.ProcessExecutionError(
|
||||||
stdout=cmdout, cmd=command, description=desc)
|
stdout=cmdout, cmd=command, description=desc)
|
||||||
return out
|
return out
|
||||||
|
@ -412,8 +412,8 @@ class DellEQLSanISCSIDriver(san.SanISCSIDriver):
|
||||||
self._eql_execute('volume', 'select', volume['name'], 'offline')
|
self._eql_execute('volume', 'select', volume['name'], 'offline')
|
||||||
self._eql_execute('volume', 'delete', volume['name'])
|
self._eql_execute('volume', 'delete', volume['name'])
|
||||||
except exception.VolumeNotFound:
|
except exception.VolumeNotFound:
|
||||||
LOG.warn(_LW('Volume %s was not found while trying to delete it.'),
|
LOG.warning(_LW('Volume %s was not found while trying to delete '
|
||||||
volume['name'])
|
'it.'), volume['name'])
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to delete '
|
LOG.error(_LE('Failed to delete '
|
||||||
|
@ -527,8 +527,8 @@ class DellEQLSanISCSIDriver(san.SanISCSIDriver):
|
||||||
try:
|
try:
|
||||||
self._check_volume(volume)
|
self._check_volume(volume)
|
||||||
except exception.VolumeNotFound:
|
except exception.VolumeNotFound:
|
||||||
LOG.warn(_LW('Volume %s is not found!, it may have been deleted.'),
|
LOG.warning(_LW('Volume %s is not found!, it may have been '
|
||||||
volume['name'])
|
'deleted.'), volume['name'])
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE('Failed to ensure export of volume "%s".'),
|
LOG.error(_LE('Failed to ensure export of volume "%s".'),
|
||||||
|
|
|
@ -92,12 +92,12 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
if not config:
|
if not config:
|
||||||
msg = (_("There's no Gluster config file configured (%s)") %
|
msg = (_("There's no Gluster config file configured (%s)") %
|
||||||
'glusterfs_shares_config')
|
'glusterfs_shares_config')
|
||||||
LOG.warn(msg)
|
LOG.warning(msg)
|
||||||
raise exception.GlusterfsException(msg)
|
raise exception.GlusterfsException(msg)
|
||||||
if not os.path.exists(config):
|
if not os.path.exists(config):
|
||||||
msg = (_("Gluster config file at %(config)s doesn't exist") %
|
msg = (_("Gluster config file at %(config)s doesn't exist") %
|
||||||
{'config': config})
|
{'config': config})
|
||||||
LOG.warn(msg)
|
LOG.warning(msg)
|
||||||
raise exception.GlusterfsException(msg)
|
raise exception.GlusterfsException(msg)
|
||||||
|
|
||||||
self.shares = {}
|
self.shares = {}
|
||||||
|
@ -119,7 +119,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
try:
|
try:
|
||||||
self._do_umount(True, share)
|
self._do_umount(True, share)
|
||||||
except Exception as exc:
|
except Exception as exc:
|
||||||
LOG.warning(_LE('Exception during unmounting %s') % (exc))
|
LOG.warning(_LW('Exception during unmounting %s'), exc)
|
||||||
|
|
||||||
def _do_umount(self, ignore_not_mounted, share):
|
def _do_umount(self, ignore_not_mounted, share):
|
||||||
mount_path = self._get_mount_point_for_share(share)
|
mount_path = self._get_mount_point_for_share(share)
|
||||||
|
@ -139,8 +139,8 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
self._unmount_shares()
|
self._unmount_shares()
|
||||||
except processutils.ProcessExecutionError as exc:
|
except processutils.ProcessExecutionError as exc:
|
||||||
if 'target is busy' in exc.stderr:
|
if 'target is busy' in exc.stderr:
|
||||||
LOG.warn(_LW("Failed to refresh mounts, reason=%s") %
|
LOG.warning(_LW("Failed to refresh mounts, reason=%s"),
|
||||||
exc.stderr)
|
exc.stderr)
|
||||||
else:
|
else:
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
@ -168,7 +168,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
|
|
||||||
volume['provider_location'] = self._find_share(volume['size'])
|
volume['provider_location'] = self._find_share(volume['size'])
|
||||||
|
|
||||||
LOG.info(_LI('casted to %s') % volume['provider_location'])
|
LOG.info(_LI('casted to %s'), volume['provider_location'])
|
||||||
|
|
||||||
self._do_create_volume(volume)
|
self._do_create_volume(volume)
|
||||||
|
|
||||||
|
@ -182,10 +182,10 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
|
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
|
||||||
"volume_size: %(size)s"
|
"volume_size: %(size)s",
|
||||||
% {'snap': snapshot['id'],
|
{'snap': snapshot['id'],
|
||||||
'vol': volume['id'],
|
'vol': volume['id'],
|
||||||
'size': volume_size})
|
'size': volume_size})
|
||||||
|
|
||||||
info_path = self._local_path_volume_info(snapshot['volume'])
|
info_path = self._local_path_volume_info(snapshot['volume'])
|
||||||
snap_info = self._read_info_file(info_path)
|
snap_info = self._read_info_file(info_path)
|
||||||
|
@ -201,7 +201,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
|
|
||||||
path_to_new_vol = self._local_path_volume(volume)
|
path_to_new_vol = self._local_path_volume(volume)
|
||||||
|
|
||||||
LOG.debug("will copy from snapshot at %s" % path_to_snap_img)
|
LOG.debug("will copy from snapshot at %s", path_to_snap_img)
|
||||||
|
|
||||||
if self.configuration.glusterfs_qcow2_volumes:
|
if self.configuration.glusterfs_qcow2_volumes:
|
||||||
out_format = 'qcow2'
|
out_format = 'qcow2'
|
||||||
|
@ -219,9 +219,9 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
"""Deletes a logical volume."""
|
"""Deletes a logical volume."""
|
||||||
|
|
||||||
if not volume['provider_location']:
|
if not volume['provider_location']:
|
||||||
LOG.warn(_LW('Volume %s does not have '
|
LOG.warning(_LW('Volume %s does not have '
|
||||||
'provider_location specified, '
|
'provider_location specified, '
|
||||||
'skipping'), volume['name'])
|
'skipping'), volume['name'])
|
||||||
return
|
return
|
||||||
|
|
||||||
self._ensure_share_mounted(volume['provider_location'])
|
self._ensure_share_mounted(volume['provider_location'])
|
||||||
|
@ -324,7 +324,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
volume_path = self.local_path(volume)
|
volume_path = self.local_path(volume)
|
||||||
volume_size = volume['size']
|
volume_size = volume['size']
|
||||||
|
|
||||||
LOG.debug("creating new volume at %s" % volume_path)
|
LOG.debug("creating new volume at %s", volume_path)
|
||||||
|
|
||||||
if os.path.exists(volume_path):
|
if os.path.exists(volume_path):
|
||||||
msg = _('file already exists at %s') % volume_path
|
msg = _('file already exists at %s') % volume_path
|
||||||
|
@ -353,9 +353,9 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
self._ensure_share_mounted(share)
|
self._ensure_share_mounted(share)
|
||||||
self._mounted_shares.append(share)
|
self._mounted_shares.append(share)
|
||||||
except Exception as exc:
|
except Exception as exc:
|
||||||
LOG.error(_LE('Exception during mounting %s') % (exc,))
|
LOG.error(_LE('Exception during mounting %s'), exc)
|
||||||
|
|
||||||
LOG.debug('Available shares: %s' % self._mounted_shares)
|
LOG.debug('Available shares: %s', self._mounted_shares)
|
||||||
|
|
||||||
def _ensure_share_mounted(self, glusterfs_share):
|
def _ensure_share_mounted(self, glusterfs_share):
|
||||||
"""Mount GlusterFS share.
|
"""Mount GlusterFS share.
|
||||||
|
@ -440,11 +440,10 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
info = self._qemu_img_info(active_file_path, volume['name'])
|
info = self._qemu_img_info(active_file_path, volume['name'])
|
||||||
|
|
||||||
if info.backing_file is not None:
|
if info.backing_file is not None:
|
||||||
msg = _('No snapshots found in database, but '
|
LOG.error(_LE('No snapshots found in database, but %(path)s has '
|
||||||
'%(path)s has backing file '
|
'backing file %(backing_file)s!'),
|
||||||
'%(backing_file)s!') % {'path': active_file_path,
|
{'path': active_file_path,
|
||||||
'backing_file': info.backing_file}
|
'backing_file': info.backing_file})
|
||||||
LOG.error(msg)
|
|
||||||
raise exception.InvalidVolume(snap_error_msg)
|
raise exception.InvalidVolume(snap_error_msg)
|
||||||
|
|
||||||
if info.file_format != 'raw':
|
if info.file_format != 'raw':
|
||||||
|
|
|
@ -26,7 +26,7 @@ from oslo_log import log as logging
|
||||||
from oslo_utils import excutils
|
from oslo_utils import excutils
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _, _LE, _LI
|
from cinder.i18n import _LE, _LI
|
||||||
from cinder import utils
|
from cinder import utils
|
||||||
from cinder.volume import driver
|
from cinder.volume import driver
|
||||||
from cinder.volume.drivers.hds import hus_backend
|
from cinder.volume.drivers.hds import hus_backend
|
||||||
|
@ -70,9 +70,8 @@ def _loc_info(loc):
|
||||||
|
|
||||||
def _do_lu_range_check(start, end, maxlun):
|
def _do_lu_range_check(start, end, maxlun):
|
||||||
"""Validate array allocation range."""
|
"""Validate array allocation range."""
|
||||||
LOG.debug("Range: start LU: %(start)s, end LU: %(end)s"
|
LOG.debug("Range: start LU: %(start)s, end LU: %(end)s",
|
||||||
% {'start': start,
|
{'start': start, 'end': end})
|
||||||
'end': end})
|
|
||||||
if int(start) < 0:
|
if int(start) < 0:
|
||||||
msg = 'start LU limit too low: ' + start
|
msg = 'start LU limit too low: ' + start
|
||||||
raise exception.InvalidInput(reason=msg)
|
raise exception.InvalidInput(reason=msg)
|
||||||
|
@ -84,7 +83,7 @@ def _do_lu_range_check(start, end, maxlun):
|
||||||
raise exception.InvalidInput(reason=msg)
|
raise exception.InvalidInput(reason=msg)
|
||||||
if int(end) > int(maxlun):
|
if int(end) > int(maxlun):
|
||||||
end = maxlun
|
end = maxlun
|
||||||
LOG.debug("setting LU upper (end) limit to %s" % maxlun)
|
LOG.debug("setting LU upper (end) limit to %s", maxlun)
|
||||||
return (start, end)
|
return (start, end)
|
||||||
|
|
||||||
|
|
||||||
|
@ -92,9 +91,8 @@ def _xml_read(root, element, check=None):
|
||||||
"""Read an xml element."""
|
"""Read an xml element."""
|
||||||
try:
|
try:
|
||||||
val = root.findtext(element)
|
val = root.findtext(element)
|
||||||
LOG.info(_LI("%(element)s: %(val)s")
|
LOG.info(_LI("%(element)s: %(val)s"),
|
||||||
% {'element': element,
|
{'element': element, 'val': val})
|
||||||
'val': val})
|
|
||||||
if val:
|
if val:
|
||||||
return val.strip()
|
return val.strip()
|
||||||
if check:
|
if check:
|
||||||
|
@ -103,9 +101,9 @@ def _xml_read(root, element, check=None):
|
||||||
except ETree.ParseError:
|
except ETree.ParseError:
|
||||||
if check:
|
if check:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE("XML exception reading parameter: %s") % element)
|
LOG.error(_LE("XML exception reading parameter: %s"), element)
|
||||||
else:
|
else:
|
||||||
LOG.info(_LI("XML exception reading parameter: %s") % element)
|
LOG.info(_LI("XML exception reading parameter: %s"), element)
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
@ -178,12 +176,9 @@ class HUSDriver(driver.ISCSIDriver):
|
||||||
conf[ip]['ctl'] = ctl
|
conf[ip]['ctl'] = ctl
|
||||||
conf[ip]['port'] = port
|
conf[ip]['port'] = port
|
||||||
conf[ip]['iscsi_port'] = ipp # HUS default: 3260
|
conf[ip]['iscsi_port'] = ipp # HUS default: 3260
|
||||||
msg = _('portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s')
|
LOG.debug('portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: '
|
||||||
LOG.debug(msg
|
'%(port)s', {'ip': ip, 'ipp': ipp,
|
||||||
% {'ip': ip,
|
'ctl': ctl, 'port': port})
|
||||||
'ipp': ipp,
|
|
||||||
'ctl': ctl,
|
|
||||||
'port': port})
|
|
||||||
return conf
|
return conf
|
||||||
|
|
||||||
def _get_service(self, volume):
|
def _get_service(self, volume):
|
||||||
|
@ -197,7 +192,7 @@ class HUSDriver(driver.ISCSIDriver):
|
||||||
service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'],
|
service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'],
|
||||||
svc['port'], svc['hdp']) # ip, ipp, ctl, port, hdp
|
svc['port'], svc['hdp']) # ip, ipp, ctl, port, hdp
|
||||||
else:
|
else:
|
||||||
LOG.error(_LE("No configuration found for service: %s") % label)
|
LOG.error(_LE("No configuration found for service: %s"), label)
|
||||||
raise exception.ParameterNotFound(param=label)
|
raise exception.ParameterNotFound(param=label)
|
||||||
return service
|
return service
|
||||||
|
|
||||||
|
@ -250,7 +245,7 @@ class HUSDriver(driver.ISCSIDriver):
|
||||||
lst.extend([self.config['snapshot_hdp'], ])
|
lst.extend([self.config['snapshot_hdp'], ])
|
||||||
for hdp in lst:
|
for hdp in lst:
|
||||||
if hdp not in hdpl:
|
if hdp not in hdpl:
|
||||||
LOG.error(_LE("HDP not found: %s") % hdp)
|
LOG.error(_LE("HDP not found: %s"), hdp)
|
||||||
err = "HDP not found: " + hdp
|
err = "HDP not found: " + hdp
|
||||||
raise exception.ParameterNotFound(param=err)
|
raise exception.ParameterNotFound(param=err)
|
||||||
|
|
||||||
|
@ -290,7 +285,7 @@ class HUSDriver(driver.ISCSIDriver):
|
||||||
iscsi_info[svc_ip]['iscsi_port'])
|
iscsi_info[svc_ip]['iscsi_port'])
|
||||||
else: # config iscsi address not found on device!
|
else: # config iscsi address not found on device!
|
||||||
LOG.error(_LE("iSCSI portal not found "
|
LOG.error(_LE("iSCSI portal not found "
|
||||||
"for service: %s") % svc_ip)
|
"for service: %s"), svc_ip)
|
||||||
raise exception.ParameterNotFound(param=svc_ip)
|
raise exception.ParameterNotFound(param=svc_ip)
|
||||||
return
|
return
|
||||||
|
|
||||||
|
@ -328,9 +323,8 @@ class HUSDriver(driver.ISCSIDriver):
|
||||||
'%s' % (int(volume['size']) * 1024))
|
'%s' % (int(volume['size']) * 1024))
|
||||||
lun = self.arid + '.' + out.split()[1]
|
lun = self.arid + '.' + out.split()[1]
|
||||||
sz = int(out.split()[5])
|
sz = int(out.split()[5])
|
||||||
LOG.debug("LUN %(lun)s of size %(sz)s MB is created."
|
LOG.debug("LUN %(lun)s of size %(sz)s MB is created.",
|
||||||
% {'lun': lun,
|
{'lun': lun, 'sz': sz})
|
||||||
'sz': sz})
|
|
||||||
return {'provider_location': lun}
|
return {'provider_location': lun}
|
||||||
|
|
||||||
@utils.synchronized('hds_hus', external=True)
|
@utils.synchronized('hds_hus', external=True)
|
||||||
|
@ -356,9 +350,8 @@ class HUSDriver(driver.ISCSIDriver):
|
||||||
'%s' % (size))
|
'%s' % (size))
|
||||||
lun = self.arid + '.' + out.split()[1]
|
lun = self.arid + '.' + out.split()[1]
|
||||||
size = int(out.split()[5])
|
size = int(out.split()[5])
|
||||||
LOG.debug("LUN %(lun)s of size %(size)s MB is cloned."
|
LOG.debug("LUN %(lun)s of size %(size)s MB is cloned.",
|
||||||
% {'lun': lun,
|
{'lun': lun, 'size': size})
|
||||||
'size': size})
|
|
||||||
return {'provider_location': lun}
|
return {'provider_location': lun}
|
||||||
|
|
||||||
@utils.synchronized('hds_hus', external=True)
|
@utils.synchronized('hds_hus', external=True)
|
||||||
|
@ -373,9 +366,8 @@ class HUSDriver(driver.ISCSIDriver):
|
||||||
self.config['password'],
|
self.config['password'],
|
||||||
arid, lun,
|
arid, lun,
|
||||||
'%s' % (new_size * 1024))
|
'%s' % (new_size * 1024))
|
||||||
LOG.debug("LUN %(lun)s extended to %(size)s GB."
|
LOG.debug("LUN %(lun)s extended to %(size)s GB.",
|
||||||
% {'lun': lun,
|
{'lun': lun, 'size': new_size})
|
||||||
'size': new_size})
|
|
||||||
|
|
||||||
@utils.synchronized('hds_hus', external=True)
|
@utils.synchronized('hds_hus', external=True)
|
||||||
def delete_volume(self, volume):
|
def delete_volume(self, volume):
|
||||||
|
@ -396,9 +388,8 @@ class HUSDriver(driver.ISCSIDriver):
|
||||||
arid, lun, ctl, port, iqn,
|
arid, lun, ctl, port, iqn,
|
||||||
'')
|
'')
|
||||||
name = self.hus_name
|
name = self.hus_name
|
||||||
LOG.debug("delete lun %(lun)s on %(name)s"
|
LOG.debug("delete lun %(lun)s on %(name)s",
|
||||||
% {'lun': lun,
|
{'lun': lun, 'name': name})
|
||||||
'name': name})
|
|
||||||
self.bend.delete_lu(self.config['hus_cmd'],
|
self.bend.delete_lu(self.config['hus_cmd'],
|
||||||
HDS_VERSION,
|
HDS_VERSION,
|
||||||
self.config['mgmt_ip0'],
|
self.config['mgmt_ip0'],
|
||||||
|
@ -481,9 +472,8 @@ class HUSDriver(driver.ISCSIDriver):
|
||||||
'%s' % (size))
|
'%s' % (size))
|
||||||
lun = self.arid + '.' + out.split()[1]
|
lun = self.arid + '.' + out.split()[1]
|
||||||
sz = int(out.split()[5])
|
sz = int(out.split()[5])
|
||||||
LOG.debug("LUN %(lun)s of size %(sz)s MB is created from snapshot."
|
LOG.debug("LUN %(lun)s of size %(sz)s MB is created from snapshot.",
|
||||||
% {'lun': lun,
|
{'lun': lun, 'sz': sz})
|
||||||
'sz': sz})
|
|
||||||
return {'provider_location': lun}
|
return {'provider_location': lun}
|
||||||
|
|
||||||
@utils.synchronized('hds_hus', external=True)
|
@utils.synchronized('hds_hus', external=True)
|
||||||
|
@ -504,9 +494,8 @@ class HUSDriver(driver.ISCSIDriver):
|
||||||
'%s' % (size))
|
'%s' % (size))
|
||||||
lun = self.arid + '.' + out.split()[1]
|
lun = self.arid + '.' + out.split()[1]
|
||||||
size = int(out.split()[5])
|
size = int(out.split()[5])
|
||||||
LOG.debug("LUN %(lun)s of size %(size)s MB is created as snapshot."
|
LOG.debug("LUN %(lun)s of size %(size)s MB is created as snapshot.",
|
||||||
% {'lun': lun,
|
{'lun': lun, 'size': size})
|
||||||
'size': size})
|
|
||||||
return {'provider_location': lun}
|
return {'provider_location': lun}
|
||||||
|
|
||||||
@utils.synchronized('hds_hus', external=True)
|
@utils.synchronized('hds_hus', external=True)
|
||||||
|
@ -523,7 +512,7 @@ class HUSDriver(driver.ISCSIDriver):
|
||||||
self.config['username'],
|
self.config['username'],
|
||||||
self.config['password'],
|
self.config['password'],
|
||||||
arid, lun)
|
arid, lun)
|
||||||
LOG.debug("LUN %s is deleted." % lun)
|
LOG.debug("LUN %s is deleted.", lun)
|
||||||
return
|
return
|
||||||
|
|
||||||
@utils.synchronized('hds_hus', external=True)
|
@utils.synchronized('hds_hus', external=True)
|
||||||
|
|
|
@ -38,7 +38,8 @@ class HusBackend(object):
|
||||||
'--version', '1',
|
'--version', '1',
|
||||||
run_as_root=True,
|
run_as_root=True,
|
||||||
check_exit_code=True)
|
check_exit_code=True)
|
||||||
LOG.debug('get_version: ' + out + ' -- ' + err)
|
LOG.debug('get_version: %(out)s -- %(err)s',
|
||||||
|
{'out': out, 'err': err})
|
||||||
return out
|
return out
|
||||||
|
|
||||||
def get_iscsi_info(self, cmd, ver, ip0, ip1, user, pw):
|
def get_iscsi_info(self, cmd, ver, ip0, ip1, user, pw):
|
||||||
|
@ -50,7 +51,8 @@ class HusBackend(object):
|
||||||
'--password', pw,
|
'--password', pw,
|
||||||
'--iscsi', '1',
|
'--iscsi', '1',
|
||||||
check_exit_code=True)
|
check_exit_code=True)
|
||||||
LOG.debug('get_iscsi_info: ' + out + ' -- ' + err)
|
LOG.debug('get_iscsi_info: %(out)s -- %(err)s',
|
||||||
|
{'out': out, 'err': err})
|
||||||
return out
|
return out
|
||||||
|
|
||||||
def get_hdp_info(self, cmd, ver, ip0, ip1, user, pw):
|
def get_hdp_info(self, cmd, ver, ip0, ip1, user, pw):
|
||||||
|
@ -62,7 +64,8 @@ class HusBackend(object):
|
||||||
'--password', pw,
|
'--password', pw,
|
||||||
'--hdp', '1',
|
'--hdp', '1',
|
||||||
check_exit_code=True)
|
check_exit_code=True)
|
||||||
LOG.debug('get_hdp_info: ' + out + ' -- ' + err)
|
LOG.debug('get_hdp_info: %(out)s -- %(err)s',
|
||||||
|
{'out': out, 'err': err})
|
||||||
return out
|
return out
|
||||||
|
|
||||||
def create_lu(self, cmd, ver, ip0, ip1, user, pw, id, hdp, start,
|
def create_lu(self, cmd, ver, ip0, ip1, user, pw, id, hdp, start,
|
||||||
|
@ -80,7 +83,8 @@ class HusBackend(object):
|
||||||
'--end', end,
|
'--end', end,
|
||||||
'--size', size,
|
'--size', size,
|
||||||
check_exit_code=True)
|
check_exit_code=True)
|
||||||
LOG.debug('create_lu: ' + out + ' -- ' + err)
|
LOG.debug('create_lu: %(out)s -- %(err)s',
|
||||||
|
{'out': out, 'err': err})
|
||||||
return out
|
return out
|
||||||
|
|
||||||
def delete_lu(self, cmd, ver, ip0, ip1, user, pw, id, lun):
|
def delete_lu(self, cmd, ver, ip0, ip1, user, pw, id, lun):
|
||||||
|
@ -95,7 +99,8 @@ class HusBackend(object):
|
||||||
'--lun', lun,
|
'--lun', lun,
|
||||||
'--force', 1,
|
'--force', 1,
|
||||||
check_exit_code=True)
|
check_exit_code=True)
|
||||||
LOG.debug('delete_lu: ' + out + ' -- ' + err)
|
LOG.debug('delete_lu: %(out)s -- %(err)s',
|
||||||
|
{'out': out, 'err': err})
|
||||||
return out
|
return out
|
||||||
|
|
||||||
def create_dup(self, cmd, ver, ip0, ip1, user, pw, id, src_lun,
|
def create_dup(self, cmd, ver, ip0, ip1, user, pw, id, src_lun,
|
||||||
|
@ -114,7 +119,8 @@ class HusBackend(object):
|
||||||
'--end', end,
|
'--end', end,
|
||||||
'--size', size,
|
'--size', size,
|
||||||
check_exit_code=True)
|
check_exit_code=True)
|
||||||
LOG.debug('create_dup: ' + out + ' -- ' + err)
|
LOG.debug('create_dup: %(out)s -- %(err)s',
|
||||||
|
{'out': out, 'err': err})
|
||||||
return out
|
return out
|
||||||
|
|
||||||
def extend_vol(self, cmd, ver, ip0, ip1, user, pw, id, lun, new_size):
|
def extend_vol(self, cmd, ver, ip0, ip1, user, pw, id, lun, new_size):
|
||||||
|
@ -129,7 +135,8 @@ class HusBackend(object):
|
||||||
'--lun', lun,
|
'--lun', lun,
|
||||||
'--size', new_size,
|
'--size', new_size,
|
||||||
check_exit_code=True)
|
check_exit_code=True)
|
||||||
LOG.debug('extend_vol: ' + out + ' -- ' + err)
|
LOG.debug('extend_vol: %(out)s -- %(err)s',
|
||||||
|
{'out': out, 'err': err})
|
||||||
return out
|
return out
|
||||||
|
|
||||||
def add_iscsi_conn(self, cmd, ver, ip0, ip1, user, pw, id, lun, ctl, port,
|
def add_iscsi_conn(self, cmd, ver, ip0, ip1, user, pw, id, lun, ctl, port,
|
||||||
|
@ -148,7 +155,8 @@ class HusBackend(object):
|
||||||
'--target', iqn,
|
'--target', iqn,
|
||||||
'--initiator', initiator,
|
'--initiator', initiator,
|
||||||
check_exit_code=True)
|
check_exit_code=True)
|
||||||
LOG.debug('add_iscsi_conn: ' + out + ' -- ' + err)
|
LOG.debug('add_iscsi_conn: %(out)s -- %(err)s',
|
||||||
|
{'out': out, 'err': err})
|
||||||
return out
|
return out
|
||||||
|
|
||||||
def del_iscsi_conn(self, cmd, ver, ip0, ip1, user, pw, id, lun, ctl, port,
|
def del_iscsi_conn(self, cmd, ver, ip0, ip1, user, pw, id, lun, ctl, port,
|
||||||
|
@ -168,5 +176,6 @@ class HusBackend(object):
|
||||||
'--initiator', initiator,
|
'--initiator', initiator,
|
||||||
'--force', 1,
|
'--force', 1,
|
||||||
check_exit_code=True)
|
check_exit_code=True)
|
||||||
LOG.debug('del_iscsi_conn: ' + out + ' -- ' + err)
|
LOG.debug('del_iscsi_conn: %(out)s -- %(err)s',
|
||||||
|
{'out': out, 'err': err})
|
||||||
return out
|
return out
|
||||||
|
|
|
@ -205,8 +205,8 @@ class HDSISCSIDriver(driver.ISCSIDriver):
|
||||||
conf[ip]['ctl'] = ctl
|
conf[ip]['ctl'] = ctl
|
||||||
conf[ip]['port'] = port
|
conf[ip]['port'] = port
|
||||||
conf[ip]['iscsi_port'] = ipp
|
conf[ip]['iscsi_port'] = ipp
|
||||||
msg = "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(pt)s"
|
LOG.debug("portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(pt)s",
|
||||||
LOG.debug(msg, {'ip': ip, 'ipp': ipp, 'ctl': ctl, 'pt': port})
|
{'ip': ip, 'ipp': ipp, 'ctl': ctl, 'pt': port})
|
||||||
|
|
||||||
return conf
|
return conf
|
||||||
|
|
||||||
|
@ -256,7 +256,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
|
||||||
# value and use a temporary dummy password.
|
# value and use a temporary dummy password.
|
||||||
if 'iscsi_secret' not in svc:
|
if 'iscsi_secret' not in svc:
|
||||||
# Warns in the first time
|
# Warns in the first time
|
||||||
LOG.info(_LE("CHAP authentication disabled"))
|
LOG.info(_LI("CHAP authentication disabled"))
|
||||||
|
|
||||||
svc['iscsi_secret'] = ""
|
svc['iscsi_secret'] = ""
|
||||||
|
|
||||||
|
@ -303,7 +303,8 @@ class HDSISCSIDriver(driver.ISCSIDriver):
|
||||||
self.config['password'],
|
self.config['password'],
|
||||||
pool['hdp'])
|
pool['hdp'])
|
||||||
|
|
||||||
LOG.debug('Query for pool %s: %s', pool['pool_name'], out)
|
LOG.debug('Query for pool %(pool)s: %(out)s',
|
||||||
|
{'pool': pool['pool_name'], 'out': out})
|
||||||
|
|
||||||
(hdp, size, _ign, used) = out.split()[1:5] # in MB
|
(hdp, size, _ign, used) = out.split()[1:5] # in MB
|
||||||
pool['total_capacity_gb'] = int(size) / units.Ki
|
pool['total_capacity_gb'] = int(size) / units.Ki
|
||||||
|
@ -566,7 +567,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
|
||||||
|
|
||||||
if 'tgt' in info.keys(): # spurious repeat connection
|
if 'tgt' in info.keys(): # spurious repeat connection
|
||||||
# print info.keys()
|
# print info.keys()
|
||||||
LOG.debug("initiate_conn: tgt already set %s" % info['tgt'])
|
LOG.debug("initiate_conn: tgt already set %s", info['tgt'])
|
||||||
(arid, lun) = info['id_lu']
|
(arid, lun) = info['id_lu']
|
||||||
loc = arid + '.' + lun
|
loc = arid + '.' + lun
|
||||||
# sps, use target if provided
|
# sps, use target if provided
|
||||||
|
@ -612,7 +613,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
|
||||||
|
|
||||||
info = _loc_info(volume['provider_location'])
|
info = _loc_info(volume['provider_location'])
|
||||||
if 'tgt' not in info.keys(): # spurious disconnection
|
if 'tgt' not in info.keys(): # spurious disconnection
|
||||||
LOG.warn(_LW("terminate_conn: provider location empty."))
|
LOG.warning(_LW("terminate_conn: provider location empty."))
|
||||||
return
|
return
|
||||||
(arid, lun) = info['id_lu']
|
(arid, lun) = info['id_lu']
|
||||||
(_portal, iqn, loc, ctl, port, hlun) = info['tgt']
|
(_portal, iqn, loc, ctl, port, hlun) = info['tgt']
|
||||||
|
|
|
@ -450,10 +450,10 @@ class HDSNFSDriver(nfs.NfsDriver):
|
||||||
conf[key]['path'] = path
|
conf[key]['path'] = path
|
||||||
conf[key]['hdp'] = hdp
|
conf[key]['hdp'] = hdp
|
||||||
conf[key]['fslabel'] = fslabel
|
conf[key]['fslabel'] = fslabel
|
||||||
msg = _("nfs_info: %(key)s: %(path)s, HDP: \
|
LOG.info(_LI("nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s "
|
||||||
%(fslabel)s FSID: %(hdp)s")
|
"FSID: %(hdp)s"),
|
||||||
LOG.info(msg, {'key': key, 'path': path, 'fslabel': fslabel,
|
{'key': key, 'path': path,
|
||||||
'hdp': hdp})
|
'fslabel': fslabel, 'hdp': hdp})
|
||||||
|
|
||||||
return conf
|
return conf
|
||||||
|
|
||||||
|
|
|
@ -23,7 +23,7 @@ from oslo_utils import excutils
|
||||||
import six
|
import six
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _
|
from cinder.i18n import _, _LE
|
||||||
from cinder import utils
|
from cinder import utils
|
||||||
|
|
||||||
SMPL = 1
|
SMPL = 1
|
||||||
|
@ -168,7 +168,7 @@ def set_msg(msg_id, **kwargs):
|
||||||
def output_err(msg_id, **kwargs):
|
def output_err(msg_id, **kwargs):
|
||||||
msg = HBSD_ERR_MSG.get(msg_id) % kwargs
|
msg = HBSD_ERR_MSG.get(msg_id) % kwargs
|
||||||
|
|
||||||
LOG.error("MSGID%04d-E: %s", msg_id, msg)
|
LOG.error(_LE("MSGID%(id)04d-E: %(msg)s"), {'id': msg_id, 'msg': msg})
|
||||||
|
|
||||||
return msg
|
return msg
|
||||||
|
|
||||||
|
@ -236,10 +236,10 @@ class HBSDBasicLib(object):
|
||||||
def exec_command(self, cmd, args=None, printflag=True):
|
def exec_command(self, cmd, args=None, printflag=True):
|
||||||
if printflag:
|
if printflag:
|
||||||
if args:
|
if args:
|
||||||
LOG.debug('cmd: %(cmd)s, args: %(args)s' %
|
LOG.debug('cmd: %(cmd)s, args: %(args)s',
|
||||||
{'cmd': cmd, 'args': args})
|
{'cmd': cmd, 'args': args})
|
||||||
else:
|
else:
|
||||||
LOG.debug('cmd: %s' % cmd)
|
LOG.debug('cmd: %s', cmd)
|
||||||
|
|
||||||
cmd = [cmd]
|
cmd = [cmd]
|
||||||
|
|
||||||
|
@ -257,11 +257,11 @@ class HBSDBasicLib(object):
|
||||||
stdout = e.stdout
|
stdout = e.stdout
|
||||||
stderr = e.stderr
|
stderr = e.stderr
|
||||||
|
|
||||||
LOG.debug('cmd: %s' % six.text_type(cmd))
|
LOG.debug('cmd: %s', cmd)
|
||||||
LOG.debug('from: %s' % six.text_type(inspect.stack()[2]))
|
LOG.debug('from: %s', inspect.stack()[2])
|
||||||
LOG.debug('ret: %d' % ret)
|
LOG.debug('ret: %d', ret)
|
||||||
LOG.debug('stdout: %s' % stdout.replace(os.linesep, ' '))
|
LOG.debug('stdout: %s', stdout.replace(os.linesep, ' '))
|
||||||
LOG.debug('stderr: %s' % stderr.replace(os.linesep, ' '))
|
LOG.debug('stderr: %s', stderr.replace(os.linesep, ' '))
|
||||||
|
|
||||||
return ret, stdout, stderr
|
return ret, stdout, stderr
|
||||||
|
|
||||||
|
|
|
@ -25,7 +25,7 @@ from oslo_utils import excutils
|
||||||
import six
|
import six
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _LE, _LW
|
from cinder.i18n import _LE, _LI, _LW
|
||||||
from cinder import utils
|
from cinder import utils
|
||||||
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
|
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
|
||||||
from cinder.volume.drivers.hitachi import hbsd_horcm as horcm
|
from cinder.volume.drivers.hitachi import hbsd_horcm as horcm
|
||||||
|
@ -253,27 +253,28 @@ class HBSDCommon(object):
|
||||||
essential_inherited_param = ['volume_backend_name', 'volume_driver']
|
essential_inherited_param = ['volume_backend_name', 'volume_driver']
|
||||||
conf = self.configuration
|
conf = self.configuration
|
||||||
|
|
||||||
msg = basic_lib.set_msg(1, config_group=conf.config_group)
|
LOG.info(basic_lib.set_msg(1, config_group=conf.config_group))
|
||||||
LOG.info(msg)
|
|
||||||
version = self.command.get_comm_version()
|
version = self.command.get_comm_version()
|
||||||
if conf.hitachi_unit_name:
|
if conf.hitachi_unit_name:
|
||||||
prefix = 'HSNM2 version'
|
prefix = 'HSNM2 version'
|
||||||
else:
|
else:
|
||||||
prefix = 'RAID Manager version'
|
prefix = 'RAID Manager version'
|
||||||
LOG.info('\t%-35s%s' % (prefix + ': ', six.text_type(version)))
|
LOG.info(_LI('\t%(prefix)-35s : %(version)s'),
|
||||||
|
{'prefix': prefix, 'version': version})
|
||||||
for param in essential_inherited_param:
|
for param in essential_inherited_param:
|
||||||
value = conf.safe_get(param)
|
value = conf.safe_get(param)
|
||||||
LOG.info('\t%-35s%s' % (param + ': ', six.text_type(value)))
|
LOG.info(_LI('\t%(param)-35s : %(value)s'),
|
||||||
|
{'param': param, 'value': value})
|
||||||
for opt in volume_opts:
|
for opt in volume_opts:
|
||||||
if not opt.secret:
|
if not opt.secret:
|
||||||
value = getattr(conf, opt.name)
|
value = getattr(conf, opt.name)
|
||||||
LOG.info('\t%-35s%s' % (opt.name + ': ',
|
LOG.info(_LI('\t%(name)-35s : %(value)s'),
|
||||||
six.text_type(value)))
|
{'name': opt.name, 'value': value})
|
||||||
|
|
||||||
if storage_protocol == 'iSCSI':
|
if storage_protocol == 'iSCSI':
|
||||||
value = getattr(conf, 'hitachi_group_request')
|
value = getattr(conf, 'hitachi_group_request')
|
||||||
LOG.info('\t%-35s%s' % ('hitachi_group_request: ',
|
LOG.info(_LI('\t%(request)-35s : %(value)s'),
|
||||||
six.text_type(value)))
|
{'request': 'hitachi_group_request', 'value': value})
|
||||||
|
|
||||||
def check_param(self):
|
def check_param(self):
|
||||||
conf = self.configuration
|
conf = self.configuration
|
||||||
|
@ -352,7 +353,7 @@ class HBSDCommon(object):
|
||||||
|
|
||||||
def delete_pair(self, ldev, all_split=True, is_vvol=None):
|
def delete_pair(self, ldev, all_split=True, is_vvol=None):
|
||||||
paired_info = self.command.get_paired_info(ldev)
|
paired_info = self.command.get_paired_info(ldev)
|
||||||
LOG.debug('paired_info: %s' % six.text_type(paired_info))
|
LOG.debug('paired_info: %s', paired_info)
|
||||||
pvol = paired_info['pvol']
|
pvol = paired_info['pvol']
|
||||||
svols = paired_info['svol']
|
svols = paired_info['svol']
|
||||||
driver = self.generated_from
|
driver = self.generated_from
|
||||||
|
@ -413,15 +414,13 @@ class HBSDCommon(object):
|
||||||
try:
|
try:
|
||||||
self.command.restart_pair_horcm()
|
self.command.restart_pair_horcm()
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.warning(_LW('Failed to restart horcm: %s') %
|
LOG.warning(_LW('Failed to restart horcm: %s'), e)
|
||||||
six.text_type(e))
|
|
||||||
else:
|
else:
|
||||||
if (all_split or is_vvol) and restart:
|
if (all_split or is_vvol) and restart:
|
||||||
try:
|
try:
|
||||||
self.command.restart_pair_horcm()
|
self.command.restart_pair_horcm()
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.warning(_LW('Failed to restart horcm: %s') %
|
LOG.warning(_LW('Failed to restart horcm: %s'), e)
|
||||||
six.text_type(e))
|
|
||||||
|
|
||||||
def copy_async_data(self, pvol, svol, is_vvol):
|
def copy_async_data(self, pvol, svol, is_vvol):
|
||||||
path_list = []
|
path_list = []
|
||||||
|
@ -442,9 +441,8 @@ class HBSDCommon(object):
|
||||||
try:
|
try:
|
||||||
driver.pair_terminate_connection(ldev)
|
driver.pair_terminate_connection(ldev)
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
msg = basic_lib.set_msg(
|
LOG.warning(basic_lib.set_msg(310, ldev=ldev,
|
||||||
310, ldev=ldev, reason=six.text_type(ex))
|
reason=ex))
|
||||||
LOG.warning(msg)
|
|
||||||
|
|
||||||
def copy_sync_data(self, src_ldev, dest_ldev, size):
|
def copy_sync_data(self, src_ldev, dest_ldev, size):
|
||||||
src_vol = {'provider_location': six.text_type(src_ldev),
|
src_vol = {'provider_location': six.text_type(src_ldev),
|
||||||
|
@ -488,9 +486,8 @@ class HBSDCommon(object):
|
||||||
try:
|
try:
|
||||||
self.delete_ldev(svol, is_vvol)
|
self.delete_ldev(svol, is_vvol)
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
msg = basic_lib.set_msg(
|
LOG.warning(basic_lib.set_msg(313, ldev=svol,
|
||||||
313, ldev=svol, reason=six.text_type(ex))
|
reason=ex))
|
||||||
LOG.warning(msg)
|
|
||||||
|
|
||||||
return six.text_type(svol), type
|
return six.text_type(svol), type
|
||||||
|
|
||||||
|
@ -502,22 +499,21 @@ class HBSDCommon(object):
|
||||||
def create_ldev(self, size, ldev_range, pool_id, is_vvol):
|
def create_ldev(self, size, ldev_range, pool_id, is_vvol):
|
||||||
LOG.debug('create start (normal)')
|
LOG.debug('create start (normal)')
|
||||||
for i in basic_lib.DEFAULT_TRY_RANGE:
|
for i in basic_lib.DEFAULT_TRY_RANGE:
|
||||||
LOG.debug('Try number: %(tries)s / %(max_tries)s' %
|
LOG.debug('Try number: %(tries)s / %(max_tries)s',
|
||||||
{'tries': i + 1,
|
{'tries': i + 1,
|
||||||
'max_tries': len(basic_lib.DEFAULT_TRY_RANGE)})
|
'max_tries': len(basic_lib.DEFAULT_TRY_RANGE)})
|
||||||
new_ldev = self._get_unused_volume_num(ldev_range)
|
new_ldev = self._get_unused_volume_num(ldev_range)
|
||||||
try:
|
try:
|
||||||
self._add_ldev(new_ldev, size, pool_id, is_vvol)
|
self._add_ldev(new_ldev, size, pool_id, is_vvol)
|
||||||
except exception.HBSDNotFound:
|
except exception.HBSDNotFound:
|
||||||
msg = basic_lib.set_msg(312, resource='LDEV')
|
LOG.warning(basic_lib.set_msg(312, resource='LDEV'))
|
||||||
LOG.warning(msg)
|
|
||||||
continue
|
continue
|
||||||
else:
|
else:
|
||||||
break
|
break
|
||||||
else:
|
else:
|
||||||
msg = basic_lib.output_err(636)
|
msg = basic_lib.output_err(636)
|
||||||
raise exception.HBSDError(message=msg)
|
raise exception.HBSDError(message=msg)
|
||||||
LOG.debug('create end (normal: %s)' % six.text_type(new_ldev))
|
LOG.debug('create end (normal: %s)', new_ldev)
|
||||||
self.init_volinfo(self.volume_info, new_ldev)
|
self.init_volinfo(self.volume_info, new_ldev)
|
||||||
return new_ldev
|
return new_ldev
|
||||||
|
|
||||||
|
@ -544,8 +540,8 @@ class HBSDCommon(object):
|
||||||
'metadata': volume_metadata}
|
'metadata': volume_metadata}
|
||||||
|
|
||||||
def delete_ldev(self, ldev, is_vvol):
|
def delete_ldev(self, ldev, is_vvol):
|
||||||
LOG.debug('Call delete_ldev (LDEV: %(ldev)d is_vvol: %(vvol)s)'
|
LOG.debug('Call delete_ldev (LDEV: %(ldev)d is_vvol: %(vvol)s)',
|
||||||
% {'ldev': ldev, 'vvol': is_vvol})
|
{'ldev': ldev, 'vvol': is_vvol})
|
||||||
with self.pair_flock:
|
with self.pair_flock:
|
||||||
self.delete_pair(ldev)
|
self.delete_pair(ldev)
|
||||||
self.command.comm_delete_ldev(ldev, is_vvol)
|
self.command.comm_delete_ldev(ldev, is_vvol)
|
||||||
|
@ -553,15 +549,14 @@ class HBSDCommon(object):
|
||||||
if ldev in self.volume_info:
|
if ldev in self.volume_info:
|
||||||
self.volume_info.pop(ldev)
|
self.volume_info.pop(ldev)
|
||||||
LOG.debug('delete_ldev is finished '
|
LOG.debug('delete_ldev is finished '
|
||||||
'(LDEV: %(ldev)d, is_vvol: %(vvol)s)'
|
'(LDEV: %(ldev)d, is_vvol: %(vvol)s)',
|
||||||
% {'ldev': ldev, 'vvol': is_vvol})
|
{'ldev': ldev, 'vvol': is_vvol})
|
||||||
|
|
||||||
def delete_volume(self, volume):
|
def delete_volume(self, volume):
|
||||||
ldev = self.get_ldev(volume)
|
ldev = self.get_ldev(volume)
|
||||||
if ldev is None:
|
if ldev is None:
|
||||||
msg = basic_lib.set_msg(
|
LOG.warning(basic_lib.set_msg(304, method='delete_volume',
|
||||||
304, method='delete_volume', id=volume['id'])
|
id=volume['id']))
|
||||||
LOG.warning(msg)
|
|
||||||
return
|
return
|
||||||
self.add_volinfo(ldev, volume['id'])
|
self.add_volinfo(ldev, volume['id'])
|
||||||
if not self.volume_info[ldev]['in_use'].lock.acquire(False):
|
if not self.volume_info[ldev]['in_use'].lock.acquire(False):
|
||||||
|
@ -576,9 +571,8 @@ class HBSDCommon(object):
|
||||||
with self.volinfo_lock:
|
with self.volinfo_lock:
|
||||||
if ldev in self.volume_info:
|
if ldev in self.volume_info:
|
||||||
self.volume_info.pop(ldev)
|
self.volume_info.pop(ldev)
|
||||||
msg = basic_lib.set_msg(
|
LOG.warning(basic_lib.set_msg(
|
||||||
305, type='volume', id=volume['id'])
|
305, type='volume', id=volume['id']))
|
||||||
LOG.warning(msg)
|
|
||||||
except exception.HBSDBusy:
|
except exception.HBSDBusy:
|
||||||
raise exception.VolumeIsBusy(volume_name=volume['name'])
|
raise exception.VolumeIsBusy(volume_name=volume['name'])
|
||||||
finally:
|
finally:
|
||||||
|
@ -621,9 +615,8 @@ class HBSDCommon(object):
|
||||||
def delete_snapshot(self, snapshot):
|
def delete_snapshot(self, snapshot):
|
||||||
ldev = self.get_ldev(snapshot)
|
ldev = self.get_ldev(snapshot)
|
||||||
if ldev is None:
|
if ldev is None:
|
||||||
msg = basic_lib.set_msg(
|
LOG.warning(basic_lib.set_msg(
|
||||||
304, method='delete_snapshot', id=snapshot['id'])
|
304, method='delete_snapshot', id=snapshot['id']))
|
||||||
LOG.warning(msg)
|
|
||||||
return
|
return
|
||||||
self.add_volinfo(ldev, id=snapshot['id'], type='snapshot')
|
self.add_volinfo(ldev, id=snapshot['id'], type='snapshot')
|
||||||
if not self.volume_info[ldev]['in_use'].lock.acquire(False):
|
if not self.volume_info[ldev]['in_use'].lock.acquire(False):
|
||||||
|
@ -638,9 +631,8 @@ class HBSDCommon(object):
|
||||||
with self.volinfo_lock:
|
with self.volinfo_lock:
|
||||||
if ldev in self.volume_info:
|
if ldev in self.volume_info:
|
||||||
self.volume_info.pop(ldev)
|
self.volume_info.pop(ldev)
|
||||||
msg = basic_lib.set_msg(
|
LOG.warning(basic_lib.set_msg(
|
||||||
305, type='snapshot', id=snapshot['id'])
|
305, type='snapshot', id=snapshot['id']))
|
||||||
LOG.warning(msg)
|
|
||||||
except exception.HBSDBusy:
|
except exception.HBSDBusy:
|
||||||
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
|
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
|
||||||
finally:
|
finally:
|
||||||
|
@ -722,9 +714,8 @@ class HBSDCommon(object):
|
||||||
def output_backend_available_once(self):
|
def output_backend_available_once(self):
|
||||||
if self.output_first:
|
if self.output_first:
|
||||||
self.output_first = False
|
self.output_first = False
|
||||||
msg = basic_lib.set_msg(
|
LOG.warning(basic_lib.set_msg(
|
||||||
3, config_group=self.configuration.config_group)
|
3, config_group=self.configuration.config_group))
|
||||||
LOG.warning(msg)
|
|
||||||
|
|
||||||
def update_volume_stats(self, storage_protocol):
|
def update_volume_stats(self, storage_protocol):
|
||||||
data = {}
|
data = {}
|
||||||
|
@ -740,8 +731,7 @@ class HBSDCommon(object):
|
||||||
total_gb, free_gb = self.command.comm_get_dp_pool(
|
total_gb, free_gb = self.command.comm_get_dp_pool(
|
||||||
self.configuration.hitachi_pool_id)
|
self.configuration.hitachi_pool_id)
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.error(_LE('Failed to update volume status: %s') %
|
LOG.error(_LE('Failed to update volume status: %s'), ex)
|
||||||
six.text_type(ex))
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
data['total_capacity_gb'] = total_gb
|
data['total_capacity_gb'] = total_gb
|
||||||
|
@ -750,7 +740,7 @@ class HBSDCommon(object):
|
||||||
'reserved_percentage')
|
'reserved_percentage')
|
||||||
data['QoS_support'] = False
|
data['QoS_support'] = False
|
||||||
|
|
||||||
LOG.debug('Updating volume status (%s)' % data)
|
LOG.debug('Updating volume status (%s)', data)
|
||||||
|
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
@ -773,8 +763,7 @@ class HBSDCommon(object):
|
||||||
|
|
||||||
ldev = self._string2int(existing_ref.get('ldev'))
|
ldev = self._string2int(existing_ref.get('ldev'))
|
||||||
|
|
||||||
msg = basic_lib.set_msg(4, volume_id=volume['id'], ldev=ldev)
|
LOG.info(basic_lib.set_msg(4, volume_id=volume['id'], ldev=ldev))
|
||||||
LOG.info(msg)
|
|
||||||
|
|
||||||
return {'provider_location': ldev}
|
return {'provider_location': ldev}
|
||||||
|
|
||||||
|
@ -833,8 +822,7 @@ class HBSDCommon(object):
|
||||||
except exception.HBSDBusy:
|
except exception.HBSDBusy:
|
||||||
raise exception.HBSDVolumeIsBusy(volume_name=volume['name'])
|
raise exception.HBSDVolumeIsBusy(volume_name=volume['name'])
|
||||||
else:
|
else:
|
||||||
msg = basic_lib.set_msg(5, volume_id=volume['id'], ldev=ldev)
|
LOG.info(basic_lib.set_msg(5, volume_id=volume['id'], ldev=ldev))
|
||||||
LOG.info(msg)
|
|
||||||
finally:
|
finally:
|
||||||
if ldev in self.volume_info:
|
if ldev in self.volume_info:
|
||||||
self.volume_info[ldev]['in_use'].lock.release()
|
self.volume_info[ldev]['in_use'].lock.release()
|
||||||
|
|
|
@ -25,7 +25,7 @@ from oslo_utils import excutils
|
||||||
import six
|
import six
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _LW
|
from cinder.i18n import _LI, _LW
|
||||||
from cinder import utils
|
from cinder import utils
|
||||||
import cinder.volume.driver
|
import cinder.volume.driver
|
||||||
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
|
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
|
||||||
|
@ -83,8 +83,8 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
|
||||||
for opt in volume_opts:
|
for opt in volume_opts:
|
||||||
if not opt.secret:
|
if not opt.secret:
|
||||||
value = getattr(self.configuration, opt.name)
|
value = getattr(self.configuration, opt.name)
|
||||||
LOG.info('\t%-35s%s' %
|
LOG.info(_LI('\t%(name)-35s : %(value)s'),
|
||||||
(opt.name + ': ', six.text_type(value)))
|
{'name': opt.name, 'value': value})
|
||||||
self.common.command.output_param_to_log(self.configuration)
|
self.common.command.output_param_to_log(self.configuration)
|
||||||
|
|
||||||
def _add_wwn(self, hgs, port, gid, wwns):
|
def _add_wwn(self, hgs, port, gid, wwns):
|
||||||
|
@ -94,7 +94,7 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
|
||||||
detected = self.common.command.is_detected(port, wwn)
|
detected = self.common.command.is_detected(port, wwn)
|
||||||
hgs.append({'port': port, 'gid': gid, 'initiator_wwn': wwn,
|
hgs.append({'port': port, 'gid': gid, 'initiator_wwn': wwn,
|
||||||
'detected': detected})
|
'detected': detected})
|
||||||
LOG.debug('Create host group for %s' % hgs)
|
LOG.debug('Create host group for %s', hgs)
|
||||||
|
|
||||||
def _add_lun(self, hostgroups, ldev):
|
def _add_lun(self, hostgroups, ldev):
|
||||||
if hostgroups is self.pair_hostgroups:
|
if hostgroups is self.pair_hostgroups:
|
||||||
|
@ -107,8 +107,7 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
|
||||||
try:
|
try:
|
||||||
self.common.command.comm_delete_lun(hostgroups, ldev)
|
self.common.command.comm_delete_lun(hostgroups, ldev)
|
||||||
except exception.HBSDNotFound:
|
except exception.HBSDNotFound:
|
||||||
msg = basic_lib.set_msg(301, ldev=ldev)
|
LOG.warning(basic_lib.set_msg(301, ldev=ldev))
|
||||||
LOG.warning(msg)
|
|
||||||
|
|
||||||
def _get_hgname_gid(self, port, host_grp_name):
|
def _get_hgname_gid(self, port, host_grp_name):
|
||||||
return self.common.command.get_hgname_gid(port, host_grp_name)
|
return self.common.command.get_hgname_gid(port, host_grp_name)
|
||||||
|
@ -127,9 +126,9 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
|
||||||
def _fill_group(self, hgs, port, host_grp_name, wwns):
|
def _fill_group(self, hgs, port, host_grp_name, wwns):
|
||||||
added_hostgroup = False
|
added_hostgroup = False
|
||||||
LOG.debug('Create host group (hgs: %(hgs)s port: %(port)s '
|
LOG.debug('Create host group (hgs: %(hgs)s port: %(port)s '
|
||||||
'name: %(name)s wwns: %(wwns)s)'
|
'name: %(name)s wwns: %(wwns)s)',
|
||||||
% {'hgs': hgs, 'port': port,
|
{'hgs': hgs, 'port': port,
|
||||||
'name': host_grp_name, 'wwns': wwns})
|
'name': host_grp_name, 'wwns': wwns})
|
||||||
gid = self._get_hgname_gid(port, host_grp_name)
|
gid = self._get_hgname_gid(port, host_grp_name)
|
||||||
if gid is None:
|
if gid is None:
|
||||||
for retry_cnt in basic_lib.DEFAULT_TRY_RANGE:
|
for retry_cnt in basic_lib.DEFAULT_TRY_RANGE:
|
||||||
|
@ -139,13 +138,12 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
|
||||||
added_hostgroup = True
|
added_hostgroup = True
|
||||||
except exception.HBSDNotFound:
|
except exception.HBSDNotFound:
|
||||||
gid = None
|
gid = None
|
||||||
msg = basic_lib.set_msg(312, resource='GID')
|
LOG.warning(basic_lib.set_msg(312, resource='GID'))
|
||||||
LOG.warning(msg)
|
|
||||||
continue
|
continue
|
||||||
else:
|
else:
|
||||||
LOG.debug('Completed to add host target'
|
LOG.debug('Completed to add host target'
|
||||||
'(port: %(port)s gid: %(gid)d)'
|
'(port: %(port)s gid: %(gid)d)',
|
||||||
% {'port': port, 'gid': gid})
|
{'port': port, 'gid': gid})
|
||||||
break
|
break
|
||||||
else:
|
else:
|
||||||
msg = basic_lib.output_err(641)
|
msg = basic_lib.output_err(641)
|
||||||
|
@ -180,15 +178,12 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
|
||||||
try:
|
try:
|
||||||
self._fill_group(hgs, port, host_grp_name, wwns_copy)
|
self._fill_group(hgs, port, host_grp_name, wwns_copy)
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.warning(_LW('Failed to add host group: %s') %
|
LOG.warning(_LW('Failed to add host group: %s'), ex)
|
||||||
six.text_type(ex))
|
LOG.warning(basic_lib.set_msg(
|
||||||
msg = basic_lib.set_msg(
|
308, port=port, name=host_grp_name))
|
||||||
308, port=port, name=host_grp_name)
|
|
||||||
LOG.warning(msg)
|
|
||||||
|
|
||||||
if not hgs:
|
if not hgs:
|
||||||
msg = basic_lib.output_err(649)
|
raise exception.HBSDError(message=basic_lib.output_err(649))
|
||||||
raise exception.HBSDError(message=msg)
|
|
||||||
|
|
||||||
def add_hostgroup_pair(self, pair_hostgroups):
|
def add_hostgroup_pair(self, pair_hostgroups):
|
||||||
if self.configuration.hitachi_unit_name:
|
if self.configuration.hitachi_unit_name:
|
||||||
|
@ -232,7 +227,7 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
|
||||||
if 'wwpns' not in properties:
|
if 'wwpns' not in properties:
|
||||||
msg = basic_lib.output_err(650, resource='HBA')
|
msg = basic_lib.output_err(650, resource='HBA')
|
||||||
raise exception.HBSDError(message=msg)
|
raise exception.HBSDError(message=msg)
|
||||||
LOG.debug("wwpns: %s" % properties['wwpns'])
|
LOG.debug("wwpns: %s", properties['wwpns'])
|
||||||
|
|
||||||
hostgroups = []
|
hostgroups = []
|
||||||
security_ports = self._get_hostgroup_info(
|
security_ports = self._get_hostgroup_info(
|
||||||
|
@ -254,9 +249,8 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
|
||||||
self.common.command.comm_del_hostgrp(port, gid, host_grp_name)
|
self.common.command.comm_del_hostgrp(port, gid, host_grp_name)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
msg = basic_lib.set_msg(
|
LOG.warning(basic_lib.set_msg(
|
||||||
306, port=port, gid=gid, name=host_grp_name)
|
306, port=port, gid=gid, name=host_grp_name))
|
||||||
LOG.warning(msg)
|
|
||||||
|
|
||||||
def _check_volume_mapping(self, hostgroup):
|
def _check_volume_mapping(self, hostgroup):
|
||||||
port = hostgroup['port']
|
port = hostgroup['port']
|
||||||
|
@ -372,8 +366,8 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
|
||||||
|
|
||||||
def _initialize_connection(self, ldev, connector, src_hgs=None):
|
def _initialize_connection(self, ldev, connector, src_hgs=None):
|
||||||
LOG.debug("Call _initialize_connection "
|
LOG.debug("Call _initialize_connection "
|
||||||
"(config_group: %(group)s ldev: %(ldev)d)"
|
"(config_group: %(group)s ldev: %(ldev)d)",
|
||||||
% {'group': self.configuration.config_group, 'ldev': ldev})
|
{'group': self.configuration.config_group, 'ldev': ldev})
|
||||||
if src_hgs is self.pair_hostgroups:
|
if src_hgs is self.pair_hostgroups:
|
||||||
hostgroups = src_hgs
|
hostgroups = src_hgs
|
||||||
else:
|
else:
|
||||||
|
@ -387,8 +381,7 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
|
||||||
try:
|
try:
|
||||||
self._add_lun(hostgroups, ldev)
|
self._add_lun(hostgroups, ldev)
|
||||||
except exception.HBSDNotFound:
|
except exception.HBSDNotFound:
|
||||||
msg = basic_lib.set_msg(311, ldev=ldev)
|
LOG.warning(basic_lib.set_msg(311, ldev=ldev))
|
||||||
LOG.warning(msg)
|
|
||||||
for i in range(self.max_hostgroups + 1):
|
for i in range(self.max_hostgroups + 1):
|
||||||
self.pair_hostnum += 1
|
self.pair_hostnum += 1
|
||||||
pair_hostgroups = []
|
pair_hostgroups = []
|
||||||
|
@ -419,18 +412,18 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
|
||||||
self.common.volume_info[ldev]['in_use']:
|
self.common.volume_info[ldev]['in_use']:
|
||||||
hostgroups = self._initialize_connection(ldev, connector)
|
hostgroups = self._initialize_connection(ldev, connector)
|
||||||
properties = self._get_properties(volume, hostgroups)
|
properties = self._get_properties(volume, hostgroups)
|
||||||
LOG.debug('Initialize volume_info: %s'
|
LOG.debug('Initialize volume_info: %s',
|
||||||
% self.common.volume_info)
|
self.common.volume_info)
|
||||||
|
|
||||||
LOG.debug('HFCDrv: properties=%s' % properties)
|
LOG.debug('HFCDrv: properties=%s', properties)
|
||||||
return {
|
return {
|
||||||
'driver_volume_type': 'fibre_channel',
|
'driver_volume_type': 'fibre_channel',
|
||||||
'data': properties
|
'data': properties
|
||||||
}
|
}
|
||||||
|
|
||||||
def _terminate_connection(self, ldev, connector, src_hgs):
|
def _terminate_connection(self, ldev, connector, src_hgs):
|
||||||
LOG.debug("Call _terminate_connection(config_group: %s)"
|
LOG.debug("Call _terminate_connection(config_group: %s)",
|
||||||
% self.configuration.config_group)
|
self.configuration.config_group)
|
||||||
hostgroups = src_hgs[:]
|
hostgroups = src_hgs[:]
|
||||||
self._delete_lun(hostgroups, ldev)
|
self._delete_lun(hostgroups, ldev)
|
||||||
LOG.debug("*** _terminate_ ***")
|
LOG.debug("*** _terminate_ ***")
|
||||||
|
@ -440,8 +433,7 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
|
||||||
self.do_setup_status.wait()
|
self.do_setup_status.wait()
|
||||||
ldev = self.common.get_ldev(volume)
|
ldev = self.common.get_ldev(volume)
|
||||||
if ldev is None:
|
if ldev is None:
|
||||||
msg = basic_lib.set_msg(302, volume_id=volume['id'])
|
LOG.warning(basic_lib.set_msg(302, volume_id=volume['id']))
|
||||||
LOG.warning(msg)
|
|
||||||
return
|
return
|
||||||
|
|
||||||
if 'wwpns' not in connector:
|
if 'wwpns' not in connector:
|
||||||
|
@ -461,7 +453,7 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
|
||||||
self._terminate_connection(ldev, connector, hostgroups)
|
self._terminate_connection(ldev, connector, hostgroups)
|
||||||
properties = self._get_properties(volume, hostgroups,
|
properties = self._get_properties(volume, hostgroups,
|
||||||
terminate=True)
|
terminate=True)
|
||||||
LOG.debug('Terminate volume_info: %s' % self.common.volume_info)
|
LOG.debug('Terminate volume_info: %s', self.common.volume_info)
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'driver_volume_type': 'fibre_channel',
|
'driver_volume_type': 'fibre_channel',
|
||||||
|
|
|
@ -27,7 +27,7 @@ from oslo_utils import units
|
||||||
import six
|
import six
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _LE, _LW
|
from cinder.i18n import _LE, _LI, _LW
|
||||||
from cinder.openstack.common import loopingcall
|
from cinder.openstack.common import loopingcall
|
||||||
from cinder import utils
|
from cinder import utils
|
||||||
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
|
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
|
||||||
|
@ -454,7 +454,7 @@ class HBSDHORCM(basic_lib.HBSDBasicLib):
|
||||||
continue
|
continue
|
||||||
|
|
||||||
target_wwns[port] = line[10]
|
target_wwns[port] = line[10]
|
||||||
LOG.debug('target wwns: %s' % target_wwns)
|
LOG.debug('target wwns: %s', target_wwns)
|
||||||
return target_wwns
|
return target_wwns
|
||||||
|
|
||||||
def comm_get_hbawwn(self, hostgroups, wwns, port, is_detected):
|
def comm_get_hbawwn(self, hostgroups, wwns, port, is_detected):
|
||||||
|
@ -584,8 +584,7 @@ class HBSDHORCM(basic_lib.HBSDBasicLib):
|
||||||
if (re.search('SSB=%s' % SNAP_LAST_PATH_SSB, stderr) and
|
if (re.search('SSB=%s' % SNAP_LAST_PATH_SSB, stderr) and
|
||||||
not self.comm_get_snapshot(ldev) or
|
not self.comm_get_snapshot(ldev) or
|
||||||
re.search('SSB=%s' % HOST_IO_SSB, stderr)):
|
re.search('SSB=%s' % HOST_IO_SSB, stderr)):
|
||||||
msg = basic_lib.set_msg(310, ldev=ldev, reason=stderr)
|
LOG.warning(basic_lib.set_msg(310, ldev=ldev, reason=stderr))
|
||||||
LOG.warning(msg)
|
|
||||||
|
|
||||||
if time.time() - start >= LUN_DELETE_WAITTIME:
|
if time.time() - start >= LUN_DELETE_WAITTIME:
|
||||||
msg = basic_lib.output_err(
|
msg = basic_lib.output_err(
|
||||||
|
@ -790,9 +789,8 @@ class HBSDHORCM(basic_lib.HBSDBasicLib):
|
||||||
if is_once:
|
if is_once:
|
||||||
break
|
break
|
||||||
else:
|
else:
|
||||||
msg = basic_lib.set_msg(
|
LOG.warning(basic_lib.set_msg(
|
||||||
314, ldev=ldev, lun=lun, port=port, id=gid)
|
314, ldev=ldev, lun=lun, port=port, id=gid))
|
||||||
LOG.warning(msg)
|
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
self.comm_unlock()
|
self.comm_unlock()
|
||||||
|
@ -885,8 +883,7 @@ class HBSDHORCM(basic_lib.HBSDBasicLib):
|
||||||
self.comm_lock()
|
self.comm_lock()
|
||||||
ret, stdout, stderr = self.exec_raidcom('raidcom', args)
|
ret, stdout, stderr = self.exec_raidcom('raidcom', args)
|
||||||
if ret:
|
if ret:
|
||||||
msg = basic_lib.set_msg(315, ldev=ldev, reason=stderr)
|
LOG.warning(basic_lib.set_msg(315, ldev=ldev, reason=stderr))
|
||||||
LOG.warning(msg)
|
|
||||||
finally:
|
finally:
|
||||||
self.comm_unlock()
|
self.comm_unlock()
|
||||||
|
|
||||||
|
@ -896,9 +893,8 @@ class HBSDHORCM(basic_lib.HBSDBasicLib):
|
||||||
def discard_zero_page(self, ldev):
|
def discard_zero_page(self, ldev):
|
||||||
try:
|
try:
|
||||||
self.comm_modify_ldev(ldev)
|
self.comm_modify_ldev(ldev)
|
||||||
except Exception as e:
|
except Exception as ex:
|
||||||
LOG.warning(_LW('Failed to discard zero page: %s') %
|
LOG.warning(_LW('Failed to discard zero page: %s'), ex)
|
||||||
six.text_type(e))
|
|
||||||
|
|
||||||
@storage_synchronized
|
@storage_synchronized
|
||||||
def comm_add_snapshot(self, pvol, svol):
|
def comm_add_snapshot(self, pvol, svol):
|
||||||
|
@ -1396,8 +1392,7 @@ HORCM_CMD
|
||||||
[basic_lib.PSUS], timeout,
|
[basic_lib.PSUS], timeout,
|
||||||
interval, check_svol=True)
|
interval, check_svol=True)
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.warning(_LW('Failed to create pair: %s') %
|
LOG.warning(_LW('Failed to create pair: %s'), ex)
|
||||||
six.text_type(ex))
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
self.comm_pairsplit(copy_group, ldev_name)
|
self.comm_pairsplit(copy_group, ldev_name)
|
||||||
|
@ -1406,23 +1401,20 @@ HORCM_CMD
|
||||||
[basic_lib.SMPL], timeout,
|
[basic_lib.SMPL], timeout,
|
||||||
self.conf.hitachi_async_copy_check_interval)
|
self.conf.hitachi_async_copy_check_interval)
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.warning(_LW('Failed to create pair: %s') %
|
LOG.warning(_LW('Failed to create pair: %s'), ex)
|
||||||
six.text_type(ex))
|
|
||||||
|
|
||||||
if self.is_smpl(copy_group, ldev_name):
|
if self.is_smpl(copy_group, ldev_name):
|
||||||
try:
|
try:
|
||||||
self.delete_pair_config(pvol, svol, copy_group,
|
self.delete_pair_config(pvol, svol, copy_group,
|
||||||
ldev_name)
|
ldev_name)
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.warning(_LW('Failed to create pair: %s') %
|
LOG.warning(_LW('Failed to create pair: %s'), ex)
|
||||||
six.text_type(ex))
|
|
||||||
|
|
||||||
if restart:
|
if restart:
|
||||||
try:
|
try:
|
||||||
self.restart_pair_horcm()
|
self.restart_pair_horcm()
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.warning(_LW('Failed to restart horcm: %s') %
|
LOG.warning(_LW('Failed to restart horcm: %s'), ex)
|
||||||
six.text_type(ex))
|
|
||||||
|
|
||||||
else:
|
else:
|
||||||
self.check_snap_count(pvol)
|
self.check_snap_count(pvol)
|
||||||
|
@ -1440,8 +1432,7 @@ HORCM_CMD
|
||||||
pvol, svol, [basic_lib.SMPL], timeout,
|
pvol, svol, [basic_lib.SMPL], timeout,
|
||||||
self.conf.hitachi_async_copy_check_interval)
|
self.conf.hitachi_async_copy_check_interval)
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.warning(_LW('Failed to create pair: %s') %
|
LOG.warning(_LW('Failed to create pair: %s'), ex)
|
||||||
six.text_type(ex))
|
|
||||||
|
|
||||||
def delete_pair(self, pvol, svol, is_vvol):
|
def delete_pair(self, pvol, svol, is_vvol):
|
||||||
timeout = basic_lib.DEFAULT_PROCESS_WAITTIME
|
timeout = basic_lib.DEFAULT_PROCESS_WAITTIME
|
||||||
|
@ -1480,8 +1471,8 @@ HORCM_CMD
|
||||||
for opt in volume_opts:
|
for opt in volume_opts:
|
||||||
if not opt.secret:
|
if not opt.secret:
|
||||||
value = getattr(conf, opt.name)
|
value = getattr(conf, opt.name)
|
||||||
LOG.info('\t%-35s%s' % (opt.name + ': ',
|
LOG.info(_LI('\t%(name)-35s : %(value)s'),
|
||||||
six.text_type(value)))
|
{'name': opt.name, 'value': value})
|
||||||
|
|
||||||
def create_lock_file(self):
|
def create_lock_file(self):
|
||||||
inst = self.conf.hitachi_horcm_numbers[0]
|
inst = self.conf.hitachi_horcm_numbers[0]
|
||||||
|
|
|
@ -24,7 +24,7 @@ from oslo_log import log as logging
|
||||||
import six
|
import six
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _LE
|
from cinder.i18n import _LE, _LI
|
||||||
from cinder import utils
|
from cinder import utils
|
||||||
import cinder.volume.driver
|
import cinder.volume.driver
|
||||||
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
|
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
|
||||||
|
@ -70,8 +70,8 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
self.configuration.append_config_values(volume_opts)
|
self.configuration.append_config_values(volume_opts)
|
||||||
if (self.configuration.hitachi_auth_method and
|
if (self.configuration.hitachi_auth_method and
|
||||||
self.configuration.hitachi_auth_method not in CHAP_METHOD):
|
self.configuration.hitachi_auth_method not in CHAP_METHOD):
|
||||||
msg = basic_lib.output_err(601, param='hitachi_auth_method')
|
raise exception.HBSDError(
|
||||||
raise exception.HBSDError(message=msg)
|
message=basic_lib.output_err(601, param='hitachi_auth_method'))
|
||||||
if self.configuration.hitachi_auth_method == 'None':
|
if self.configuration.hitachi_auth_method == 'None':
|
||||||
self.configuration.hitachi_auth_method = None
|
self.configuration.hitachi_auth_method = None
|
||||||
for opt in volume_opts:
|
for opt in volume_opts:
|
||||||
|
@ -84,8 +84,8 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
except exception.HBSDError:
|
except exception.HBSDError:
|
||||||
raise
|
raise
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
msg = basic_lib.output_err(601, param=six.text_type(ex))
|
raise exception.HBSDError(
|
||||||
raise exception.HBSDError(message=msg)
|
message=basic_lib.output_err(601, param=six.text_type(ex)))
|
||||||
|
|
||||||
def output_param_to_log(self):
|
def output_param_to_log(self):
|
||||||
lock = basic_lib.get_process_lock(self.common.system_lock_file)
|
lock = basic_lib.get_process_lock(self.common.system_lock_file)
|
||||||
|
@ -95,15 +95,14 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
for opt in volume_opts:
|
for opt in volume_opts:
|
||||||
if not opt.secret:
|
if not opt.secret:
|
||||||
value = getattr(self.configuration, opt.name)
|
value = getattr(self.configuration, opt.name)
|
||||||
LOG.info('\t%-35s%s' % (opt.name + ': ',
|
LOG.info(_LI('\t%(name)-35s : %(value)s'),
|
||||||
six.text_type(value)))
|
{'name': opt.name, 'value': value})
|
||||||
|
|
||||||
def _delete_lun_iscsi(self, hostgroups, ldev):
|
def _delete_lun_iscsi(self, hostgroups, ldev):
|
||||||
try:
|
try:
|
||||||
self.common.command.comm_delete_lun_iscsi(hostgroups, ldev)
|
self.common.command.comm_delete_lun_iscsi(hostgroups, ldev)
|
||||||
except exception.HBSDNotFound:
|
except exception.HBSDNotFound:
|
||||||
msg = basic_lib.set_msg(301, ldev=ldev)
|
LOG.warning(basic_lib.set_msg(301, ldev=ldev))
|
||||||
LOG.warning(msg)
|
|
||||||
|
|
||||||
def _add_target(self, hostgroups, ldev):
|
def _add_target(self, hostgroups, ldev):
|
||||||
self.common.add_lun('autargetmap', hostgroups, ldev)
|
self.common.add_lun('autargetmap', hostgroups, ldev)
|
||||||
|
@ -111,7 +110,7 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
def _add_initiator(self, hgs, port, gid, host_iqn):
|
def _add_initiator(self, hgs, port, gid, host_iqn):
|
||||||
self.common.command.comm_add_initiator(port, gid, host_iqn)
|
self.common.command.comm_add_initiator(port, gid, host_iqn)
|
||||||
hgs.append({'port': port, 'gid': int(gid), 'detected': True})
|
hgs.append({'port': port, 'gid': int(gid), 'detected': True})
|
||||||
LOG.debug("Create iSCSI target for %s" % hgs)
|
LOG.debug("Create iSCSI target for %s", hgs)
|
||||||
|
|
||||||
def _get_unused_gid_iscsi(self, port):
|
def _get_unused_gid_iscsi(self, port):
|
||||||
group_range = self.configuration.hitachi_group_range
|
group_range = self.configuration.hitachi_group_range
|
||||||
|
@ -123,16 +122,14 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
ret, _stdout, _stderr = self.common.command.delete_iscsi_target(
|
ret, _stdout, _stderr = self.common.command.delete_iscsi_target(
|
||||||
port, target_no, target_alias)
|
port, target_no, target_alias)
|
||||||
if ret:
|
if ret:
|
||||||
msg = basic_lib.set_msg(
|
LOG.warning(basic_lib.set_msg(
|
||||||
307, port=port, tno=target_no, alias=target_alias)
|
307, port=port, tno=target_no, alias=target_alias))
|
||||||
LOG.warning(msg)
|
|
||||||
|
|
||||||
def _delete_chap_user(self, port):
|
def _delete_chap_user(self, port):
|
||||||
ret, _stdout, _stderr = self.common.command.delete_chap_user(port)
|
ret, _stdout, _stderr = self.common.command.delete_chap_user(port)
|
||||||
if ret:
|
if ret:
|
||||||
msg = basic_lib.set_msg(
|
LOG.warning(basic_lib.set_msg(
|
||||||
303, user=self.configuration.hitachi_auth_user)
|
303, user=self.configuration.hitachi_auth_user))
|
||||||
LOG.warning(msg)
|
|
||||||
|
|
||||||
def _get_hostgroup_info_iscsi(self, hgs, host_iqn):
|
def _get_hostgroup_info_iscsi(self, hgs, host_iqn):
|
||||||
return self.common.command.comm_get_hostgroup_info_iscsi(
|
return self.common.command.comm_get_hostgroup_info_iscsi(
|
||||||
|
@ -147,8 +144,8 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
hostgroup['ip_addr'] = ip_addr
|
hostgroup['ip_addr'] = ip_addr
|
||||||
hostgroup['ip_port'] = ip_port
|
hostgroup['ip_port'] = ip_port
|
||||||
hostgroup['target_iqn'] = target_iqn
|
hostgroup['target_iqn'] = target_iqn
|
||||||
LOG.debug("ip_addr=%(addr)s ip_port=%(port)s target_iqn=%(iqn)s"
|
LOG.debug("ip_addr=%(addr)s ip_port=%(port)s target_iqn=%(iqn)s",
|
||||||
% {'addr': ip_addr, 'port': ip_port, 'iqn': target_iqn})
|
{'addr': ip_addr, 'port': ip_port, 'iqn': target_iqn})
|
||||||
|
|
||||||
def _fill_groups(self, hgs, ports, target_iqn, target_alias, add_iqn):
|
def _fill_groups(self, hgs, ports, target_iqn, target_alias, add_iqn):
|
||||||
for port in ports:
|
for port in ports:
|
||||||
|
@ -156,7 +153,7 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
added_user = False
|
added_user = False
|
||||||
LOG.debug('Create target (hgs: %(hgs)s port: %(port)s '
|
LOG.debug('Create target (hgs: %(hgs)s port: %(port)s '
|
||||||
'target_iqn: %(tiqn)s target_alias: %(alias)s '
|
'target_iqn: %(tiqn)s target_alias: %(alias)s '
|
||||||
'add_iqn: %(aiqn)s)' %
|
'add_iqn: %(aiqn)s)',
|
||||||
{'hgs': hgs, 'port': port, 'tiqn': target_iqn,
|
{'hgs': hgs, 'port': port, 'tiqn': target_iqn,
|
||||||
'alias': target_alias, 'aiqn': add_iqn})
|
'alias': target_alias, 'aiqn': add_iqn})
|
||||||
gid = self.common.command.get_gid_from_targetiqn(
|
gid = self.common.command.get_gid_from_targetiqn(
|
||||||
|
@ -170,22 +167,20 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
port, gid, target_alias, target_iqn)
|
port, gid, target_alias, target_iqn)
|
||||||
added_hostgroup = True
|
added_hostgroup = True
|
||||||
except exception.HBSDNotFound:
|
except exception.HBSDNotFound:
|
||||||
msg = basic_lib.set_msg(312, resource='GID')
|
LOG.warning(basic_lib.set_msg(312, resource='GID'))
|
||||||
LOG.warning(msg)
|
|
||||||
continue
|
continue
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
msg = basic_lib.set_msg(
|
LOG.warning(basic_lib.set_msg(
|
||||||
309, port=port, alias=target_alias,
|
309, port=port, alias=target_alias,
|
||||||
reason=six.text_type(ex))
|
reason=ex))
|
||||||
LOG.warning(msg)
|
|
||||||
break
|
break
|
||||||
else:
|
else:
|
||||||
LOG.debug('Completed to add target'
|
LOG.debug('Completed to add target'
|
||||||
'(port: %(port)s gid: %(gid)d)'
|
'(port: %(port)s gid: %(gid)d)',
|
||||||
% {'port': port, 'gid': gid})
|
{'port': port, 'gid': gid})
|
||||||
break
|
break
|
||||||
if gid is None:
|
if gid is None:
|
||||||
LOG.error(_LE('Failed to add target(port: %s)') % port)
|
LOG.error(_LE('Failed to add target(port: %s)'), port)
|
||||||
continue
|
continue
|
||||||
try:
|
try:
|
||||||
if added_hostgroup:
|
if added_hostgroup:
|
||||||
|
@ -196,9 +191,8 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
port, target_alias)
|
port, target_alias)
|
||||||
self._add_initiator(hgs, port, gid, add_iqn)
|
self._add_initiator(hgs, port, gid, add_iqn)
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
msg = basic_lib.set_msg(
|
LOG.warning(basic_lib.set_msg(
|
||||||
316, port=port, reason=six.text_type(ex))
|
316, port=port, reason=ex))
|
||||||
LOG.warning(msg)
|
|
||||||
if added_hostgroup:
|
if added_hostgroup:
|
||||||
if added_user:
|
if added_user:
|
||||||
self._delete_chap_user(port)
|
self._delete_chap_user(port)
|
||||||
|
@ -227,15 +221,14 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
self.add_hostgroup_core(hgs, diff_ports, target_iqn,
|
self.add_hostgroup_core(hgs, diff_ports, target_iqn,
|
||||||
target_alias, master_iqn)
|
target_alias, master_iqn)
|
||||||
if not hgs:
|
if not hgs:
|
||||||
msg = basic_lib.output_err(649)
|
raise exception.HBSDError(message=basic_lib.output_err(649))
|
||||||
raise exception.HBSDError(message=msg)
|
|
||||||
|
|
||||||
def add_hostgroup(self):
|
def add_hostgroup(self):
|
||||||
properties = utils.brick_get_connector_properties()
|
properties = utils.brick_get_connector_properties()
|
||||||
if 'initiator' not in properties:
|
if 'initiator' not in properties:
|
||||||
msg = basic_lib.output_err(650, resource='HBA')
|
raise exception.HBSDError(
|
||||||
raise exception.HBSDError(message=msg)
|
message=basic_lib.output_err(650, resource='HBA'))
|
||||||
LOG.debug("initiator: %s" % properties['initiator'])
|
LOG.debug("initiator: %s", properties['initiator'])
|
||||||
hostgroups = []
|
hostgroups = []
|
||||||
security_ports = self._get_hostgroup_info_iscsi(
|
security_ports = self._get_hostgroup_info_iscsi(
|
||||||
hostgroups, properties['initiator'])
|
hostgroups, properties['initiator'])
|
||||||
|
@ -325,8 +318,8 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
|
|
||||||
def _initialize_connection(self, ldev, connector, src_hgs=None):
|
def _initialize_connection(self, ldev, connector, src_hgs=None):
|
||||||
LOG.debug("Call _initialize_connection "
|
LOG.debug("Call _initialize_connection "
|
||||||
"(config_group: %(group)s ldev: %(ldev)d)"
|
"(config_group: %(group)s ldev: %(ldev)d)",
|
||||||
% {'group': self.configuration.config_group, 'ldev': ldev})
|
{'group': self.configuration.config_group, 'ldev': ldev})
|
||||||
if src_hgs:
|
if src_hgs:
|
||||||
hostgroups = src_hgs[:]
|
hostgroups = src_hgs[:]
|
||||||
else:
|
else:
|
||||||
|
@ -344,26 +337,26 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
self.do_setup_status.wait()
|
self.do_setup_status.wait()
|
||||||
ldev = self.common.get_ldev(volume)
|
ldev = self.common.get_ldev(volume)
|
||||||
if ldev is None:
|
if ldev is None:
|
||||||
msg = basic_lib.output_err(619, volume_id=volume['id'])
|
raise exception.HBSDError(
|
||||||
raise exception.HBSDError(message=msg)
|
message=basic_lib.output_err(619, volume_id=volume['id']))
|
||||||
self.common.add_volinfo(ldev, volume['id'])
|
self.common.add_volinfo(ldev, volume['id'])
|
||||||
with self.common.volume_info[ldev]['lock'],\
|
with self.common.volume_info[ldev]['lock'],\
|
||||||
self.common.volume_info[ldev]['in_use']:
|
self.common.volume_info[ldev]['in_use']:
|
||||||
hostgroups = self._initialize_connection(ldev, connector)
|
hostgroups = self._initialize_connection(ldev, connector)
|
||||||
protocol = 'iscsi'
|
protocol = 'iscsi'
|
||||||
properties = self._get_properties(volume, hostgroups)
|
properties = self._get_properties(volume, hostgroups)
|
||||||
LOG.debug('Initialize volume_info: %s'
|
LOG.debug('Initialize volume_info: %s',
|
||||||
% self.common.volume_info)
|
self.common.volume_info)
|
||||||
|
|
||||||
LOG.debug('HFCDrv: properties=%s' % properties)
|
LOG.debug('HFCDrv: properties=%s', properties)
|
||||||
return {
|
return {
|
||||||
'driver_volume_type': protocol,
|
'driver_volume_type': protocol,
|
||||||
'data': properties
|
'data': properties
|
||||||
}
|
}
|
||||||
|
|
||||||
def _terminate_connection(self, ldev, connector, src_hgs):
|
def _terminate_connection(self, ldev, connector, src_hgs):
|
||||||
LOG.debug("Call _terminate_connection(config_group: %s)"
|
LOG.debug("Call _terminate_connection(config_group: %s)",
|
||||||
% self.configuration.config_group)
|
self.configuration.config_group)
|
||||||
hostgroups = src_hgs[:]
|
hostgroups = src_hgs[:]
|
||||||
self._delete_lun_iscsi(hostgroups, ldev)
|
self._delete_lun_iscsi(hostgroups, ldev)
|
||||||
|
|
||||||
|
@ -373,20 +366,18 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
self.do_setup_status.wait()
|
self.do_setup_status.wait()
|
||||||
ldev = self.common.get_ldev(volume)
|
ldev = self.common.get_ldev(volume)
|
||||||
if ldev is None:
|
if ldev is None:
|
||||||
msg = basic_lib.set_msg(302, volume_id=volume['id'])
|
LOG.warning(basic_lib.set_msg(302, volume_id=volume['id']))
|
||||||
LOG.warning(msg)
|
|
||||||
return
|
return
|
||||||
|
|
||||||
if 'initiator' not in connector:
|
if 'initiator' not in connector:
|
||||||
msg = basic_lib.output_err(650, resource='HBA')
|
raise exception.HBSDError(
|
||||||
raise exception.HBSDError(message=msg)
|
message=basic_lib.output_err(650, resource='HBA'))
|
||||||
|
|
||||||
hostgroups = []
|
hostgroups = []
|
||||||
self._get_hostgroup_info_iscsi(hostgroups,
|
self._get_hostgroup_info_iscsi(hostgroups,
|
||||||
connector['initiator'])
|
connector['initiator'])
|
||||||
if not hostgroups:
|
if not hostgroups:
|
||||||
msg = basic_lib.output_err(649)
|
raise exception.HBSDError(message=basic_lib.output_err(649))
|
||||||
raise exception.HBSDError(message=msg)
|
|
||||||
|
|
||||||
self.common.add_volinfo(ldev, volume['id'])
|
self.common.add_volinfo(ldev, volume['id'])
|
||||||
with self.common.volume_info[ldev]['lock'],\
|
with self.common.volume_info[ldev]['lock'],\
|
||||||
|
@ -412,8 +403,8 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
self.do_setup_status.wait()
|
self.do_setup_status.wait()
|
||||||
if volume['volume_attachment']:
|
if volume['volume_attachment']:
|
||||||
desc = 'volume %s' % volume['id']
|
desc = 'volume %s' % volume['id']
|
||||||
msg = basic_lib.output_err(660, desc=desc)
|
raise exception.HBSDError(
|
||||||
raise exception.HBSDError(message=msg)
|
message=basic_lib.output_err(660, desc=desc))
|
||||||
super(HBSDISCSIDriver, self).copy_volume_to_image(context, volume,
|
super(HBSDISCSIDriver, self).copy_volume_to_image(context, volume,
|
||||||
image_service,
|
image_service,
|
||||||
image_meta)
|
image_meta)
|
||||||
|
|
|
@ -143,8 +143,8 @@ class HBSDSNM2(basic_lib.HBSDBasicLib):
|
||||||
if int(line[3]) == ldev:
|
if int(line[3]) == ldev:
|
||||||
hlu = int(line[2])
|
hlu = int(line[2])
|
||||||
LOG.warning(_LW('ldev(%(ldev)d) is already mapped '
|
LOG.warning(_LW('ldev(%(ldev)d) is already mapped '
|
||||||
'(hlun: %(hlu)d)')
|
'(hlun: %(hlu)d)'),
|
||||||
% {'ldev': ldev, 'hlu': hlu})
|
{'ldev': ldev, 'hlu': hlu})
|
||||||
return hlu
|
return hlu
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
@ -295,7 +295,7 @@ class HBSDSNM2(basic_lib.HBSDBasicLib):
|
||||||
else:
|
else:
|
||||||
target_wwns[port] = line[3]
|
target_wwns[port] = line[3]
|
||||||
|
|
||||||
LOG.debug('target wwns: %s' % target_wwns)
|
LOG.debug('target wwns: %s', target_wwns)
|
||||||
return target_wwns
|
return target_wwns
|
||||||
|
|
||||||
def get_hostgroup_from_wwns(self, hostgroups, port, wwns, buf, login):
|
def get_hostgroup_from_wwns(self, hostgroups, port, wwns, buf, login):
|
||||||
|
@ -379,7 +379,7 @@ class HBSDSNM2(basic_lib.HBSDBasicLib):
|
||||||
no_lun_cnt = 0
|
no_lun_cnt = 0
|
||||||
deleted_hostgroups = []
|
deleted_hostgroups = []
|
||||||
for hostgroup in hostgroups:
|
for hostgroup in hostgroups:
|
||||||
LOG.debug('comm_delete_lun: hostgroup is %s' % hostgroup)
|
LOG.debug('comm_delete_lun: hostgroup is %s', hostgroup)
|
||||||
port = hostgroup['port']
|
port = hostgroup['port']
|
||||||
gid = hostgroup['gid']
|
gid = hostgroup['gid']
|
||||||
ctl_no = port[0]
|
ctl_no = port[0]
|
||||||
|
@ -423,7 +423,7 @@ class HBSDSNM2(basic_lib.HBSDBasicLib):
|
||||||
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
|
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
|
||||||
|
|
||||||
deleted_hostgroups.append({'port': port, 'gid': gid})
|
deleted_hostgroups.append({'port': port, 'gid': gid})
|
||||||
LOG.debug('comm_delete_lun is over (%d)' % lun)
|
LOG.debug('comm_delete_lun is over (%d)', lun)
|
||||||
|
|
||||||
def comm_delete_lun(self, hostgroups, ldev):
|
def comm_delete_lun(self, hostgroups, ldev):
|
||||||
self.comm_delete_lun_core('auhgmap', hostgroups, ldev)
|
self.comm_delete_lun_core('auhgmap', hostgroups, ldev)
|
||||||
|
@ -554,9 +554,8 @@ class HBSDSNM2(basic_lib.HBSDBasicLib):
|
||||||
if is_once:
|
if is_once:
|
||||||
break
|
break
|
||||||
else:
|
else:
|
||||||
msg = basic_lib.set_msg(
|
LOG.warning(basic_lib.set_msg(
|
||||||
314, ldev=ldev, lun=hlu, port=port, id=gid)
|
314, ldev=ldev, lun=hlu, port=port, id=gid))
|
||||||
LOG.warning(msg)
|
|
||||||
|
|
||||||
if not is_ok:
|
if not is_ok:
|
||||||
if stderr:
|
if stderr:
|
||||||
|
@ -680,8 +679,8 @@ class HBSDSNM2(basic_lib.HBSDBasicLib):
|
||||||
if added_flag:
|
if added_flag:
|
||||||
_ret, _stdout, _stderr = self.delete_chap_user(port)
|
_ret, _stdout, _stderr = self.delete_chap_user(port)
|
||||||
if _ret:
|
if _ret:
|
||||||
msg = basic_lib.set_msg(303, user=auth_username)
|
LOG.warning(basic_lib.set_msg(
|
||||||
LOG.warning(msg)
|
303, user=auth_username))
|
||||||
|
|
||||||
msg = basic_lib.output_err(
|
msg = basic_lib.output_err(
|
||||||
600, cmd='auchapuser', ret=ret, out=stdout, err=stderr)
|
600, cmd='auchapuser', ret=ret, out=stdout, err=stderr)
|
||||||
|
@ -773,8 +772,8 @@ class HBSDSNM2(basic_lib.HBSDBasicLib):
|
||||||
gid = int(shlex.split(line)[0][0:3])
|
gid = int(shlex.split(line)[0][0:3])
|
||||||
hostgroups.append(
|
hostgroups.append(
|
||||||
{'port': port, 'gid': gid, 'detected': True})
|
{'port': port, 'gid': gid, 'detected': True})
|
||||||
LOG.debug('Find port=%(port)s gid=%(gid)d'
|
LOG.debug('Find port=%(port)s gid=%(gid)d',
|
||||||
% {'port': port, 'gid': gid})
|
{'port': port, 'gid': gid})
|
||||||
if port not in security_ports:
|
if port not in security_ports:
|
||||||
security_ports.append(port)
|
security_ports.append(port)
|
||||||
|
|
||||||
|
|
|
@ -62,15 +62,15 @@ class HuaweiVolumeDriver(object):
|
||||||
conf_file = self.configuration.cinder_huawei_conf_file
|
conf_file = self.configuration.cinder_huawei_conf_file
|
||||||
(product, protocol) = self._get_conf_info(conf_file)
|
(product, protocol) = self._get_conf_info(conf_file)
|
||||||
|
|
||||||
LOG.info(_LI(
|
LOG.info(_LI('_instantiate_driver: Loading %(protocol)s driver for '
|
||||||
'_instantiate_driver: Loading %(protocol)s driver for '
|
'Huawei OceanStor %(product)s series storage arrays.'),
|
||||||
'Huawei OceanStor %(product)s series storage arrays.')
|
{'protocol': protocol,
|
||||||
% {'protocol': protocol,
|
'product': product})
|
||||||
'product': product})
|
|
||||||
# Map HVS to 18000
|
# Map HVS to 18000
|
||||||
if product in MAPPING:
|
if product in MAPPING:
|
||||||
LOG.warn(_LW("Product name %s is deprecated, update your "
|
LOG.warning(_LW("Product name %s is deprecated, update your "
|
||||||
"configuration to the new product name."), product)
|
"configuration to the new product name."),
|
||||||
|
product)
|
||||||
product = MAPPING[product]
|
product = MAPPING[product]
|
||||||
|
|
||||||
driver_module = self._product[product]
|
driver_module = self._product[product]
|
||||||
|
|
|
@ -39,8 +39,8 @@ def parse_xml_file(filepath):
|
||||||
root = tree.getroot()
|
root = tree.getroot()
|
||||||
return root
|
return root
|
||||||
except IOError as err:
|
except IOError as err:
|
||||||
LOG.error(_LE('parse_xml_file: %s') % err)
|
LOG.error(_LE('parse_xml_file: %s'), err)
|
||||||
raise err
|
raise
|
||||||
|
|
||||||
|
|
||||||
def get_xml_item(xml_root, item):
|
def get_xml_item(xml_root, item):
|
||||||
|
@ -127,7 +127,7 @@ def get_conf_host_os_type(host_ip, config):
|
||||||
if not host_os:
|
if not host_os:
|
||||||
host_os = os_type['Linux'] # default os type
|
host_os = os_type['Linux'] # default os type
|
||||||
|
|
||||||
LOG.debug('_get_host_os_type: Host %(ip)s OS type is %(os)s.'
|
LOG.debug('_get_host_os_type: Host %(ip)s OS type is %(os)s.',
|
||||||
% {'ip': host_ip, 'os': host_os})
|
{'ip': host_ip, 'os': host_os})
|
||||||
|
|
||||||
return host_os
|
return host_os
|
||||||
|
|
|
@ -85,14 +85,13 @@ class RestCommon(object):
|
||||||
'res': res})
|
'res': res})
|
||||||
|
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
LOG.error(_LE('\nBad response from server: %s.') % err)
|
LOG.error(_LE('\nBad response from server: %s.'), err)
|
||||||
raise
|
raise
|
||||||
|
|
||||||
try:
|
try:
|
||||||
res_json = json.loads(res)
|
res_json = json.loads(res)
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
err_msg = (_LE('JSON transfer error: %s.') % err)
|
LOG.error(_LE('JSON transfer error: %s.'), err)
|
||||||
LOG.error(err_msg)
|
|
||||||
raise
|
raise
|
||||||
|
|
||||||
return res_json
|
return res_json
|
||||||
|
@ -165,10 +164,8 @@ class RestCommon(object):
|
||||||
volume_description = volume['name']
|
volume_description = volume['name']
|
||||||
volume_size = self._get_volume_size(volume)
|
volume_size = self._get_volume_size(volume)
|
||||||
|
|
||||||
LOG.info(_LI(
|
LOG.info(_LI('Create Volume: %(volume)s Size: %(size)s.'),
|
||||||
'Create Volume: %(volume)s Size: %(size)s.')
|
{'volume': volume_name, 'size': volume_size})
|
||||||
% {'volume': volume_name,
|
|
||||||
'size': volume_size})
|
|
||||||
|
|
||||||
params = self._get_lun_conf_params()
|
params = self._get_lun_conf_params()
|
||||||
params['pool_id'] = poolinfo['ID']
|
params['pool_id'] = poolinfo['ID']
|
||||||
|
@ -234,8 +231,8 @@ class RestCommon(object):
|
||||||
|
|
||||||
name = self._encode_name(volume['id'])
|
name = self._encode_name(volume['id'])
|
||||||
lun_id = volume.get('provider_location', None)
|
lun_id = volume.get('provider_location', None)
|
||||||
LOG.info(_LI('Delete Volume: %(name)s array lun id: %(lun_id)s.')
|
LOG.info(_LI('Delete Volume: %(name)s array lun id: %(lun_id)s.'),
|
||||||
% {'name': name, 'lun_id': lun_id})
|
{'name': name, 'lun_id': lun_id})
|
||||||
if lun_id:
|
if lun_id:
|
||||||
if self._check_lun_exist(lun_id) is True:
|
if self._check_lun_exist(lun_id) is True:
|
||||||
# Get qos_id by lun_id.
|
# Get qos_id by lun_id.
|
||||||
|
@ -278,7 +275,7 @@ class RestCommon(object):
|
||||||
tree = ET.parse(filename)
|
tree = ET.parse(filename)
|
||||||
root = tree.getroot()
|
root = tree.getroot()
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
LOG.error(_LE('_read_xml: %s') % err)
|
LOG.error(_LE('_read_xml: %s'), err)
|
||||||
raise
|
raise
|
||||||
return root
|
return root
|
||||||
|
|
||||||
|
@ -341,11 +338,10 @@ class RestCommon(object):
|
||||||
snapshot_description = snapshot['id']
|
snapshot_description = snapshot['id']
|
||||||
volume_name = self._encode_name(snapshot['volume_id'])
|
volume_name = self._encode_name(snapshot['volume_id'])
|
||||||
|
|
||||||
LOG.info(_LI(
|
LOG.info(_LI('_create_snapshot:snapshot name: %(snapshot)s, '
|
||||||
'_create_snapshot:snapshot name: %(snapshot)s, '
|
'volume name: %(volume)s.'),
|
||||||
'volume name: %(volume)s.')
|
{'snapshot': snapshot_name,
|
||||||
% {'snapshot': snapshot_name,
|
'volume': volume_name})
|
||||||
'volume': volume_name})
|
|
||||||
|
|
||||||
lun_id = self._get_volume_by_name(volume_name)
|
lun_id = self._get_volume_by_name(volume_name)
|
||||||
if lun_id is None:
|
if lun_id is None:
|
||||||
|
@ -404,11 +400,10 @@ class RestCommon(object):
|
||||||
snapshot_name = self._encode_name(snapshot['id'])
|
snapshot_name = self._encode_name(snapshot['id'])
|
||||||
volume_name = self._encode_name(snapshot['volume_id'])
|
volume_name = self._encode_name(snapshot['volume_id'])
|
||||||
|
|
||||||
LOG.info(_LI(
|
LOG.info(_LI('stop_snapshot:snapshot name: %(snapshot)s, '
|
||||||
'stop_snapshot:snapshot name: %(snapshot)s, '
|
'volume name: %(volume)s.'),
|
||||||
'volume name: %(volume)s.')
|
{'snapshot': snapshot_name,
|
||||||
% {'snapshot': snapshot_name,
|
'volume': volume_name})
|
||||||
'volume': volume_name})
|
|
||||||
|
|
||||||
snapshot_id = snapshot.get('provider_location', None)
|
snapshot_id = snapshot.get('provider_location', None)
|
||||||
if snapshot_id is None:
|
if snapshot_id is None:
|
||||||
|
@ -556,12 +551,12 @@ class RestCommon(object):
|
||||||
tgt_lun_id = lun_info['ID']
|
tgt_lun_id = lun_info['ID']
|
||||||
luncopy_name = self._encode_name(volume['id'])
|
luncopy_name = self._encode_name(volume['id'])
|
||||||
|
|
||||||
LOG.info(_LI(
|
LOG.info(_LI('create_volume_from_snapshot: src_lun_id: '
|
||||||
'create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, '
|
'%(src_lun_id)s, tgt_lun_id: %(tgt_lun_id)s, '
|
||||||
'tgt_lun_id: %(tgt_lun_id)s, copy_name: %(copy_name)s')
|
'copy_name: %(copy_name)s'),
|
||||||
% {'src_lun_id': snapshot_id,
|
{'src_lun_id': snapshot_id,
|
||||||
'tgt_lun_id': tgt_lun_id,
|
'tgt_lun_id': tgt_lun_id,
|
||||||
'copy_name': luncopy_name})
|
'copy_name': luncopy_name})
|
||||||
|
|
||||||
event_type = 'LUNReadyWaitInterval'
|
event_type = 'LUNReadyWaitInterval'
|
||||||
wait_interval = self._get_wait_interval(event_type)
|
wait_interval = self._get_wait_interval(event_type)
|
||||||
|
@ -601,11 +596,10 @@ class RestCommon(object):
|
||||||
# Delete snapshot.
|
# Delete snapshot.
|
||||||
self.delete_snapshot(snapshot)
|
self.delete_snapshot(snapshot)
|
||||||
except exception.CinderException:
|
except exception.CinderException:
|
||||||
LOG.warning(_LW(
|
LOG.warning(_LW('Failure deleting the snapshot '
|
||||||
'Failure deleting the snapshot %(snapshot_id)s '
|
'%(snapshot_id)s of volume %(volume_id)s.'),
|
||||||
'of volume %(volume_id)s.')
|
{'snapshot_id': snapshot['id'],
|
||||||
% {'snapshot_id': snapshot['id'],
|
'volume_id': src_vref['id']})
|
||||||
'volume_id': src_vref['id']})
|
|
||||||
|
|
||||||
return lun_info
|
return lun_info
|
||||||
|
|
||||||
|
@ -638,11 +632,10 @@ class RestCommon(object):
|
||||||
host_group_name = HOSTGROUP_PREFIX + host_id
|
host_group_name = HOSTGROUP_PREFIX + host_id
|
||||||
hostgroup_id = self._find_hostgroup(host_group_name)
|
hostgroup_id = self._find_hostgroup(host_group_name)
|
||||||
|
|
||||||
LOG.info(_LI(
|
LOG.info(_LI('_add_host_into_hostgroup, hostgroup name: %(name)s, '
|
||||||
'_add_host_into_hostgroup, hostgroup name: %(name)s, '
|
'hostgroup id: %(id)s.'),
|
||||||
'hostgroup id: %(id)s.')
|
{'name': host_group_name,
|
||||||
% {'name': host_group_name,
|
'id': hostgroup_id})
|
||||||
'id': hostgroup_id})
|
|
||||||
|
|
||||||
if hostgroup_id is None:
|
if hostgroup_id is None:
|
||||||
hostgroup_id = self._create_hostgroup(host_group_name)
|
hostgroup_id = self._create_hostgroup(host_group_name)
|
||||||
|
@ -663,12 +656,11 @@ class RestCommon(object):
|
||||||
lun_id = self._get_volume_by_name(volume_name)
|
lun_id = self._get_volume_by_name(volume_name)
|
||||||
view_id = self._find_mapping_view(mapping_view_name)
|
view_id = self._find_mapping_view(mapping_view_name)
|
||||||
|
|
||||||
LOG.info(_LI(
|
LOG.info(_LI('_mapping_hostgroup_and_lungroup, lun_group: '
|
||||||
'_mapping_hostgroup_and_lungroup, lun_group: %(lun_group)s, '
|
'%(lun_group)s, view_id: %(view_id)s, lun_id: '
|
||||||
'view_id: %(view_id)s, lun_id: %(lun_id)s.')
|
'%(lun_id)s.'), {'lun_group': lungroup_id,
|
||||||
% {'lun_group': six.text_type(lungroup_id),
|
'view_id': view_id,
|
||||||
'view_id': six.text_type(view_id),
|
'lun_id': lun_id})
|
||||||
'lun_id': six.text_type(lun_id)})
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# Create lungroup and add LUN into to lungroup.
|
# Create lungroup and add LUN into to lungroup.
|
||||||
|
@ -691,10 +683,9 @@ class RestCommon(object):
|
||||||
|
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
err_msg = (_LE(
|
LOG.error(_LE('Error occurred when adding hostgroup and '
|
||||||
'Error occurred when adding hostgroup and lungroup to '
|
'lungroup to view. Remove lun from lungroup '
|
||||||
'view. Remove lun from lungroup now.'))
|
'now.'))
|
||||||
LOG.error(err_msg)
|
|
||||||
self._remove_lun_from_lungroup(lungroup_id, lun_id)
|
self._remove_lun_from_lungroup(lungroup_id, lun_id)
|
||||||
|
|
||||||
return lun_id
|
return lun_id
|
||||||
|
@ -717,18 +708,16 @@ class RestCommon(object):
|
||||||
initiator_name = connector['initiator']
|
initiator_name = connector['initiator']
|
||||||
volume_name = self._encode_name(volume['id'])
|
volume_name = self._encode_name(volume['id'])
|
||||||
|
|
||||||
LOG.info(_LI(
|
LOG.info(_LI('initiator name: %(initiator_name)s, '
|
||||||
'initiator name: %(initiator_name)s, '
|
'volume name: %(volume)s.'),
|
||||||
'volume name: %(volume)s.')
|
{'initiator_name': initiator_name,
|
||||||
% {'initiator_name': initiator_name,
|
'volume': volume_name})
|
||||||
'volume': volume_name})
|
|
||||||
|
|
||||||
(iscsi_iqn, target_ip) = self._get_iscsi_params(connector)
|
(iscsi_iqn, target_ip) = self._get_iscsi_params(connector)
|
||||||
LOG.info(_LI(
|
LOG.info(_LI('initialize_connection_iscsi,iscsi_iqn: %(iscsi_iqn)s, '
|
||||||
'initialize_connection_iscsi,iscsi_iqn: %(iscsi_iqn)s, '
|
'target_ip: %(target_ip)s.'),
|
||||||
'target_ip: %(target_ip)s.')
|
{'iscsi_iqn': iscsi_iqn,
|
||||||
% {'iscsi_iqn': iscsi_iqn,
|
'target_ip': target_ip})
|
||||||
'target_ip': target_ip})
|
|
||||||
|
|
||||||
# Create host_group if not exist.
|
# Create host_group if not exist.
|
||||||
host_name = connector['host']
|
host_name = connector['host']
|
||||||
|
@ -746,8 +735,8 @@ class RestCommon(object):
|
||||||
|
|
||||||
hostlunid = self._find_host_lun_id(hostid, lun_id)
|
hostlunid = self._find_host_lun_id(hostid, lun_id)
|
||||||
|
|
||||||
LOG.info(_LI("initialize_connection_iscsi, host lun id is: %s.")
|
LOG.info(_LI("initialize_connection_iscsi, host lun id is: %s."),
|
||||||
% hostlunid)
|
hostlunid)
|
||||||
|
|
||||||
# Return iSCSI properties.
|
# Return iSCSI properties.
|
||||||
properties = {}
|
properties = {}
|
||||||
|
@ -757,8 +746,8 @@ class RestCommon(object):
|
||||||
properties['target_lun'] = int(hostlunid)
|
properties['target_lun'] = int(hostlunid)
|
||||||
properties['volume_id'] = volume['id']
|
properties['volume_id'] = volume['id']
|
||||||
|
|
||||||
LOG.info(_LI("initialize_connection_iscsi success. Return data: %s.")
|
LOG.info(_LI("initialize_connection_iscsi success. Return data: %s."),
|
||||||
% properties)
|
properties)
|
||||||
return {'driver_volume_type': 'iscsi', 'data': properties}
|
return {'driver_volume_type': 'iscsi', 'data': properties}
|
||||||
|
|
||||||
@utils.synchronized('huawei', external=True)
|
@utils.synchronized('huawei', external=True)
|
||||||
|
@ -767,11 +756,10 @@ class RestCommon(object):
|
||||||
host_name = connector['host']
|
host_name = connector['host']
|
||||||
volume_name = self._encode_name(volume['id'])
|
volume_name = self._encode_name(volume['id'])
|
||||||
|
|
||||||
LOG.info(_LI(
|
LOG.info(_LI('initialize_connection_fc, initiator: %(initiator_name)s,'
|
||||||
'initialize_connection_fc, initiator: %(initiator_name)s,'
|
' volume name: %(volume)s.'),
|
||||||
' volume name: %(volume)s.')
|
{'initiator_name': wwns,
|
||||||
% {'initiator_name': wwns,
|
'volume': volume_name})
|
||||||
'volume': volume_name})
|
|
||||||
|
|
||||||
# Create host_group if not exist.
|
# Create host_group if not exist.
|
||||||
hostid = self._find_host(host_name)
|
hostid = self._find_host(host_name)
|
||||||
|
@ -782,8 +770,8 @@ class RestCommon(object):
|
||||||
hostgroup_id = self._add_host_into_hostgroup(hostid)
|
hostgroup_id = self._add_host_into_hostgroup(hostid)
|
||||||
|
|
||||||
free_wwns = self._get_connected_free_wwns()
|
free_wwns = self._get_connected_free_wwns()
|
||||||
LOG.info(_LI("initialize_connection_fc, the array has free wwns: %s")
|
LOG.info(_LI("initialize_connection_fc, the array has free wwns: %s"),
|
||||||
% free_wwns)
|
free_wwns)
|
||||||
for wwn in wwns:
|
for wwn in wwns:
|
||||||
if wwn in free_wwns:
|
if wwn in free_wwns:
|
||||||
self._add_fc_port_to_host(hostid, wwn)
|
self._add_fc_port_to_host(hostid, wwn)
|
||||||
|
@ -810,8 +798,7 @@ class RestCommon(object):
|
||||||
'volume_id': volume['id'],
|
'volume_id': volume['id'],
|
||||||
'initiator_target_map': init_targ_map}}
|
'initiator_target_map': init_targ_map}}
|
||||||
|
|
||||||
LOG.info(_LI("initialize_connection_fc, return data is: %s.")
|
LOG.info(_LI("initialize_connection_fc, return data is: %s."), info)
|
||||||
% info)
|
|
||||||
|
|
||||||
return info
|
return info
|
||||||
|
|
||||||
|
@ -926,8 +913,7 @@ class RestCommon(object):
|
||||||
host_lun_id = hostassoinfo['HostLUNID']
|
host_lun_id = hostassoinfo['HostLUNID']
|
||||||
break
|
break
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
msg = (_LE("JSON transfer data error. %s") % err)
|
LOG.error(_LE("JSON transfer data error. %s"), err)
|
||||||
LOG.error(msg)
|
|
||||||
raise
|
raise
|
||||||
return host_lun_id
|
return host_lun_id
|
||||||
|
|
||||||
|
@ -1155,13 +1141,11 @@ class RestCommon(object):
|
||||||
initiator_name = connector['initiator']
|
initiator_name = connector['initiator']
|
||||||
volume_name = self._encode_name(volume['id'])
|
volume_name = self._encode_name(volume['id'])
|
||||||
lun_id = volume.get('provider_location', None)
|
lun_id = volume.get('provider_location', None)
|
||||||
LOG.info(_LI(
|
LOG.info(_LI('terminate_connection:volume name: %(volume)s, '
|
||||||
'terminate_connection:volume name: %(volume)s, '
|
'initiator name: %(ini)s, lun_id: %(lunid)s.'),
|
||||||
'initiator name: %(ini)s, '
|
{'volume': volume_name,
|
||||||
'lun_id: %(lunid)s.')
|
'ini': initiator_name,
|
||||||
% {'volume': volume_name,
|
'lunid': lun_id})
|
||||||
'ini': initiator_name,
|
|
||||||
'lunid': lun_id})
|
|
||||||
|
|
||||||
if lun_id:
|
if lun_id:
|
||||||
if self._check_lun_exist(lun_id) is True:
|
if self._check_lun_exist(lun_id) is True:
|
||||||
|
@ -1393,10 +1377,10 @@ class RestCommon(object):
|
||||||
ip_info = self._get_iscsi_port_info(iscsiip)
|
ip_info = self._get_iscsi_port_info(iscsiip)
|
||||||
iqn_prefix = self._get_iscsi_tgt_port()
|
iqn_prefix = self._get_iscsi_tgt_port()
|
||||||
|
|
||||||
LOG.info(_LI('Request ip info is: %s.') % ip_info)
|
LOG.info(_LI('Request ip info is: %s.'), ip_info)
|
||||||
split_list = ip_info.split(".")
|
split_list = ip_info.split(".")
|
||||||
newstr = split_list[1] + split_list[2]
|
newstr = split_list[1] + split_list[2]
|
||||||
LOG.info(_LI('New str info is: %s.') % newstr)
|
LOG.info(_LI('New str info is: %s.'), newstr)
|
||||||
|
|
||||||
if ip_info:
|
if ip_info:
|
||||||
if newstr[0] == 'A':
|
if newstr[0] == 'A':
|
||||||
|
@ -1411,7 +1395,7 @@ class RestCommon(object):
|
||||||
iqn_suffix = iqn_suffix[i:]
|
iqn_suffix = iqn_suffix[i:]
|
||||||
break
|
break
|
||||||
iqn = iqn_prefix + ':' + iqn_suffix + ':' + iscsiip
|
iqn = iqn_prefix + ':' + iqn_suffix + ':' + iscsiip
|
||||||
LOG.info(_LI('_get_tgt_iqn: iSCSI target iqn is: %s.') % iqn)
|
LOG.info(_LI('_get_tgt_iqn: iSCSI target iqn is: %s.'), iqn)
|
||||||
return iqn
|
return iqn
|
||||||
else:
|
else:
|
||||||
return None
|
return None
|
||||||
|
@ -1495,7 +1479,7 @@ class RestCommon(object):
|
||||||
try:
|
try:
|
||||||
tree.write(filename, 'UTF-8')
|
tree.write(filename, 'UTF-8')
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
LOG.warning(_LW('Unable to access config file. %s') % err)
|
LOG.warning(_LW('Unable to access config file. %s'), err)
|
||||||
|
|
||||||
return logininfo
|
return logininfo
|
||||||
|
|
||||||
|
@ -1589,12 +1573,11 @@ class RestCommon(object):
|
||||||
new_volume_size = int(new_size) * units.Gi / 512
|
new_volume_size = int(new_size) * units.Gi / 512
|
||||||
volume_name = self._encode_name(volume['id'])
|
volume_name = self._encode_name(volume['id'])
|
||||||
|
|
||||||
LOG.info(_LI(
|
LOG.info(_LI('Extend Volume: %(volumename)s, oldsize: %(oldsize)s '
|
||||||
'Extend Volume: %(volumename)s, oldsize:'
|
'newsize: %(newsize)s.'),
|
||||||
' %(oldsize)s newsize: %(newsize)s.')
|
{'volumename': volume_name,
|
||||||
% {'volumename': volume_name,
|
'oldsize': volume_size,
|
||||||
'oldsize': volume_size,
|
'newsize': new_volume_size})
|
||||||
'newsize': new_volume_size})
|
|
||||||
|
|
||||||
lun_id = self._get_volume_by_name(volume_name)
|
lun_id = self._get_volume_by_name(volume_name)
|
||||||
|
|
||||||
|
@ -1634,7 +1617,7 @@ class RestCommon(object):
|
||||||
else:
|
else:
|
||||||
kvs = specs
|
kvs = specs
|
||||||
|
|
||||||
LOG.info(_LI('The QoS sepcs is: %s.') % kvs)
|
LOG.info(_LI('The QoS sepcs is: %s.'), kvs)
|
||||||
for key, value in kvs.iteritems():
|
for key, value in kvs.iteritems():
|
||||||
if key in huawei_valid_keys:
|
if key in huawei_valid_keys:
|
||||||
qos[key.upper()] = value
|
qos[key.upper()] = value
|
||||||
|
|
|
@ -406,7 +406,7 @@ class FlashSystemDriver(san.SanDriver):
|
||||||
out, err = self._ssh(ssh_cmd)
|
out, err = self._ssh(ssh_cmd)
|
||||||
except processutils.ProcessExecutionError:
|
except processutils.ProcessExecutionError:
|
||||||
LOG.warning(_LW('_execute_command_and_parse_attributes: Failed to '
|
LOG.warning(_LW('_execute_command_and_parse_attributes: Failed to '
|
||||||
'run command: %s.'), six.text_type(ssh_cmd))
|
'run command: %s.'), ssh_cmd)
|
||||||
# Does not raise exception when command encounters error.
|
# Does not raise exception when command encounters error.
|
||||||
# Only return and the upper logic decides what to do.
|
# Only return and the upper logic decides what to do.
|
||||||
return None
|
return None
|
||||||
|
@ -1140,9 +1140,8 @@ class FlashSystemDriver(san.SanDriver):
|
||||||
def validate_connector(self, connector):
|
def validate_connector(self, connector):
|
||||||
"""Check connector."""
|
"""Check connector."""
|
||||||
if 'FC' == self._protocol and 'wwpns' not in connector:
|
if 'FC' == self._protocol and 'wwpns' not in connector:
|
||||||
msg = (_LE('The connector does not contain the '
|
LOG.error(_LE('The connector does not contain the '
|
||||||
'required information: wwpns is missing'))
|
'required information: wwpns is missing'))
|
||||||
LOG.error(msg)
|
|
||||||
raise exception.InvalidConnectorException(missing='wwpns')
|
raise exception.InvalidConnectorException(missing='wwpns')
|
||||||
|
|
||||||
def create_volume(self, volume):
|
def create_volume(self, volume):
|
||||||
|
|
|
@ -136,9 +136,8 @@ class GPFSDriver(driver.VolumeDriver):
|
||||||
gpfs_state = lines[1].split(':')[state_token]
|
gpfs_state = lines[1].split(':')[state_token]
|
||||||
if gpfs_state != 'active':
|
if gpfs_state != 'active':
|
||||||
LOG.error(_LE('GPFS is not active. Detailed output: %s.'), out)
|
LOG.error(_LE('GPFS is not active. Detailed output: %s.'), out)
|
||||||
exception_message = (_('GPFS is not running, state: %s.') %
|
raise exception.VolumeBackendAPIException(
|
||||||
gpfs_state)
|
data=_('GPFS is not running, state: %s.') % gpfs_state)
|
||||||
raise exception.VolumeBackendAPIException(data=exception_message)
|
|
||||||
|
|
||||||
def _get_filesystem_from_path(self, path):
|
def _get_filesystem_from_path(self, path):
|
||||||
"""Return filesystem for specified path."""
|
"""Return filesystem for specified path."""
|
||||||
|
|
|
@ -36,6 +36,7 @@ from oslo_concurrency import processutils
|
||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
from oslo_utils import units
|
from oslo_utils import units
|
||||||
|
import six
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _, _LI, _LW
|
from cinder.i18n import _, _LI, _LW
|
||||||
|
@ -170,7 +171,8 @@ class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
|
||||||
self._run_ssh(ssh_cmd)
|
self._run_ssh(ssh_cmd)
|
||||||
except processutils.ProcessExecutionError as e:
|
except processutils.ProcessExecutionError as e:
|
||||||
msg = (_('Failed in _ssh_operation while execution of ssh_cmd:'
|
msg = (_('Failed in _ssh_operation while execution of ssh_cmd:'
|
||||||
'%(cmd)s. Error: %(error)s') % {'cmd': ssh_cmd, 'error': e})
|
'%(cmd)s. Error: %(error)s') %
|
||||||
|
{'cmd': ssh_cmd, 'error': six.text_type(e)})
|
||||||
LOG.exception(msg)
|
LOG.exception(msg)
|
||||||
raise exception.VolumeBackendAPIException(data=msg)
|
raise exception.VolumeBackendAPIException(data=msg)
|
||||||
|
|
||||||
|
@ -224,7 +226,7 @@ class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
|
||||||
msg = (_("Failed to resize volume "
|
msg = (_("Failed to resize volume "
|
||||||
"%(volume_id)s, error: %(error)s") %
|
"%(volume_id)s, error: %(error)s") %
|
||||||
{'volume_id': os.path.basename(path).split('-')[1],
|
{'volume_id': os.path.basename(path).split('-')[1],
|
||||||
'error': e.stderr})
|
'error': six.text_type(e.stderr)})
|
||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
raise exception.VolumeBackendAPIException(data=msg)
|
raise exception.VolumeBackendAPIException(data=msg)
|
||||||
return True
|
return True
|
||||||
|
@ -247,7 +249,8 @@ class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
|
||||||
try:
|
try:
|
||||||
(out, _err) = self._run_ssh(ssh_cmd, check_exit_code=False)
|
(out, _err) = self._run_ssh(ssh_cmd, check_exit_code=False)
|
||||||
except processutils.ProcessExecutionError as e:
|
except processutils.ProcessExecutionError as e:
|
||||||
msg = (_("Failed in _delete_snapfiles. Error: %s") % e.stderr)
|
msg = (_("Failed in _delete_snapfiles. Error: %s") %
|
||||||
|
six.text_type(e.stderr))
|
||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
raise exception.VolumeBackendAPIException(data=msg)
|
raise exception.VolumeBackendAPIException(data=msg)
|
||||||
fparent = None
|
fparent = None
|
||||||
|
@ -285,9 +288,9 @@ class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
|
||||||
def delete_volume(self, volume):
|
def delete_volume(self, volume):
|
||||||
"""Deletes a logical volume."""
|
"""Deletes a logical volume."""
|
||||||
if not volume['provider_location']:
|
if not volume['provider_location']:
|
||||||
LOG.warn(_LW('Volume %s does not have '
|
LOG.warning(_LW('Volume %s does not have '
|
||||||
'provider_location specified, '
|
'provider_location specified, '
|
||||||
'skipping.'), volume['name'])
|
'skipping.'), volume['name'])
|
||||||
return
|
return
|
||||||
|
|
||||||
export_path = self._get_export_path(volume['id'])
|
export_path = self._get_export_path(volume['id'])
|
||||||
|
|
|
@ -44,7 +44,7 @@ from oslo_utils import units
|
||||||
|
|
||||||
from cinder import context
|
from cinder import context
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _, _LE, _LW
|
from cinder.i18n import _, _LE, _LI, _LW
|
||||||
from cinder.openstack.common import loopingcall
|
from cinder.openstack.common import loopingcall
|
||||||
from cinder import utils
|
from cinder import utils
|
||||||
from cinder.volume.drivers.ibm.storwize_svc import helpers as storwize_helpers
|
from cinder.volume.drivers.ibm.storwize_svc import helpers as storwize_helpers
|
||||||
|
@ -290,8 +290,8 @@ class StorwizeSVCDriver(san.SanDriver):
|
||||||
"""
|
"""
|
||||||
volume_defined = self._helpers.is_vdisk_defined(volume['name'])
|
volume_defined = self._helpers.is_vdisk_defined(volume['name'])
|
||||||
if not volume_defined:
|
if not volume_defined:
|
||||||
LOG.error(_LE('ensure_export: Volume %s not found on storage')
|
LOG.error(_LE('ensure_export: Volume %s not found on storage'),
|
||||||
% volume['name'])
|
volume['name'])
|
||||||
|
|
||||||
def create_export(self, ctxt, volume):
|
def create_export(self, ctxt, volume):
|
||||||
model_update = None
|
model_update = None
|
||||||
|
@ -309,9 +309,8 @@ class StorwizeSVCDriver(san.SanDriver):
|
||||||
if 'FC' in self._state['enabled_protocols'] and 'wwpns' in connector:
|
if 'FC' in self._state['enabled_protocols'] and 'wwpns' in connector:
|
||||||
valid = True
|
valid = True
|
||||||
if not valid:
|
if not valid:
|
||||||
msg = (_LE('The connector does not contain the required '
|
LOG.error(_LE('The connector does not contain the required '
|
||||||
'information.'))
|
'information.'))
|
||||||
LOG.error(msg)
|
|
||||||
raise exception.InvalidConnectorException(
|
raise exception.InvalidConnectorException(
|
||||||
missing='initiator or wwpns')
|
missing='initiator or wwpns')
|
||||||
|
|
||||||
|
@ -386,10 +385,10 @@ class StorwizeSVCDriver(san.SanDriver):
|
||||||
IO_group = volume_attributes['IO_group_id']
|
IO_group = volume_attributes['IO_group_id']
|
||||||
except KeyError as e:
|
except KeyError as e:
|
||||||
LOG.error(_LE('Did not find expected column name in '
|
LOG.error(_LE('Did not find expected column name in '
|
||||||
'lsvdisk: %s') % e)
|
'lsvdisk: %s'), e)
|
||||||
msg = (_('initialize_connection: Missing volume '
|
raise exception.VolumeBackendAPIException(
|
||||||
'attribute for volume %s') % volume_name)
|
data=_('initialize_connection: Missing volume attribute for '
|
||||||
raise exception.VolumeBackendAPIException(data=msg)
|
'volume %s') % volume_name)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# Get preferred node and other nodes in I/O group
|
# Get preferred node and other nodes in I/O group
|
||||||
|
@ -413,8 +412,8 @@ class StorwizeSVCDriver(san.SanDriver):
|
||||||
if not preferred_node_entry and not vol_opts['multipath']:
|
if not preferred_node_entry and not vol_opts['multipath']:
|
||||||
# Get 1st node in I/O group
|
# Get 1st node in I/O group
|
||||||
preferred_node_entry = io_group_nodes[0]
|
preferred_node_entry = io_group_nodes[0]
|
||||||
LOG.warn(_LW('initialize_connection: Did not find a preferred '
|
LOG.warning(_LW('initialize_connection: Did not find a '
|
||||||
'node for volume %s') % volume_name)
|
'preferred node for volume %s'), volume_name)
|
||||||
|
|
||||||
properties = {}
|
properties = {}
|
||||||
properties['target_discovered'] = False
|
properties['target_discovered'] = False
|
||||||
|
@ -471,7 +470,7 @@ class StorwizeSVCDriver(san.SanDriver):
|
||||||
LOG.warning(_LW('Unable to find a preferred node match'
|
LOG.warning(_LW('Unable to find a preferred node match'
|
||||||
' for node %(node)s in the list of '
|
' for node %(node)s in the list of '
|
||||||
'available WWPNs on %(host)s. '
|
'available WWPNs on %(host)s. '
|
||||||
'Using first available.') %
|
'Using first available.'),
|
||||||
{'node': preferred_node,
|
{'node': preferred_node,
|
||||||
'host': host_name})
|
'host': host_name})
|
||||||
properties['target_wwn'] = conn_wwpns[0]
|
properties['target_wwn'] = conn_wwpns[0]
|
||||||
|
@ -651,7 +650,7 @@ class StorwizeSVCDriver(san.SanDriver):
|
||||||
return replica_status
|
return replica_status
|
||||||
|
|
||||||
def extend_volume(self, volume, new_size):
|
def extend_volume(self, volume, new_size):
|
||||||
LOG.debug('enter: extend_volume: volume %s' % volume['id'])
|
LOG.debug('enter: extend_volume: volume %s', volume['id'])
|
||||||
ret = self._helpers.ensure_vdisk_no_fc_mappings(volume['name'],
|
ret = self._helpers.ensure_vdisk_no_fc_mappings(volume['name'],
|
||||||
allow_snaps=False)
|
allow_snaps=False)
|
||||||
if not ret:
|
if not ret:
|
||||||
|
@ -662,7 +661,7 @@ class StorwizeSVCDriver(san.SanDriver):
|
||||||
|
|
||||||
extend_amt = int(new_size) - volume['size']
|
extend_amt = int(new_size) - volume['size']
|
||||||
self._helpers.extend_vdisk(volume['name'], extend_amt)
|
self._helpers.extend_vdisk(volume['name'], extend_amt)
|
||||||
LOG.debug('leave: extend_volume: volume %s' % volume['id'])
|
LOG.debug('leave: extend_volume: volume %s', volume['id'])
|
||||||
|
|
||||||
def add_vdisk_copy(self, volume, dest_pool, vol_type):
|
def add_vdisk_copy(self, volume, dest_pool, vol_type):
|
||||||
return self._helpers.add_vdisk_copy(volume, dest_pool,
|
return self._helpers.add_vdisk_copy(volume, dest_pool,
|
||||||
|
@ -703,37 +702,34 @@ class StorwizeSVCDriver(san.SanDriver):
|
||||||
self._vdiskcopyops_loop.stop()
|
self._vdiskcopyops_loop.stop()
|
||||||
self._vdiskcopyops_loop = None
|
self._vdiskcopyops_loop = None
|
||||||
except KeyError:
|
except KeyError:
|
||||||
msg = (_('_rm_vdisk_copy_op: Volume %s does not have any '
|
LOG.error(_LE('_rm_vdisk_copy_op: Volume %s does not have any '
|
||||||
'registered vdisk copy operations.') % volume['id'])
|
'registered vdisk copy operations.'), volume['id'])
|
||||||
LOG.error(msg)
|
|
||||||
return
|
return
|
||||||
except ValueError:
|
except ValueError:
|
||||||
msg = (_('_rm_vdisk_copy_op: Volume %(vol)s does not have the '
|
LOG.error(_LE('_rm_vdisk_copy_op: Volume %(vol)s does not have '
|
||||||
'specified vdisk copy operation: orig=%(orig)s '
|
'the specified vdisk copy operation: orig=%(orig)s '
|
||||||
'new=%(new)s.')
|
'new=%(new)s.'),
|
||||||
% {'vol': volume['id'], 'orig': orig_copy_id,
|
{'vol': volume['id'], 'orig': orig_copy_id,
|
||||||
'new': new_copy_id})
|
'new': new_copy_id})
|
||||||
LOG.error(msg)
|
|
||||||
return
|
return
|
||||||
|
|
||||||
metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
|
metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
|
||||||
volume['id'])
|
volume['id'])
|
||||||
curr_ops = metadata.get('vdiskcopyops', None)
|
curr_ops = metadata.get('vdiskcopyops', None)
|
||||||
if not curr_ops:
|
if not curr_ops:
|
||||||
msg = (_('_rm_vdisk_copy_op: Volume metadata %s does not have any '
|
LOG.error(_LE('_rm_vdisk_copy_op: Volume metadata %s does not '
|
||||||
'registered vdisk copy operations.') % volume['id'])
|
'have any registered vdisk copy operations.'),
|
||||||
LOG.error(msg)
|
volume['id'])
|
||||||
return
|
return
|
||||||
curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
|
curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
|
||||||
try:
|
try:
|
||||||
curr_ops_list.remove((orig_copy_id, new_copy_id))
|
curr_ops_list.remove((orig_copy_id, new_copy_id))
|
||||||
except ValueError:
|
except ValueError:
|
||||||
msg = (_('_rm_vdisk_copy_op: Volume %(vol)s metadata does not '
|
LOG.error(_LE('_rm_vdisk_copy_op: Volume %(vol)s metadata does '
|
||||||
'have the specified vdisk copy operation: orig=%(orig)s '
|
'not have the specified vdisk copy operation: '
|
||||||
'new=%(new)s.')
|
'orig=%(orig)s new=%(new)s.'),
|
||||||
% {'vol': volume['id'], 'orig': orig_copy_id,
|
{'vol': volume['id'], 'orig': orig_copy_id,
|
||||||
'new': new_copy_id})
|
'new': new_copy_id})
|
||||||
LOG.error(msg)
|
|
||||||
return
|
return
|
||||||
|
|
||||||
if len(curr_ops_list):
|
if len(curr_ops_list):
|
||||||
|
@ -775,7 +771,7 @@ class StorwizeSVCDriver(san.SanDriver):
|
||||||
try:
|
try:
|
||||||
volume = self.db.volume_get(ctxt, vol_id)
|
volume = self.db.volume_get(ctxt, vol_id)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.warn(_LW('Volume %s does not exist.'), vol_id)
|
LOG.warning(_LW('Volume %s does not exist.'), vol_id)
|
||||||
del self._vdiskcopyops[vol_id]
|
del self._vdiskcopyops[vol_id]
|
||||||
if not len(self._vdiskcopyops):
|
if not len(self._vdiskcopyops):
|
||||||
self._vdiskcopyops_loop.stop()
|
self._vdiskcopyops_loop.stop()
|
||||||
|
@ -787,12 +783,11 @@ class StorwizeSVCDriver(san.SanDriver):
|
||||||
synced = self._helpers.is_vdisk_copy_synced(volume['name'],
|
synced = self._helpers.is_vdisk_copy_synced(volume['name'],
|
||||||
copy_op[1])
|
copy_op[1])
|
||||||
except Exception:
|
except Exception:
|
||||||
msg = (_('_check_volume_copy_ops: Volume %(vol)s does not '
|
LOG.info(_LI('_check_volume_copy_ops: Volume %(vol)s does '
|
||||||
'have the specified vdisk copy operation: '
|
'not have the specified vdisk copy '
|
||||||
'orig=%(orig)s new=%(new)s.')
|
'operation: orig=%(orig)s new=%(new)s.'),
|
||||||
% {'vol': volume['id'], 'orig': copy_op[0],
|
{'vol': volume['id'], 'orig': copy_op[0],
|
||||||
'new': copy_op[1]})
|
'new': copy_op[1]})
|
||||||
LOG.info(msg)
|
|
||||||
else:
|
else:
|
||||||
if synced:
|
if synced:
|
||||||
self._helpers.rm_vdisk_copy(volume['name'], copy_op[0])
|
self._helpers.rm_vdisk_copy(volume['name'], copy_op[0])
|
||||||
|
@ -813,7 +808,7 @@ class StorwizeSVCDriver(san.SanDriver):
|
||||||
host['host'] is its name, and host['capabilities'] is a
|
host['host'] is its name, and host['capabilities'] is a
|
||||||
dictionary of its reported capabilities.
|
dictionary of its reported capabilities.
|
||||||
"""
|
"""
|
||||||
LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s' %
|
LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s',
|
||||||
{'id': volume['id'], 'host': host['host']})
|
{'id': volume['id'], 'host': host['host']})
|
||||||
|
|
||||||
false_ret = (False, None)
|
false_ret = (False, None)
|
||||||
|
@ -831,7 +826,7 @@ class StorwizeSVCDriver(san.SanDriver):
|
||||||
self._check_volume_copy_ops()
|
self._check_volume_copy_ops()
|
||||||
new_op = self.add_vdisk_copy(volume['name'], dest_pool, vol_type)
|
new_op = self.add_vdisk_copy(volume['name'], dest_pool, vol_type)
|
||||||
self._add_vdisk_copy_op(ctxt, volume, new_op)
|
self._add_vdisk_copy_op(ctxt, volume, new_op)
|
||||||
LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s' %
|
LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s',
|
||||||
{'id': volume['id'], 'host': host['host']})
|
{'id': volume['id'], 'host': host['host']})
|
||||||
return (True, None)
|
return (True, None)
|
||||||
|
|
||||||
|
@ -854,10 +849,10 @@ class StorwizeSVCDriver(san.SanDriver):
|
||||||
self._state, (new, old))
|
self._state, (new, old))
|
||||||
|
|
||||||
LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
|
LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
|
||||||
'diff=%(diff)s, host=%(host)s' % {'id': volume['id'],
|
'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
|
||||||
'new_type': new_type,
|
'new_type': new_type,
|
||||||
'diff': diff,
|
'diff': diff,
|
||||||
'host': host})
|
'host': host})
|
||||||
|
|
||||||
ignore_keys = ['protocol', 'multipath']
|
ignore_keys = ['protocol', 'multipath']
|
||||||
no_copy_keys = ['warning', 'autoexpand', 'easytier']
|
no_copy_keys = ['warning', 'autoexpand', 'easytier']
|
||||||
|
@ -944,10 +939,10 @@ class StorwizeSVCDriver(san.SanDriver):
|
||||||
new_type)
|
new_type)
|
||||||
|
|
||||||
LOG.debug('exit: retype: ild=%(id)s, new_type=%(new_type)s,'
|
LOG.debug('exit: retype: ild=%(id)s, new_type=%(new_type)s,'
|
||||||
'diff=%(diff)s, host=%(host)s' % {'id': volume['id'],
|
'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
|
||||||
'new_type': new_type,
|
'new_type': new_type,
|
||||||
'diff': diff,
|
'diff': diff,
|
||||||
'host': host['host']})
|
'host': host['host']})
|
||||||
return True, model_update
|
return True, model_update
|
||||||
|
|
||||||
def manage_existing(self, volume, ref):
|
def manage_existing(self, volume, ref):
|
||||||
|
|
|
@ -156,8 +156,8 @@ class StorwizeHelpers(object):
|
||||||
if 'active' == s:
|
if 'active' == s:
|
||||||
wwpns.add(i)
|
wwpns.add(i)
|
||||||
node['WWPN'] = list(wwpns)
|
node['WWPN'] = list(wwpns)
|
||||||
LOG.info(_LI('WWPN on node %(node)s: %(wwpn)s')
|
LOG.info(_LI('WWPN on node %(node)s: %(wwpn)s'),
|
||||||
% {'node': node['id'], 'wwpn': node['WWPN']})
|
{'node': node['id'], 'wwpn': node['WWPN']})
|
||||||
|
|
||||||
def add_chap_secret_to_host(self, host_name):
|
def add_chap_secret_to_host(self, host_name):
|
||||||
"""Generate and store a randomly-generated CHAP secret for the host."""
|
"""Generate and store a randomly-generated CHAP secret for the host."""
|
||||||
|
@ -192,7 +192,7 @@ class StorwizeHelpers(object):
|
||||||
|
|
||||||
def get_host_from_connector(self, connector):
|
def get_host_from_connector(self, connector):
|
||||||
"""Return the Storwize host described by the connector."""
|
"""Return the Storwize host described by the connector."""
|
||||||
LOG.debug('enter: get_host_from_connector: %s' % connector)
|
LOG.debug('enter: get_host_from_connector: %s', connector)
|
||||||
|
|
||||||
# If we have FC information, we have a faster lookup option
|
# If we have FC information, we have a faster lookup option
|
||||||
host_name = None
|
host_name = None
|
||||||
|
@ -210,7 +210,7 @@ class StorwizeHelpers(object):
|
||||||
self.handle_keyerror('lsfabric', wwpn_info)
|
self.handle_keyerror('lsfabric', wwpn_info)
|
||||||
|
|
||||||
if host_name:
|
if host_name:
|
||||||
LOG.debug('leave: get_host_from_connector: host %s' % host_name)
|
LOG.debug('leave: get_host_from_connector: host %s', host_name)
|
||||||
return host_name
|
return host_name
|
||||||
|
|
||||||
# That didn't work, so try exhaustive search
|
# That didn't work, so try exhaustive search
|
||||||
|
@ -234,7 +234,7 @@ class StorwizeHelpers(object):
|
||||||
if found:
|
if found:
|
||||||
break
|
break
|
||||||
|
|
||||||
LOG.debug('leave: get_host_from_connector: host %s' % host_name)
|
LOG.debug('leave: get_host_from_connector: host %s', host_name)
|
||||||
return host_name
|
return host_name
|
||||||
|
|
||||||
def create_host(self, connector):
|
def create_host(self, connector):
|
||||||
|
@ -245,7 +245,7 @@ class StorwizeHelpers(object):
|
||||||
host name (at most 55 characters), plus a random 8-character suffix to
|
host name (at most 55 characters), plus a random 8-character suffix to
|
||||||
avoid collisions. The total length should be at most 63 characters.
|
avoid collisions. The total length should be at most 63 characters.
|
||||||
"""
|
"""
|
||||||
LOG.debug('enter: create_host: host %s' % connector['host'])
|
LOG.debug('enter: create_host: host %s', connector['host'])
|
||||||
|
|
||||||
# Before we start, make sure host name is a string and that we have at
|
# Before we start, make sure host name is a string and that we have at
|
||||||
# least one port.
|
# least one port.
|
||||||
|
@ -292,7 +292,7 @@ class StorwizeHelpers(object):
|
||||||
for port in ports:
|
for port in ports:
|
||||||
self.ssh.addhostport(host_name, port[0], port[1])
|
self.ssh.addhostport(host_name, port[0], port[1])
|
||||||
|
|
||||||
LOG.debug('leave: create_host: host %(host)s - %(host_name)s' %
|
LOG.debug('leave: create_host: host %(host)s - %(host_name)s',
|
||||||
{'host': connector['host'], 'host_name': host_name})
|
{'host': connector['host'], 'host_name': host_name})
|
||||||
return host_name
|
return host_name
|
||||||
|
|
||||||
|
@ -303,8 +303,8 @@ class StorwizeHelpers(object):
|
||||||
"""Create a mapping between a volume to a host."""
|
"""Create a mapping between a volume to a host."""
|
||||||
|
|
||||||
LOG.debug('enter: map_vol_to_host: volume %(volume_name)s to '
|
LOG.debug('enter: map_vol_to_host: volume %(volume_name)s to '
|
||||||
'host %(host_name)s'
|
'host %(host_name)s',
|
||||||
% {'volume_name': volume_name, 'host_name': host_name})
|
{'volume_name': volume_name, 'host_name': host_name})
|
||||||
|
|
||||||
# Check if this volume is already mapped to this host
|
# Check if this volume is already mapped to this host
|
||||||
mapped = False
|
mapped = False
|
||||||
|
@ -329,7 +329,7 @@ class StorwizeHelpers(object):
|
||||||
multihostmap)
|
multihostmap)
|
||||||
|
|
||||||
LOG.debug('leave: map_vol_to_host: LUN %(result_lun)s, volume '
|
LOG.debug('leave: map_vol_to_host: LUN %(result_lun)s, volume '
|
||||||
'%(volume_name)s, host %(host_name)s' %
|
'%(volume_name)s, host %(host_name)s',
|
||||||
{'result_lun': result_lun,
|
{'result_lun': result_lun,
|
||||||
'volume_name': volume_name,
|
'volume_name': volume_name,
|
||||||
'host_name': host_name})
|
'host_name': host_name})
|
||||||
|
@ -339,21 +339,21 @@ class StorwizeHelpers(object):
|
||||||
"""Unmap the volume and delete the host if it has no more mappings."""
|
"""Unmap the volume and delete the host if it has no more mappings."""
|
||||||
|
|
||||||
LOG.debug('enter: unmap_vol_from_host: volume %(volume_name)s from '
|
LOG.debug('enter: unmap_vol_from_host: volume %(volume_name)s from '
|
||||||
'host %(host_name)s'
|
'host %(host_name)s',
|
||||||
% {'volume_name': volume_name, 'host_name': host_name})
|
{'volume_name': volume_name, 'host_name': host_name})
|
||||||
|
|
||||||
# Check if the mapping exists
|
# Check if the mapping exists
|
||||||
resp = self.ssh.lsvdiskhostmap(volume_name)
|
resp = self.ssh.lsvdiskhostmap(volume_name)
|
||||||
if not len(resp):
|
if not len(resp):
|
||||||
LOG.warning(_LW('unmap_vol_from_host: No mapping of volume '
|
LOG.warning(_LW('unmap_vol_from_host: No mapping of volume '
|
||||||
'%(vol_name)s to any host found.') %
|
'%(vol_name)s to any host found.'),
|
||||||
{'vol_name': volume_name})
|
{'vol_name': volume_name})
|
||||||
return
|
return
|
||||||
if host_name is None:
|
if host_name is None:
|
||||||
if len(resp) > 1:
|
if len(resp) > 1:
|
||||||
LOG.warning(_LW('unmap_vol_from_host: Multiple mappings of '
|
LOG.warning(_LW('unmap_vol_from_host: Multiple mappings of '
|
||||||
'volume %(vol_name)s found, no host '
|
'volume %(vol_name)s found, no host '
|
||||||
'specified.') % {'vol_name': volume_name})
|
'specified.'), {'vol_name': volume_name})
|
||||||
return
|
return
|
||||||
else:
|
else:
|
||||||
host_name = resp[0]['host_name']
|
host_name = resp[0]['host_name']
|
||||||
|
@ -364,7 +364,7 @@ class StorwizeHelpers(object):
|
||||||
found = True
|
found = True
|
||||||
if not found:
|
if not found:
|
||||||
LOG.warning(_LW('unmap_vol_from_host: No mapping of volume '
|
LOG.warning(_LW('unmap_vol_from_host: No mapping of volume '
|
||||||
'%(vol_name)s to host %(host)s found.') %
|
'%(vol_name)s to host %(host)s found.'),
|
||||||
{'vol_name': volume_name, 'host': host_name})
|
{'vol_name': volume_name, 'host': host_name})
|
||||||
|
|
||||||
# We now know that the mapping exists
|
# We now know that the mapping exists
|
||||||
|
@ -376,8 +376,8 @@ class StorwizeHelpers(object):
|
||||||
self.delete_host(host_name)
|
self.delete_host(host_name)
|
||||||
|
|
||||||
LOG.debug('leave: unmap_vol_from_host: volume %(volume_name)s from '
|
LOG.debug('leave: unmap_vol_from_host: volume %(volume_name)s from '
|
||||||
'host %(host_name)s'
|
'host %(host_name)s',
|
||||||
% {'volume_name': volume_name, 'host_name': host_name})
|
{'volume_name': volume_name, 'host_name': host_name})
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def build_default_opts(config):
|
def build_default_opts(config):
|
||||||
|
@ -627,10 +627,10 @@ class StorwizeHelpers(object):
|
||||||
return params
|
return params
|
||||||
|
|
||||||
def create_vdisk(self, name, size, units, pool, opts):
|
def create_vdisk(self, name, size, units, pool, opts):
|
||||||
LOG.debug('enter: create_vdisk: vdisk %s ' % name)
|
LOG.debug('enter: create_vdisk: vdisk %s ', name)
|
||||||
params = self._get_vdisk_create_params(opts)
|
params = self._get_vdisk_create_params(opts)
|
||||||
self.ssh.mkvdisk(name, size, units, pool, opts, params)
|
self.ssh.mkvdisk(name, size, units, pool, opts, params)
|
||||||
LOG.debug('leave: _create_vdisk: volume %s ' % name)
|
LOG.debug('leave: _create_vdisk: volume %s ', name)
|
||||||
|
|
||||||
def get_vdisk_attributes(self, vdisk):
|
def get_vdisk_attributes(self, vdisk):
|
||||||
attrs = self.ssh.lsvdisk(vdisk)
|
attrs = self.ssh.lsvdisk(vdisk)
|
||||||
|
@ -877,7 +877,7 @@ class StorwizeHelpers(object):
|
||||||
|
|
||||||
def _check_vdisk_fc_mappings(self, name, allow_snaps=True):
|
def _check_vdisk_fc_mappings(self, name, allow_snaps=True):
|
||||||
"""FlashCopy mapping check helper."""
|
"""FlashCopy mapping check helper."""
|
||||||
LOG.debug('Loopcall: _check_vdisk_fc_mappings(), vdisk %s' % name)
|
LOG.debug('Loopcall: _check_vdisk_fc_mappings(), vdisk %s', name)
|
||||||
mapping_ids = self._get_vdisk_fc_mappings(name)
|
mapping_ids = self._get_vdisk_fc_mappings(name)
|
||||||
wait_for_copy = False
|
wait_for_copy = False
|
||||||
for map_id in mapping_ids:
|
for map_id in mapping_ids:
|
||||||
|
@ -936,26 +936,26 @@ class StorwizeHelpers(object):
|
||||||
# before it finishes. Don't set the sleep interval shorter
|
# before it finishes. Don't set the sleep interval shorter
|
||||||
# than the heartbeat. Otherwise volume service heartbeat
|
# than the heartbeat. Otherwise volume service heartbeat
|
||||||
# will not be serviced.
|
# will not be serviced.
|
||||||
LOG.debug('Calling _ensure_vdisk_no_fc_mappings: vdisk %s'
|
LOG.debug('Calling _ensure_vdisk_no_fc_mappings: vdisk %s',
|
||||||
% name)
|
name)
|
||||||
ret = timer.start(interval=self.check_fcmapping_interval).wait()
|
ret = timer.start(interval=self.check_fcmapping_interval).wait()
|
||||||
timer.stop()
|
timer.stop()
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
def delete_vdisk(self, vdisk, force):
|
def delete_vdisk(self, vdisk, force):
|
||||||
"""Ensures that vdisk is not part of FC mapping and deletes it."""
|
"""Ensures that vdisk is not part of FC mapping and deletes it."""
|
||||||
LOG.debug('enter: delete_vdisk: vdisk %s' % vdisk)
|
LOG.debug('enter: delete_vdisk: vdisk %s', vdisk)
|
||||||
if not self.is_vdisk_defined(vdisk):
|
if not self.is_vdisk_defined(vdisk):
|
||||||
LOG.info(_LI('Tried to delete non-existent vdisk %s.') % vdisk)
|
LOG.info(_LI('Tried to delete non-existent vdisk %s.'), vdisk)
|
||||||
return
|
return
|
||||||
self.ensure_vdisk_no_fc_mappings(vdisk)
|
self.ensure_vdisk_no_fc_mappings(vdisk)
|
||||||
self.ssh.rmvdisk(vdisk, force=force)
|
self.ssh.rmvdisk(vdisk, force=force)
|
||||||
LOG.debug('leave: delete_vdisk: vdisk %s' % vdisk)
|
LOG.debug('leave: delete_vdisk: vdisk %s', vdisk)
|
||||||
|
|
||||||
def create_copy(self, src, tgt, src_id, config, opts,
|
def create_copy(self, src, tgt, src_id, config, opts,
|
||||||
full_copy, pool=None):
|
full_copy, pool=None):
|
||||||
"""Create a new snapshot using FlashCopy."""
|
"""Create a new snapshot using FlashCopy."""
|
||||||
LOG.debug('enter: create_copy: snapshot %(src)s to %(tgt)s' %
|
LOG.debug('enter: create_copy: snapshot %(src)s to %(tgt)s',
|
||||||
{'tgt': tgt, 'src': src})
|
{'tgt': tgt, 'src': src})
|
||||||
|
|
||||||
src_attrs = self.get_vdisk_attributes(src)
|
src_attrs = self.get_vdisk_attributes(src)
|
||||||
|
@ -978,7 +978,7 @@ class StorwizeHelpers(object):
|
||||||
self.delete_vdisk(tgt, True)
|
self.delete_vdisk(tgt, True)
|
||||||
|
|
||||||
LOG.debug('leave: _create_copy: snapshot %(tgt)s from '
|
LOG.debug('leave: _create_copy: snapshot %(tgt)s from '
|
||||||
'vdisk %(src)s' %
|
'vdisk %(src)s',
|
||||||
{'tgt': tgt, 'src': src})
|
{'tgt': tgt, 'src': src})
|
||||||
|
|
||||||
def extend_vdisk(self, vdisk, amount):
|
def extend_vdisk(self, vdisk, amount):
|
||||||
|
@ -1080,7 +1080,7 @@ class StorwizeHelpers(object):
|
||||||
def change_vdisk_iogrp(self, vdisk, state, iogrp):
|
def change_vdisk_iogrp(self, vdisk, state, iogrp):
|
||||||
if state['code_level'] < (6, 4, 0, 0):
|
if state['code_level'] < (6, 4, 0, 0):
|
||||||
LOG.debug('Ignore change IO group as storage code level is '
|
LOG.debug('Ignore change IO group as storage code level is '
|
||||||
'%(code_level)s, below the required 6.4.0.0' %
|
'%(code_level)s, below the required 6.4.0.0',
|
||||||
{'code_level': state['code_level']})
|
{'code_level': state['code_level']})
|
||||||
else:
|
else:
|
||||||
self.ssh.movevdisk(vdisk, str(iogrp[0]))
|
self.ssh.movevdisk(vdisk, str(iogrp[0]))
|
||||||
|
|
|
@ -17,7 +17,7 @@
|
||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _
|
from cinder.i18n import _, _LI
|
||||||
from cinder.volume import volume_types
|
from cinder.volume import volume_types
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
@ -89,8 +89,8 @@ class StorwizeSVCReplicationStretchedCluster(StorwizeSVCReplication):
|
||||||
self.driver._helpers.rm_vdisk_copy(volume['name'],
|
self.driver._helpers.rm_vdisk_copy(volume['name'],
|
||||||
secondary['copy_id'])
|
secondary['copy_id'])
|
||||||
else:
|
else:
|
||||||
LOG.info(('Could not find replica to delete of'
|
LOG.info(_LI('Could not find replica to delete of'
|
||||||
' volume %(vol)s.') % {'vol': vdisk})
|
' volume %(vol)s.'), {'vol': vdisk})
|
||||||
|
|
||||||
def test_replica(self, tgt_volume, src_volume):
|
def test_replica(self, tgt_volume, src_volume):
|
||||||
vdisk = src_volume['name']
|
vdisk = src_volume['name']
|
||||||
|
|
|
@ -167,10 +167,10 @@ class StorwizeSSH(object):
|
||||||
if not multihostmap:
|
if not multihostmap:
|
||||||
LOG.error(_LE('storwize_svc_multihostmap_enabled is set '
|
LOG.error(_LE('storwize_svc_multihostmap_enabled is set '
|
||||||
'to False, not allowing multi host mapping.'))
|
'to False, not allowing multi host mapping.'))
|
||||||
msg = 'CMMVC6071E The VDisk-to-host mapping '\
|
raise exception.VolumeDriverException(
|
||||||
'was not created because the VDisk is '\
|
message=_('CMMVC6071E The VDisk-to-host mapping was not '
|
||||||
'already mapped to a host.\n"'
|
'created because the VDisk is already mapped '
|
||||||
raise exception.VolumeDriverException(message=msg)
|
'to a host.\n"'))
|
||||||
|
|
||||||
ssh_cmd.insert(ssh_cmd.index('mkvdiskhostmap') + 1, '-force')
|
ssh_cmd.insert(ssh_cmd.index('mkvdiskhostmap') + 1, '-force')
|
||||||
return self.run_ssh_check_created(ssh_cmd)
|
return self.run_ssh_check_created(ssh_cmd)
|
||||||
|
|
|
@ -24,6 +24,7 @@ from oslo_config import cfg
|
||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
from oslo_utils import importutils
|
from oslo_utils import importutils
|
||||||
from oslo_utils import units
|
from oslo_utils import units
|
||||||
|
import six
|
||||||
|
|
||||||
from cinder.brick import exception as brick_exception
|
from cinder.brick import exception as brick_exception
|
||||||
from cinder.brick.local_dev import lvm as lvm
|
from cinder.brick.local_dev import lvm as lvm
|
||||||
|
@ -132,15 +133,15 @@ class LVMVolumeDriver(driver.VolumeDriver):
|
||||||
# the cow table and only overwriting what's necessary?
|
# the cow table and only overwriting what's necessary?
|
||||||
# for now we're still skipping on snaps due to hang issue
|
# for now we're still skipping on snaps due to hang issue
|
||||||
if not os.path.exists(dev_path):
|
if not os.path.exists(dev_path):
|
||||||
msg = (_LE('Volume device file path %s does not exist.')
|
msg = (_('Volume device file path %s does not exist.')
|
||||||
% dev_path)
|
% dev_path)
|
||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
raise exception.VolumeBackendAPIException(data=msg)
|
raise exception.VolumeBackendAPIException(data=msg)
|
||||||
|
|
||||||
size_in_g = volume.get('volume_size') or volume.get('size')
|
size_in_g = volume.get('volume_size') or volume.get('size')
|
||||||
if size_in_g is None:
|
if size_in_g is None:
|
||||||
msg = (_LE("Size for volume: %s not found, "
|
msg = (_("Size for volume: %s not found, cannot secure delete.")
|
||||||
"cannot secure delete.") % volume['id'])
|
% volume['id'])
|
||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
raise exception.InvalidParameterValue(msg)
|
raise exception.InvalidParameterValue(msg)
|
||||||
|
|
||||||
|
@ -170,7 +171,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
|
||||||
def _update_volume_stats(self):
|
def _update_volume_stats(self):
|
||||||
"""Retrieve stats info from volume group."""
|
"""Retrieve stats info from volume group."""
|
||||||
|
|
||||||
LOG.debug(("Updating volume stats"))
|
LOG.debug("Updating volume stats")
|
||||||
if self.vg is None:
|
if self.vg is None:
|
||||||
LOG.warning(_LW('Unable to update stats on non-initialized '
|
LOG.warning(_LW('Unable to update stats on non-initialized '
|
||||||
'Volume Group: %s'),
|
'Volume Group: %s'),
|
||||||
|
@ -290,7 +291,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
|
||||||
except processutils.ProcessExecutionError as exc:
|
except processutils.ProcessExecutionError as exc:
|
||||||
exception_message = (_("Failed to create thin pool, "
|
exception_message = (_("Failed to create thin pool, "
|
||||||
"error message was: %s")
|
"error message was: %s")
|
||||||
% exc.stderr)
|
% six.text_type(exc.stderr))
|
||||||
raise exception.VolumeBackendAPIException(
|
raise exception.VolumeBackendAPIException(
|
||||||
data=exception_message)
|
data=exception_message)
|
||||||
|
|
||||||
|
@ -336,8 +337,8 @@ class LVMVolumeDriver(driver.VolumeDriver):
|
||||||
return True
|
return True
|
||||||
|
|
||||||
if self.vg.lv_has_snapshot(volume['name']):
|
if self.vg.lv_has_snapshot(volume['name']):
|
||||||
LOG.error(_LE('Unabled to delete due to existing snapshot '
|
LOG.error(_LE('Unable to delete due to existing snapshot '
|
||||||
'for volume: %s') % volume['name'])
|
'for volume: %s'), volume['name'])
|
||||||
raise exception.VolumeIsBusy(volume_name=volume['name'])
|
raise exception.VolumeIsBusy(volume_name=volume['name'])
|
||||||
|
|
||||||
self._delete_volume(volume)
|
self._delete_volume(volume)
|
||||||
|
@ -355,7 +356,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
|
||||||
if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
|
if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
|
||||||
# If the snapshot isn't present, then don't attempt to delete
|
# If the snapshot isn't present, then don't attempt to delete
|
||||||
LOG.warning(_LW("snapshot: %s not found, "
|
LOG.warning(_LW("snapshot: %s not found, "
|
||||||
"skipping delete operations") % snapshot['name'])
|
"skipping delete operations"), snapshot['name'])
|
||||||
LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id'])
|
LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id'])
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
@ -393,7 +394,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
|
||||||
mirror_count = 0
|
mirror_count = 0
|
||||||
if self.configuration.lvm_mirrors:
|
if self.configuration.lvm_mirrors:
|
||||||
mirror_count = self.configuration.lvm_mirrors
|
mirror_count = self.configuration.lvm_mirrors
|
||||||
LOG.info(_LI('Creating clone of volume: %s') % src_vref['id'])
|
LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
|
||||||
volume_name = src_vref['name']
|
volume_name = src_vref['name']
|
||||||
temp_id = 'tmp-snap-%s' % volume['id']
|
temp_id = 'tmp-snap-%s' % volume['id']
|
||||||
temp_snapshot = {'volume_name': volume_name,
|
temp_snapshot = {'volume_name': volume_name,
|
||||||
|
@ -541,9 +542,8 @@ class LVMVolumeDriver(driver.VolumeDriver):
|
||||||
try:
|
try:
|
||||||
(vg for vg in vg_list if vg['name'] == dest_vg).next()
|
(vg for vg in vg_list if vg['name'] == dest_vg).next()
|
||||||
except StopIteration:
|
except StopIteration:
|
||||||
message = (_LE("Destination Volume Group %s does not exist") %
|
LOG.error(_LE("Destination Volume Group %s does not exist"),
|
||||||
dest_vg)
|
dest_vg)
|
||||||
LOG.error(message)
|
|
||||||
return false_ret
|
return false_ret
|
||||||
|
|
||||||
helper = utils.get_root_helper()
|
helper = utils.get_root_helper()
|
||||||
|
@ -574,7 +574,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
|
||||||
else:
|
else:
|
||||||
message = (_("Refusing to migrate volume ID: %(id)s. Please "
|
message = (_("Refusing to migrate volume ID: %(id)s. Please "
|
||||||
"check your configuration because source and "
|
"check your configuration because source and "
|
||||||
"destination are the same Volume Group: %(name)s."),
|
"destination are the same Volume Group: %(name)s.") %
|
||||||
{'id': volume['id'], 'name': self.vg.vg_name})
|
{'id': volume['id'], 'name': self.vg.vg_name})
|
||||||
LOG.exception(message)
|
LOG.exception(message)
|
||||||
raise exception.VolumeBackendAPIException(data=message)
|
raise exception.VolumeBackendAPIException(data=message)
|
||||||
|
|
|
@ -73,8 +73,8 @@ class NetAppDriver(driver.ProxyVD):
|
||||||
na_utils.check_flags(NetAppDriver.REQUIRED_FLAGS, config)
|
na_utils.check_flags(NetAppDriver.REQUIRED_FLAGS, config)
|
||||||
|
|
||||||
app_version = na_utils.OpenStackInfo().info()
|
app_version = na_utils.OpenStackInfo().info()
|
||||||
LOG.info(_LI('OpenStack OS Version Info: %(info)s') % {
|
LOG.info(_LI('OpenStack OS Version Info: %(info)s'),
|
||||||
'info': app_version})
|
{'info': app_version})
|
||||||
kwargs['app_version'] = app_version
|
kwargs['app_version'] = app_version
|
||||||
|
|
||||||
return NetAppDriver.create_driver(config.netapp_storage_family,
|
return NetAppDriver.create_driver(config.netapp_storage_family,
|
||||||
|
@ -91,7 +91,7 @@ class NetAppDriver(driver.ProxyVD):
|
||||||
fmt = {'storage_family': storage_family,
|
fmt = {'storage_family': storage_family,
|
||||||
'storage_protocol': storage_protocol}
|
'storage_protocol': storage_protocol}
|
||||||
LOG.info(_LI('Requested unified config: %(storage_family)s and '
|
LOG.info(_LI('Requested unified config: %(storage_family)s and '
|
||||||
'%(storage_protocol)s.') % fmt)
|
'%(storage_protocol)s.'), fmt)
|
||||||
|
|
||||||
family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family)
|
family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family)
|
||||||
if family_meta is None:
|
if family_meta is None:
|
||||||
|
@ -109,5 +109,5 @@ class NetAppDriver(driver.ProxyVD):
|
||||||
kwargs['netapp_mode'] = 'proxy'
|
kwargs['netapp_mode'] = 'proxy'
|
||||||
driver = importutils.import_object(driver_loc, *args, **kwargs)
|
driver = importutils.import_object(driver_loc, *args, **kwargs)
|
||||||
LOG.info(_LI('NetApp driver of family %(storage_family)s and protocol '
|
LOG.info(_LI('NetApp driver of family %(storage_family)s and protocol '
|
||||||
'%(storage_protocol)s loaded.') % fmt)
|
'%(storage_protocol)s loaded.'), fmt)
|
||||||
return driver
|
return driver
|
||||||
|
|
|
@ -24,7 +24,6 @@ Volume driver library for NetApp 7-mode block storage systems.
|
||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
from oslo_utils import timeutils
|
from oslo_utils import timeutils
|
||||||
from oslo_utils import units
|
from oslo_utils import units
|
||||||
import six
|
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _, _LW
|
from cinder.i18n import _, _LW
|
||||||
|
@ -125,7 +124,7 @@ class NetAppBlockStorage7modeLibrary(block_base.
|
||||||
if self._get_vol_option(volume_name, 'root') == 'true':
|
if self._get_vol_option(volume_name, 'root') == 'true':
|
||||||
return volume_name
|
return volume_name
|
||||||
LOG.warning(_LW('Could not determine root volume name '
|
LOG.warning(_LW('Could not determine root volume name '
|
||||||
'on %s.') % self._get_owner())
|
'on %s.'), self._get_owner())
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def _get_owner(self):
|
def _get_owner(self):
|
||||||
|
@ -314,7 +313,7 @@ class NetAppBlockStorage7modeLibrary(block_base.
|
||||||
self.vol_refresh_time = timeutils.utcnow()
|
self.vol_refresh_time = timeutils.utcnow()
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.warning(_LW("Error refreshing volume info. Message: %s"),
|
LOG.warning(_LW("Error refreshing volume info. Message: %s"),
|
||||||
six.text_type(e))
|
e)
|
||||||
finally:
|
finally:
|
||||||
na_utils.set_safe_attr(self, 'vol_refresh_running', False)
|
na_utils.set_safe_attr(self, 'vol_refresh_running', False)
|
||||||
|
|
||||||
|
|
|
@ -56,9 +56,8 @@ class NetAppLun(object):
|
||||||
if prop in self.metadata:
|
if prop in self.metadata:
|
||||||
return self.metadata[prop]
|
return self.metadata[prop]
|
||||||
name = self.name
|
name = self.name
|
||||||
msg = _("No metadata property %(prop)s defined for the LUN %(name)s")
|
LOG.debug("No metadata property %(prop)s defined for the LUN %(name)s",
|
||||||
msg_fmt = {'prop': prop, 'name': name}
|
{'prop': prop, 'name': name})
|
||||||
LOG.debug(msg % msg_fmt)
|
|
||||||
|
|
||||||
def __str__(self, *args, **kwargs):
|
def __str__(self, *args, **kwargs):
|
||||||
return 'NetApp Lun[handle:%s, name:%s, size:%s, metadata:%s]'\
|
return 'NetApp Lun[handle:%s, name:%s, size:%s, metadata:%s]'\
|
||||||
|
@ -144,7 +143,7 @@ class NetAppBlockStorageLibrary(object):
|
||||||
def create_volume(self, volume):
|
def create_volume(self, volume):
|
||||||
"""Driver entry point for creating a new volume (Data ONTAP LUN)."""
|
"""Driver entry point for creating a new volume (Data ONTAP LUN)."""
|
||||||
|
|
||||||
LOG.debug('create_volume on %s' % volume['host'])
|
LOG.debug('create_volume on %s', volume['host'])
|
||||||
|
|
||||||
# get Data ONTAP volume name as pool name
|
# get Data ONTAP volume name as pool name
|
||||||
ontap_volume_name = volume_utils.extract_host(volume['host'],
|
ontap_volume_name = volume_utils.extract_host(volume['host'],
|
||||||
|
@ -174,7 +173,7 @@ class NetAppBlockStorageLibrary(object):
|
||||||
|
|
||||||
self._create_lun(ontap_volume_name, lun_name, size,
|
self._create_lun(ontap_volume_name, lun_name, size,
|
||||||
metadata, qos_policy_group)
|
metadata, qos_policy_group)
|
||||||
LOG.debug('Created LUN with name %s' % lun_name)
|
LOG.debug('Created LUN with name %s', lun_name)
|
||||||
|
|
||||||
metadata['Path'] = '/vol/%s/%s' % (ontap_volume_name, lun_name)
|
metadata['Path'] = '/vol/%s/%s' % (ontap_volume_name, lun_name)
|
||||||
metadata['Volume'] = ontap_volume_name
|
metadata['Volume'] = ontap_volume_name
|
||||||
|
@ -188,9 +187,8 @@ class NetAppBlockStorageLibrary(object):
|
||||||
name = volume['name']
|
name = volume['name']
|
||||||
metadata = self._get_lun_attr(name, 'metadata')
|
metadata = self._get_lun_attr(name, 'metadata')
|
||||||
if not metadata:
|
if not metadata:
|
||||||
msg = _LW("No entry in LUN table for volume/snapshot %(name)s.")
|
LOG.warning(_LW("No entry in LUN table for volume/snapshot"
|
||||||
msg_fmt = {'name': name}
|
" %(name)s."), {'name': name})
|
||||||
LOG.warning(msg % msg_fmt)
|
|
||||||
return
|
return
|
||||||
self.zapi_client.destroy_lun(metadata['Path'])
|
self.zapi_client.destroy_lun(metadata['Path'])
|
||||||
self.lun_table.pop(name)
|
self.lun_table.pop(name)
|
||||||
|
@ -229,7 +227,7 @@ class NetAppBlockStorageLibrary(object):
|
||||||
def delete_snapshot(self, snapshot):
|
def delete_snapshot(self, snapshot):
|
||||||
"""Driver entry point for deleting a snapshot."""
|
"""Driver entry point for deleting a snapshot."""
|
||||||
self.delete_volume(snapshot)
|
self.delete_volume(snapshot)
|
||||||
LOG.debug("Snapshot %s deletion successful" % snapshot['name'])
|
LOG.debug("Snapshot %s deletion successful", snapshot['name'])
|
||||||
|
|
||||||
def create_volume_from_snapshot(self, volume, snapshot):
|
def create_volume_from_snapshot(self, volume, snapshot):
|
||||||
"""Driver entry point for creating a new volume from a snapshot.
|
"""Driver entry point for creating a new volume from a snapshot.
|
||||||
|
@ -381,8 +379,7 @@ class NetAppBlockStorageLibrary(object):
|
||||||
except exception.VolumeNotFound as e:
|
except exception.VolumeNotFound as e:
|
||||||
LOG.error(_LE("Message: %s"), e.msg)
|
LOG.error(_LE("Message: %s"), e.msg)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("Error getting LUN attribute. Exception: %s"),
|
LOG.error(_LE("Error getting LUN attribute. Exception: %s"), e)
|
||||||
e.__str__())
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def _create_lun_meta(self, lun):
|
def _create_lun_meta(self, lun):
|
||||||
|
@ -518,7 +515,7 @@ class NetAppBlockStorageLibrary(object):
|
||||||
else:
|
else:
|
||||||
LOG.error(_LE("Unknown exception in"
|
LOG.error(_LE("Unknown exception in"
|
||||||
" post clone resize LUN %s."), seg[-1])
|
" post clone resize LUN %s."), seg[-1])
|
||||||
LOG.error(_LE("Exception details: %s") % (e.__str__()))
|
LOG.error(_LE("Exception details: %s"), e)
|
||||||
|
|
||||||
def _get_lun_block_count(self, path):
|
def _get_lun_block_count(self, path):
|
||||||
"""Gets block counts for the LUN."""
|
"""Gets block counts for the LUN."""
|
||||||
|
@ -633,19 +630,17 @@ class NetAppBlockStorageLibrary(object):
|
||||||
name = volume['name']
|
name = volume['name']
|
||||||
lun_id = self._map_lun(name, [initiator_name], 'iscsi', None)
|
lun_id = self._map_lun(name, [initiator_name], 'iscsi', None)
|
||||||
|
|
||||||
msg = "Mapped LUN %(name)s to the initiator %(initiator_name)s"
|
LOG.debug("Mapped LUN %(name)s to the initiator %(initiator_name)s",
|
||||||
msg_fmt = {'name': name, 'initiator_name': initiator_name}
|
{'name': name, 'initiator_name': initiator_name})
|
||||||
LOG.debug(msg % msg_fmt)
|
|
||||||
|
|
||||||
target_list = self.zapi_client.get_iscsi_target_details()
|
target_list = self.zapi_client.get_iscsi_target_details()
|
||||||
if not target_list:
|
if not target_list:
|
||||||
msg = _('Failed to get LUN target list for the LUN %s')
|
raise exception.VolumeBackendAPIException(
|
||||||
raise exception.VolumeBackendAPIException(data=msg % name)
|
data=_('Failed to get LUN target list for the LUN %s') % name)
|
||||||
|
|
||||||
msg = ("Successfully fetched target list for LUN %(name)s and "
|
LOG.debug("Successfully fetched target list for LUN %(name)s and "
|
||||||
"initiator %(initiator_name)s")
|
"initiator %(initiator_name)s",
|
||||||
msg_fmt = {'name': name, 'initiator_name': initiator_name}
|
{'name': name, 'initiator_name': initiator_name})
|
||||||
LOG.debug(msg % msg_fmt)
|
|
||||||
|
|
||||||
preferred_target = self._get_preferred_target_from_list(
|
preferred_target = self._get_preferred_target_from_list(
|
||||||
target_list)
|
target_list)
|
||||||
|
@ -690,9 +685,9 @@ class NetAppBlockStorageLibrary(object):
|
||||||
metadata = self._get_lun_attr(name, 'metadata')
|
metadata = self._get_lun_attr(name, 'metadata')
|
||||||
path = metadata['Path']
|
path = metadata['Path']
|
||||||
self._unmap_lun(path, [initiator_name])
|
self._unmap_lun(path, [initiator_name])
|
||||||
msg = _("Unmapped LUN %(name)s from the initiator %(initiator_name)s")
|
LOG.debug("Unmapped LUN %(name)s from the initiator "
|
||||||
msg_fmt = {'name': name, 'initiator_name': initiator_name}
|
"%(initiator_name)s",
|
||||||
LOG.debug(msg % msg_fmt)
|
{'name': name, 'initiator_name': initiator_name})
|
||||||
|
|
||||||
def initialize_connection_fc(self, volume, connector):
|
def initialize_connection_fc(self, volume, connector):
|
||||||
"""Initializes the connection and returns connection info.
|
"""Initializes the connection and returns connection info.
|
||||||
|
@ -744,21 +739,20 @@ class NetAppBlockStorageLibrary(object):
|
||||||
|
|
||||||
lun_id = self._map_lun(volume_name, initiators, 'fcp', None)
|
lun_id = self._map_lun(volume_name, initiators, 'fcp', None)
|
||||||
|
|
||||||
msg = _("Mapped LUN %(name)s to the initiator(s) %(initiators)s")
|
LOG.debug("Mapped LUN %(name)s to the initiator(s) %(initiators)s",
|
||||||
msg_fmt = {'name': volume_name, 'initiators': initiators}
|
{'name': volume_name, 'initiators': initiators})
|
||||||
LOG.debug(msg % msg_fmt)
|
|
||||||
|
|
||||||
target_wwpns, initiator_target_map, num_paths = \
|
target_wwpns, initiator_target_map, num_paths = \
|
||||||
self._build_initiator_target_map(connector)
|
self._build_initiator_target_map(connector)
|
||||||
|
|
||||||
if target_wwpns:
|
if target_wwpns:
|
||||||
msg = _("Successfully fetched target details for LUN %(name)s "
|
LOG.debug("Successfully fetched target details for LUN %(name)s "
|
||||||
"and initiator(s) %(initiators)s")
|
"and initiator(s) %(initiators)s",
|
||||||
msg_fmt = {'name': volume_name, 'initiators': initiators}
|
{'name': volume_name, 'initiators': initiators})
|
||||||
LOG.debug(msg % msg_fmt)
|
|
||||||
else:
|
else:
|
||||||
msg = _('Failed to get LUN target details for the LUN %s')
|
raise exception.VolumeBackendAPIException(
|
||||||
raise exception.VolumeBackendAPIException(data=msg % volume_name)
|
data=_('Failed to get LUN target details for '
|
||||||
|
'the LUN %s') % volume_name)
|
||||||
|
|
||||||
target_info = {'driver_volume_type': 'fibre_channel',
|
target_info = {'driver_volume_type': 'fibre_channel',
|
||||||
'data': {'target_discovered': True,
|
'data': {'target_discovered': True,
|
||||||
|
@ -790,9 +784,8 @@ class NetAppBlockStorageLibrary(object):
|
||||||
|
|
||||||
self._unmap_lun(path, initiators)
|
self._unmap_lun(path, initiators)
|
||||||
|
|
||||||
msg = _("Unmapped LUN %(name)s from the initiator %(initiators)s")
|
LOG.debug("Unmapped LUN %(name)s from the initiator %(initiators)s",
|
||||||
msg_fmt = {'name': name, 'initiators': initiators}
|
{'name': name, 'initiators': initiators})
|
||||||
LOG.debug(msg % msg_fmt)
|
|
||||||
|
|
||||||
info = {'driver_volume_type': 'fibre_channel',
|
info = {'driver_volume_type': 'fibre_channel',
|
||||||
'data': {}}
|
'data': {}}
|
||||||
|
|
|
@ -111,7 +111,7 @@ class NetAppBlockStorageCmodeLibrary(block_base.
|
||||||
volume = metadata['Volume']
|
volume = metadata['Volume']
|
||||||
self.zapi_client.clone_lun(volume, name, new_name, space_reserved,
|
self.zapi_client.clone_lun(volume, name, new_name, space_reserved,
|
||||||
src_block=0, dest_block=0, block_count=0)
|
src_block=0, dest_block=0, block_count=0)
|
||||||
LOG.debug("Cloned LUN with new name %s" % new_name)
|
LOG.debug("Cloned LUN with new name %s", new_name)
|
||||||
lun = self.zapi_client.get_lun_by_args(vserver=self.vserver,
|
lun = self.zapi_client.get_lun_by_args(vserver=self.vserver,
|
||||||
path='/vol/%s/%s'
|
path='/vol/%s/%s'
|
||||||
% (volume, new_name))
|
% (volume, new_name))
|
||||||
|
|
|
@ -62,7 +62,7 @@ class NaServer(object):
|
||||||
self._password = password
|
self._password = password
|
||||||
self._refresh_conn = True
|
self._refresh_conn = True
|
||||||
|
|
||||||
LOG.debug('Using NetApp controller: %s' % self._host)
|
LOG.debug('Using NetApp controller: %s', self._host)
|
||||||
|
|
||||||
def get_transport_type(self):
|
def get_transport_type(self):
|
||||||
"""Get the transport type protocol."""
|
"""Get the transport type protocol."""
|
||||||
|
|
|
@ -127,7 +127,7 @@ class Client(client_base.Client):
|
||||||
lun_list.extend(luns)
|
lun_list.extend(luns)
|
||||||
except netapp_api.NaApiError:
|
except netapp_api.NaApiError:
|
||||||
LOG.warning(_LW("Error finding LUNs for volume %s."
|
LOG.warning(_LW("Error finding LUNs for volume %s."
|
||||||
" Verify volume exists.") % vol)
|
" Verify volume exists."), vol)
|
||||||
else:
|
else:
|
||||||
luns = self._get_vol_luns(None)
|
luns = self._get_vol_luns(None)
|
||||||
lun_list.extend(luns)
|
lun_list.extend(luns)
|
||||||
|
@ -262,10 +262,10 @@ class Client(client_base.Client):
|
||||||
if clone_ops_info.get_child_content('clone-state')\
|
if clone_ops_info.get_child_content('clone-state')\
|
||||||
== 'completed':
|
== 'completed':
|
||||||
LOG.debug("Clone operation with src %(name)s"
|
LOG.debug("Clone operation with src %(name)s"
|
||||||
" and dest %(new_name)s completed" % fmt)
|
" and dest %(new_name)s completed", fmt)
|
||||||
else:
|
else:
|
||||||
LOG.debug("Clone operation with src %(name)s"
|
LOG.debug("Clone operation with src %(name)s"
|
||||||
" and dest %(new_name)s failed" % fmt)
|
" and dest %(new_name)s failed", fmt)
|
||||||
raise netapp_api.NaApiError(
|
raise netapp_api.NaApiError(
|
||||||
clone_ops_info.get_child_content('error'),
|
clone_ops_info.get_child_content('error'),
|
||||||
clone_ops_info.get_child_content('reason'))
|
clone_ops_info.get_child_content('reason'))
|
||||||
|
@ -312,9 +312,8 @@ class Client(client_base.Client):
|
||||||
% (export_path))
|
% (export_path))
|
||||||
|
|
||||||
def clone_file(self, src_path, dest_path):
|
def clone_file(self, src_path, dest_path):
|
||||||
msg_fmt = {'src_path': src_path, 'dest_path': dest_path}
|
LOG.debug("Cloning with src %(src_path)s, dest %(dest_path)s",
|
||||||
LOG.debug("""Cloning with src %(src_path)s, dest %(dest_path)s"""
|
{'src_path': src_path, 'dest_path': dest_path})
|
||||||
% msg_fmt)
|
|
||||||
clone_start = netapp_api.NaElement.create_node_with_children(
|
clone_start = netapp_api.NaElement.create_node_with_children(
|
||||||
'clone-start',
|
'clone-start',
|
||||||
**{'source-path': src_path,
|
**{'source-path': src_path,
|
||||||
|
@ -392,8 +391,8 @@ class Client(client_base.Client):
|
||||||
'file-usage-get', **{'path': path})
|
'file-usage-get', **{'path': path})
|
||||||
res = self.connection.invoke_successfully(file_use)
|
res = self.connection.invoke_successfully(file_use)
|
||||||
bytes = res.get_child_content('unique-bytes')
|
bytes = res.get_child_content('unique-bytes')
|
||||||
LOG.debug('file-usage for path %(path)s is %(bytes)s'
|
LOG.debug('file-usage for path %(path)s is %(bytes)s',
|
||||||
% {'path': path, 'bytes': bytes})
|
{'path': path, 'bytes': bytes})
|
||||||
return bytes
|
return bytes
|
||||||
|
|
||||||
def get_ifconfig(self):
|
def get_ifconfig(self):
|
||||||
|
|
|
@ -83,12 +83,11 @@ class Client(object):
|
||||||
self.connection.invoke_successfully(lun_create, True)
|
self.connection.invoke_successfully(lun_create, True)
|
||||||
except netapp_api.NaApiError as ex:
|
except netapp_api.NaApiError as ex:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
msg = _LE("Error provisioning volume %(lun_name)s on "
|
LOG.error(_LE("Error provisioning volume %(lun_name)s on "
|
||||||
"%(volume_name)s. Details: %(ex)s")
|
"%(volume_name)s. Details: %(ex)s"),
|
||||||
msg_args = {'lun_name': lun_name,
|
{'lun_name': lun_name,
|
||||||
'volume_name': volume_name,
|
'volume_name': volume_name,
|
||||||
'ex': six.text_type(ex)}
|
'ex': ex})
|
||||||
LOG.error(msg % msg_args)
|
|
||||||
|
|
||||||
def destroy_lun(self, path, force=True):
|
def destroy_lun(self, path, force=True):
|
||||||
"""Destroys the LUN at the path."""
|
"""Destroys the LUN at the path."""
|
||||||
|
@ -99,7 +98,7 @@ class Client(object):
|
||||||
lun_destroy.add_new_child('force', 'true')
|
lun_destroy.add_new_child('force', 'true')
|
||||||
self.connection.invoke_successfully(lun_destroy, True)
|
self.connection.invoke_successfully(lun_destroy, True)
|
||||||
seg = path.split("/")
|
seg = path.split("/")
|
||||||
LOG.debug("Destroyed LUN %s" % seg[-1])
|
LOG.debug("Destroyed LUN %s", seg[-1])
|
||||||
|
|
||||||
def map_lun(self, path, igroup_name, lun_id=None):
|
def map_lun(self, path, igroup_name, lun_id=None):
|
||||||
"""Maps LUN to the initiator and returns LUN id assigned."""
|
"""Maps LUN to the initiator and returns LUN id assigned."""
|
||||||
|
@ -114,9 +113,8 @@ class Client(object):
|
||||||
except netapp_api.NaApiError as e:
|
except netapp_api.NaApiError as e:
|
||||||
code = e.code
|
code = e.code
|
||||||
message = e.message
|
message = e.message
|
||||||
msg = _LW('Error mapping LUN. Code :%(code)s, Message:%(message)s')
|
LOG.warning(_LW('Error mapping LUN. Code :%(code)s, Message: '
|
||||||
msg_fmt = {'code': code, 'message': message}
|
'%(message)s'), {'code': code, 'message': message})
|
||||||
LOG.warning(msg % msg_fmt)
|
|
||||||
raise
|
raise
|
||||||
|
|
||||||
def unmap_lun(self, path, igroup_name):
|
def unmap_lun(self, path, igroup_name):
|
||||||
|
@ -127,11 +125,10 @@ class Client(object):
|
||||||
try:
|
try:
|
||||||
self.connection.invoke_successfully(lun_unmap, True)
|
self.connection.invoke_successfully(lun_unmap, True)
|
||||||
except netapp_api.NaApiError as e:
|
except netapp_api.NaApiError as e:
|
||||||
msg = _LW("Error unmapping LUN. Code :%(code)s,"
|
|
||||||
" Message:%(message)s")
|
|
||||||
msg_fmt = {'code': e.code, 'message': e.message}
|
|
||||||
exc_info = sys.exc_info()
|
exc_info = sys.exc_info()
|
||||||
LOG.warning(msg % msg_fmt)
|
LOG.warning(_LW("Error unmapping LUN. Code :%(code)s, Message: "
|
||||||
|
"%(message)s"), {'code': e.code,
|
||||||
|
'message': e.message})
|
||||||
# if the LUN is already unmapped
|
# if the LUN is already unmapped
|
||||||
if e.code == '13115' or e.code == '9016':
|
if e.code == '13115' or e.code == '9016':
|
||||||
pass
|
pass
|
||||||
|
@ -186,8 +183,8 @@ class Client(object):
|
||||||
geometry['max_resize'] =\
|
geometry['max_resize'] =\
|
||||||
result.get_child_content("max-resize-size")
|
result.get_child_content("max-resize-size")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE("LUN %(path)s geometry failed. Message - %(msg)s")
|
LOG.error(_LE("LUN %(path)s geometry failed. Message - %(msg)s"),
|
||||||
% {'path': path, 'msg': e.message})
|
{'path': path, 'msg': e.message})
|
||||||
return geometry
|
return geometry
|
||||||
|
|
||||||
def get_volume_options(self, volume_name):
|
def get_volume_options(self, volume_name):
|
||||||
|
@ -205,8 +202,8 @@ class Client(object):
|
||||||
"""Moves the LUN at path to new path."""
|
"""Moves the LUN at path to new path."""
|
||||||
seg = path.split("/")
|
seg = path.split("/")
|
||||||
new_seg = new_path.split("/")
|
new_seg = new_path.split("/")
|
||||||
LOG.debug("Moving LUN %(name)s to %(new_name)s."
|
LOG.debug("Moving LUN %(name)s to %(new_name)s.",
|
||||||
% {'name': seg[-1], 'new_name': new_seg[-1]})
|
{'name': seg[-1], 'new_name': new_seg[-1]})
|
||||||
lun_move = netapp_api.NaElement("lun-move")
|
lun_move = netapp_api.NaElement("lun-move")
|
||||||
lun_move.add_new_child("path", path)
|
lun_move.add_new_child("path", path)
|
||||||
lun_move.add_new_child("new-path", new_path)
|
lun_move.add_new_child("new-path", new_path)
|
||||||
|
@ -337,6 +334,6 @@ class Client(object):
|
||||||
na_server.invoke_successfully(ems, True)
|
na_server.invoke_successfully(ems, True)
|
||||||
LOG.debug("ems executed successfully.")
|
LOG.debug("ems executed successfully.")
|
||||||
except netapp_api.NaApiError as e:
|
except netapp_api.NaApiError as e:
|
||||||
LOG.warning(_LW("Failed to invoke ems. Message : %s") % e)
|
LOG.warning(_LW("Failed to invoke ems. Message : %s"), e)
|
||||||
finally:
|
finally:
|
||||||
requester.last_ems = timeutils.utcnow()
|
requester.last_ems = timeutils.utcnow()
|
||||||
|
|
|
@ -98,7 +98,7 @@ class Client(client_base.Client):
|
||||||
attr_list = result.get_child_by_name('attributes-list')
|
attr_list = result.get_child_by_name('attributes-list')
|
||||||
iscsi_service = attr_list.get_child_by_name('iscsi-service-info')
|
iscsi_service = attr_list.get_child_by_name('iscsi-service-info')
|
||||||
return iscsi_service.get_child_content('node-name')
|
return iscsi_service.get_child_content('node-name')
|
||||||
LOG.debug('No iSCSI service found for vserver %s' % (self.vserver))
|
LOG.debug('No iSCSI service found for vserver %s', self.vserver)
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def get_lun_list(self):
|
def get_lun_list(self):
|
||||||
|
@ -360,11 +360,10 @@ class Client(client_base.Client):
|
||||||
def clone_file(self, flex_vol, src_path, dest_path, vserver,
|
def clone_file(self, flex_vol, src_path, dest_path, vserver,
|
||||||
dest_exists=False):
|
dest_exists=False):
|
||||||
"""Clones file on vserver."""
|
"""Clones file on vserver."""
|
||||||
msg = ("Cloning with params volume %(volume)s, src %(src_path)s,"
|
LOG.debug("Cloning with params volume %(volume)s, src %(src_path)s, "
|
||||||
"dest %(dest_path)s, vserver %(vserver)s")
|
"dest %(dest_path)s, vserver %(vserver)s",
|
||||||
msg_fmt = {'volume': flex_vol, 'src_path': src_path,
|
{'volume': flex_vol, 'src_path': src_path,
|
||||||
'dest_path': dest_path, 'vserver': vserver}
|
'dest_path': dest_path, 'vserver': vserver})
|
||||||
LOG.debug(msg % msg_fmt)
|
|
||||||
clone_create = netapp_api.NaElement.create_node_with_children(
|
clone_create = netapp_api.NaElement.create_node_with_children(
|
||||||
'clone-create',
|
'clone-create',
|
||||||
**{'volume': flex_vol, 'source-path': src_path,
|
**{'volume': flex_vol, 'source-path': src_path,
|
||||||
|
@ -381,8 +380,8 @@ class Client(client_base.Client):
|
||||||
'file-usage-get', **{'path': path})
|
'file-usage-get', **{'path': path})
|
||||||
res = self._invoke_vserver_api(file_use, vserver)
|
res = self._invoke_vserver_api(file_use, vserver)
|
||||||
unique_bytes = res.get_child_content('unique-bytes')
|
unique_bytes = res.get_child_content('unique-bytes')
|
||||||
LOG.debug('file-usage for path %(path)s is %(bytes)s'
|
LOG.debug('file-usage for path %(path)s is %(bytes)s',
|
||||||
% {'path': path, 'bytes': unique_bytes})
|
{'path': path, 'bytes': unique_bytes})
|
||||||
return unique_bytes
|
return unique_bytes
|
||||||
|
|
||||||
def get_vserver_ips(self, vserver):
|
def get_vserver_ips(self, vserver):
|
||||||
|
|
|
@ -22,7 +22,6 @@ Volume driver for NetApp NFS storage.
|
||||||
|
|
||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
from oslo_utils import units
|
from oslo_utils import units
|
||||||
import six
|
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _, _LE, _LI
|
from cinder.i18n import _, _LE, _LI
|
||||||
|
@ -74,7 +73,7 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
|
||||||
|
|
||||||
:param volume: volume reference
|
:param volume: volume reference
|
||||||
"""
|
"""
|
||||||
LOG.debug('create_volume on %s' % volume['host'])
|
LOG.debug('create_volume on %s', volume['host'])
|
||||||
self._ensure_shares_mounted()
|
self._ensure_shares_mounted()
|
||||||
|
|
||||||
# get share as pool name
|
# get share as pool name
|
||||||
|
@ -85,17 +84,17 @@ class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
|
||||||
raise exception.InvalidHost(reason=msg)
|
raise exception.InvalidHost(reason=msg)
|
||||||
|
|
||||||
volume['provider_location'] = share
|
volume['provider_location'] = share
|
||||||
LOG.info(_LI('Creating volume at location %s')
|
LOG.info(_LI('Creating volume at location %s'),
|
||||||
% volume['provider_location'])
|
volume['provider_location'])
|
||||||
|
|
||||||
try:
|
try:
|
||||||
self._do_create_volume(volume)
|
self._do_create_volume(volume)
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.error(_LE("Exception creating vol %(name)s on "
|
LOG.error(_LE("Exception creating vol %(name)s on "
|
||||||
"share %(share)s. Details: %(ex)s")
|
"share %(share)s. Details: %(ex)s"),
|
||||||
% {'name': volume['name'],
|
{'name': volume['name'],
|
||||||
'share': volume['provider_location'],
|
'share': volume['provider_location'],
|
||||||
'ex': six.text_type(ex)})
|
'ex': ex})
|
||||||
msg = _("Volume %s could not be created on shares.")
|
msg = _("Volume %s could not be created on shares.")
|
||||||
raise exception.VolumeBackendAPIException(
|
raise exception.VolumeBackendAPIException(
|
||||||
data=msg % (volume['name']))
|
data=msg % (volume['name']))
|
||||||
|
|
|
@ -241,8 +241,8 @@ class NetAppNfsDriver(nfs.NfsDriver):
|
||||||
volume['provider_location'], file_name)
|
volume['provider_location'], file_name)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.warning(_LW('Exception while registering image %(image_id)s'
|
LOG.warning(_LW('Exception while registering image %(image_id)s'
|
||||||
' in cache. Exception: %(exc)s')
|
' in cache. Exception: %(exc)s'),
|
||||||
% {'image_id': image_id, 'exc': e.__str__()})
|
{'image_id': image_id, 'exc': e})
|
||||||
|
|
||||||
def _find_image_in_cache(self, image_id):
|
def _find_image_in_cache(self, image_id):
|
||||||
"""Finds image in cache and returns list of shares with file name."""
|
"""Finds image in cache and returns list of shares with file name."""
|
||||||
|
@ -254,8 +254,8 @@ class NetAppNfsDriver(nfs.NfsDriver):
|
||||||
file_path = '%s/%s' % (dir, file_name)
|
file_path = '%s/%s' % (dir, file_name)
|
||||||
if os.path.exists(file_path):
|
if os.path.exists(file_path):
|
||||||
LOG.debug('Found cache file for image %(image_id)s'
|
LOG.debug('Found cache file for image %(image_id)s'
|
||||||
' on share %(share)s'
|
' on share %(share)s',
|
||||||
% {'image_id': image_id, 'share': share})
|
{'image_id': image_id, 'share': share})
|
||||||
result.append((share, file_name))
|
result.append((share, file_name))
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
@ -309,8 +309,8 @@ class NetAppNfsDriver(nfs.NfsDriver):
|
||||||
continue
|
continue
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.warning(_LW('Exception during cache cleaning'
|
LOG.warning(_LW('Exception during cache cleaning'
|
||||||
' %(share)s. Message - %(ex)s')
|
' %(share)s. Message - %(ex)s'),
|
||||||
% {'share': share, 'ex': e.__str__()})
|
{'share': share, 'ex': e})
|
||||||
continue
|
continue
|
||||||
finally:
|
finally:
|
||||||
LOG.debug('Image cache cleaning done.')
|
LOG.debug('Image cache cleaning done.')
|
||||||
|
@ -366,7 +366,7 @@ class NetAppNfsDriver(nfs.NfsDriver):
|
||||||
self._execute(*cmd, run_as_root=self._execute_as_root)
|
self._execute(*cmd, run_as_root=self._execute_as_root)
|
||||||
return True
|
return True
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.warning(_LW('Exception during deleting %s'), ex.__str__())
|
LOG.warning(_LW('Exception during deleting %s'), ex)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
def clone_image(self, context, volume,
|
def clone_image(self, context, volume,
|
||||||
|
@ -394,10 +394,10 @@ class NetAppNfsDriver(nfs.NfsDriver):
|
||||||
if cloned:
|
if cloned:
|
||||||
post_clone = self._post_clone_image(volume)
|
post_clone = self._post_clone_image(volume)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
msg = e.msg if getattr(e, 'msg', None) else e.__str__()
|
msg = e.msg if getattr(e, 'msg', None) else e
|
||||||
LOG.info(_LI('Image cloning unsuccessful for image'
|
LOG.info(_LI('Image cloning unsuccessful for image'
|
||||||
' %(image_id)s. Message: %(msg)s')
|
' %(image_id)s. Message: %(msg)s'),
|
||||||
% {'image_id': image_id, 'msg': msg})
|
{'image_id': image_id, 'msg': msg})
|
||||||
vol_path = self.local_path(volume)
|
vol_path = self.local_path(volume)
|
||||||
volume['provider_location'] = None
|
volume['provider_location'] = None
|
||||||
if os.path.exists(vol_path):
|
if os.path.exists(vol_path):
|
||||||
|
@ -645,8 +645,8 @@ class NetAppNfsDriver(nfs.NfsDriver):
|
||||||
try:
|
try:
|
||||||
return _move_file(source_path, dest_path)
|
return _move_file(source_path, dest_path)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.warning(_LW('Exception moving file %(src)s. Message - %(e)s')
|
LOG.warning(_LW('Exception moving file %(src)s. Message - %(e)s'),
|
||||||
% {'src': source_path, 'e': e})
|
{'src': source_path, 'e': e})
|
||||||
return False
|
return False
|
||||||
|
|
||||||
def _get_export_ip_path(self, volume_id=None, share=None):
|
def _get_export_ip_path(self, volume_id=None, share=None):
|
||||||
|
|
|
@ -81,7 +81,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
|
||||||
|
|
||||||
:param volume: volume reference
|
:param volume: volume reference
|
||||||
"""
|
"""
|
||||||
LOG.debug('create_volume on %s' % volume['host'])
|
LOG.debug('create_volume on %s', volume['host'])
|
||||||
self._ensure_shares_mounted()
|
self._ensure_shares_mounted()
|
||||||
|
|
||||||
# get share as pool name
|
# get share as pool name
|
||||||
|
@ -100,18 +100,18 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
|
||||||
|
|
||||||
try:
|
try:
|
||||||
volume['provider_location'] = share
|
volume['provider_location'] = share
|
||||||
LOG.info(_LI('casted to %s') % volume['provider_location'])
|
LOG.info(_LI('casted to %s'), volume['provider_location'])
|
||||||
self._do_create_volume(volume)
|
self._do_create_volume(volume)
|
||||||
if qos_policy_group:
|
if qos_policy_group:
|
||||||
self._set_qos_policy_group_on_volume(volume, share,
|
self._set_qos_policy_group_on_volume(volume, share,
|
||||||
qos_policy_group)
|
qos_policy_group)
|
||||||
return {'provider_location': volume['provider_location']}
|
return {'provider_location': volume['provider_location']}
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.error(_LW("Exception creating vol %(name)s on "
|
LOG.error(_LE("Exception creating vol %(name)s on "
|
||||||
"share %(share)s. Details: %(ex)s")
|
"share %(share)s. Details: %(ex)s"),
|
||||||
% {'name': volume['name'],
|
{'name': volume['name'],
|
||||||
'share': volume['provider_location'],
|
'share': volume['provider_location'],
|
||||||
'ex': ex})
|
'ex': ex})
|
||||||
volume['provider_location'] = None
|
volume['provider_location'] = None
|
||||||
finally:
|
finally:
|
||||||
if self.ssc_enabled:
|
if self.ssc_enabled:
|
||||||
|
@ -349,8 +349,8 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
|
||||||
def _is_share_vol_type_match(self, volume, share):
|
def _is_share_vol_type_match(self, volume, share):
|
||||||
"""Checks if share matches volume type."""
|
"""Checks if share matches volume type."""
|
||||||
netapp_vol = self._get_vol_for_share(share)
|
netapp_vol = self._get_vol_for_share(share)
|
||||||
LOG.debug("Found volume %(vol)s for share %(share)s."
|
LOG.debug("Found volume %(vol)s for share %(share)s.",
|
||||||
% {'vol': netapp_vol, 'share': share})
|
{'vol': netapp_vol, 'share': share})
|
||||||
extra_specs = na_utils.get_volume_extra_specs(volume)
|
extra_specs = na_utils.get_volume_extra_specs(volume)
|
||||||
vols = ssc_cmode.get_volumes_for_specs(self.ssc_vols, extra_specs)
|
vols = ssc_cmode.get_volumes_for_specs(self.ssc_vols, extra_specs)
|
||||||
return netapp_vol in vols
|
return netapp_vol in vols
|
||||||
|
@ -383,8 +383,8 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
|
||||||
self._try_copyoffload(context, volume, image_service, image_id)
|
self._try_copyoffload(context, volume, image_service, image_id)
|
||||||
copy_success = True
|
copy_success = True
|
||||||
LOG.info(_LI('Copied image %(img)s to volume %(vol)s using '
|
LOG.info(_LI('Copied image %(img)s to volume %(vol)s using '
|
||||||
'copy offload workflow.')
|
'copy offload workflow.'),
|
||||||
% {'img': image_id, 'vol': volume['id']})
|
{'img': image_id, 'vol': volume['id']})
|
||||||
else:
|
else:
|
||||||
LOG.debug("Copy offload either not configured or"
|
LOG.debug("Copy offload either not configured or"
|
||||||
" unsupported.")
|
" unsupported.")
|
||||||
|
@ -498,8 +498,8 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
|
||||||
else:
|
else:
|
||||||
self._clone_file_dst_exists(dst_share, img_file, tmp_img_file)
|
self._clone_file_dst_exists(dst_share, img_file, tmp_img_file)
|
||||||
self._discover_file_till_timeout(dst_img_local, timeout=120)
|
self._discover_file_till_timeout(dst_img_local, timeout=120)
|
||||||
LOG.debug('Copied image %(img)s to tmp file %(tmp)s.'
|
LOG.debug('Copied image %(img)s to tmp file %(tmp)s.',
|
||||||
% {'img': image_id, 'tmp': tmp_img_file})
|
{'img': image_id, 'tmp': tmp_img_file})
|
||||||
dst_img_cache_local = os.path.join(dst_dir,
|
dst_img_cache_local = os.path.join(dst_dir,
|
||||||
'img-cache-%s' % image_id)
|
'img-cache-%s' % image_id)
|
||||||
if img_info['disk_format'] == 'raw':
|
if img_info['disk_format'] == 'raw':
|
||||||
|
@ -507,8 +507,8 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
|
||||||
self._clone_file_dst_exists(dst_share, tmp_img_file,
|
self._clone_file_dst_exists(dst_share, tmp_img_file,
|
||||||
volume['name'], dest_exists=True)
|
volume['name'], dest_exists=True)
|
||||||
self._move_nfs_file(dst_img_local, dst_img_cache_local)
|
self._move_nfs_file(dst_img_local, dst_img_cache_local)
|
||||||
LOG.debug('Copied raw image %(img)s to volume %(vol)s.'
|
LOG.debug('Copied raw image %(img)s to volume %(vol)s.',
|
||||||
% {'img': image_id, 'vol': volume['id']})
|
{'img': image_id, 'vol': volume['id']})
|
||||||
else:
|
else:
|
||||||
LOG.debug('Image will be converted to raw %s.', image_id)
|
LOG.debug('Image will be converted to raw %s.', image_id)
|
||||||
img_conv = six.text_type(uuid.uuid4())
|
img_conv = six.text_type(uuid.uuid4())
|
||||||
|
@ -533,8 +533,8 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
|
||||||
self._move_nfs_file(dst_img_conv_local,
|
self._move_nfs_file(dst_img_conv_local,
|
||||||
dst_img_cache_local)
|
dst_img_cache_local)
|
||||||
LOG.debug('Copied locally converted raw image'
|
LOG.debug('Copied locally converted raw image'
|
||||||
' %(img)s to volume %(vol)s.'
|
' %(img)s to volume %(vol)s.',
|
||||||
% {'img': image_id, 'vol': volume['id']})
|
{'img': image_id, 'vol': volume['id']})
|
||||||
finally:
|
finally:
|
||||||
if os.path.exists(dst_img_conv_local):
|
if os.path.exists(dst_img_conv_local):
|
||||||
self._delete_file(dst_img_conv_local)
|
self._delete_file(dst_img_conv_local)
|
||||||
|
|
|
@ -242,7 +242,7 @@ def create_vol_list(vol_attrs):
|
||||||
vols.add(vol)
|
vols.add(vol)
|
||||||
except KeyError as e:
|
except KeyError as e:
|
||||||
LOG.debug('Unexpected error while creating'
|
LOG.debug('Unexpected error while creating'
|
||||||
' ssc vol list. Message - %s' % six.text_type(e))
|
' ssc vol list. Message - %s', e)
|
||||||
continue
|
continue
|
||||||
return vols
|
return vols
|
||||||
|
|
||||||
|
@ -422,8 +422,8 @@ def refresh_cluster_stale_ssc(*args, **kwargs):
|
||||||
def refresh_stale_ssc():
|
def refresh_stale_ssc():
|
||||||
stale_vols = backend._update_stale_vols(reset=True)
|
stale_vols = backend._update_stale_vols(reset=True)
|
||||||
LOG.info(_LI('Running stale ssc refresh job for %(server)s'
|
LOG.info(_LI('Running stale ssc refresh job for %(server)s'
|
||||||
' and vserver %(vs)s')
|
' and vserver %(vs)s'),
|
||||||
% {'server': na_server, 'vs': vserver})
|
{'server': na_server, 'vs': vserver})
|
||||||
# refreshing single volumes can create inconsistency
|
# refreshing single volumes can create inconsistency
|
||||||
# hence doing manipulations on copy
|
# hence doing manipulations on copy
|
||||||
ssc_vols_copy = copy.deepcopy(backend.ssc_vols)
|
ssc_vols_copy = copy.deepcopy(backend.ssc_vols)
|
||||||
|
@ -456,8 +456,8 @@ def refresh_cluster_stale_ssc(*args, **kwargs):
|
||||||
vol_set.discard(vol)
|
vol_set.discard(vol)
|
||||||
backend.refresh_ssc_vols(ssc_vols_copy)
|
backend.refresh_ssc_vols(ssc_vols_copy)
|
||||||
LOG.info(_LI('Successfully completed stale refresh job for'
|
LOG.info(_LI('Successfully completed stale refresh job for'
|
||||||
' %(server)s and vserver %(vs)s')
|
' %(server)s and vserver %(vs)s'),
|
||||||
% {'server': na_server, 'vs': vserver})
|
{'server': na_server, 'vs': vserver})
|
||||||
|
|
||||||
refresh_stale_ssc()
|
refresh_stale_ssc()
|
||||||
finally:
|
finally:
|
||||||
|
@ -483,14 +483,14 @@ def get_cluster_latest_ssc(*args, **kwargs):
|
||||||
@utils.synchronized(lock_pr)
|
@utils.synchronized(lock_pr)
|
||||||
def get_latest_ssc():
|
def get_latest_ssc():
|
||||||
LOG.info(_LI('Running cluster latest ssc job for %(server)s'
|
LOG.info(_LI('Running cluster latest ssc job for %(server)s'
|
||||||
' and vserver %(vs)s')
|
' and vserver %(vs)s'),
|
||||||
% {'server': na_server, 'vs': vserver})
|
{'server': na_server, 'vs': vserver})
|
||||||
ssc_vols = get_cluster_ssc(na_server, vserver)
|
ssc_vols = get_cluster_ssc(na_server, vserver)
|
||||||
backend.refresh_ssc_vols(ssc_vols)
|
backend.refresh_ssc_vols(ssc_vols)
|
||||||
backend.ssc_run_time = timeutils.utcnow()
|
backend.ssc_run_time = timeutils.utcnow()
|
||||||
LOG.info(_LI('Successfully completed ssc job for %(server)s'
|
LOG.info(_LI('Successfully completed ssc job for %(server)s'
|
||||||
' and vserver %(vs)s')
|
' and vserver %(vs)s'),
|
||||||
% {'server': na_server, 'vs': vserver})
|
{'server': na_server, 'vs': vserver})
|
||||||
|
|
||||||
get_latest_ssc()
|
get_latest_ssc()
|
||||||
finally:
|
finally:
|
||||||
|
|
|
@ -128,11 +128,11 @@ class RestClient(WebserviceClient):
|
||||||
if 'storedPassword' in scrubbed_data:
|
if 'storedPassword' in scrubbed_data:
|
||||||
scrubbed_data['storedPassword'] = "****"
|
scrubbed_data['storedPassword'] = "****"
|
||||||
|
|
||||||
params = {'m': method, 'p': path, 'd': scrubbed_data,
|
|
||||||
'sys': use_system, 't': timeout, 'v': verify, 'k': kwargs}
|
|
||||||
LOG.debug("Invoking rest with method: %(m)s, path: %(p)s,"
|
LOG.debug("Invoking rest with method: %(m)s, path: %(p)s,"
|
||||||
" data: %(d)s, use_system: %(sys)s, timeout: %(t)s,"
|
" data: %(d)s, use_system: %(sys)s, timeout: %(t)s,"
|
||||||
" verify: %(v)s, kwargs: %(k)s." % (params))
|
" verify: %(v)s, kwargs: %(k)s.",
|
||||||
|
{'m': method, 'p': path, 'd': scrubbed_data,
|
||||||
|
'sys': use_system, 't': timeout, 'v': verify, 'k': kwargs})
|
||||||
url = self._get_resource_url(path, use_system, **kwargs)
|
url = self._get_resource_url(path, use_system, **kwargs)
|
||||||
if self._content_type == 'json':
|
if self._content_type == 'json':
|
||||||
headers = {'Accept': 'application/json',
|
headers = {'Accept': 'application/json',
|
||||||
|
|
|
@ -33,8 +33,7 @@ LOG = logging.getLogger(__name__)
|
||||||
def map_volume_to_single_host(client, volume, eseries_vol, host,
|
def map_volume_to_single_host(client, volume, eseries_vol, host,
|
||||||
vol_map):
|
vol_map):
|
||||||
"""Maps the e-series volume to host with initiator."""
|
"""Maps the e-series volume to host with initiator."""
|
||||||
msg = "Attempting to map volume %s to single host."
|
LOG.debug("Attempting to map volume %s to single host." % volume['id'])
|
||||||
LOG.debug(msg % volume['id'])
|
|
||||||
|
|
||||||
# If volume is not mapped on the backend, map directly to host
|
# If volume is not mapped on the backend, map directly to host
|
||||||
if not vol_map:
|
if not vol_map:
|
||||||
|
@ -63,10 +62,9 @@ def map_volume_to_single_host(client, volume, eseries_vol, host,
|
||||||
# If volume is not currently attached according to Cinder, it is
|
# If volume is not currently attached according to Cinder, it is
|
||||||
# safe to delete the mapping
|
# safe to delete the mapping
|
||||||
if not (volume['attach_status'] == 'attached'):
|
if not (volume['attach_status'] == 'attached'):
|
||||||
msg = (_("Volume %(vol)s is not currently attached, "
|
LOG.debug("Volume %(vol)s is not currently attached, moving "
|
||||||
"moving existing mapping to host %(host)s.")
|
"existing mapping to host %(host)s.",
|
||||||
% {'vol': volume['id'], 'host': host['label']})
|
{'vol': volume['id'], 'host': host['label']})
|
||||||
LOG.debug(msg)
|
|
||||||
mappings = _get_vol_mapping_for_host_frm_array(
|
mappings = _get_vol_mapping_for_host_frm_array(
|
||||||
client, host['hostRef'])
|
client, host['hostRef'])
|
||||||
lun = _get_free_lun(client, host, mappings)
|
lun = _get_free_lun(client, host, mappings)
|
||||||
|
@ -86,8 +84,7 @@ def map_volume_to_multiple_hosts(client, volume, eseries_vol, target_host,
|
||||||
mapping):
|
mapping):
|
||||||
"""Maps the e-series volume to multiattach host group."""
|
"""Maps the e-series volume to multiattach host group."""
|
||||||
|
|
||||||
msg = "Attempting to map volume %s to multiple hosts."
|
LOG.debug("Attempting to map volume %s to multiple hosts." % volume['id'])
|
||||||
LOG.debug(msg % volume['id'])
|
|
||||||
|
|
||||||
# If volume is already mapped to desired host, return the mapping
|
# If volume is already mapped to desired host, return the mapping
|
||||||
if mapping['mapRef'] == target_host['hostRef']:
|
if mapping['mapRef'] == target_host['hostRef']:
|
||||||
|
@ -143,8 +140,8 @@ def map_volume_to_multiple_hosts(client, volume, eseries_vol, target_host,
|
||||||
# Once both existing and target hosts are in the multiattach host group,
|
# Once both existing and target hosts are in the multiattach host group,
|
||||||
# move the volume mapping to said group.
|
# move the volume mapping to said group.
|
||||||
if not mapped_host_group:
|
if not mapped_host_group:
|
||||||
msg = "Moving mapping for volume %s to multiattach host group."
|
LOG.debug("Moving mapping for volume %s to multiattach host group.",
|
||||||
LOG.debug(msg % volume['id'])
|
volume['id'])
|
||||||
return client.move_volume_mapping_via_symbol(
|
return client.move_volume_mapping_via_symbol(
|
||||||
mapping.get('lunMappingRef'),
|
mapping.get('lunMappingRef'),
|
||||||
multiattach_host_group['clusterRef'],
|
multiattach_host_group['clusterRef'],
|
||||||
|
@ -187,9 +184,9 @@ def _get_vol_mapping_for_host_group_frm_array(client, hg_ref):
|
||||||
def unmap_volume_from_host(client, volume, host, mapping):
|
def unmap_volume_from_host(client, volume, host, mapping):
|
||||||
# Volume is mapped directly to host, so delete the mapping
|
# Volume is mapped directly to host, so delete the mapping
|
||||||
if mapping.get('mapRef') == host['hostRef']:
|
if mapping.get('mapRef') == host['hostRef']:
|
||||||
msg = ("Volume %(vol)s is mapped directly to host %(host)s; removing "
|
LOG.debug("Volume %(vol)s is mapped directly to host %(host)s; "
|
||||||
"mapping.")
|
"removing mapping.", {'vol': volume['id'],
|
||||||
LOG.debug(msg % {'vol': volume['id'], 'host': host['label']})
|
'host': host['label']})
|
||||||
client.delete_volume_mapping(mapping['lunMappingRef'])
|
client.delete_volume_mapping(mapping['lunMappingRef'])
|
||||||
return
|
return
|
||||||
|
|
||||||
|
@ -212,9 +209,8 @@ def unmap_volume_from_host(client, volume, host, mapping):
|
||||||
# Remove mapping if volume should no longer be attached after this
|
# Remove mapping if volume should no longer be attached after this
|
||||||
# operation.
|
# operation.
|
||||||
if volume['status'] == 'detaching':
|
if volume['status'] == 'detaching':
|
||||||
msg = ("Volume %s is mapped directly to multiattach host group "
|
LOG.debug("Volume %s is mapped directly to multiattach host group but "
|
||||||
"but is not currently attached; removing mapping.")
|
"is not currently attached; removing mapping.", volume['id'])
|
||||||
LOG.debug(msg % volume['id'])
|
|
||||||
client.delete_volume_mapping(mapping['lunMappingRef'])
|
client.delete_volume_mapping(mapping['lunMappingRef'])
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -162,26 +162,25 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
|
||||||
|
|
||||||
def _check_multipath(self):
|
def _check_multipath(self):
|
||||||
if not self.configuration.use_multipath_for_image_xfer:
|
if not self.configuration.use_multipath_for_image_xfer:
|
||||||
msg = _LW('Production use of "%(backend)s" backend requires the '
|
LOG.warning(_LW('Production use of "%(backend)s" backend requires '
|
||||||
'Cinder controller to have multipathing properly set up '
|
'the Cinder controller to have multipathing '
|
||||||
'and the configuration option "%(mpflag)s" to be set to '
|
'properly set up and the configuration option '
|
||||||
'"True".') % {'backend': self._backend_name,
|
'"%(mpflag)s" to be set to "True".'),
|
||||||
'mpflag': 'use_multipath_for_image_xfer'}
|
{'backend': self._backend_name,
|
||||||
LOG.warning(msg)
|
'mpflag': 'use_multipath_for_image_xfer'})
|
||||||
|
|
||||||
def _ensure_multi_attach_host_group_exists(self):
|
def _ensure_multi_attach_host_group_exists(self):
|
||||||
try:
|
try:
|
||||||
host_group = self._client.get_host_group_by_name(
|
host_group = self._client.get_host_group_by_name(
|
||||||
utils.MULTI_ATTACH_HOST_GROUP_NAME)
|
utils.MULTI_ATTACH_HOST_GROUP_NAME)
|
||||||
msg = _LI("The multi-attach E-Series host group '%(label)s' "
|
LOG.info(_LI("The multi-attach E-Series host group '%(label)s' "
|
||||||
"already exists with clusterRef %(clusterRef)s")
|
"already exists with clusterRef %(clusterRef)s"),
|
||||||
LOG.info(msg % host_group)
|
host_group)
|
||||||
except exception.NotFound:
|
except exception.NotFound:
|
||||||
host_group = self._client.create_host_group(
|
host_group = self._client.create_host_group(
|
||||||
utils.MULTI_ATTACH_HOST_GROUP_NAME)
|
utils.MULTI_ATTACH_HOST_GROUP_NAME)
|
||||||
msg = _LI("Created multi-attach E-Series host group '%(label)s' "
|
LOG.info(_LI("Created multi-attach E-Series host group %(label)s "
|
||||||
"with clusterRef %(clusterRef)s")
|
"with clusterRef %(clusterRef)s"), host_group)
|
||||||
LOG.info(msg % host_group)
|
|
||||||
|
|
||||||
def _check_mode_get_or_register_storage_system(self):
|
def _check_mode_get_or_register_storage_system(self):
|
||||||
"""Does validity checks for storage system registry and health."""
|
"""Does validity checks for storage system registry and health."""
|
||||||
|
@ -190,11 +189,11 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
|
||||||
ip = na_utils.resolve_hostname(host)
|
ip = na_utils.resolve_hostname(host)
|
||||||
return ip
|
return ip
|
||||||
except socket.gaierror as e:
|
except socket.gaierror as e:
|
||||||
LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.')
|
LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.'),
|
||||||
% {'host': host, 'e': e})
|
{'host': host, 'e': e})
|
||||||
raise exception.NoValidHost(
|
raise exception.NoValidHost(
|
||||||
_("Controller IP '%(host)s' could not be resolved: %(e)s.")
|
_("Controller IP '%(host)s' could not be resolved: %(e)s.")
|
||||||
% {'host': host, 'e': e})
|
% {'host': host, 'e': six.text_type(e)})
|
||||||
|
|
||||||
ips = self.configuration.netapp_controller_ips
|
ips = self.configuration.netapp_controller_ips
|
||||||
ips = [i.strip() for i in ips.split(",")]
|
ips = [i.strip() for i in ips.split(",")]
|
||||||
|
@ -216,9 +215,9 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
|
||||||
system = self._client.list_storage_system()
|
system = self._client.list_storage_system()
|
||||||
except exception.NetAppDriverException:
|
except exception.NetAppDriverException:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
msg = _LI("System with controller addresses [%s] is not"
|
LOG.info(_LI("System with controller addresses [%s] is not "
|
||||||
" registered with web service.")
|
"registered with web service."),
|
||||||
LOG.info(msg % self.configuration.netapp_controller_ips)
|
self.configuration.netapp_controller_ips)
|
||||||
password_not_in_sync = False
|
password_not_in_sync = False
|
||||||
if system.get('status', '').lower() == 'passwordoutofsync':
|
if system.get('status', '').lower() == 'passwordoutofsync':
|
||||||
password_not_in_sync = True
|
password_not_in_sync = True
|
||||||
|
@ -248,9 +247,10 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
|
||||||
msg_dict = {'id': system.get('id'), 'status': status}
|
msg_dict = {'id': system.get('id'), 'status': status}
|
||||||
if (status == 'passwordoutofsync' or status == 'notsupported' or
|
if (status == 'passwordoutofsync' or status == 'notsupported' or
|
||||||
status == 'offline'):
|
status == 'offline'):
|
||||||
msg = _("System %(id)s found with bad status - %(status)s.")
|
raise exception.NetAppDriverException(
|
||||||
raise exception.NetAppDriverException(msg % msg_dict)
|
_("System %(id)s found with bad status - "
|
||||||
LOG.info(_LI("System %(id)s has %(status)s status.") % msg_dict)
|
"%(status)s.") % msg_dict)
|
||||||
|
LOG.info(_LI("System %(id)s has %(status)s status."), msg_dict)
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def _populate_system_objects(self):
|
def _populate_system_objects(self):
|
||||||
|
@ -384,7 +384,7 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
|
||||||
def create_volume(self, volume):
|
def create_volume(self, volume):
|
||||||
"""Creates a volume."""
|
"""Creates a volume."""
|
||||||
|
|
||||||
LOG.debug('create_volume on %s' % volume['host'])
|
LOG.debug('create_volume on %s', volume['host'])
|
||||||
|
|
||||||
# get E-series pool label as pool name
|
# get E-series pool label as pool name
|
||||||
eseries_pool_label = volume_utils.extract_host(volume['host'],
|
eseries_pool_label = volume_utils.extract_host(volume['host'],
|
||||||
|
@ -436,8 +436,7 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
|
||||||
"label %s."), eseries_volume_label)
|
"label %s."), eseries_volume_label)
|
||||||
except exception.NetAppDriverException as e:
|
except exception.NetAppDriverException as e:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE("Error creating volume. Msg - %s."),
|
LOG.error(_LE("Error creating volume. Msg - %s."), e)
|
||||||
six.text_type(e))
|
|
||||||
|
|
||||||
return vol
|
return vol
|
||||||
|
|
||||||
|
@ -492,8 +491,8 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
|
||||||
|
|
||||||
def _copy_volume_high_prior_readonly(self, src_vol, dst_vol):
|
def _copy_volume_high_prior_readonly(self, src_vol, dst_vol):
|
||||||
"""Copies src volume to dest volume."""
|
"""Copies src volume to dest volume."""
|
||||||
LOG.info(_LI("Copying src vol %(src)s to dest vol %(dst)s.")
|
LOG.info(_LI("Copying src vol %(src)s to dest vol %(dst)s."),
|
||||||
% {'src': src_vol['label'], 'dst': dst_vol['label']})
|
{'src': src_vol['label'], 'dst': dst_vol['label']})
|
||||||
try:
|
try:
|
||||||
job = None
|
job = None
|
||||||
job = self._client.create_volume_copy_job(src_vol['id'],
|
job = self._client.create_volume_copy_job(src_vol['id'],
|
||||||
|
@ -504,13 +503,13 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
|
||||||
'pending' or j_st['status'] == 'unknown'):
|
'pending' or j_st['status'] == 'unknown'):
|
||||||
time.sleep(self.SLEEP_SECS)
|
time.sleep(self.SLEEP_SECS)
|
||||||
continue
|
continue
|
||||||
if (j_st['status'] == 'failed' or j_st['status'] == 'halted'):
|
if j_st['status'] == 'failed' or j_st['status'] == 'halted':
|
||||||
LOG.error(_LE("Vol copy job status %s."), j_st['status'])
|
LOG.error(_LE("Vol copy job status %s."), j_st['status'])
|
||||||
msg = _("Vol copy job for dest %s failed.")\
|
raise exception.NetAppDriverException(
|
||||||
% dst_vol['label']
|
_("Vol copy job for dest %s failed.") %
|
||||||
raise exception.NetAppDriverException(msg)
|
dst_vol['label'])
|
||||||
LOG.info(_LI("Vol copy job completed for dest %s.")
|
LOG.info(_LI("Vol copy job completed for dest %s."),
|
||||||
% dst_vol['label'])
|
dst_vol['label'])
|
||||||
break
|
break
|
||||||
finally:
|
finally:
|
||||||
if job:
|
if job:
|
||||||
|
@ -579,7 +578,7 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
|
||||||
try:
|
try:
|
||||||
snap_grp = self._get_cached_snapshot_grp(snapshot['id'])
|
snap_grp = self._get_cached_snapshot_grp(snapshot['id'])
|
||||||
except KeyError:
|
except KeyError:
|
||||||
LOG.warning(_LW("Snapshot %s already deleted.") % snapshot['id'])
|
LOG.warning(_LW("Snapshot %s already deleted."), snapshot['id'])
|
||||||
return
|
return
|
||||||
self._client.delete_snapshot_group(snap_grp['pitGroupRef'])
|
self._client.delete_snapshot_group(snap_grp['pitGroupRef'])
|
||||||
snapshot_name = snap_grp['label']
|
snapshot_name = snap_grp['label']
|
||||||
|
@ -622,16 +621,15 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
|
||||||
current_map)
|
current_map)
|
||||||
|
|
||||||
lun_id = mapping['lun']
|
lun_id = mapping['lun']
|
||||||
msg = _("Mapped volume %(id)s to the initiator %(initiator_name)s.")
|
|
||||||
msg_fmt = {'id': volume['id'], 'initiator_name': initiator_name}
|
msg_fmt = {'id': volume['id'], 'initiator_name': initiator_name}
|
||||||
LOG.debug(msg % msg_fmt)
|
LOG.debug("Mapped volume %(id)s to the initiator %(initiator_name)s.",
|
||||||
|
msg_fmt)
|
||||||
|
|
||||||
iscsi_details = self._get_iscsi_service_details()
|
iscsi_details = self._get_iscsi_service_details()
|
||||||
iscsi_portal = self._get_iscsi_portal_for_vol(eseries_vol,
|
iscsi_portal = self._get_iscsi_portal_for_vol(eseries_vol,
|
||||||
iscsi_details)
|
iscsi_details)
|
||||||
msg = _("Successfully fetched target details for volume %(id)s and "
|
LOG.debug("Successfully fetched target details for volume %(id)s and "
|
||||||
"initiator %(initiator_name)s.")
|
"initiator %(initiator_name)s.", msg_fmt)
|
||||||
LOG.debug(msg % msg_fmt)
|
|
||||||
iqn = iscsi_portal['iqn']
|
iqn = iscsi_portal['iqn']
|
||||||
address = iscsi_portal['ip']
|
address = iscsi_portal['ip']
|
||||||
port = iscsi_portal['tcp_port']
|
port = iscsi_portal['tcp_port']
|
||||||
|
@ -688,9 +686,9 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
|
||||||
host = self._client.update_host_type(
|
host = self._client.update_host_type(
|
||||||
host['hostRef'], ht_def)
|
host['hostRef'], ht_def)
|
||||||
except exception.NetAppDriverException as e:
|
except exception.NetAppDriverException as e:
|
||||||
msg = _LW("Unable to update host type for host with "
|
LOG.warning(_LW("Unable to update host type for host with "
|
||||||
"label %(l)s. %(e)s")
|
"label %(l)s. %(e)s"),
|
||||||
LOG.warning(msg % {'l': host['label'], 'e': e.msg})
|
{'l': host['label'], 'e': e.msg})
|
||||||
return host
|
return host
|
||||||
except exception.NotFound as e:
|
except exception.NotFound as e:
|
||||||
LOG.warning(_LW("Message - %s."), e.msg)
|
LOG.warning(_LW("Message - %s."), e.msg)
|
||||||
|
@ -791,7 +789,7 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
|
||||||
{<volume_group_ref> : {<ssc_key>: <ssc_value>}}
|
{<volume_group_ref> : {<ssc_key>: <ssc_value>}}
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI("Updating storage service catalog information for "
|
LOG.info(_LI("Updating storage service catalog information for "
|
||||||
"backend '%s'") % self._backend_name)
|
"backend '%s'"), self._backend_name)
|
||||||
self._ssc_stats = \
|
self._ssc_stats = \
|
||||||
self._update_ssc_disk_encryption(self._objects["disk_pool_refs"])
|
self._update_ssc_disk_encryption(self._objects["disk_pool_refs"])
|
||||||
self._ssc_stats = \
|
self._ssc_stats = \
|
||||||
|
@ -852,8 +850,8 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
|
||||||
(int(x.get('totalRaidedSpace', 0)) -
|
(int(x.get('totalRaidedSpace', 0)) -
|
||||||
int(x.get('usedSpace', 0) >= size))]
|
int(x.get('usedSpace', 0) >= size))]
|
||||||
if not avl_pools:
|
if not avl_pools:
|
||||||
msg = _LW("No storage pool found with available capacity %s.")
|
LOG.warning(_LW("No storage pool found with available capacity "
|
||||||
LOG.warning(msg % size_gb)
|
"%s."), size_gb)
|
||||||
return avl_pools
|
return avl_pools
|
||||||
|
|
||||||
def extend_volume(self, volume, new_size):
|
def extend_volume(self, volume, new_size):
|
||||||
|
|
|
@ -128,15 +128,14 @@ def round_down(value, precision):
|
||||||
def log_extra_spec_warnings(extra_specs):
|
def log_extra_spec_warnings(extra_specs):
|
||||||
for spec in (set(extra_specs.keys() if extra_specs else []) &
|
for spec in (set(extra_specs.keys() if extra_specs else []) &
|
||||||
set(OBSOLETE_SSC_SPECS.keys())):
|
set(OBSOLETE_SSC_SPECS.keys())):
|
||||||
msg = _LW('Extra spec %(old)s is obsolete. Use %(new)s instead.')
|
LOG.warning(_LW('Extra spec %(old)s is obsolete. Use %(new)s '
|
||||||
args = {'old': spec, 'new': OBSOLETE_SSC_SPECS[spec]}
|
'instead.'), {'old': spec,
|
||||||
LOG.warning(msg % args)
|
'new': OBSOLETE_SSC_SPECS[spec]})
|
||||||
for spec in (set(extra_specs.keys() if extra_specs else []) &
|
for spec in (set(extra_specs.keys() if extra_specs else []) &
|
||||||
set(DEPRECATED_SSC_SPECS.keys())):
|
set(DEPRECATED_SSC_SPECS.keys())):
|
||||||
msg = _LW('Extra spec %(old)s is deprecated. Use %(new)s '
|
LOG.warning(_LW('Extra spec %(old)s is deprecated. Use %(new)s '
|
||||||
'instead.')
|
'instead.'), {'old': spec,
|
||||||
args = {'old': spec, 'new': DEPRECATED_SSC_SPECS[spec]}
|
'new': DEPRECATED_SSC_SPECS[spec]})
|
||||||
LOG.warning(msg % args)
|
|
||||||
|
|
||||||
|
|
||||||
def get_iscsi_connection_properties(lun_id, volume, iqn,
|
def get_iscsi_connection_properties(lun_id, volume, iqn,
|
||||||
|
@ -228,7 +227,7 @@ class OpenStackInfo(object):
|
||||||
"'%{version}\t%{release}\t%{vendor}'",
|
"'%{version}\t%{release}\t%{vendor}'",
|
||||||
self.PACKAGE_NAME)
|
self.PACKAGE_NAME)
|
||||||
if not out:
|
if not out:
|
||||||
LOG.info(_LI('No rpm info found for %(pkg)s package.') % {
|
LOG.info(_LI('No rpm info found for %(pkg)s package.'), {
|
||||||
'pkg': self.PACKAGE_NAME})
|
'pkg': self.PACKAGE_NAME})
|
||||||
return False
|
return False
|
||||||
parts = out.split()
|
parts = out.split()
|
||||||
|
@ -237,7 +236,7 @@ class OpenStackInfo(object):
|
||||||
self._vendor = ' '.join(parts[2::])
|
self._vendor = ' '.join(parts[2::])
|
||||||
return True
|
return True
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.info(_LI('Could not run rpm command: %(msg)s.') % {'msg': e})
|
LOG.info(_LI('Could not run rpm command: %(msg)s.'), {'msg': e})
|
||||||
return False
|
return False
|
||||||
|
|
||||||
# ubuntu, mirantis on ubuntu
|
# ubuntu, mirantis on ubuntu
|
||||||
|
@ -248,8 +247,8 @@ class OpenStackInfo(object):
|
||||||
out, err = putils.execute("dpkg-query", "-W", "-f='${Version}'",
|
out, err = putils.execute("dpkg-query", "-W", "-f='${Version}'",
|
||||||
self.PACKAGE_NAME)
|
self.PACKAGE_NAME)
|
||||||
if not out:
|
if not out:
|
||||||
LOG.info(_LI('No dpkg-query info found for %(pkg)s package.')
|
LOG.info(_LI('No dpkg-query info found for %(pkg)s package.'),
|
||||||
% {'pkg': self.PACKAGE_NAME})
|
{'pkg': self.PACKAGE_NAME})
|
||||||
return False
|
return False
|
||||||
# debian format: [epoch:]upstream_version[-debian_revision]
|
# debian format: [epoch:]upstream_version[-debian_revision]
|
||||||
deb_version = out
|
deb_version = out
|
||||||
|
@ -266,7 +265,7 @@ class OpenStackInfo(object):
|
||||||
self._vendor = _vendor
|
self._vendor = _vendor
|
||||||
return True
|
return True
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.info(_LI('Could not run dpkg-query command: %(msg)s.') % {
|
LOG.info(_LI('Could not run dpkg-query command: %(msg)s.'), {
|
||||||
'msg': e})
|
'msg': e})
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
|
@ -121,12 +121,12 @@ class NfsDriver(remotefs.RemoteFSDriver):
|
||||||
if not config:
|
if not config:
|
||||||
msg = (_("There's no NFS config file configured (%s)") %
|
msg = (_("There's no NFS config file configured (%s)") %
|
||||||
'nfs_shares_config')
|
'nfs_shares_config')
|
||||||
LOG.warn(msg)
|
LOG.warning(msg)
|
||||||
raise exception.NfsException(msg)
|
raise exception.NfsException(msg)
|
||||||
if not os.path.exists(config):
|
if not os.path.exists(config):
|
||||||
msg = (_("NFS config file at %(config)s doesn't exist") %
|
msg = (_("NFS config file at %(config)s doesn't exist") %
|
||||||
{'config': config})
|
{'config': config})
|
||||||
LOG.warn(msg)
|
LOG.warning(msg)
|
||||||
raise exception.NfsException(msg)
|
raise exception.NfsException(msg)
|
||||||
if not self.configuration.nfs_oversub_ratio > 0:
|
if not self.configuration.nfs_oversub_ratio > 0:
|
||||||
msg = _("NFS config 'nfs_oversub_ratio' invalid. Must be > 0: "
|
msg = _("NFS config 'nfs_oversub_ratio' invalid. Must be > 0: "
|
||||||
|
@ -172,12 +172,13 @@ class NfsDriver(remotefs.RemoteFSDriver):
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
if attempt == (num_attempts - 1):
|
if attempt == (num_attempts - 1):
|
||||||
LOG.error(_LE('Mount failure for %(share)s after '
|
LOG.error(_LE('Mount failure for %(share)s after '
|
||||||
'%(count)d attempts.') % {
|
'%(count)d attempts.'), {
|
||||||
'share': nfs_share,
|
'share': nfs_share,
|
||||||
'count': num_attempts})
|
'count': num_attempts})
|
||||||
raise exception.NfsException(e)
|
raise exception.NfsException(six.text_type(e))
|
||||||
LOG.debug('Mount attempt %d failed: %s.\nRetrying mount ...' %
|
LOG.debug('Mount attempt %(attempt)d failed: %(exc)s.\n'
|
||||||
(attempt, six.text_type(e)))
|
'Retrying mount ...',
|
||||||
|
{'attempt': attempt, 'exc': e})
|
||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
|
|
||||||
def _find_share(self, volume_size_in_gib):
|
def _find_share(self, volume_size_in_gib):
|
||||||
|
@ -332,16 +333,16 @@ class NfsDriver(remotefs.RemoteFSDriver):
|
||||||
self.configuration.nas_secure_file_permissions,
|
self.configuration.nas_secure_file_permissions,
|
||||||
nfs_mount, is_new_cinder_install)
|
nfs_mount, is_new_cinder_install)
|
||||||
|
|
||||||
LOG.debug('NAS variable secure_file_permissions setting is: %s' %
|
LOG.debug('NAS variable secure_file_permissions setting is: %s',
|
||||||
self.configuration.nas_secure_file_permissions)
|
self.configuration.nas_secure_file_permissions)
|
||||||
|
|
||||||
if self.configuration.nas_secure_file_permissions == 'false':
|
if self.configuration.nas_secure_file_permissions == 'false':
|
||||||
LOG.warn(_LW("The NAS file permissions mode will be 666 (allowing "
|
LOG.warning(_LW("The NAS file permissions mode will be 666 "
|
||||||
"other/world read & write access). "
|
"(allowing other/world read & write access). "
|
||||||
"This is considered an insecure NAS environment. "
|
"This is considered an insecure NAS environment. "
|
||||||
"Please see %s for information on a secure "
|
"Please see %s for information on a secure "
|
||||||
"NFS configuration.") %
|
"NFS configuration."),
|
||||||
doc_html)
|
doc_html)
|
||||||
|
|
||||||
self.configuration.nas_secure_file_operations = \
|
self.configuration.nas_secure_file_operations = \
|
||||||
self._determine_nas_security_option_setting(
|
self._determine_nas_security_option_setting(
|
||||||
|
@ -353,13 +354,13 @@ class NfsDriver(remotefs.RemoteFSDriver):
|
||||||
if self.configuration.nas_secure_file_operations == 'true':
|
if self.configuration.nas_secure_file_operations == 'true':
|
||||||
self._execute_as_root = False
|
self._execute_as_root = False
|
||||||
|
|
||||||
LOG.debug('NAS variable secure_file_operations setting is: %s' %
|
LOG.debug('NAS variable secure_file_operations setting is: %s',
|
||||||
self.configuration.nas_secure_file_operations)
|
self.configuration.nas_secure_file_operations)
|
||||||
|
|
||||||
if self.configuration.nas_secure_file_operations == 'false':
|
if self.configuration.nas_secure_file_operations == 'false':
|
||||||
LOG.warn(_LW("The NAS file operations will be run as "
|
LOG.warning(_LW("The NAS file operations will be run as "
|
||||||
"root: allowing root level access at the storage "
|
"root: allowing root level access at the storage "
|
||||||
"backend. This is considered an insecure NAS "
|
"backend. This is considered an insecure NAS "
|
||||||
"environment. Please see %s "
|
"environment. Please see %s "
|
||||||
"for information on a secure NAS configuration.") %
|
"for information on a secure NAS configuration."),
|
||||||
doc_html)
|
doc_html)
|
||||||
|
|
|
@ -94,36 +94,36 @@ class NimbleISCSIDriver(san.SanISCSIDriver):
|
||||||
def _get_discovery_ip(self, netconfig):
|
def _get_discovery_ip(self, netconfig):
|
||||||
"""Get discovery ip."""
|
"""Get discovery ip."""
|
||||||
subnet_label = self.configuration.nimble_subnet_label
|
subnet_label = self.configuration.nimble_subnet_label
|
||||||
LOG.debug('subnet_label used %(netlabel)s, netconfig %(netconf)s'
|
LOG.debug('subnet_label used %(netlabel)s, netconfig %(netconf)s',
|
||||||
% {'netlabel': subnet_label, 'netconf': netconfig})
|
{'netlabel': subnet_label, 'netconf': netconfig})
|
||||||
ret_discovery_ip = ''
|
ret_discovery_ip = ''
|
||||||
for subnet in netconfig['subnet-list']:
|
for subnet in netconfig['subnet-list']:
|
||||||
LOG.info(_LI('Exploring array subnet label %s') % subnet['label'])
|
LOG.info(_LI('Exploring array subnet label %s'), subnet['label'])
|
||||||
if subnet_label == '*':
|
if subnet_label == '*':
|
||||||
# Use the first data subnet, save mgmt+data for later
|
# Use the first data subnet, save mgmt+data for later
|
||||||
if (subnet['subnet-id']['type'] == SM_SUBNET_DATA):
|
if subnet['subnet-id']['type'] == SM_SUBNET_DATA:
|
||||||
LOG.info(_LI('Discovery ip %(disc_ip)s is used '
|
LOG.info(_LI('Discovery ip %(disc_ip)s is used '
|
||||||
'on data subnet %(net_label)s')
|
'on data subnet %(net_label)s'),
|
||||||
% {'disc_ip': subnet['discovery-ip'],
|
{'disc_ip': subnet['discovery-ip'],
|
||||||
'net_label': subnet['label']})
|
'net_label': subnet['label']})
|
||||||
return subnet['discovery-ip']
|
return subnet['discovery-ip']
|
||||||
elif (subnet['subnet-id']['type'] ==
|
elif (subnet['subnet-id']['type'] ==
|
||||||
SM_SUBNET_MGMT_PLUS_DATA):
|
SM_SUBNET_MGMT_PLUS_DATA):
|
||||||
LOG.info(_LI('Discovery ip %(disc_ip)s is found'
|
LOG.info(_LI('Discovery ip %(disc_ip)s is found'
|
||||||
' on mgmt+data subnet %(net_label)s')
|
' on mgmt+data subnet %(net_label)s'),
|
||||||
% {'disc_ip': subnet['discovery-ip'],
|
{'disc_ip': subnet['discovery-ip'],
|
||||||
'net_label': subnet['label']})
|
'net_label': subnet['label']})
|
||||||
ret_discovery_ip = subnet['discovery-ip']
|
ret_discovery_ip = subnet['discovery-ip']
|
||||||
# If subnet is specified and found, use the subnet
|
# If subnet is specified and found, use the subnet
|
||||||
elif subnet_label == subnet['label']:
|
elif subnet_label == subnet['label']:
|
||||||
LOG.info(_LI('Discovery ip %(disc_ip)s is used'
|
LOG.info(_LI('Discovery ip %(disc_ip)s is used'
|
||||||
' on subnet %(net_label)s')
|
' on subnet %(net_label)s'),
|
||||||
% {'disc_ip': subnet['discovery-ip'],
|
{'disc_ip': subnet['discovery-ip'],
|
||||||
'net_label': subnet['label']})
|
'net_label': subnet['label']})
|
||||||
return subnet['discovery-ip']
|
return subnet['discovery-ip']
|
||||||
if ret_discovery_ip:
|
if ret_discovery_ip:
|
||||||
LOG.info(_LI('Discovery ip %s is used on mgmt+data subnet')
|
LOG.info(_LI('Discovery ip %s is used on mgmt+data subnet'),
|
||||||
% ret_discovery_ip)
|
ret_discovery_ip)
|
||||||
return ret_discovery_ip
|
return ret_discovery_ip
|
||||||
else:
|
else:
|
||||||
raise NimbleDriverException(_('No suitable discovery ip found'))
|
raise NimbleDriverException(_('No suitable discovery ip found'))
|
||||||
|
@ -151,8 +151,8 @@ class NimbleISCSIDriver(san.SanISCSIDriver):
|
||||||
target_ipaddr = self._get_discovery_ip(netconfig)
|
target_ipaddr = self._get_discovery_ip(netconfig)
|
||||||
iscsi_portal = target_ipaddr + ':3260'
|
iscsi_portal = target_ipaddr + ':3260'
|
||||||
provider_location = '%s %s %s' % (iscsi_portal, iqn, LUN_ID)
|
provider_location = '%s %s %s' % (iscsi_portal, iqn, LUN_ID)
|
||||||
LOG.info(_LI('vol_name=%(name)s provider_location=%(loc)s')
|
LOG.info(_LI('vol_name=%(name)s provider_location=%(loc)s'),
|
||||||
% {'name': volume_name, 'loc': provider_location})
|
{'name': volume_name, 'loc': provider_location})
|
||||||
return provider_location
|
return provider_location
|
||||||
|
|
||||||
def _get_model_info(self, volume_name):
|
def _get_model_info(self, volume_name):
|
||||||
|
@ -258,10 +258,10 @@ class NimbleISCSIDriver(san.SanISCSIDriver):
|
||||||
float(units.Gi))
|
float(units.Gi))
|
||||||
free_space = total_capacity - used_space
|
free_space = total_capacity - used_space
|
||||||
LOG.debug('total_capacity=%(capacity)f '
|
LOG.debug('total_capacity=%(capacity)f '
|
||||||
'used_space=%(used)f free_space=%(free)f'
|
'used_space=%(used)f free_space=%(free)f',
|
||||||
% {'capacity': total_capacity,
|
{'capacity': total_capacity,
|
||||||
'used': used_space,
|
'used': used_space,
|
||||||
'free': free_space})
|
'free': free_space})
|
||||||
backend_name = self.configuration.safe_get(
|
backend_name = self.configuration.safe_get(
|
||||||
'volume_backend_name') or self.__class__.__name__
|
'volume_backend_name') or self.__class__.__name__
|
||||||
self.group_stats = {'volume_backend_name': backend_name,
|
self.group_stats = {'volume_backend_name': backend_name,
|
||||||
|
@ -277,8 +277,9 @@ class NimbleISCSIDriver(san.SanISCSIDriver):
|
||||||
def extend_volume(self, volume, new_size):
|
def extend_volume(self, volume, new_size):
|
||||||
"""Extend an existing volume."""
|
"""Extend an existing volume."""
|
||||||
volume_name = volume['name']
|
volume_name = volume['name']
|
||||||
LOG.info(_LI('Entering extend_volume volume=%(vol)s new_size=%(size)s')
|
LOG.info(_LI('Entering extend_volume volume=%(vol)s '
|
||||||
% {'vol': volume_name, 'size': new_size})
|
'new_size=%(size)s'),
|
||||||
|
{'vol': volume_name, 'size': new_size})
|
||||||
vol_size = int(new_size) * units.Gi
|
vol_size = int(new_size) * units.Gi
|
||||||
reserve = not self.configuration.san_thin_provision
|
reserve = not self.configuration.san_thin_provision
|
||||||
reserve_size = vol_size if reserve else 0
|
reserve_size = vol_size if reserve else 0
|
||||||
|
@ -295,8 +296,8 @@ class NimbleISCSIDriver(san.SanISCSIDriver):
|
||||||
"""Creates igroup for an initiator and returns the igroup name."""
|
"""Creates igroup for an initiator and returns the igroup name."""
|
||||||
igrp_name = 'openstack-' + self._generate_random_string(12)
|
igrp_name = 'openstack-' + self._generate_random_string(12)
|
||||||
LOG.info(_LI('Creating initiator group %(grp)s '
|
LOG.info(_LI('Creating initiator group %(grp)s '
|
||||||
'with initiator %(iname)s')
|
'with initiator %(iname)s'),
|
||||||
% {'grp': igrp_name, 'iname': initiator_name})
|
{'grp': igrp_name, 'iname': initiator_name})
|
||||||
self.APIExecutor.create_initiator_group(igrp_name, initiator_name)
|
self.APIExecutor.create_initiator_group(igrp_name, initiator_name)
|
||||||
return igrp_name
|
return igrp_name
|
||||||
|
|
||||||
|
@ -308,28 +309,29 @@ class NimbleISCSIDriver(san.SanISCSIDriver):
|
||||||
initiator_group['initiator-list'][0]['name'] ==
|
initiator_group['initiator-list'][0]['name'] ==
|
||||||
initiator_name):
|
initiator_name):
|
||||||
LOG.info(_LI('igroup %(grp)s found for '
|
LOG.info(_LI('igroup %(grp)s found for '
|
||||||
'initiator %(iname)s')
|
'initiator %(iname)s'),
|
||||||
% {'grp': initiator_group['name'],
|
{'grp': initiator_group['name'],
|
||||||
'iname': initiator_name})
|
'iname': initiator_name})
|
||||||
return initiator_group['name']
|
return initiator_group['name']
|
||||||
LOG.info(_LI('No igroup found for initiator %s') % initiator_name)
|
LOG.info(_LI('No igroup found for initiator %s'), initiator_name)
|
||||||
return ''
|
return ''
|
||||||
|
|
||||||
def initialize_connection(self, volume, connector):
|
def initialize_connection(self, volume, connector):
|
||||||
"""Driver entry point to attach a volume to an instance."""
|
"""Driver entry point to attach a volume to an instance."""
|
||||||
LOG.info(_LI('Entering initialize_connection volume=%(vol)s'
|
LOG.info(_LI('Entering initialize_connection volume=%(vol)s'
|
||||||
' connector=%(conn)s location=%(loc)s')
|
' connector=%(conn)s location=%(loc)s'),
|
||||||
% {'vol': volume,
|
{'vol': volume,
|
||||||
'conn': connector,
|
'conn': connector,
|
||||||
'loc': volume['provider_location']})
|
'loc': volume['provider_location']})
|
||||||
initiator_name = connector['initiator']
|
initiator_name = connector['initiator']
|
||||||
initiator_group_name = self._get_igroupname_for_initiator(
|
initiator_group_name = self._get_igroupname_for_initiator(
|
||||||
initiator_name)
|
initiator_name)
|
||||||
if not initiator_group_name:
|
if not initiator_group_name:
|
||||||
initiator_group_name = self._create_igroup_for_initiator(
|
initiator_group_name = self._create_igroup_for_initiator(
|
||||||
initiator_name)
|
initiator_name)
|
||||||
LOG.info(_LI('Initiator group name is %(grp)s for initiator %(iname)s')
|
LOG.info(_LI('Initiator group name is %(grp)s for initiator '
|
||||||
% {'grp': initiator_group_name, 'iname': initiator_name})
|
'%(iname)s'),
|
||||||
|
{'grp': initiator_group_name, 'iname': initiator_name})
|
||||||
self.APIExecutor.add_acl(volume, initiator_group_name)
|
self.APIExecutor.add_acl(volume, initiator_group_name)
|
||||||
(iscsi_portal, iqn, lun_num) = volume['provider_location'].split()
|
(iscsi_portal, iqn, lun_num) = volume['provider_location'].split()
|
||||||
properties = {}
|
properties = {}
|
||||||
|
@ -346,10 +348,10 @@ class NimbleISCSIDriver(san.SanISCSIDriver):
|
||||||
def terminate_connection(self, volume, connector, **kwargs):
|
def terminate_connection(self, volume, connector, **kwargs):
|
||||||
"""Driver entry point to unattach a volume from an instance."""
|
"""Driver entry point to unattach a volume from an instance."""
|
||||||
LOG.info(_LI('Entering terminate_connection volume=%(vol)s'
|
LOG.info(_LI('Entering terminate_connection volume=%(vol)s'
|
||||||
' connector=%(conn)s location=%(loc)s.')
|
' connector=%(conn)s location=%(loc)s.'),
|
||||||
% {'vol': volume,
|
{'vol': volume,
|
||||||
'conn': connector,
|
'conn': connector,
|
||||||
'loc': volume['provider_location']})
|
'loc': volume['provider_location']})
|
||||||
initiator_name = connector['initiator']
|
initiator_name = connector['initiator']
|
||||||
initiator_group_name = self._get_igroupname_for_initiator(
|
initiator_group_name = self._get_igroupname_for_initiator(
|
||||||
initiator_name)
|
initiator_name)
|
||||||
|
@ -397,7 +399,7 @@ def _connection_checker(func):
|
||||||
self.login()
|
self.login()
|
||||||
continue
|
continue
|
||||||
else:
|
else:
|
||||||
LOG.error(_LE('Re-throwing Exception %s') % e)
|
LOG.error(_LE('Re-throwing Exception %s'), e)
|
||||||
raise
|
raise
|
||||||
return inner_connection_checker
|
return inner_connection_checker
|
||||||
|
|
||||||
|
@ -411,14 +413,14 @@ class NimbleAPIExecutor(object):
|
||||||
self.username = kwargs['username']
|
self.username = kwargs['username']
|
||||||
self.password = kwargs['password']
|
self.password = kwargs['password']
|
||||||
wsdl_url = 'https://%s/wsdl/NsGroupManagement.wsdl' % (kwargs['ip'])
|
wsdl_url = 'https://%s/wsdl/NsGroupManagement.wsdl' % (kwargs['ip'])
|
||||||
LOG.debug('Using Nimble wsdl_url: %s' % wsdl_url)
|
LOG.debug('Using Nimble wsdl_url: %s', wsdl_url)
|
||||||
self.err_string_dict = self._create_err_code_to_str_mapper(wsdl_url)
|
self.err_string_dict = self._create_err_code_to_str_mapper(wsdl_url)
|
||||||
self.client = client.Client(wsdl_url,
|
self.client = client.Client(wsdl_url,
|
||||||
username=self.username,
|
username=self.username,
|
||||||
password=self.password)
|
password=self.password)
|
||||||
soap_url = ('https://%(ip)s:%(port)s/soap' % {'ip': kwargs['ip'],
|
soap_url = ('https://%(ip)s:%(port)s/soap' % {'ip': kwargs['ip'],
|
||||||
'port': SOAP_PORT})
|
'port': SOAP_PORT})
|
||||||
LOG.debug('Using Nimble soap_url: %s' % soap_url)
|
LOG.debug('Using Nimble soap_url: %s', soap_url)
|
||||||
self.client.set_options(location=soap_url)
|
self.client.set_options(location=soap_url)
|
||||||
self.login()
|
self.login()
|
||||||
|
|
||||||
|
@ -453,7 +455,7 @@ class NimbleAPIExecutor(object):
|
||||||
def login(self):
|
def login(self):
|
||||||
"""Execute Https Login API."""
|
"""Execute Https Login API."""
|
||||||
response = self._execute_login()
|
response = self._execute_login()
|
||||||
LOG.info(_LI('Successful login by user %s') % self.username)
|
LOG.info(_LI('Successful login by user %s'), self.username)
|
||||||
self.sid = response['authInfo']['sid']
|
self.sid = response['authInfo']['sid']
|
||||||
|
|
||||||
@_connection_checker
|
@_connection_checker
|
||||||
|
@ -482,12 +484,12 @@ class NimbleAPIExecutor(object):
|
||||||
|
|
||||||
LOG.info(_LI('Creating a new volume=%(vol)s size=%(size)s'
|
LOG.info(_LI('Creating a new volume=%(vol)s size=%(size)s'
|
||||||
' reserve=%(reserve)s in pool=%(pool)s'
|
' reserve=%(reserve)s in pool=%(pool)s'
|
||||||
' description=%(description)s')
|
' description=%(description)s'),
|
||||||
% {'vol': volume['name'],
|
{'vol': volume['name'],
|
||||||
'size': volume_size,
|
'size': volume_size,
|
||||||
'reserve': reserve,
|
'reserve': reserve,
|
||||||
'pool': pool_name,
|
'pool': pool_name,
|
||||||
'description': description})
|
'description': description})
|
||||||
return self.client.service.createVol(
|
return self.client.service.createVol(
|
||||||
request={'sid': self.sid,
|
request={'sid': self.sid,
|
||||||
'attr': {'name': volume['name'],
|
'attr': {'name': volume['name'],
|
||||||
|
@ -504,7 +506,7 @@ class NimbleAPIExecutor(object):
|
||||||
def create_vol(self, volume, pool_name, reserve):
|
def create_vol(self, volume, pool_name, reserve):
|
||||||
"""Execute createVol API."""
|
"""Execute createVol API."""
|
||||||
response = self._execute_create_vol(volume, pool_name, reserve)
|
response = self._execute_create_vol(volume, pool_name, reserve)
|
||||||
LOG.info(_LI('Successfully create volume %s') % response['name'])
|
LOG.info(_LI('Successfully create volume %s'), response['name'])
|
||||||
return response['name']
|
return response['name']
|
||||||
|
|
||||||
@_connection_checker
|
@_connection_checker
|
||||||
|
@ -524,9 +526,9 @@ class NimbleAPIExecutor(object):
|
||||||
def add_acl(self, volume, initiator_group_name):
|
def add_acl(self, volume, initiator_group_name):
|
||||||
"""Execute addAcl API."""
|
"""Execute addAcl API."""
|
||||||
LOG.info(_LI('Adding ACL to volume=%(vol)s with'
|
LOG.info(_LI('Adding ACL to volume=%(vol)s with'
|
||||||
' initiator group name %(igrp)s')
|
' initiator group name %(igrp)s'),
|
||||||
% {'vol': volume['name'],
|
{'vol': volume['name'],
|
||||||
'igrp': initiator_group_name})
|
'igrp': initiator_group_name})
|
||||||
return self.client.service.addVolAcl(
|
return self.client.service.addVolAcl(
|
||||||
request={'sid': self.sid,
|
request={'sid': self.sid,
|
||||||
'volname': volume['name'],
|
'volname': volume['name'],
|
||||||
|
@ -539,9 +541,9 @@ class NimbleAPIExecutor(object):
|
||||||
def remove_acl(self, volume, initiator_group_name):
|
def remove_acl(self, volume, initiator_group_name):
|
||||||
"""Execute removeVolAcl API."""
|
"""Execute removeVolAcl API."""
|
||||||
LOG.info(_LI('Removing ACL from volume=%(vol)s'
|
LOG.info(_LI('Removing ACL from volume=%(vol)s'
|
||||||
' for initiator group %(igrp)s')
|
' for initiator group %(igrp)s'),
|
||||||
% {'vol': volume['name'],
|
{'vol': volume['name'],
|
||||||
'igrp': initiator_group_name})
|
'igrp': initiator_group_name})
|
||||||
return self.client.service.removeVolAcl(
|
return self.client.service.removeVolAcl(
|
||||||
request={'sid': self.sid,
|
request={'sid': self.sid,
|
||||||
'volname': volume['name'],
|
'volname': volume['name'],
|
||||||
|
@ -553,23 +555,23 @@ class NimbleAPIExecutor(object):
|
||||||
@_response_checker
|
@_response_checker
|
||||||
def _execute_get_vol_info(self, vol_name):
|
def _execute_get_vol_info(self, vol_name):
|
||||||
LOG.info(_LI('Getting volume information '
|
LOG.info(_LI('Getting volume information '
|
||||||
'for vol_name=%s') % (vol_name))
|
'for vol_name=%s'), vol_name)
|
||||||
return self.client.service.getVolInfo(request={'sid': self.sid,
|
return self.client.service.getVolInfo(request={'sid': self.sid,
|
||||||
'name': vol_name})
|
'name': vol_name})
|
||||||
|
|
||||||
def get_vol_info(self, vol_name):
|
def get_vol_info(self, vol_name):
|
||||||
"""Execute getVolInfo API."""
|
"""Execute getVolInfo API."""
|
||||||
response = self._execute_get_vol_info(vol_name)
|
response = self._execute_get_vol_info(vol_name)
|
||||||
LOG.info(_LI('Successfully got volume information for volume %s')
|
LOG.info(_LI('Successfully got volume information for volume %s'),
|
||||||
% vol_name)
|
vol_name)
|
||||||
return response['vol']
|
return response['vol']
|
||||||
|
|
||||||
@_connection_checker
|
@_connection_checker
|
||||||
@_response_checker
|
@_response_checker
|
||||||
def online_vol(self, vol_name, online_flag, *args, **kwargs):
|
def online_vol(self, vol_name, online_flag, *args, **kwargs):
|
||||||
"""Execute onlineVol API."""
|
"""Execute onlineVol API."""
|
||||||
LOG.info(_LI('Setting volume %(vol)s to online_flag %(flag)s')
|
LOG.info(_LI('Setting volume %(vol)s to online_flag %(flag)s'),
|
||||||
% {'vol': vol_name, 'flag': online_flag})
|
{'vol': vol_name, 'flag': online_flag})
|
||||||
return self.client.service.onlineVol(request={'sid': self.sid,
|
return self.client.service.onlineVol(request={'sid': self.sid,
|
||||||
'name': vol_name,
|
'name': vol_name,
|
||||||
'online': online_flag})
|
'online': online_flag})
|
||||||
|
@ -578,8 +580,8 @@ class NimbleAPIExecutor(object):
|
||||||
@_response_checker
|
@_response_checker
|
||||||
def online_snap(self, vol_name, online_flag, snap_name, *args, **kwargs):
|
def online_snap(self, vol_name, online_flag, snap_name, *args, **kwargs):
|
||||||
"""Execute onlineSnap API."""
|
"""Execute onlineSnap API."""
|
||||||
LOG.info(_LI('Setting snapshot %(snap)s to online_flag %(flag)s')
|
LOG.info(_LI('Setting snapshot %(snap)s to online_flag %(flag)s'),
|
||||||
% {'snap': snap_name, 'flag': online_flag})
|
{'snap': snap_name, 'flag': online_flag})
|
||||||
return self.client.service.onlineSnap(request={'sid': self.sid,
|
return self.client.service.onlineSnap(request={'sid': self.sid,
|
||||||
'vol': vol_name,
|
'vol': vol_name,
|
||||||
'name': snap_name,
|
'name': snap_name,
|
||||||
|
@ -589,7 +591,7 @@ class NimbleAPIExecutor(object):
|
||||||
@_response_checker
|
@_response_checker
|
||||||
def dissociate_volcoll(self, vol_name, *args, **kwargs):
|
def dissociate_volcoll(self, vol_name, *args, **kwargs):
|
||||||
"""Execute dissocProtPol API."""
|
"""Execute dissocProtPol API."""
|
||||||
LOG.info(_LI('Dissociating volume %s ') % vol_name)
|
LOG.info(_LI('Dissociating volume %s '), vol_name)
|
||||||
return self.client.service.dissocProtPol(
|
return self.client.service.dissocProtPol(
|
||||||
request={'sid': self.sid,
|
request={'sid': self.sid,
|
||||||
'vol-name': vol_name})
|
'vol-name': vol_name})
|
||||||
|
@ -598,7 +600,7 @@ class NimbleAPIExecutor(object):
|
||||||
@_response_checker
|
@_response_checker
|
||||||
def delete_vol(self, vol_name, *args, **kwargs):
|
def delete_vol(self, vol_name, *args, **kwargs):
|
||||||
"""Execute deleteVol API."""
|
"""Execute deleteVol API."""
|
||||||
LOG.info(_LI('Deleting volume %s ') % vol_name)
|
LOG.info(_LI('Deleting volume %s '), vol_name)
|
||||||
return self.client.service.deleteVol(request={'sid': self.sid,
|
return self.client.service.deleteVol(request={'sid': self.sid,
|
||||||
'name': vol_name})
|
'name': vol_name})
|
||||||
|
|
||||||
|
@ -615,10 +617,10 @@ class NimbleAPIExecutor(object):
|
||||||
# Limit to 254 characters
|
# Limit to 254 characters
|
||||||
snap_description = snap_description[:254]
|
snap_description = snap_description[:254]
|
||||||
LOG.info(_LI('Creating snapshot for volume_name=%(vol)s'
|
LOG.info(_LI('Creating snapshot for volume_name=%(vol)s'
|
||||||
' snap_name=%(name)s snap_description=%(desc)s')
|
' snap_name=%(name)s snap_description=%(desc)s'),
|
||||||
% {'vol': volume_name,
|
{'vol': volume_name,
|
||||||
'name': snap_name,
|
'name': snap_name,
|
||||||
'desc': snap_description})
|
'desc': snap_description})
|
||||||
return self.client.service.snapVol(
|
return self.client.service.snapVol(
|
||||||
request={'sid': self.sid,
|
request={'sid': self.sid,
|
||||||
'vol': volume_name,
|
'vol': volume_name,
|
||||||
|
@ -629,7 +631,7 @@ class NimbleAPIExecutor(object):
|
||||||
@_response_checker
|
@_response_checker
|
||||||
def delete_snap(self, vol_name, snap_name, *args, **kwargs):
|
def delete_snap(self, vol_name, snap_name, *args, **kwargs):
|
||||||
"""Execute deleteSnap API."""
|
"""Execute deleteSnap API."""
|
||||||
LOG.info(_LI('Deleting snapshot %s ') % snap_name)
|
LOG.info(_LI('Deleting snapshot %s '), snap_name)
|
||||||
return self.client.service.deleteSnap(request={'sid': self.sid,
|
return self.client.service.deleteSnap(request={'sid': self.sid,
|
||||||
'vol': vol_name,
|
'vol': vol_name,
|
||||||
'name': snap_name})
|
'name': snap_name})
|
||||||
|
@ -645,12 +647,12 @@ class NimbleAPIExecutor(object):
|
||||||
reserve_size = snap_size * units.Gi if reserve else 0
|
reserve_size = snap_size * units.Gi if reserve else 0
|
||||||
LOG.info(_LI('Cloning volume from snapshot volume=%(vol)s '
|
LOG.info(_LI('Cloning volume from snapshot volume=%(vol)s '
|
||||||
'snapshot=%(snap)s clone=%(clone)s snap_size=%(size)s'
|
'snapshot=%(snap)s clone=%(clone)s snap_size=%(size)s'
|
||||||
'reserve=%(reserve)s')
|
'reserve=%(reserve)s'),
|
||||||
% {'vol': volume_name,
|
{'vol': volume_name,
|
||||||
'snap': snap_name,
|
'snap': snap_name,
|
||||||
'clone': clone_name,
|
'clone': clone_name,
|
||||||
'size': snap_size,
|
'size': snap_size,
|
||||||
'reserve': reserve})
|
'reserve': reserve})
|
||||||
clone_size = snap_size * units.Gi
|
clone_size = snap_size * units.Gi
|
||||||
return self.client.service.cloneVol(
|
return self.client.service.cloneVol(
|
||||||
request={'sid': self.sid,
|
request={'sid': self.sid,
|
||||||
|
@ -668,8 +670,8 @@ class NimbleAPIExecutor(object):
|
||||||
@_response_checker
|
@_response_checker
|
||||||
def edit_vol(self, vol_name, mask, attr):
|
def edit_vol(self, vol_name, mask, attr):
|
||||||
"""Execute editVol API."""
|
"""Execute editVol API."""
|
||||||
LOG.info(_LI('Editing Volume %(vol)s with mask %(mask)s')
|
LOG.info(_LI('Editing Volume %(vol)s with mask %(mask)s'),
|
||||||
% {'vol': vol_name, 'mask': str(mask)})
|
{'vol': vol_name, 'mask': str(mask)})
|
||||||
return self.client.service.editVol(request={'sid': self.sid,
|
return self.client.service.editVol(request={'sid': self.sid,
|
||||||
'name': vol_name,
|
'name': vol_name,
|
||||||
'mask': mask,
|
'mask': mask,
|
||||||
|
@ -694,8 +696,8 @@ class NimbleAPIExecutor(object):
|
||||||
def create_initiator_group(self, initiator_group_name, initiator_name):
|
def create_initiator_group(self, initiator_group_name, initiator_name):
|
||||||
"""Execute createInitiatorGrp API."""
|
"""Execute createInitiatorGrp API."""
|
||||||
LOG.info(_LI('Creating initiator group %(igrp)s'
|
LOG.info(_LI('Creating initiator group %(igrp)s'
|
||||||
' with one initiator %(iname)s')
|
' with one initiator %(iname)s'),
|
||||||
% {'igrp': initiator_group_name, 'iname': initiator_name})
|
{'igrp': initiator_group_name, 'iname': initiator_name})
|
||||||
return self.client.service.createInitiatorGrp(
|
return self.client.service.createInitiatorGrp(
|
||||||
request={'sid': self.sid,
|
request={'sid': self.sid,
|
||||||
'attr': {'name': initiator_group_name,
|
'attr': {'name': initiator_group_name,
|
||||||
|
@ -706,7 +708,7 @@ class NimbleAPIExecutor(object):
|
||||||
@_response_checker
|
@_response_checker
|
||||||
def delete_initiator_group(self, initiator_group_name, *args, **kwargs):
|
def delete_initiator_group(self, initiator_group_name, *args, **kwargs):
|
||||||
"""Execute deleteInitiatorGrp API."""
|
"""Execute deleteInitiatorGrp API."""
|
||||||
LOG.info(_LI('Deleting deleteInitiatorGrp %s ') % initiator_group_name)
|
LOG.info(_LI('Deleting deleteInitiatorGrp %s '), initiator_group_name)
|
||||||
return self.client.service.deleteInitiatorGrp(
|
return self.client.service.deleteInitiatorGrp(
|
||||||
request={'sid': self.sid,
|
request={'sid': self.sid,
|
||||||
'name': initiator_group_name})
|
'name': initiator_group_name})
|
||||||
|
|
|
@ -66,8 +66,9 @@ class OVSVolumeDriver(driver.VolumeDriver):
|
||||||
Options come from CONF
|
Options come from CONF
|
||||||
"""
|
"""
|
||||||
super(OVSVolumeDriver, self).__init__(*args, **kwargs)
|
super(OVSVolumeDriver, self).__init__(*args, **kwargs)
|
||||||
LOG.debug('INIT %s %s %s ', CONF.vpool_name, str(args),
|
LOG.debug('INIT %(pool_name)s %(arg)s %(kwarg)s ',
|
||||||
str(kwargs))
|
{'pool_name': CONF.vpool_name, 'arg': args,
|
||||||
|
'kwarg': kwargs})
|
||||||
self.configuration.append_config_values(OPTS)
|
self.configuration.append_config_values(OPTS)
|
||||||
self._vpool_name = self.configuration.vpool_name
|
self._vpool_name = self.configuration.vpool_name
|
||||||
if vpoollist is not None:
|
if vpoollist is not None:
|
||||||
|
@ -102,7 +103,8 @@ class OVSVolumeDriver(driver.VolumeDriver):
|
||||||
location = '{}/{}.raw'.format(mountpoint, name)
|
location = '{}/{}.raw'.format(mountpoint, name)
|
||||||
size = volume.size
|
size = volume.size
|
||||||
|
|
||||||
LOG.debug('DO_CREATE_VOLUME %s %s', location, size)
|
LOG.debug('DO_CREATE_VOLUME %(location)s %(size)s',
|
||||||
|
{'location': location, 'size': size})
|
||||||
vdisklib.VDiskController.create_volume(location = location,
|
vdisklib.VDiskController.create_volume(location = location,
|
||||||
size = size)
|
size = size)
|
||||||
volume['provider_location'] = location
|
volume['provider_location'] = location
|
||||||
|
@ -136,7 +138,8 @@ class OVSVolumeDriver(driver.VolumeDriver):
|
||||||
Downloads image from glance server into local .raw
|
Downloads image from glance server into local .raw
|
||||||
:param volume: volume reference (sqlalchemy Model)
|
:param volume: volume reference (sqlalchemy Model)
|
||||||
"""
|
"""
|
||||||
LOG.debug("CP_IMG_TO_VOL %s %s", image_service, image_id)
|
LOG.debug("CP_IMG_TO_VOL %(image_service)s %(image_id)s",
|
||||||
|
{'image_service': image_service, 'image_id': image_id})
|
||||||
|
|
||||||
name = volume.display_name
|
name = volume.display_name
|
||||||
if not name:
|
if not name:
|
||||||
|
@ -172,7 +175,8 @@ class OVSVolumeDriver(driver.VolumeDriver):
|
||||||
Called on "cinder upload-to-image ...volume... ...image-name..."
|
Called on "cinder upload-to-image ...volume... ...image-name..."
|
||||||
:param volume: volume reference (sqlalchemy Model)
|
:param volume: volume reference (sqlalchemy Model)
|
||||||
"""
|
"""
|
||||||
LOG.debug("CP_VOL_TO_IMG %s %s", image_service, image_meta)
|
LOG.debug("CP_VOL_TO_IMG %(image_service)s %(image_meta)s",
|
||||||
|
{'image_service': image_service, 'image_meta': image_meta})
|
||||||
super(OVSVolumeDriver, self).copy_volume_to_image(
|
super(OVSVolumeDriver, self).copy_volume_to_image(
|
||||||
context, volume, image_service, image_meta)
|
context, volume, image_service, image_meta)
|
||||||
|
|
||||||
|
@ -222,8 +226,8 @@ class OVSVolumeDriver(driver.VolumeDriver):
|
||||||
vdisk = vdiskhybrid.VDisk(disk_meta['diskguid'])
|
vdisk = vdiskhybrid.VDisk(disk_meta['diskguid'])
|
||||||
vdisk.cinder_id = volume.id
|
vdisk.cinder_id = volume.id
|
||||||
vdisk.name = name
|
vdisk.name = name
|
||||||
LOG.debug('[CREATE FROM TEMPLATE] Updating meta %s %s',
|
LOG.debug('[CREATE FROM TEMPLATE] Updating meta %(volume_id)s '
|
||||||
volume.id, name)
|
'%(name)s', {'volume_id': volume.id, 'name': name})
|
||||||
vdisk.save()
|
vdisk.save()
|
||||||
else:
|
else:
|
||||||
LOG.debug('[THIN CLONE] VDisk %s is not a template',
|
LOG.debug('[THIN CLONE] VDisk %s is not a template',
|
||||||
|
@ -239,7 +243,8 @@ class OVSVolumeDriver(driver.VolumeDriver):
|
||||||
'machineguid': source_ovs_disk.vmachine_guid,
|
'machineguid': source_ovs_disk.vmachine_guid,
|
||||||
'is_automatic': False}
|
'is_automatic': False}
|
||||||
|
|
||||||
LOG.debug('CREATE_SNAP %s %s', name, str(metadata))
|
LOG.debug('CREATE_SNAP %(name)s %(metadata)s',
|
||||||
|
{'name': name, 'metadata': metadata})
|
||||||
snapshotid = vdisklib.VDiskController.create_snapshot(
|
snapshotid = vdisklib.VDiskController.create_snapshot(
|
||||||
diskguid = source_ovs_disk.guid,
|
diskguid = source_ovs_disk.guid,
|
||||||
metadata = metadata,
|
metadata = metadata,
|
||||||
|
@ -306,8 +311,8 @@ class OVSVolumeDriver(driver.VolumeDriver):
|
||||||
'machineguid': ovs_disk.vmachine_guid,
|
'machineguid': ovs_disk.vmachine_guid,
|
||||||
'is_automatic': False}
|
'is_automatic': False}
|
||||||
|
|
||||||
LOG.debug('CREATE_SNAP %s %s', snapshot.display_name,
|
LOG.debug('CREATE_SNAP %(name)s %(metadata)s',
|
||||||
str(metadata))
|
{'name': snapshot.display_name, 'metadata': metadata})
|
||||||
vdisklib.VDiskController.create_snapshot(diskguid = ovs_disk.guid,
|
vdisklib.VDiskController.create_snapshot(diskguid = ovs_disk.guid,
|
||||||
metadata = metadata,
|
metadata = metadata,
|
||||||
snapshotid =
|
snapshotid =
|
||||||
|
@ -352,9 +357,10 @@ class OVSVolumeDriver(driver.VolumeDriver):
|
||||||
pmachineguid = self._find_ovs_model_pmachine_guid_by_hostname(
|
pmachineguid = self._find_ovs_model_pmachine_guid_by_hostname(
|
||||||
six.text_type(volume.host))
|
six.text_type(volume.host))
|
||||||
|
|
||||||
LOG.debug('[CLONE FROM SNAP] %s %s %s %s',
|
LOG.debug('[CLONE FROM SNAP] %(disk)s %(snapshot)s %(device)s '
|
||||||
ovs_snap_disk.guid, snapshot.id, devicename,
|
'%(machine)s',
|
||||||
pmachineguid)
|
{'disk': ovs_snap_disk.guid, 'snapshot': snapshot.id,
|
||||||
|
'device': devicename, 'machine': pmachineguid})
|
||||||
disk_meta = vdisklib.VDiskController.clone(
|
disk_meta = vdisklib.VDiskController.clone(
|
||||||
diskguid = ovs_snap_disk.guid,
|
diskguid = ovs_snap_disk.guid,
|
||||||
snapshotid = snapshot.id,
|
snapshotid = snapshot.id,
|
||||||
|
@ -427,7 +433,8 @@ class OVSVolumeDriver(driver.VolumeDriver):
|
||||||
The volume is a .raw file on a virtual filesystem.
|
The volume is a .raw file on a virtual filesystem.
|
||||||
Connection is always allowed based on POSIX permissions.
|
Connection is always allowed based on POSIX permissions.
|
||||||
"""
|
"""
|
||||||
LOG.debug('TERM_CONN %s %s ', six.text_type(connector), force)
|
LOG.debug('TERM_CONN %(connector)s %(force)s ',
|
||||||
|
{'connector': six.text_type(connector), 'force': force})
|
||||||
|
|
||||||
def check_for_setup_error(self):
|
def check_for_setup_error(self):
|
||||||
"""Validate driver setup"""
|
"""Validate driver setup"""
|
||||||
|
@ -493,12 +500,15 @@ class OVSVolumeDriver(driver.VolumeDriver):
|
||||||
_location = "{0}/{1}".format(vsr.mountpoint,
|
_location = "{0}/{1}".format(vsr.mountpoint,
|
||||||
vd.devicename)
|
vd.devicename)
|
||||||
if _location == location:
|
if _location == location:
|
||||||
LOG.debug('Location %s Disk found %s',
|
LOG.debug('Location %(location)s Disk '
|
||||||
(location, vd.guid))
|
'found %(id)s',
|
||||||
|
{'location': location,
|
||||||
|
'id': vd.guid})
|
||||||
disk = vdiskhybrid.VDisk(vd.guid)
|
disk = vdiskhybrid.VDisk(vd.guid)
|
||||||
return disk
|
return disk
|
||||||
msg = 'NO RESULT Attempt %s timeout %s max attempts %s'
|
LOG.debug('NO RESULT Attempt %(attempt)s timeout %(timeout)s max '
|
||||||
LOG.debug(msg, attempt, timeout, retry)
|
'attempts %(retry)s',
|
||||||
|
{'attempt': attempt, 'timeout': timeout, 'retry': retry})
|
||||||
if timeout:
|
if timeout:
|
||||||
time.sleep(timeout)
|
time.sleep(timeout)
|
||||||
attempt += 1
|
attempt += 1
|
||||||
|
@ -511,16 +521,16 @@ class OVSVolumeDriver(driver.VolumeDriver):
|
||||||
:return guid: GUID
|
:return guid: GUID
|
||||||
"""
|
"""
|
||||||
hostname = self._get_real_hostname(hostname)
|
hostname = self._get_real_hostname(hostname)
|
||||||
LOG.debug('[_FIND OVS PMACHINE] Hostname %s' % (hostname))
|
LOG.debug('[_FIND OVS PMACHINE] Hostname %s', hostname)
|
||||||
mapping = [(pm.guid, six.text_type(sr.name))
|
mapping = [(pm.guid, six.text_type(sr.name))
|
||||||
for pm in pmachinelist.PMachineList.get_pmachines()
|
for pm in pmachinelist.PMachineList.get_pmachines()
|
||||||
for sr in pm.storagerouters]
|
for sr in pm.storagerouters]
|
||||||
for item in mapping:
|
for item in mapping:
|
||||||
if item[1] == str(hostname):
|
if item[1] == str(hostname):
|
||||||
msg = 'Found pmachineguid %s for Hostname %s'
|
LOG.debug('Found pmachineguid %(item)s for Hostname %(host)s',
|
||||||
LOG.debug(msg, item[0], hostname)
|
{'item': item[0], 'host': hostname})
|
||||||
return item[0]
|
return item[0]
|
||||||
msg = (_('No PMachine guid found for Hostname %s'), hostname)
|
msg = (_('No PMachine guid found for Hostname %s') % hostname)
|
||||||
LOG.exception(msg)
|
LOG.exception(msg)
|
||||||
raise exception.VolumeBackendAPIException(data=msg)
|
raise exception.VolumeBackendAPIException(data=msg)
|
||||||
|
|
||||||
|
@ -528,13 +538,14 @@ class OVSVolumeDriver(driver.VolumeDriver):
|
||||||
"""Find OVS disk object based on snapshot id
|
"""Find OVS disk object based on snapshot id
|
||||||
:return VDisk: OVS DAL model object
|
:return VDisk: OVS DAL model object
|
||||||
"""
|
"""
|
||||||
LOG.debug('[_FIND OVS DISK] Snapshotid %s' % snapshotid)
|
LOG.debug('[_FIND OVS DISK] Snapshotid %s', snapshotid)
|
||||||
for disk in vdisklist.VDiskList.get_vdisks():
|
for disk in vdisklist.VDiskList.get_vdisks():
|
||||||
snaps_guid = [s['guid'] for s in disk.snapshots]
|
snaps_guid = [s['guid'] for s in disk.snapshots]
|
||||||
if str(snapshotid) in snaps_guid:
|
if str(snapshotid) in snaps_guid:
|
||||||
LOG.debug('[_FIND OVS DISK] Snapshot id %s Disk found %s',
|
LOG.debug('[_FIND OVS DISK] Snapshot id %(snapshot)s Disk '
|
||||||
(snapshotid, disk))
|
'found %(disk)s',
|
||||||
|
{'snapshot': snapshotid, 'disk': disk})
|
||||||
return disk
|
return disk
|
||||||
msg = (_('No disk found for snapshotid %s'), snapshotid)
|
msg = (_('No disk found for snapshotid %s') % snapshotid)
|
||||||
LOG.exception(msg)
|
LOG.exception(msg)
|
||||||
raise exception.VolumeBackendAPIException(data=msg)
|
raise exception.VolumeBackendAPIException(data=msg)
|
||||||
|
|
|
@ -135,10 +135,9 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
|
||||||
def _export_fc(self, volumeid, targetwwpns, initiatorwwpns, volumename):
|
def _export_fc(self, volumeid, targetwwpns, initiatorwwpns, volumename):
|
||||||
ret = 0
|
ret = 0
|
||||||
output = ''
|
output = ''
|
||||||
msg = _('Export fc: %(volume)s, %(wwpns)s, %(iqn)s, %(volumename)s') \
|
LOG.debug('Export fc: %(volume)s, %(wwpns)s, %(iqn)s, %(volumename)s',
|
||||||
% {'volume': volumeid, 'wwpns': targetwwpns,
|
{'volume': volumeid, 'wwpns': targetwwpns,
|
||||||
'iqn': initiatorwwpns, 'volumename': volumename}
|
'iqn': initiatorwwpns, 'volumename': volumename})
|
||||||
LOG.debug(msg)
|
|
||||||
try:
|
try:
|
||||||
ret, output = self.dpl.assign_vdev_fc(
|
ret, output = self.dpl.assign_vdev_fc(
|
||||||
self._conver_uuid2hex(volumeid), targetwwpns,
|
self._conver_uuid2hex(volumeid), targetwwpns,
|
||||||
|
|
|
@ -213,8 +213,8 @@ class PureISCSIDriver(san.SanISCSIDriver):
|
||||||
ERR_MSG_NOT_EXIST in err.text:
|
ERR_MSG_NOT_EXIST in err.text:
|
||||||
# Happens if the volume does not exist.
|
# Happens if the volume does not exist.
|
||||||
ctxt.reraise = False
|
ctxt.reraise = False
|
||||||
LOG.warn(_LW("Volume deletion failed with message: %s"),
|
LOG.warning(_LW("Volume deletion failed with message: %s"),
|
||||||
err.text)
|
err.text)
|
||||||
LOG.debug("Leave PureISCSIDriver.delete_volume.")
|
LOG.debug("Leave PureISCSIDriver.delete_volume.")
|
||||||
|
|
||||||
def create_snapshot(self, snapshot):
|
def create_snapshot(self, snapshot):
|
||||||
|
@ -279,11 +279,11 @@ class PureISCSIDriver(san.SanISCSIDriver):
|
||||||
self._run_iscsiadm_bare(["-m", "discovery", "-t", "sendtargets",
|
self._run_iscsiadm_bare(["-m", "discovery", "-t", "sendtargets",
|
||||||
"-p", self._iscsi_port["portal"]])
|
"-p", self._iscsi_port["portal"]])
|
||||||
except processutils.ProcessExecutionError as err:
|
except processutils.ProcessExecutionError as err:
|
||||||
LOG.warn(_LW("iSCSI discovery of port %(port_name)s at "
|
LOG.warning(_LW("iSCSI discovery of port %(port_name)s at "
|
||||||
"%(port_portal)s failed with error: %(err_msg)s"),
|
"%(port_portal)s failed with error: %(err_msg)s"),
|
||||||
{"port_name": self._iscsi_port["name"],
|
{"port_name": self._iscsi_port["name"],
|
||||||
"port_portal": self._iscsi_port["portal"],
|
"port_portal": self._iscsi_port["portal"],
|
||||||
"err_msg": err.stderr})
|
"err_msg": err.stderr})
|
||||||
self._iscsi_port = self._choose_target_iscsi_port()
|
self._iscsi_port = self._choose_target_iscsi_port()
|
||||||
return self._iscsi_port
|
return self._iscsi_port
|
||||||
|
|
||||||
|
@ -384,8 +384,8 @@ class PureISCSIDriver(san.SanISCSIDriver):
|
||||||
"Connection already exists" in err.text):
|
"Connection already exists" in err.text):
|
||||||
# Happens if the volume is already connected to the host.
|
# Happens if the volume is already connected to the host.
|
||||||
ctxt.reraise = False
|
ctxt.reraise = False
|
||||||
LOG.warn(_LW("Volume connection already exists with "
|
LOG.warning(_LW("Volume connection already exists with "
|
||||||
"message: %s"), err.text)
|
"message: %s"), err.text)
|
||||||
# Get the info for the existing connection
|
# Get the info for the existing connection
|
||||||
connected_hosts = \
|
connected_hosts = \
|
||||||
self._array.list_volume_private_connections(vol_name)
|
self._array.list_volume_private_connections(vol_name)
|
||||||
|
@ -724,5 +724,5 @@ class PureISCSIDriver(san.SanISCSIDriver):
|
||||||
if (err.code == 400 and
|
if (err.code == 400 and
|
||||||
ERR_MSG_NOT_EXIST in err.text):
|
ERR_MSG_NOT_EXIST in err.text):
|
||||||
ctxt.reraise = False
|
ctxt.reraise = False
|
||||||
LOG.warn(_LW("Volume unmanage was unable to rename "
|
LOG.warning(_LW("Volume unmanage was unable to rename "
|
||||||
"the volume, error message: %s"), err.text)
|
"the volume, error message: %s"), err.text)
|
||||||
|
|
|
@ -102,10 +102,10 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
|
|
||||||
def check_for_setup_error(self):
|
def check_for_setup_error(self):
|
||||||
if not self.configuration.quobyte_volume_url:
|
if not self.configuration.quobyte_volume_url:
|
||||||
msg = (_LW("There's no Quobyte volume configured (%s). Example:"
|
msg = (_("There's no Quobyte volume configured (%s). Example:"
|
||||||
" quobyte://<DIR host>/<volume name>") %
|
" quobyte://<DIR host>/<volume name>") %
|
||||||
'quobyte_volume_url')
|
'quobyte_volume_url')
|
||||||
LOG.warn(msg)
|
LOG.warning(msg)
|
||||||
raise exception.VolumeDriverException(msg)
|
raise exception.VolumeDriverException(msg)
|
||||||
|
|
||||||
# Check if mount.quobyte is installed
|
# Check if mount.quobyte is installed
|
||||||
|
@ -148,11 +148,10 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
qcow2.
|
qcow2.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
|
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, ",
|
||||||
"volume_size: %(size)s"
|
{'snap': snapshot['id'],
|
||||||
% {'snap': snapshot['id'],
|
'vol': volume['id'],
|
||||||
'vol': volume['id'],
|
'size': volume_size})
|
||||||
'size': volume_size})
|
|
||||||
|
|
||||||
info_path = self._local_path_volume_info(snapshot['volume'])
|
info_path = self._local_path_volume_info(snapshot['volume'])
|
||||||
snap_info = self._read_info_file(info_path)
|
snap_info = self._read_info_file(info_path)
|
||||||
|
@ -168,7 +167,7 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
|
|
||||||
path_to_new_vol = self._local_path_volume(volume)
|
path_to_new_vol = self._local_path_volume(volume)
|
||||||
|
|
||||||
LOG.debug("will copy from snapshot at %s" % path_to_snap_img)
|
LOG.debug("will copy from snapshot at %s", path_to_snap_img)
|
||||||
|
|
||||||
if self.configuration.quobyte_qcow2_volumes:
|
if self.configuration.quobyte_qcow2_volumes:
|
||||||
out_format = 'qcow2'
|
out_format = 'qcow2'
|
||||||
|
@ -187,8 +186,8 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
"""Deletes a logical volume."""
|
"""Deletes a logical volume."""
|
||||||
|
|
||||||
if not volume['provider_location']:
|
if not volume['provider_location']:
|
||||||
LOG.warn(_LW('Volume %s does not have provider_location '
|
LOG.warning(_LW('Volume %s does not have provider_location '
|
||||||
'specified, skipping'), volume['name'])
|
'specified, skipping'), volume['name'])
|
||||||
return
|
return
|
||||||
|
|
||||||
self._ensure_share_mounted(volume['provider_location'])
|
self._ensure_share_mounted(volume['provider_location'])
|
||||||
|
@ -355,7 +354,7 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
' one Quobyte volume.'
|
' one Quobyte volume.'
|
||||||
target_volume = self._mounted_shares[0]
|
target_volume = self._mounted_shares[0]
|
||||||
|
|
||||||
LOG.debug('Selected %s as target Quobyte volume.' % target_volume)
|
LOG.debug('Selected %s as target Quobyte volume.', target_volume)
|
||||||
|
|
||||||
return target_volume
|
return target_volume
|
||||||
|
|
||||||
|
@ -387,16 +386,17 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
mounted = False
|
mounted = False
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI('Fixing previous mount %s which was not'
|
LOG.info(_LI('Fixing previous mount %s which was not'
|
||||||
' unmounted correctly.') % mount_path)
|
' unmounted correctly.'), mount_path)
|
||||||
self._execute('umount.quobyte', mount_path,
|
self._execute('umount.quobyte', mount_path,
|
||||||
run_as_root=False)
|
run_as_root=False)
|
||||||
except processutils.ProcessExecutionError as exc:
|
except processutils.ProcessExecutionError as exc:
|
||||||
LOG.warn(_LW("Failed to unmount previous mount: %s"),
|
LOG.warning(_LW("Failed to unmount previous mount: "
|
||||||
exc)
|
"%s"), exc)
|
||||||
else:
|
else:
|
||||||
# TODO(quobyte): Extend exc analysis in here?
|
# TODO(quobyte): Extend exc analysis in here?
|
||||||
LOG.warn(_LW("Unknown error occurred while checking mount"
|
LOG.warning(_LW("Unknown error occurred while checking "
|
||||||
" point: %s Trying to continue."), exc)
|
"mount point: %s Trying to continue."),
|
||||||
|
exc)
|
||||||
|
|
||||||
if not mounted:
|
if not mounted:
|
||||||
if not os.path.isdir(mount_path):
|
if not os.path.isdir(mount_path):
|
||||||
|
@ -407,13 +407,13 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
command.extend(['-c', self.configuration.quobyte_client_cfg])
|
command.extend(['-c', self.configuration.quobyte_client_cfg])
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI('Mounting volume: %s ...') % quobyte_volume)
|
LOG.info(_LI('Mounting volume: %s ...'), quobyte_volume)
|
||||||
self._execute(*command, run_as_root=False)
|
self._execute(*command, run_as_root=False)
|
||||||
LOG.info(_LI('Mounting volume: %s succeeded') % quobyte_volume)
|
LOG.info(_LI('Mounting volume: %s succeeded'), quobyte_volume)
|
||||||
mounted = True
|
mounted = True
|
||||||
except processutils.ProcessExecutionError as exc:
|
except processutils.ProcessExecutionError as exc:
|
||||||
if ensure and 'already mounted' in exc.stderr:
|
if ensure and 'already mounted' in exc.stderr:
|
||||||
LOG.warn(_LW("%s is already mounted"), quobyte_volume)
|
LOG.warning(_LW("%s is already mounted"), quobyte_volume)
|
||||||
else:
|
else:
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
@ -432,5 +432,5 @@ class QuobyteDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
raise exception.VolumeDriverException(msg)
|
raise exception.VolumeDriverException(msg)
|
||||||
|
|
||||||
if not os.access(mount_path, os.W_OK | os.X_OK):
|
if not os.access(mount_path, os.W_OK | os.X_OK):
|
||||||
LOG.warn(_LW("Volume is not writable. Please broaden the file"
|
LOG.warning(_LW("Volume is not writable. Please broaden the file"
|
||||||
" permissions. Mount: %s"), mount_path)
|
" permissions. Mount: %s"), mount_path)
|
||||||
|
|
|
@ -308,8 +308,8 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
|
||||||
return args
|
return args
|
||||||
|
|
||||||
def _connect_to_rados(self, pool=None):
|
def _connect_to_rados(self, pool=None):
|
||||||
LOG.debug("opening connection to ceph cluster (timeout=%s)." %
|
LOG.debug("opening connection to ceph cluster (timeout=%s).",
|
||||||
(self.configuration.rados_connect_timeout))
|
self.configuration.rados_connect_timeout)
|
||||||
|
|
||||||
# NOTE (e0ne): rados is binding to C lbirary librados.
|
# NOTE (e0ne): rados is binding to C lbirary librados.
|
||||||
# It blocks eventlet loop so we need to run it in a native
|
# It blocks eventlet loop so we need to run it in a native
|
||||||
|
@ -468,8 +468,8 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
|
||||||
# infinite is allowed.
|
# infinite is allowed.
|
||||||
if depth == CONF.rbd_max_clone_depth:
|
if depth == CONF.rbd_max_clone_depth:
|
||||||
LOG.debug("maximum clone depth (%d) has been reached - "
|
LOG.debug("maximum clone depth (%d) has been reached - "
|
||||||
"flattening source volume" %
|
"flattening source volume",
|
||||||
(CONF.rbd_max_clone_depth))
|
CONF.rbd_max_clone_depth)
|
||||||
flatten_parent = True
|
flatten_parent = True
|
||||||
|
|
||||||
src_volume = self.rbd.Image(client.ioctx, src_name)
|
src_volume = self.rbd.Image(client.ioctx, src_name)
|
||||||
|
@ -479,7 +479,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
|
||||||
_pool, parent, snap = self._get_clone_info(src_volume,
|
_pool, parent, snap = self._get_clone_info(src_volume,
|
||||||
src_name)
|
src_name)
|
||||||
# Flatten source volume
|
# Flatten source volume
|
||||||
LOG.debug("flattening source volume %s" % (src_name))
|
LOG.debug("flattening source volume %s", src_name)
|
||||||
src_volume.flatten()
|
src_volume.flatten()
|
||||||
# Delete parent clone snap
|
# Delete parent clone snap
|
||||||
parent_volume = self.rbd.Image(client.ioctx, parent)
|
parent_volume = self.rbd.Image(client.ioctx, parent)
|
||||||
|
@ -491,7 +491,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
|
||||||
|
|
||||||
# Create new snapshot of source volume
|
# Create new snapshot of source volume
|
||||||
clone_snap = "%s.clone_snap" % dest_name
|
clone_snap = "%s.clone_snap" % dest_name
|
||||||
LOG.debug("creating snapshot='%s'" % (clone_snap))
|
LOG.debug("creating snapshot='%s'", clone_snap)
|
||||||
src_volume.create_snap(clone_snap)
|
src_volume.create_snap(clone_snap)
|
||||||
src_volume.protect_snap(clone_snap)
|
src_volume.protect_snap(clone_snap)
|
||||||
except Exception as exc:
|
except Exception as exc:
|
||||||
|
@ -502,7 +502,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
|
||||||
# Now clone source volume snapshot
|
# Now clone source volume snapshot
|
||||||
try:
|
try:
|
||||||
LOG.debug("cloning '%(src_vol)s@%(src_snap)s' to "
|
LOG.debug("cloning '%(src_vol)s@%(src_snap)s' to "
|
||||||
"'%(dest)s'" %
|
"'%(dest)s'",
|
||||||
{'src_vol': src_name, 'src_snap': clone_snap,
|
{'src_vol': src_name, 'src_snap': clone_snap,
|
||||||
'dest': dest_name})
|
'dest': dest_name})
|
||||||
self.RBDProxy().clone(client.ioctx, src_name, clone_snap,
|
self.RBDProxy().clone(client.ioctx, src_name, clone_snap,
|
||||||
|
@ -517,7 +517,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
|
||||||
|
|
||||||
if volume['size'] != src_vref['size']:
|
if volume['size'] != src_vref['size']:
|
||||||
LOG.debug("resize volume '%(dst_vol)s' from %(src_size)d to "
|
LOG.debug("resize volume '%(dst_vol)s' from %(src_size)d to "
|
||||||
"%(dst_size)d" %
|
"%(dst_size)d",
|
||||||
{'dst_vol': volume['name'], 'src_size': src_vref['size'],
|
{'dst_vol': volume['name'], 'src_size': src_vref['size'],
|
||||||
'dst_size': volume['size']})
|
'dst_size': volume['size']})
|
||||||
self._resize(volume)
|
self._resize(volume)
|
||||||
|
@ -528,7 +528,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
|
||||||
"""Creates a logical volume."""
|
"""Creates a logical volume."""
|
||||||
size = int(volume['size']) * units.Gi
|
size = int(volume['size']) * units.Gi
|
||||||
|
|
||||||
LOG.debug("creating volume '%s'" % (volume['name']))
|
LOG.debug("creating volume '%s'", volume['name'])
|
||||||
|
|
||||||
chunk_size = CONF.rbd_store_chunk_size * units.Mi
|
chunk_size = CONF.rbd_store_chunk_size * units.Mi
|
||||||
order = int(math.log(chunk_size, 2))
|
order = int(math.log(chunk_size, 2))
|
||||||
|
@ -542,13 +542,13 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
|
||||||
features=client.features)
|
features=client.features)
|
||||||
|
|
||||||
def _flatten(self, pool, volume_name):
|
def _flatten(self, pool, volume_name):
|
||||||
LOG.debug('flattening %(pool)s/%(img)s' %
|
LOG.debug('flattening %(pool)s/%(img)s',
|
||||||
dict(pool=pool, img=volume_name))
|
dict(pool=pool, img=volume_name))
|
||||||
with RBDVolumeProxy(self, volume_name, pool) as vol:
|
with RBDVolumeProxy(self, volume_name, pool) as vol:
|
||||||
vol.flatten()
|
vol.flatten()
|
||||||
|
|
||||||
def _clone(self, volume, src_pool, src_image, src_snap):
|
def _clone(self, volume, src_pool, src_image, src_snap):
|
||||||
LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s' %
|
LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s',
|
||||||
dict(pool=src_pool, img=src_image, snap=src_snap,
|
dict(pool=src_pool, img=src_image, snap=src_snap,
|
||||||
dst=volume['name']))
|
dst=volume['name']))
|
||||||
with RADOSClient(self, src_pool) as src_client:
|
with RADOSClient(self, src_pool) as src_client:
|
||||||
|
@ -604,7 +604,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
|
||||||
if parent_snap == "%s.clone_snap" % volume_name:
|
if parent_snap == "%s.clone_snap" % volume_name:
|
||||||
return pool, parent, parent_snap
|
return pool, parent, parent_snap
|
||||||
except self.rbd.ImageNotFound:
|
except self.rbd.ImageNotFound:
|
||||||
LOG.debug("volume %s is not a clone" % volume_name)
|
LOG.debug("volume %s is not a clone", volume_name)
|
||||||
volume.set_snap(None)
|
volume.set_snap(None)
|
||||||
|
|
||||||
return (None, None, None)
|
return (None, None, None)
|
||||||
|
@ -622,7 +622,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
|
||||||
parent_name,
|
parent_name,
|
||||||
parent_snap)
|
parent_snap)
|
||||||
|
|
||||||
LOG.debug("deleting parent snapshot %s" % (parent_snap))
|
LOG.debug("deleting parent snapshot %s", parent_snap)
|
||||||
parent_rbd.unprotect_snap(parent_snap)
|
parent_rbd.unprotect_snap(parent_snap)
|
||||||
parent_rbd.remove_snap(parent_snap)
|
parent_rbd.remove_snap(parent_snap)
|
||||||
|
|
||||||
|
@ -633,7 +633,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
|
||||||
# If parent has been deleted in Cinder, delete the silent reference and
|
# If parent has been deleted in Cinder, delete the silent reference and
|
||||||
# keep walking up the chain if it is itself a clone.
|
# keep walking up the chain if it is itself a clone.
|
||||||
if (not parent_has_snaps) and parent_name.endswith('.deleted'):
|
if (not parent_has_snaps) and parent_name.endswith('.deleted'):
|
||||||
LOG.debug("deleting parent %s" % (parent_name))
|
LOG.debug("deleting parent %s", parent_name)
|
||||||
self.RBDProxy().remove(client.ioctx, parent_name)
|
self.RBDProxy().remove(client.ioctx, parent_name)
|
||||||
|
|
||||||
# Now move up to grandparent if there is one
|
# Now move up to grandparent if there is one
|
||||||
|
@ -649,8 +649,8 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
|
||||||
try:
|
try:
|
||||||
rbd_image = self.rbd.Image(client.ioctx, volume_name)
|
rbd_image = self.rbd.Image(client.ioctx, volume_name)
|
||||||
except self.rbd.ImageNotFound:
|
except self.rbd.ImageNotFound:
|
||||||
LOG.info(_LI("volume %s no longer exists in backend")
|
LOG.info(_LI("volume %s no longer exists in backend"),
|
||||||
% (volume_name))
|
volume_name)
|
||||||
return
|
return
|
||||||
|
|
||||||
clone_snap = None
|
clone_snap = None
|
||||||
|
@ -681,7 +681,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
|
||||||
rbd_image.close()
|
rbd_image.close()
|
||||||
|
|
||||||
if clone_snap is None:
|
if clone_snap is None:
|
||||||
LOG.debug("deleting rbd volume %s" % (volume_name))
|
LOG.debug("deleting rbd volume %s", volume_name)
|
||||||
try:
|
try:
|
||||||
self.RBDProxy().remove(client.ioctx, volume_name)
|
self.RBDProxy().remove(client.ioctx, volume_name)
|
||||||
except self.rbd.ImageBusy:
|
except self.rbd.ImageBusy:
|
||||||
|
@ -690,14 +690,13 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
|
||||||
"connection from a client that has crashed and, "
|
"connection from a client that has crashed and, "
|
||||||
"if so, may be resolved by retrying the delete "
|
"if so, may be resolved by retrying the delete "
|
||||||
"after 30 seconds has elapsed."))
|
"after 30 seconds has elapsed."))
|
||||||
LOG.warn(msg)
|
LOG.warning(msg)
|
||||||
# Now raise this so that volume stays available so that we
|
# Now raise this so that volume stays available so that we
|
||||||
# delete can be retried.
|
# delete can be retried.
|
||||||
raise exception.VolumeIsBusy(msg, volume_name=volume_name)
|
raise exception.VolumeIsBusy(msg, volume_name=volume_name)
|
||||||
except self.rbd.ImageNotFound:
|
except self.rbd.ImageNotFound:
|
||||||
msg = (_LI("RBD volume %s not found, allowing delete "
|
LOG.info(_LI("RBD volume %s not found, allowing delete "
|
||||||
"operation to proceed.") % volume_name)
|
"operation to proceed."), volume_name)
|
||||||
LOG.info(msg)
|
|
||||||
return
|
return
|
||||||
|
|
||||||
# If it is a clone, walk back up the parent chain deleting
|
# If it is a clone, walk back up the parent chain deleting
|
||||||
|
@ -815,15 +814,13 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
|
||||||
return False
|
return False
|
||||||
|
|
||||||
if self._get_fsid() != fsid:
|
if self._get_fsid() != fsid:
|
||||||
reason = ('%s is in a different ceph cluster') % image_location
|
LOG.debug('%s is in a different ceph cluster', image_location)
|
||||||
LOG.debug(reason)
|
|
||||||
return False
|
return False
|
||||||
|
|
||||||
if image_meta['disk_format'] != 'raw':
|
if image_meta['disk_format'] != 'raw':
|
||||||
reason = ("rbd image clone requires image format to be "
|
LOG.debug(("rbd image clone requires image format to be "
|
||||||
"'raw' but image {0} is '{1}'").format(
|
"'raw' but image {0} is '{1}'").format(
|
||||||
image_location, image_meta['disk_format'])
|
image_location, image_meta['disk_format']))
|
||||||
LOG.debug(reason)
|
|
||||||
return False
|
return False
|
||||||
|
|
||||||
# check that we can read the image
|
# check that we can read the image
|
||||||
|
@ -834,7 +831,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
|
||||||
read_only=True):
|
read_only=True):
|
||||||
return True
|
return True
|
||||||
except self.rbd.Error as e:
|
except self.rbd.Error as e:
|
||||||
LOG.debug('Unable to open image %(loc)s: %(err)s' %
|
LOG.debug('Unable to open image %(loc)s: %(err)s',
|
||||||
dict(loc=image_location, err=e))
|
dict(loc=image_location, err=e))
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
@ -855,9 +852,9 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
|
||||||
CONF.image_conversion_dir or
|
CONF.image_conversion_dir or
|
||||||
tempfile.gettempdir())
|
tempfile.gettempdir())
|
||||||
|
|
||||||
if (tmpdir == self.configuration.volume_tmp_dir):
|
if tmpdir == self.configuration.volume_tmp_dir:
|
||||||
LOG.warn(_LW('volume_tmp_dir is now deprecated, please use '
|
LOG.warning(_LW('volume_tmp_dir is now deprecated, please use '
|
||||||
'image_conversion_dir'))
|
'image_conversion_dir'))
|
||||||
|
|
||||||
# ensure temporary directory exists
|
# ensure temporary directory exists
|
||||||
if not os.path.exists(tmpdir):
|
if not os.path.exists(tmpdir):
|
||||||
|
|
|
@ -205,7 +205,7 @@ class RemoteFSDriver(driver.VolumeDriver):
|
||||||
|
|
||||||
volume['provider_location'] = self._find_share(volume['size'])
|
volume['provider_location'] = self._find_share(volume['size'])
|
||||||
|
|
||||||
LOG.info(_LI('casted to %s') % volume['provider_location'])
|
LOG.info(_LI('casted to %s'), volume['provider_location'])
|
||||||
|
|
||||||
self._do_create_volume(volume)
|
self._do_create_volume(volume)
|
||||||
|
|
||||||
|
@ -242,11 +242,11 @@ class RemoteFSDriver(driver.VolumeDriver):
|
||||||
self._ensure_share_mounted(share)
|
self._ensure_share_mounted(share)
|
||||||
mounted_shares.append(share)
|
mounted_shares.append(share)
|
||||||
except Exception as exc:
|
except Exception as exc:
|
||||||
LOG.error(_LE('Exception during mounting %s') % (exc,))
|
LOG.error(_LE('Exception during mounting %s'), exc)
|
||||||
|
|
||||||
self._mounted_shares = mounted_shares
|
self._mounted_shares = mounted_shares
|
||||||
|
|
||||||
LOG.debug('Available shares %s' % self._mounted_shares)
|
LOG.debug('Available shares %s', self._mounted_shares)
|
||||||
|
|
||||||
def create_cloned_volume(self, volume, src_vref):
|
def create_cloned_volume(self, volume, src_vref):
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
@ -257,9 +257,9 @@ class RemoteFSDriver(driver.VolumeDriver):
|
||||||
:param volume: volume reference
|
:param volume: volume reference
|
||||||
"""
|
"""
|
||||||
if not volume['provider_location']:
|
if not volume['provider_location']:
|
||||||
LOG.warn(_LW('Volume %s does not have '
|
LOG.warning(_LW('Volume %s does not have '
|
||||||
'provider_location specified, '
|
'provider_location specified, '
|
||||||
'skipping'), volume['name'])
|
'skipping'), volume['name'])
|
||||||
return
|
return
|
||||||
|
|
||||||
self._ensure_share_mounted(volume['provider_location'])
|
self._ensure_share_mounted(volume['provider_location'])
|
||||||
|
@ -330,13 +330,13 @@ class RemoteFSDriver(driver.VolumeDriver):
|
||||||
"""
|
"""
|
||||||
if self.configuration.nas_secure_file_permissions == 'true':
|
if self.configuration.nas_secure_file_permissions == 'true':
|
||||||
permissions = '660'
|
permissions = '660'
|
||||||
LOG.debug('File path %s is being set with permissions: %s' %
|
LOG.debug('File path %(path)s is being set with permissions: '
|
||||||
(path, permissions))
|
'%(permissions)s',
|
||||||
|
{'path': path, 'permissions': permissions})
|
||||||
else:
|
else:
|
||||||
permissions = 'ugo+rw'
|
permissions = 'ugo+rw'
|
||||||
parms = {'path': path, 'perm': permissions}
|
LOG.warning(_LW('%(path)s is being set with open permissions: '
|
||||||
LOG.warn(_LW('%(path)s is being set with open permissions: '
|
'%(perm)s'), {'path': path, 'perm': permissions})
|
||||||
'%(perm)s') % parms)
|
|
||||||
|
|
||||||
self._execute('chmod', permissions, path,
|
self._execute('chmod', permissions, path,
|
||||||
run_as_root=self._execute_as_root)
|
run_as_root=self._execute_as_root)
|
||||||
|
@ -424,7 +424,7 @@ class RemoteFSDriver(driver.VolumeDriver):
|
||||||
self.shares[share_address] = self.configuration.nas_mount_options
|
self.shares[share_address] = self.configuration.nas_mount_options
|
||||||
|
|
||||||
elif share_file is not None:
|
elif share_file is not None:
|
||||||
LOG.debug('Loading shares from %s.' % share_file)
|
LOG.debug('Loading shares from %s.', share_file)
|
||||||
|
|
||||||
for share in self._read_config_file(share_file):
|
for share in self._read_config_file(share_file):
|
||||||
# A configuration line may be either:
|
# A configuration line may be either:
|
||||||
|
@ -527,18 +527,18 @@ class RemoteFSDriver(driver.VolumeDriver):
|
||||||
doc_html = "http://docs.openstack.org/admin-guide-cloud/content" \
|
doc_html = "http://docs.openstack.org/admin-guide-cloud/content" \
|
||||||
"/nfs_backend.html"
|
"/nfs_backend.html"
|
||||||
self.configuration.nas_secure_file_operations = 'false'
|
self.configuration.nas_secure_file_operations = 'false'
|
||||||
LOG.warn(_LW("The NAS file operations will be run as root: allowing "
|
LOG.warning(_LW("The NAS file operations will be run as root: "
|
||||||
"root level access at the storage backend. This is "
|
"allowing root level access at the storage backend. "
|
||||||
"considered an insecure NAS environment. "
|
"This is considered an insecure NAS environment. "
|
||||||
"Please see %s for information on a secure NAS "
|
"Please see %s for information on a secure NAS "
|
||||||
"configuration.") %
|
"configuration."),
|
||||||
doc_html)
|
doc_html)
|
||||||
self.configuration.nas_secure_file_permissions = 'false'
|
self.configuration.nas_secure_file_permissions = 'false'
|
||||||
LOG.warn(_LW("The NAS file permissions mode will be 666 (allowing "
|
LOG.warning(_LW("The NAS file permissions mode will be 666 (allowing "
|
||||||
"other/world read & write access). This is considered an "
|
"other/world read & write access). This is considered "
|
||||||
"insecure NAS environment. Please see %s for information "
|
"an insecure NAS environment. Please see %s for "
|
||||||
"on a secure NFS configuration.") %
|
"information on a secure NFS configuration."),
|
||||||
doc_html)
|
doc_html)
|
||||||
|
|
||||||
def _determine_nas_security_option_setting(self, nas_option, mount_point,
|
def _determine_nas_security_option_setting(self, nas_option, mount_point,
|
||||||
is_new_cinder_install):
|
is_new_cinder_install):
|
||||||
|
@ -579,11 +579,11 @@ class RemoteFSDriver(driver.VolumeDriver):
|
||||||
self._execute('chmod', '640', file_path,
|
self._execute('chmod', '640', file_path,
|
||||||
run_as_root=False)
|
run_as_root=False)
|
||||||
LOG.info(_LI('New Cinder secure environment indicator'
|
LOG.info(_LI('New Cinder secure environment indicator'
|
||||||
' file created at path %s.') % file_path)
|
' file created at path %s.'), file_path)
|
||||||
except IOError as err:
|
except IOError as err:
|
||||||
LOG.error(_LE('Failed to created Cinder secure '
|
LOG.error(_LE('Failed to created Cinder secure '
|
||||||
'environment indicator file: %s') %
|
'environment indicator file: %s'),
|
||||||
format(err))
|
err)
|
||||||
else:
|
else:
|
||||||
# For existing installs, we default to 'false'. The
|
# For existing installs, we default to 'false'. The
|
||||||
# admin can always set the option at the driver config.
|
# admin can always set the option at the driver config.
|
||||||
|
@ -837,7 +837,7 @@ class RemoteFSSnapDriver(RemoteFSDriver):
|
||||||
return snap_info['active']
|
return snap_info['active']
|
||||||
|
|
||||||
def _create_cloned_volume(self, volume, src_vref):
|
def _create_cloned_volume(self, volume, src_vref):
|
||||||
LOG.info(_LI('Cloning volume %(src)s to volume %(dst)s') %
|
LOG.info(_LI('Cloning volume %(src)s to volume %(dst)s'),
|
||||||
{'src': src_vref['id'],
|
{'src': src_vref['id'],
|
||||||
'dst': volume['id']})
|
'dst': volume['id']})
|
||||||
|
|
||||||
|
@ -881,7 +881,7 @@ class RemoteFSSnapDriver(RemoteFSDriver):
|
||||||
if (snapshot_file == active_file):
|
if (snapshot_file == active_file):
|
||||||
return
|
return
|
||||||
|
|
||||||
LOG.info(_LI('Deleting stale snapshot: %s') % snapshot['id'])
|
LOG.info(_LI('Deleting stale snapshot: %s'), snapshot['id'])
|
||||||
self._delete(snapshot_path)
|
self._delete(snapshot_path)
|
||||||
del(snap_info[snapshot['id']])
|
del(snap_info[snapshot['id']])
|
||||||
self._write_info_file(info_path, snap_info)
|
self._write_info_file(info_path, snap_info)
|
||||||
|
@ -901,7 +901,7 @@ class RemoteFSSnapDriver(RemoteFSDriver):
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.debug('Deleting snapshot %s:' % snapshot['id'])
|
LOG.debug('Deleting snapshot %s:', snapshot['id'])
|
||||||
|
|
||||||
volume_status = snapshot['volume']['status']
|
volume_status = snapshot['volume']['status']
|
||||||
if volume_status not in ['available', 'in-use']:
|
if volume_status not in ['available', 'in-use']:
|
||||||
|
@ -922,11 +922,11 @@ class RemoteFSSnapDriver(RemoteFSDriver):
|
||||||
# (This happens, for example, if snapshot_create failed due to lack
|
# (This happens, for example, if snapshot_create failed due to lack
|
||||||
# of permission to write to the share.)
|
# of permission to write to the share.)
|
||||||
LOG.info(_LI('Snapshot record for %s is not present, allowing '
|
LOG.info(_LI('Snapshot record for %s is not present, allowing '
|
||||||
'snapshot_delete to proceed.') % snapshot['id'])
|
'snapshot_delete to proceed.'), snapshot['id'])
|
||||||
return
|
return
|
||||||
|
|
||||||
snapshot_file = snap_info[snapshot['id']]
|
snapshot_file = snap_info[snapshot['id']]
|
||||||
LOG.debug('snapshot_file for this snap is: %s' % snapshot_file)
|
LOG.debug('snapshot_file for this snap is: %s', snapshot_file)
|
||||||
snapshot_path = os.path.join(
|
snapshot_path = os.path.join(
|
||||||
self._local_volume_dir(snapshot['volume']),
|
self._local_volume_dir(snapshot['volume']),
|
||||||
snapshot_file)
|
snapshot_file)
|
||||||
|
@ -949,9 +949,8 @@ class RemoteFSSnapDriver(RemoteFSDriver):
|
||||||
if base_file is None:
|
if base_file is None:
|
||||||
# There should always be at least the original volume
|
# There should always be at least the original volume
|
||||||
# file as base.
|
# file as base.
|
||||||
msg = _('No backing file found for %s, allowing snapshot '
|
LOG.warning(_LW('No backing file found for %s, allowing '
|
||||||
'to be deleted.') % snapshot_path
|
'snapshot to be deleted.'), snapshot_path)
|
||||||
LOG.warn(msg)
|
|
||||||
|
|
||||||
# Snapshot may be stale, so just delete it and update the
|
# Snapshot may be stale, so just delete it and update the
|
||||||
# info file instead of blocking
|
# info file instead of blocking
|
||||||
|
@ -971,10 +970,8 @@ class RemoteFSSnapDriver(RemoteFSDriver):
|
||||||
break
|
break
|
||||||
if base_id is None:
|
if base_id is None:
|
||||||
# This means we are deleting the oldest snapshot
|
# This means we are deleting the oldest snapshot
|
||||||
msg = 'No %(base_id)s found for %(file)s' % {
|
LOG.debug('No %(base_id)s found for %(file)s',
|
||||||
'base_id': 'base_id',
|
{'base_id': 'base_id', 'file': snapshot_file})
|
||||||
'file': snapshot_file}
|
|
||||||
LOG.debug(msg)
|
|
||||||
|
|
||||||
online_delete_info = {
|
online_delete_info = {
|
||||||
'active_file': active_file,
|
'active_file': active_file,
|
||||||
|
@ -1042,8 +1039,7 @@ class RemoteFSSnapDriver(RemoteFSDriver):
|
||||||
higher_file),
|
higher_file),
|
||||||
None)
|
None)
|
||||||
if highest_file is None:
|
if highest_file is None:
|
||||||
msg = 'No file depends on %s.' % higher_file
|
LOG.debug('No file depends on %s.', higher_file)
|
||||||
LOG.debug(msg)
|
|
||||||
|
|
||||||
# Committing higher_file into snapshot_file
|
# Committing higher_file into snapshot_file
|
||||||
# And update pointer in highest_file
|
# And update pointer in highest_file
|
||||||
|
@ -1248,11 +1244,10 @@ class RemoteFSSnapDriver(RemoteFSDriver):
|
||||||
context,
|
context,
|
||||||
snapshot['volume_id'],
|
snapshot['volume_id'],
|
||||||
connection_info)
|
connection_info)
|
||||||
LOG.debug('nova call result: %s' % result)
|
LOG.debug('nova call result: %s', result)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE('Call to Nova to create snapshot failed'))
|
LOG.error(_LE('Call to Nova to create snapshot failed %s'), e)
|
||||||
LOG.exception(e)
|
raise
|
||||||
raise e
|
|
||||||
|
|
||||||
# Loop and wait for result
|
# Loop and wait for result
|
||||||
# Nova will call Cinderclient to update the status in the database
|
# Nova will call Cinderclient to update the status in the database
|
||||||
|
@ -1276,10 +1271,9 @@ class RemoteFSSnapDriver(RemoteFSDriver):
|
||||||
'while creating snapshot.')
|
'while creating snapshot.')
|
||||||
raise exception.RemoteFSException(msg)
|
raise exception.RemoteFSException(msg)
|
||||||
|
|
||||||
LOG.debug('Status of snapshot %(id)s is now %(status)s' % {
|
LOG.debug('Status of snapshot %(id)s is now %(status)s',
|
||||||
'id': snapshot['id'],
|
{'id': snapshot['id'],
|
||||||
'status': s['status']
|
'status': s['status']})
|
||||||
})
|
|
||||||
|
|
||||||
if 10 < seconds_elapsed <= 20:
|
if 10 < seconds_elapsed <= 20:
|
||||||
increment = 2
|
increment = 2
|
||||||
|
@ -1337,9 +1331,8 @@ class RemoteFSSnapDriver(RemoteFSDriver):
|
||||||
snapshot['id'],
|
snapshot['id'],
|
||||||
delete_info)
|
delete_info)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.error(_LE('Call to Nova delete snapshot failed'))
|
LOG.error(_LE('Call to Nova delete snapshot failed %s'), e)
|
||||||
LOG.exception(e)
|
raise
|
||||||
raise e
|
|
||||||
|
|
||||||
# Loop and wait for result
|
# Loop and wait for result
|
||||||
# Nova will call Cinderclient to update the status in the database
|
# Nova will call Cinderclient to update the status in the database
|
||||||
|
@ -1355,9 +1348,8 @@ class RemoteFSSnapDriver(RemoteFSDriver):
|
||||||
# Nova tasks completed successfully
|
# Nova tasks completed successfully
|
||||||
break
|
break
|
||||||
else:
|
else:
|
||||||
msg = ('status of snapshot %s is '
|
LOG.debug('status of snapshot %s is still "deleting"... '
|
||||||
'still "deleting"... waiting') % snapshot['id']
|
'waiting', snapshot['id'])
|
||||||
LOG.debug(msg)
|
|
||||||
time.sleep(increment)
|
time.sleep(increment)
|
||||||
seconds_elapsed += increment
|
seconds_elapsed += increment
|
||||||
else:
|
else:
|
||||||
|
|
|
@ -772,9 +772,8 @@ class HP3PARCommon(object):
|
||||||
break
|
break
|
||||||
|
|
||||||
if found_vlun is None:
|
if found_vlun is None:
|
||||||
msg = (_("3PAR vlun %(name)s not found on host %(host)s") %
|
LOG.info(_LI("3PAR vlun %(name)s not found on host %(host)s"),
|
||||||
{'name': volume_name, 'host': hostname})
|
{'name': volume_name, 'host': hostname})
|
||||||
LOG.info(msg)
|
|
||||||
return found_vlun
|
return found_vlun
|
||||||
|
|
||||||
def create_vlun(self, volume, host, nsp=None):
|
def create_vlun(self, volume, host, nsp=None):
|
||||||
|
@ -794,10 +793,8 @@ class HP3PARCommon(object):
|
||||||
if volume_name in vlun['volumeName']:
|
if volume_name in vlun['volumeName']:
|
||||||
break
|
break
|
||||||
else:
|
else:
|
||||||
msg = (
|
LOG.info(_LI("3PAR vlun for volume %(name)s not found on host "
|
||||||
_("3PAR vlun for volume %(name)s not found on host %(host)s") %
|
"%(host)s"), {'name': volume_name, 'host': hostname})
|
||||||
{'name': volume_name, 'host': hostname})
|
|
||||||
LOG.info(msg)
|
|
||||||
return
|
return
|
||||||
|
|
||||||
# VLUN Type of MATCHED_SET 4 requires the port to be provided
|
# VLUN Type of MATCHED_SET 4 requires the port to be provided
|
||||||
|
@ -833,13 +830,11 @@ class HP3PARCommon(object):
|
||||||
# for future needs (e.g. export volume to host set).
|
# for future needs (e.g. export volume to host set).
|
||||||
|
|
||||||
# The log info explains why the host was left alone.
|
# The log info explains why the host was left alone.
|
||||||
msg = (_("3PAR vlun for volume '%(name)s' was deleted, "
|
LOG.info(_LI("3PAR vlun for volume '%(name)s' was deleted, "
|
||||||
"but the host '%(host)s' was not deleted because: "
|
"but the host '%(host)s' was not deleted "
|
||||||
"%(reason)s") %
|
"because: %(reason)s"),
|
||||||
{'name': volume_name,
|
{'name': volume_name, 'host': hostname,
|
||||||
'host': hostname,
|
'reason': ex.get_description()})
|
||||||
'reason': ex.get_description()})
|
|
||||||
LOG.info(msg)
|
|
||||||
|
|
||||||
def _get_volume_type(self, type_id):
|
def _get_volume_type(self, type_id):
|
||||||
ctxt = context.get_admin_context()
|
ctxt = context.get_admin_context()
|
||||||
|
@ -1215,23 +1210,23 @@ class HP3PARCommon(object):
|
||||||
except exception.InvalidInput as ex:
|
except exception.InvalidInput as ex:
|
||||||
# Delete the volume if unable to add it to the volume set
|
# Delete the volume if unable to add it to the volume set
|
||||||
self.client.deleteVolume(volume_name)
|
self.client.deleteVolume(volume_name)
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise exception.CinderException(ex)
|
raise exception.CinderException(ex)
|
||||||
except hpexceptions.HTTPConflict:
|
except hpexceptions.HTTPConflict:
|
||||||
msg = _("Volume (%s) already exists on array") % volume_name
|
msg = _("Volume (%s) already exists on array") % volume_name
|
||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
raise exception.Duplicate(msg)
|
raise exception.Duplicate(msg)
|
||||||
except hpexceptions.HTTPBadRequest as ex:
|
except hpexceptions.HTTPBadRequest as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise exception.Invalid(ex.get_description())
|
raise exception.Invalid(ex.get_description())
|
||||||
except exception.InvalidInput as ex:
|
except exception.InvalidInput as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise ex
|
raise
|
||||||
except exception.CinderException as ex:
|
except exception.CinderException as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise ex
|
raise
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise exception.CinderException(ex)
|
raise exception.CinderException(ex)
|
||||||
|
|
||||||
return self._get_model_update(volume['host'], cpg)
|
return self._get_model_update(volume['host'], cpg)
|
||||||
|
@ -1315,7 +1310,7 @@ class HP3PARCommon(object):
|
||||||
except hpexceptions.HTTPNotFound:
|
except hpexceptions.HTTPNotFound:
|
||||||
raise exception.NotFound()
|
raise exception.NotFound()
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise exception.CinderException(ex)
|
raise exception.CinderException(ex)
|
||||||
|
|
||||||
def delete_volume(self, volume):
|
def delete_volume(self, volume):
|
||||||
|
@ -1336,11 +1331,11 @@ class HP3PARCommon(object):
|
||||||
# the volume once it stops the copy.
|
# the volume once it stops the copy.
|
||||||
self.client.stopOnlinePhysicalCopy(volume_name)
|
self.client.stopOnlinePhysicalCopy(volume_name)
|
||||||
else:
|
else:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise ex
|
raise
|
||||||
else:
|
else:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise ex
|
raise
|
||||||
except hpexceptions.HTTPConflict as ex:
|
except hpexceptions.HTTPConflict as ex:
|
||||||
if ex.get_code() == 34:
|
if ex.get_code() == 34:
|
||||||
# This is a special case which means the
|
# This is a special case which means the
|
||||||
|
@ -1370,23 +1365,23 @@ class HP3PARCommon(object):
|
||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
raise exception.VolumeIsBusy(message=msg)
|
raise exception.VolumeIsBusy(message=msg)
|
||||||
else:
|
else:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise exception.VolumeIsBusy(message=ex.get_description())
|
raise exception.VolumeIsBusy(message=ex.get_description())
|
||||||
|
|
||||||
except hpexceptions.HTTPNotFound as ex:
|
except hpexceptions.HTTPNotFound as ex:
|
||||||
# We'll let this act as if it worked
|
# We'll let this act as if it worked
|
||||||
# it helps clean up the cinder entries.
|
# it helps clean up the cinder entries.
|
||||||
msg = _("Delete volume id not found. Removing from cinder: "
|
LOG.warning(_LW("Delete volume id not found. Removing from "
|
||||||
"%(id)s Ex: %(msg)s") % {'id': volume['id'], 'msg': ex}
|
"cinder: %(id)s Ex: %(msg)s"),
|
||||||
LOG.warning(msg)
|
{'id': volume['id'], 'msg': ex})
|
||||||
except hpexceptions.HTTPForbidden as ex:
|
except hpexceptions.HTTPForbidden as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise exception.NotAuthorized(ex.get_description())
|
raise exception.NotAuthorized(ex.get_description())
|
||||||
except hpexceptions.HTTPConflict as ex:
|
except hpexceptions.HTTPConflict as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise exception.VolumeIsBusy(message=ex.get_description())
|
raise exception.VolumeIsBusy(message=ex.get_description())
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise exception.CinderException(ex)
|
raise exception.CinderException(ex)
|
||||||
|
|
||||||
def create_volume_from_snapshot(self, volume, snapshot):
|
def create_volume_from_snapshot(self, volume, snapshot):
|
||||||
|
@ -1461,16 +1456,16 @@ class HP3PARCommon(object):
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
# Delete the volume if unable to add it to the volume set
|
# Delete the volume if unable to add it to the volume set
|
||||||
self.client.deleteVolume(volume_name)
|
self.client.deleteVolume(volume_name)
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise exception.CinderException(ex)
|
raise exception.CinderException(ex)
|
||||||
except hpexceptions.HTTPForbidden as ex:
|
except hpexceptions.HTTPForbidden as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise exception.NotAuthorized()
|
raise exception.NotAuthorized()
|
||||||
except hpexceptions.HTTPNotFound as ex:
|
except hpexceptions.HTTPNotFound as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise exception.NotFound()
|
raise exception.NotFound()
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise exception.CinderException(ex)
|
raise exception.CinderException(ex)
|
||||||
return model_update
|
return model_update
|
||||||
|
|
||||||
|
@ -1508,10 +1503,10 @@ class HP3PARCommon(object):
|
||||||
|
|
||||||
self.client.createSnapshot(snap_name, vol_name, optional)
|
self.client.createSnapshot(snap_name, vol_name, optional)
|
||||||
except hpexceptions.HTTPForbidden as ex:
|
except hpexceptions.HTTPForbidden as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise exception.NotAuthorized()
|
raise exception.NotAuthorized()
|
||||||
except hpexceptions.HTTPNotFound as ex:
|
except hpexceptions.HTTPNotFound as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise exception.NotFound()
|
raise exception.NotFound()
|
||||||
|
|
||||||
def update_volume_key_value_pair(self, volume, key, value):
|
def update_volume_key_value_pair(self, volume, key, value):
|
||||||
|
@ -1549,7 +1544,8 @@ class HP3PARCommon(object):
|
||||||
volume_name = self._get_3par_vol_name(volume['id'])
|
volume_name = self._get_3par_vol_name(volume['id'])
|
||||||
self.client.removeVolumeMetaData(volume_name, key)
|
self.client.removeVolumeMetaData(volume_name, key)
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
msg = _('Failure in clear_volume_key_value_pair:%s') % ex
|
msg = _('Failure in clear_volume_key_value_pair: '
|
||||||
|
'%s') % six.text_type(ex)
|
||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
raise exception.VolumeBackendAPIException(data=msg)
|
raise exception.VolumeBackendAPIException(data=msg)
|
||||||
|
|
||||||
|
@ -1685,16 +1681,16 @@ class HP3PARCommon(object):
|
||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
raise exception.Duplicate(msg)
|
raise exception.Duplicate(msg)
|
||||||
except hpexceptions.HTTPBadRequest as ex:
|
except hpexceptions.HTTPBadRequest as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise exception.Invalid(ex.get_description())
|
raise exception.Invalid(ex.get_description())
|
||||||
except exception.InvalidInput as ex:
|
except exception.InvalidInput as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise ex
|
raise
|
||||||
except exception.CinderException as ex:
|
except exception.CinderException as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise ex
|
raise
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise exception.CinderException(ex)
|
raise exception.CinderException(ex)
|
||||||
|
|
||||||
return self._get_model_update(volume['host'], cpg)
|
return self._get_model_update(volume['host'], cpg)
|
||||||
|
@ -1707,16 +1703,16 @@ class HP3PARCommon(object):
|
||||||
snap_name = self._get_3par_snap_name(snapshot['id'])
|
snap_name = self._get_3par_snap_name(snapshot['id'])
|
||||||
self.client.deleteVolume(snap_name)
|
self.client.deleteVolume(snap_name)
|
||||||
except hpexceptions.HTTPForbidden as ex:
|
except hpexceptions.HTTPForbidden as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise exception.NotAuthorized()
|
raise exception.NotAuthorized()
|
||||||
except hpexceptions.HTTPNotFound as ex:
|
except hpexceptions.HTTPNotFound as ex:
|
||||||
# We'll let this act as if it worked
|
# We'll let this act as if it worked
|
||||||
# it helps clean up the cinder entries.
|
# it helps clean up the cinder entries.
|
||||||
msg = _("Delete Snapshot id not found. Removing from cinder: "
|
LOG.warning(_LW("Delete Snapshot id not found. Removing from "
|
||||||
"%(id)s Ex: %(msg)s") % {'id': snapshot['id'], 'msg': ex}
|
"cinder: %(id)s Ex: %(msg)s"),
|
||||||
LOG.warning(msg)
|
{'id': snapshot['id'], 'msg': ex})
|
||||||
except hpexceptions.HTTPConflict as ex:
|
except hpexceptions.HTTPConflict as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
raise exception.SnapshotIsBusy(snapshot_name=snapshot['id'])
|
raise exception.SnapshotIsBusy(snapshot_name=snapshot['id'])
|
||||||
|
|
||||||
def _get_3par_hostname_from_wwn_iqn(self, wwns, iqns):
|
def _get_3par_hostname_from_wwn_iqn(self, wwns, iqns):
|
||||||
|
@ -1762,12 +1758,12 @@ class HP3PARCommon(object):
|
||||||
# use the wwn to see if we can find the hostname
|
# use the wwn to see if we can find the hostname
|
||||||
hostname = self._get_3par_hostname_from_wwn_iqn(wwn, iqn)
|
hostname = self._get_3par_hostname_from_wwn_iqn(wwn, iqn)
|
||||||
# no 3par host, re-throw
|
# no 3par host, re-throw
|
||||||
if (hostname is None):
|
if hostname is None:
|
||||||
LOG.error(e)
|
LOG.error(_LE("Exception: %s"), e)
|
||||||
raise
|
raise
|
||||||
else:
|
else:
|
||||||
# not a 'host does not exist' HTTPNotFound exception, re-throw
|
# not a 'host does not exist' HTTPNotFound exception, re-throw
|
||||||
LOG.error(e)
|
LOG.error(_LE("Exception: %s"), e)
|
||||||
raise
|
raise
|
||||||
|
|
||||||
# try again with name retrieved from 3par
|
# try again with name retrieved from 3par
|
||||||
|
@ -1822,17 +1818,17 @@ class HP3PARCommon(object):
|
||||||
if new_tpvv:
|
if new_tpvv:
|
||||||
cop = self.CONVERT_TO_THIN
|
cop = self.CONVERT_TO_THIN
|
||||||
LOG.info(_LI("Converting %(volume_name)s to thin provisioning "
|
LOG.info(_LI("Converting %(volume_name)s to thin provisioning "
|
||||||
"with userCPG=%(new_cpg)s") %
|
"with userCPG=%(new_cpg)s"),
|
||||||
{'volume_name': volume_name, 'new_cpg': new_cpg})
|
{'volume_name': volume_name, 'new_cpg': new_cpg})
|
||||||
elif new_tdvv:
|
elif new_tdvv:
|
||||||
cop = self.CONVERT_TO_DEDUP
|
cop = self.CONVERT_TO_DEDUP
|
||||||
LOG.info(_LI("Converting %(volume_name)s to thin dedup "
|
LOG.info(_LI("Converting %(volume_name)s to thin dedup "
|
||||||
"provisioning with userCPG=%(new_cpg)s") %
|
"provisioning with userCPG=%(new_cpg)s"),
|
||||||
{'volume_name': volume_name, 'new_cpg': new_cpg})
|
{'volume_name': volume_name, 'new_cpg': new_cpg})
|
||||||
else:
|
else:
|
||||||
cop = self.CONVERT_TO_FULL
|
cop = self.CONVERT_TO_FULL
|
||||||
LOG.info(_LI("Converting %(volume_name)s to full provisioning "
|
LOG.info(_LI("Converting %(volume_name)s to full provisioning "
|
||||||
"with userCPG=%(new_cpg)s") %
|
"with userCPG=%(new_cpg)s"),
|
||||||
{'volume_name': volume_name, 'new_cpg': new_cpg})
|
{'volume_name': volume_name, 'new_cpg': new_cpg})
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
@ -1849,7 +1845,7 @@ class HP3PARCommon(object):
|
||||||
# info and then raise.
|
# info and then raise.
|
||||||
LOG.info(_LI("tunevv failed because the volume '%s' "
|
LOG.info(_LI("tunevv failed because the volume '%s' "
|
||||||
"has snapshots."), volume_name)
|
"has snapshots."), volume_name)
|
||||||
raise ex
|
raise
|
||||||
|
|
||||||
task_id = body['taskid']
|
task_id = body['taskid']
|
||||||
status = self.TaskWaiter(self.client, task_id).wait_for_task()
|
status = self.TaskWaiter(self.client, task_id).wait_for_task()
|
||||||
|
@ -2241,7 +2237,7 @@ class ModifySpecsTask(flow_utils.CinderTask):
|
||||||
if ex.get_code() != 102:
|
if ex.get_code() != 102:
|
||||||
LOG.error(_LE("Unexpected error when retype() tried to "
|
LOG.error(_LE("Unexpected error when retype() tried to "
|
||||||
"deleteVolumeSet(%s)"), vvs_name)
|
"deleteVolumeSet(%s)"), vvs_name)
|
||||||
raise ex
|
raise
|
||||||
|
|
||||||
if new_vvs or new_qos or new_flash_cache:
|
if new_vvs or new_qos or new_flash_cache:
|
||||||
common._add_volume_to_volume_set(
|
common._add_volume_to_volume_set(
|
||||||
|
|
|
@ -467,7 +467,7 @@ class HP3PARFCDriver(cinder.volume.driver.FibreChannelDriver):
|
||||||
protocol = host['capabilities']['storage_protocol']
|
protocol = host['capabilities']['storage_protocol']
|
||||||
if protocol != 'FC':
|
if protocol != 'FC':
|
||||||
LOG.debug("3PAR FC driver cannot migrate in-use volume "
|
LOG.debug("3PAR FC driver cannot migrate in-use volume "
|
||||||
"to a host with storage_protocol=%s." % protocol)
|
"to a host with storage_protocol=%s.", protocol)
|
||||||
return False, None
|
return False, None
|
||||||
|
|
||||||
common = self._login()
|
common = self._login()
|
||||||
|
|
|
@ -159,8 +159,7 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
elif len(ip) == 2:
|
elif len(ip) == 2:
|
||||||
temp_iscsi_ip[ip[0]] = {'ip_port': ip[1]}
|
temp_iscsi_ip[ip[0]] = {'ip_port': ip[1]}
|
||||||
else:
|
else:
|
||||||
msg = _("Invalid IP address format '%s'") % ip_addr
|
LOG.warning(_LW("Invalid IP address format '%s'"), ip_addr)
|
||||||
LOG.warn(msg)
|
|
||||||
|
|
||||||
# add the single value iscsi_ip_address option to the IP dictionary.
|
# add the single value iscsi_ip_address option to the IP dictionary.
|
||||||
# This way we can see if it's a valid iSCSI IP. If it's not valid,
|
# This way we can see if it's a valid iSCSI IP. If it's not valid,
|
||||||
|
@ -193,15 +192,15 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
|
|
||||||
# lets see if there are invalid iSCSI IPs left in the temp dict
|
# lets see if there are invalid iSCSI IPs left in the temp dict
|
||||||
if len(temp_iscsi_ip) > 0:
|
if len(temp_iscsi_ip) > 0:
|
||||||
msg = (_("Found invalid iSCSI IP address(s) in configuration "
|
LOG.warning(_LW("Found invalid iSCSI IP address(s) in "
|
||||||
"option(s) hp3par_iscsi_ips or iscsi_ip_address '%s.'") %
|
"configuration option(s) hp3par_iscsi_ips or "
|
||||||
(", ".join(temp_iscsi_ip)))
|
"iscsi_ip_address '%s.'"),
|
||||||
LOG.warn(msg)
|
(", ".join(temp_iscsi_ip)))
|
||||||
|
|
||||||
if not len(self.iscsi_ips) > 0:
|
if not len(self.iscsi_ips) > 0:
|
||||||
msg = _('At least one valid iSCSI IP address must be set.')
|
msg = _('At least one valid iSCSI IP address must be set.')
|
||||||
LOG.error(msg)
|
LOG.error(msg)
|
||||||
raise exception.InvalidInput(reason=(msg))
|
raise exception.InvalidInput(reason=msg)
|
||||||
|
|
||||||
def check_for_setup_error(self):
|
def check_for_setup_error(self):
|
||||||
"""Setup errors are already checked for in do_setup so return pass."""
|
"""Setup errors are already checked for in do_setup so return pass."""
|
||||||
|
@ -312,9 +311,8 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
vlun = common.create_vlun(volume, host, least_used_nsp)
|
vlun = common.create_vlun(volume, host, least_used_nsp)
|
||||||
|
|
||||||
if least_used_nsp is None:
|
if least_used_nsp is None:
|
||||||
msg = _("Least busy iSCSI port not found, "
|
LOG.warning(_LW("Least busy iSCSI port not found, "
|
||||||
"using first iSCSI port in list.")
|
"using first iSCSI port in list."))
|
||||||
LOG.warn(msg)
|
|
||||||
iscsi_ip = self.iscsi_ips.keys()[0]
|
iscsi_ip = self.iscsi_ips.keys()[0]
|
||||||
else:
|
else:
|
||||||
iscsi_ip = self._get_ip_using_nsp(least_used_nsp)
|
iscsi_ip = self._get_ip_using_nsp(least_used_nsp)
|
||||||
|
@ -466,10 +464,9 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
host = common._get_3par_host(hostname)
|
host = common._get_3par_host(hostname)
|
||||||
elif (not host['initiatorChapEnabled'] and
|
elif (not host['initiatorChapEnabled'] and
|
||||||
self.configuration.hp3par_iscsi_chap_enabled):
|
self.configuration.hp3par_iscsi_chap_enabled):
|
||||||
LOG.warn(_LW("Host exists without CHAP credentials set "
|
LOG.warning(_LW("Host exists without CHAP credentials set and "
|
||||||
"and has iSCSI attachments but CHAP is "
|
"has iSCSI attachments but CHAP is enabled. "
|
||||||
"enabled. Updating host with new CHAP "
|
"Updating host with new CHAP credentials."))
|
||||||
"credentials."))
|
|
||||||
self._set_3par_chaps(
|
self._set_3par_chaps(
|
||||||
common,
|
common,
|
||||||
hostname,
|
hostname,
|
||||||
|
@ -499,11 +496,12 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
host_info = common.client.getHost(chap_username)
|
host_info = common.client.getHost(chap_username)
|
||||||
|
|
||||||
if not host_info['initiatorChapEnabled']:
|
if not host_info['initiatorChapEnabled']:
|
||||||
LOG.warn(_LW("Host has no CHAP key, but CHAP is enabled."))
|
LOG.warning(_LW("Host has no CHAP key, but CHAP is enabled."))
|
||||||
|
|
||||||
except hpexceptions.HTTPNotFound:
|
except hpexceptions.HTTPNotFound:
|
||||||
chap_password = volume_utils.generate_password(16)
|
chap_password = volume_utils.generate_password(16)
|
||||||
LOG.warn(_LW("No host or VLUNs exist. Generating new CHAP key."))
|
LOG.warning(_LW("No host or VLUNs exist. Generating new "
|
||||||
|
"CHAP key."))
|
||||||
else:
|
else:
|
||||||
# Get a list of all iSCSI VLUNs and see if there is already a CHAP
|
# Get a list of all iSCSI VLUNs and see if there is already a CHAP
|
||||||
# key assigned to one of them. Use that CHAP key if present,
|
# key assigned to one of them. Use that CHAP key if present,
|
||||||
|
@ -528,15 +526,15 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
break
|
break
|
||||||
except hpexceptions.HTTPNotFound:
|
except hpexceptions.HTTPNotFound:
|
||||||
LOG.debug("The VLUN %s is missing CHAP credentials "
|
LOG.debug("The VLUN %s is missing CHAP credentials "
|
||||||
"but CHAP is enabled. Skipping." %
|
"but CHAP is enabled. Skipping.",
|
||||||
vlun['remoteName'])
|
vlun['remoteName'])
|
||||||
else:
|
else:
|
||||||
LOG.warn(_LW("Non-iSCSI VLUN detected."))
|
LOG.warning(_LW("Non-iSCSI VLUN detected."))
|
||||||
|
|
||||||
if not chap_exists:
|
if not chap_exists:
|
||||||
chap_password = volume_utils.generate_password(16)
|
chap_password = volume_utils.generate_password(16)
|
||||||
LOG.warn(_LW("No VLUN contained CHAP credentials. "
|
LOG.warning(_LW("No VLUN contained CHAP credentials. "
|
||||||
"Generating new CHAP key."))
|
"Generating new CHAP key."))
|
||||||
|
|
||||||
# Add CHAP credentials to the volume metadata
|
# Add CHAP credentials to the volume metadata
|
||||||
vol_name = common._get_3par_vol_name(volume['id'])
|
vol_name = common._get_3par_vol_name(volume['id'])
|
||||||
|
@ -718,7 +716,7 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
|
||||||
protocol = host['capabilities']['storage_protocol']
|
protocol = host['capabilities']['storage_protocol']
|
||||||
if protocol != 'iSCSI':
|
if protocol != 'iSCSI':
|
||||||
LOG.debug("3PAR ISCSI driver cannot migrate in-use volume "
|
LOG.debug("3PAR ISCSI driver cannot migrate in-use volume "
|
||||||
"to a host with storage_protocol=%s." % protocol)
|
"to a host with storage_protocol=%s.", protocol)
|
||||||
return False, None
|
return False, None
|
||||||
|
|
||||||
common = self._login()
|
common = self._login()
|
||||||
|
|
|
@ -203,7 +203,7 @@ class HPLeftHandCLIQProxy(san.SanISCSIDriver):
|
||||||
for k, v in status_node.attrib.items():
|
for k, v in status_node.attrib.items():
|
||||||
volume_attributes["permission." + k] = v
|
volume_attributes["permission." + k] = v
|
||||||
|
|
||||||
LOG.debug("Volume info: %(volume_name)s => %(volume_attributes)s" %
|
LOG.debug("Volume info: %(volume_name)s => %(volume_attributes)s",
|
||||||
{'volume_name': volume_name,
|
{'volume_name': volume_name,
|
||||||
'volume_attributes': volume_attributes})
|
'volume_attributes': volume_attributes})
|
||||||
return volume_attributes
|
return volume_attributes
|
||||||
|
@ -259,7 +259,7 @@ class HPLeftHandCLIQProxy(san.SanISCSIDriver):
|
||||||
for k, v in status_node.attrib.items():
|
for k, v in status_node.attrib.items():
|
||||||
snapshot_attributes["permission." + k] = v
|
snapshot_attributes["permission." + k] = v
|
||||||
|
|
||||||
LOG.debug("Snapshot info: %(name)s => %(attributes)s" %
|
LOG.debug("Snapshot info: %(name)s => %(attributes)s",
|
||||||
{'name': snapshot_name, 'attributes': snapshot_attributes})
|
{'name': snapshot_name, 'attributes': snapshot_attributes})
|
||||||
return snapshot_attributes
|
return snapshot_attributes
|
||||||
|
|
||||||
|
|
|
@ -35,7 +35,7 @@ LeftHand array.
|
||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _LE, _LI
|
from cinder.i18n import _, _LI
|
||||||
from cinder.volume import driver
|
from cinder.volume import driver
|
||||||
from cinder.volume.drivers.san.hp import hp_lefthand_cliq_proxy as cliq_proxy
|
from cinder.volume.drivers.san.hp import hp_lefthand_cliq_proxy as cliq_proxy
|
||||||
from cinder.volume.drivers.san.hp import hp_lefthand_rest_proxy as rest_proxy
|
from cinder.volume.drivers.san.hp import hp_lefthand_rest_proxy as rest_proxy
|
||||||
|
@ -81,7 +81,7 @@ class HPLeftHandISCSIDriver(driver.VolumeDriver):
|
||||||
self.proxy = self._create_proxy(*self.args, **self.kwargs)
|
self.proxy = self._create_proxy(*self.args, **self.kwargs)
|
||||||
|
|
||||||
LOG.info(_LI("HPLeftHand driver %(driver_ver)s, "
|
LOG.info(_LI("HPLeftHand driver %(driver_ver)s, "
|
||||||
"proxy %(proxy_ver)s") % {
|
"proxy %(proxy_ver)s"), {
|
||||||
"driver_ver": self.VERSION,
|
"driver_ver": self.VERSION,
|
||||||
"proxy_ver": self.proxy.get_version_string()})
|
"proxy_ver": self.proxy.get_version_string()})
|
||||||
|
|
||||||
|
@ -91,10 +91,10 @@ class HPLeftHandISCSIDriver(driver.VolumeDriver):
|
||||||
# Check minimum client version for REST proxy
|
# Check minimum client version for REST proxy
|
||||||
client_version = rest_proxy.hplefthandclient.version
|
client_version = rest_proxy.hplefthandclient.version
|
||||||
|
|
||||||
if (client_version < MIN_CLIENT_VERSION):
|
if client_version < MIN_CLIENT_VERSION:
|
||||||
ex_msg = (_LE("Invalid hplefthandclient version found ("
|
ex_msg = (_("Invalid hplefthandclient version found ("
|
||||||
"%(found)s). Version %(minimum)s or greater "
|
"%(found)s). Version %(minimum)s or greater "
|
||||||
"required.")
|
"required.")
|
||||||
% {'found': client_version,
|
% {'found': client_version,
|
||||||
'minimum': MIN_CLIENT_VERSION})
|
'minimum': MIN_CLIENT_VERSION})
|
||||||
LOG.error(ex_msg)
|
LOG.error(ex_msg)
|
||||||
|
|
|
@ -457,7 +457,7 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
|
||||||
client_options[client_key] = client_value
|
client_options[client_key] = client_value
|
||||||
except KeyError:
|
except KeyError:
|
||||||
LOG.error(_LE("'%(value)s' is an invalid value "
|
LOG.error(_LE("'%(value)s' is an invalid value "
|
||||||
"for extra spec '%(key)s'") %
|
"for extra spec '%(key)s'"),
|
||||||
{'value': value, 'key': key})
|
{'value': value, 'key': key})
|
||||||
return client_options
|
return client_options
|
||||||
|
|
||||||
|
@ -477,10 +477,10 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
|
||||||
chap_secret = server_info['chapTargetSecret']
|
chap_secret = server_info['chapTargetSecret']
|
||||||
if not chap_enabled and chap_secret:
|
if not chap_enabled and chap_secret:
|
||||||
LOG.warning(_LW('CHAP secret exists for host %s but CHAP is '
|
LOG.warning(_LW('CHAP secret exists for host %s but CHAP is '
|
||||||
'disabled') % connector['host'])
|
'disabled'), connector['host'])
|
||||||
if chap_enabled and chap_secret is None:
|
if chap_enabled and chap_secret is None:
|
||||||
LOG.warning(_LW('CHAP is enabled, but server secret not '
|
LOG.warning(_LW('CHAP is enabled, but server secret not '
|
||||||
'configured on server %s') % connector['host'])
|
'configured on server %s'), connector['host'])
|
||||||
return server_info
|
return server_info
|
||||||
except hpexceptions.HTTPNotFound:
|
except hpexceptions.HTTPNotFound:
|
||||||
# server does not exist, so create one
|
# server does not exist, so create one
|
||||||
|
@ -522,10 +522,10 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
|
||||||
dictionary of its reported capabilities.
|
dictionary of its reported capabilities.
|
||||||
"""
|
"""
|
||||||
LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
|
LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
|
||||||
'diff=%(diff)s, host=%(host)s' % {'id': volume['id'],
|
'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
|
||||||
'new_type': new_type,
|
'new_type': new_type,
|
||||||
'diff': diff,
|
'diff': diff,
|
||||||
'host': host})
|
'host': host})
|
||||||
client = self._login()
|
client = self._login()
|
||||||
try:
|
try:
|
||||||
volume_info = client.getVolumeByName(volume['name'])
|
volume_info = client.getVolumeByName(volume['name'])
|
||||||
|
@ -536,7 +536,7 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
|
||||||
new_extra_specs,
|
new_extra_specs,
|
||||||
extra_specs_key_map.keys())
|
extra_specs_key_map.keys())
|
||||||
|
|
||||||
LOG.debug('LH specs=%(specs)s' % {'specs': lh_extra_specs})
|
LOG.debug('LH specs=%(specs)s', {'specs': lh_extra_specs})
|
||||||
|
|
||||||
# only set the ones that have changed
|
# only set the ones that have changed
|
||||||
changed_extra_specs = {}
|
changed_extra_specs = {}
|
||||||
|
@ -553,7 +553,7 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
|
||||||
except hpexceptions.HTTPNotFound:
|
except hpexceptions.HTTPNotFound:
|
||||||
raise exception.VolumeNotFound(volume_id=volume['id'])
|
raise exception.VolumeNotFound(volume_id=volume['id'])
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
LOG.warning("%s" % ex)
|
LOG.warning(_LW("%s"), ex)
|
||||||
finally:
|
finally:
|
||||||
self._logout(client)
|
self._logout(client)
|
||||||
|
|
||||||
|
@ -581,7 +581,7 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
|
||||||
dictionary of its reported capabilities.
|
dictionary of its reported capabilities.
|
||||||
"""
|
"""
|
||||||
LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s, '
|
LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s, '
|
||||||
'cluster=%(cluster)s' % {
|
'cluster=%(cluster)s', {
|
||||||
'id': volume['id'],
|
'id': volume['id'],
|
||||||
'host': host,
|
'host': host,
|
||||||
'cluster': self.configuration.hplefthand_clustername})
|
'cluster': self.configuration.hplefthand_clustername})
|
||||||
|
@ -596,24 +596,24 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
|
||||||
try:
|
try:
|
||||||
# get the cluster info, if it exists and compare
|
# get the cluster info, if it exists and compare
|
||||||
cluster_info = client.getClusterByName(cluster)
|
cluster_info = client.getClusterByName(cluster)
|
||||||
LOG.debug('Cluster info: %s' % cluster_info)
|
LOG.debug('Cluster info: %s', cluster_info)
|
||||||
virtual_ips = cluster_info['virtualIPAddresses']
|
virtual_ips = cluster_info['virtualIPAddresses']
|
||||||
|
|
||||||
if driver != self.__class__.__name__:
|
if driver != self.__class__.__name__:
|
||||||
LOG.info(_LI("Cannot provide backend assisted migration for "
|
LOG.info(_LI("Cannot provide backend assisted migration for "
|
||||||
"volume: %s because volume is from a different "
|
"volume: %s because volume is from a different "
|
||||||
"backend.") % volume['name'])
|
"backend."), volume['name'])
|
||||||
return false_ret
|
return false_ret
|
||||||
if vip != virtual_ips[0]['ipV4Address']:
|
if vip != virtual_ips[0]['ipV4Address']:
|
||||||
LOG.info(_LI("Cannot provide backend assisted migration for "
|
LOG.info(_LI("Cannot provide backend assisted migration for "
|
||||||
"volume: %s because cluster exists in different "
|
"volume: %s because cluster exists in different "
|
||||||
"management group.") % volume['name'])
|
"management group."), volume['name'])
|
||||||
return false_ret
|
return false_ret
|
||||||
|
|
||||||
except hpexceptions.HTTPNotFound:
|
except hpexceptions.HTTPNotFound:
|
||||||
LOG.info(_LI("Cannot provide backend assisted migration for "
|
LOG.info(_LI("Cannot provide backend assisted migration for "
|
||||||
"volume: %s because cluster exists in different "
|
"volume: %s because cluster exists in different "
|
||||||
"management group.") % volume['name'])
|
"management group."), volume['name'])
|
||||||
return false_ret
|
return false_ret
|
||||||
finally:
|
finally:
|
||||||
self._logout(client)
|
self._logout(client)
|
||||||
|
@ -621,24 +621,24 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
|
||||||
client = self._login()
|
client = self._login()
|
||||||
try:
|
try:
|
||||||
volume_info = client.getVolumeByName(volume['name'])
|
volume_info = client.getVolumeByName(volume['name'])
|
||||||
LOG.debug('Volume info: %s' % volume_info)
|
LOG.debug('Volume info: %s', volume_info)
|
||||||
|
|
||||||
# can't migrate if server is attached
|
# can't migrate if server is attached
|
||||||
if volume_info['iscsiSessions'] is not None:
|
if volume_info['iscsiSessions'] is not None:
|
||||||
LOG.info(_LI("Cannot provide backend assisted migration "
|
LOG.info(_LI("Cannot provide backend assisted migration "
|
||||||
"for volume: %s because the volume has been "
|
"for volume: %s because the volume has been "
|
||||||
"exported.") % volume['name'])
|
"exported."), volume['name'])
|
||||||
return false_ret
|
return false_ret
|
||||||
|
|
||||||
# can't migrate if volume has snapshots
|
# can't migrate if volume has snapshots
|
||||||
snap_info = client.getVolume(
|
snap_info = client.getVolume(
|
||||||
volume_info['id'],
|
volume_info['id'],
|
||||||
'fields=snapshots,snapshots[resource[members[name]]]')
|
'fields=snapshots,snapshots[resource[members[name]]]')
|
||||||
LOG.debug('Snapshot info: %s' % snap_info)
|
LOG.debug('Snapshot info: %s', snap_info)
|
||||||
if snap_info['snapshots']['resource'] is not None:
|
if snap_info['snapshots']['resource'] is not None:
|
||||||
LOG.info(_LI("Cannot provide backend assisted migration "
|
LOG.info(_LI("Cannot provide backend assisted migration "
|
||||||
"for volume: %s because the volume has "
|
"for volume: %s because the volume has "
|
||||||
"snapshots.") % volume['name'])
|
"snapshots."), volume['name'])
|
||||||
return false_ret
|
return false_ret
|
||||||
|
|
||||||
options = {'clusterName': cluster}
|
options = {'clusterName': cluster}
|
||||||
|
@ -646,10 +646,10 @@ class HPLeftHandRESTProxy(driver.ISCSIDriver):
|
||||||
except hpexceptions.HTTPNotFound:
|
except hpexceptions.HTTPNotFound:
|
||||||
LOG.info(_LI("Cannot provide backend assisted migration for "
|
LOG.info(_LI("Cannot provide backend assisted migration for "
|
||||||
"volume: %s because volume does not exist in this "
|
"volume: %s because volume does not exist in this "
|
||||||
"management group.") % volume['name'])
|
"management group."), volume['name'])
|
||||||
return false_ret
|
return false_ret
|
||||||
except hpexceptions.HTTPServerError as ex:
|
except hpexceptions.HTTPServerError as ex:
|
||||||
LOG.error(ex)
|
LOG.error(_LE("Exception: %s"), ex)
|
||||||
return false_ret
|
return false_ret
|
||||||
finally:
|
finally:
|
||||||
self._logout(client)
|
self._logout(client)
|
||||||
|
|
|
@ -147,7 +147,7 @@ class SanDriver(driver.VolumeDriver):
|
||||||
|
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE("Error running SSH command: %s") % command)
|
LOG.error(_LE("Error running SSH command: %s"), command)
|
||||||
|
|
||||||
def ensure_export(self, context, volume):
|
def ensure_export(self, context, volume):
|
||||||
"""Synchronously recreates an export for a logical volume."""
|
"""Synchronously recreates an export for a logical volume."""
|
||||||
|
|
|
@ -72,7 +72,7 @@ class ScalityDriver(driver.VolumeDriver):
|
||||||
config = self.configuration.scality_sofs_config
|
config = self.configuration.scality_sofs_config
|
||||||
if not config:
|
if not config:
|
||||||
msg = _("Value required for 'scality_sofs_config'")
|
msg = _("Value required for 'scality_sofs_config'")
|
||||||
LOG.warn(msg)
|
LOG.warning(msg)
|
||||||
raise exception.VolumeBackendAPIException(data=msg)
|
raise exception.VolumeBackendAPIException(data=msg)
|
||||||
|
|
||||||
# config can be a file path or a URL, check it
|
# config can be a file path or a URL, check it
|
||||||
|
@ -83,13 +83,13 @@ class ScalityDriver(driver.VolumeDriver):
|
||||||
urllib2.urlopen(config, timeout=5).close()
|
urllib2.urlopen(config, timeout=5).close()
|
||||||
except urllib2.URLError as e:
|
except urllib2.URLError as e:
|
||||||
msg = _("Cannot access 'scality_sofs_config': %s") % e
|
msg = _("Cannot access 'scality_sofs_config': %s") % e
|
||||||
LOG.warn(msg)
|
LOG.warning(msg)
|
||||||
raise exception.VolumeBackendAPIException(data=msg)
|
raise exception.VolumeBackendAPIException(data=msg)
|
||||||
|
|
||||||
# mount.sofs must be installed
|
# mount.sofs must be installed
|
||||||
if not os.access('/sbin/mount.sofs', os.X_OK):
|
if not os.access('/sbin/mount.sofs', os.X_OK):
|
||||||
msg = _("Cannot execute /sbin/mount.sofs")
|
msg = _("Cannot execute /sbin/mount.sofs")
|
||||||
LOG.warn(msg)
|
LOG.warning(msg)
|
||||||
raise exception.VolumeBackendAPIException(data=msg)
|
raise exception.VolumeBackendAPIException(data=msg)
|
||||||
|
|
||||||
@lockutils.synchronized('mount-sofs', 'cinder-sofs', external=True)
|
@lockutils.synchronized('mount-sofs', 'cinder-sofs', external=True)
|
||||||
|
@ -104,7 +104,7 @@ class ScalityDriver(driver.VolumeDriver):
|
||||||
run_as_root=True)
|
run_as_root=True)
|
||||||
if not os.path.isdir(sysdir):
|
if not os.path.isdir(sysdir):
|
||||||
msg = _("Cannot mount Scality SOFS, check syslog for errors")
|
msg = _("Cannot mount Scality SOFS, check syslog for errors")
|
||||||
LOG.warn(msg)
|
LOG.warning(msg)
|
||||||
raise exception.VolumeBackendAPIException(data=msg)
|
raise exception.VolumeBackendAPIException(data=msg)
|
||||||
|
|
||||||
def _size_bytes(self, size_in_g):
|
def _size_bytes(self, size_in_g):
|
||||||
|
@ -135,7 +135,7 @@ class ScalityDriver(driver.VolumeDriver):
|
||||||
self.configuration.scality_sofs_volume_dir)
|
self.configuration.scality_sofs_volume_dir)
|
||||||
if not os.path.isdir(voldir):
|
if not os.path.isdir(voldir):
|
||||||
msg = _("Cannot find volume dir for Scality SOFS at '%s'") % voldir
|
msg = _("Cannot find volume dir for Scality SOFS at '%s'") % voldir
|
||||||
LOG.warn(msg)
|
LOG.warning(msg)
|
||||||
raise exception.VolumeBackendAPIException(data=msg)
|
raise exception.VolumeBackendAPIException(data=msg)
|
||||||
|
|
||||||
def create_volume(self, volume):
|
def create_volume(self, volume):
|
||||||
|
@ -284,7 +284,7 @@ class ScalityDriver(driver.VolumeDriver):
|
||||||
"""Create a new backup from an existing volume."""
|
"""Create a new backup from an existing volume."""
|
||||||
volume = self.db.volume_get(context, backup['volume_id'])
|
volume = self.db.volume_get(context, backup['volume_id'])
|
||||||
volume_local_path = self.local_path(volume)
|
volume_local_path = self.local_path(volume)
|
||||||
LOG.info(_LI('Begin backup of volume %s.') % volume['name'])
|
LOG.info(_LI('Begin backup of volume %s.'), volume['name'])
|
||||||
|
|
||||||
qemu_img_info = image_utils.qemu_img_info(volume_local_path)
|
qemu_img_info = image_utils.qemu_img_info(volume_local_path)
|
||||||
if qemu_img_info.file_format != 'raw':
|
if qemu_img_info.file_format != 'raw':
|
||||||
|
@ -303,7 +303,7 @@ class ScalityDriver(driver.VolumeDriver):
|
||||||
|
|
||||||
def restore_backup(self, context, backup, volume, backup_service):
|
def restore_backup(self, context, backup, volume, backup_service):
|
||||||
"""Restore an existing backup to a new or existing volume."""
|
"""Restore an existing backup to a new or existing volume."""
|
||||||
LOG.info(_LI('Restoring backup %(backup)s to volume %(volume)s.') %
|
LOG.info(_LI('Restoring backup %(backup)s to volume %(volume)s.'),
|
||||||
{'backup': backup['id'], 'volume': volume['name']})
|
{'backup': backup['id'], 'volume': volume['name']})
|
||||||
volume_local_path = self.local_path(volume)
|
volume_local_path = self.local_path(volume)
|
||||||
with utils.temporary_chown(volume_local_path):
|
with utils.temporary_chown(volume_local_path):
|
||||||
|
|
|
@ -228,8 +228,8 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
def delete_volume(self, volume):
|
def delete_volume(self, volume):
|
||||||
"""Deletes a logical volume."""
|
"""Deletes a logical volume."""
|
||||||
if not volume['provider_location']:
|
if not volume['provider_location']:
|
||||||
LOG.warn(_LW('Volume %s does not have provider_location '
|
LOG.warning(_LW('Volume %s does not have provider_location '
|
||||||
'specified, skipping.'), volume['name'])
|
'specified, skipping.'), volume['name'])
|
||||||
return
|
return
|
||||||
|
|
||||||
self._ensure_share_mounted(volume['provider_location'])
|
self._ensure_share_mounted(volume['provider_location'])
|
||||||
|
@ -239,7 +239,7 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
if os.path.exists(mounted_path):
|
if os.path.exists(mounted_path):
|
||||||
self._delete(mounted_path)
|
self._delete(mounted_path)
|
||||||
else:
|
else:
|
||||||
LOG.debug("Skipping deletion of volume %s as it does not exist." %
|
LOG.debug("Skipping deletion of volume %s as it does not exist.",
|
||||||
mounted_path)
|
mounted_path)
|
||||||
|
|
||||||
info_path = self._local_path_volume_info(volume)
|
info_path = self._local_path_volume_info(volume)
|
||||||
|
@ -264,7 +264,7 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
volume_path = self.local_path(volume)
|
volume_path = self.local_path(volume)
|
||||||
volume_size = volume['size']
|
volume_size = volume['size']
|
||||||
|
|
||||||
LOG.debug("Creating new volume at %s." % volume_path)
|
LOG.debug("Creating new volume at %s.", volume_path)
|
||||||
|
|
||||||
if os.path.exists(volume_path):
|
if os.path.exists(volume_path):
|
||||||
msg = _('File already exists at %s.') % volume_path
|
msg = _('File already exists at %s.') % volume_path
|
||||||
|
@ -335,7 +335,7 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
raise exception.SmbfsNoSuitableShareFound(
|
raise exception.SmbfsNoSuitableShareFound(
|
||||||
volume_size=volume_size_in_gib)
|
volume_size=volume_size_in_gib)
|
||||||
|
|
||||||
LOG.debug('Selected %s as target smbfs share.' % target_share)
|
LOG.debug('Selected %s as target smbfs share.', target_share)
|
||||||
|
|
||||||
return target_share
|
return target_share
|
||||||
|
|
||||||
|
@ -365,13 +365,13 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
used = (total_size - total_available) / total_size
|
used = (total_size - total_available) / total_size
|
||||||
|
|
||||||
if used > used_ratio:
|
if used > used_ratio:
|
||||||
LOG.debug('%s is above smbfs_used_ratio.' % smbfs_share)
|
LOG.debug('%s is above smbfs_used_ratio.', smbfs_share)
|
||||||
return False
|
return False
|
||||||
if apparent_available <= requested_volume_size:
|
if apparent_available <= requested_volume_size:
|
||||||
LOG.debug('%s is above smbfs_oversub_ratio.' % smbfs_share)
|
LOG.debug('%s is above smbfs_oversub_ratio.', smbfs_share)
|
||||||
return False
|
return False
|
||||||
if total_allocated / total_size >= oversub_ratio:
|
if total_allocated / total_size >= oversub_ratio:
|
||||||
LOG.debug('%s reserved space is above smbfs_oversub_ratio.' %
|
LOG.debug('%s reserved space is above smbfs_oversub_ratio.',
|
||||||
smbfs_share)
|
smbfs_share)
|
||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
@ -407,7 +407,7 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
volume_path = self.local_path(volume)
|
volume_path = self.local_path(volume)
|
||||||
|
|
||||||
self._check_extend_volume_support(volume, size_gb)
|
self._check_extend_volume_support(volume, size_gb)
|
||||||
LOG.info(_LI('Resizing file to %sG...') % size_gb)
|
LOG.info(_LI('Resizing file to %sG...'), size_gb)
|
||||||
|
|
||||||
self._do_extend_volume(volume_path, size_gb, volume['name'])
|
self._do_extend_volume(volume_path, size_gb, volume['name'])
|
||||||
|
|
||||||
|
@ -458,7 +458,7 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
"""
|
"""
|
||||||
|
|
||||||
LOG.debug("Snapshot: %(snap)s, volume: %(vol)s, "
|
LOG.debug("Snapshot: %(snap)s, volume: %(vol)s, "
|
||||||
"volume_size: %(size)s" %
|
"volume_size: %(size)s",
|
||||||
{'snap': snapshot['id'],
|
{'snap': snapshot['id'],
|
||||||
'vol': volume['id'],
|
'vol': volume['id'],
|
||||||
'size': volume_size})
|
'size': volume_size})
|
||||||
|
@ -477,7 +477,7 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
|
||||||
snapshot['volume']['name'])
|
snapshot['volume']['name'])
|
||||||
path_to_snap_img = os.path.join(vol_dir, img_info.backing_file)
|
path_to_snap_img = os.path.join(vol_dir, img_info.backing_file)
|
||||||
|
|
||||||
LOG.debug("Will copy from snapshot at %s" % path_to_snap_img)
|
LOG.debug("Will copy from snapshot at %s", path_to_snap_img)
|
||||||
|
|
||||||
image_utils.convert_image(path_to_snap_img,
|
image_utils.convert_image(path_to_snap_img,
|
||||||
self.local_path(volume),
|
self.local_path(volume),
|
||||||
|
|
|
@ -86,8 +86,9 @@ def retry(exc_tuple, tries=5, delay=1, backoff=2):
|
||||||
time.sleep(_delay)
|
time.sleep(_delay)
|
||||||
_tries -= 1
|
_tries -= 1
|
||||||
_delay *= backoff
|
_delay *= backoff
|
||||||
LOG.debug('Retrying %s, (%s attempts remaining)...' %
|
LOG.debug('Retrying %(args)s, %(tries)s attempts '
|
||||||
(args, _tries))
|
'remaining...',
|
||||||
|
{'args': args, 'tries': _tries})
|
||||||
# NOTE(jdg): Don't log the params passed here
|
# NOTE(jdg): Don't log the params passed here
|
||||||
# some cmds like createAccount will have sensitive
|
# some cmds like createAccount will have sensitive
|
||||||
# info in the params, grab only the second tuple
|
# info in the params, grab only the second tuple
|
||||||
|
@ -327,7 +328,7 @@ class SolidFireDriver(san.SanISCSIDriver):
|
||||||
|
|
||||||
if not found_volume:
|
if not found_volume:
|
||||||
LOG.error(_LE('Failed to retrieve volume SolidFire-'
|
LOG.error(_LE('Failed to retrieve volume SolidFire-'
|
||||||
'ID: %s in get_by_account!') % sf_volume_id)
|
'ID: %s in get_by_account!'), sf_volume_id)
|
||||||
raise exception.VolumeNotFound(volume_id=sf_volume_id)
|
raise exception.VolumeNotFound(volume_id=sf_volume_id)
|
||||||
|
|
||||||
model_update = {}
|
model_update = {}
|
||||||
|
@ -447,7 +448,7 @@ class SolidFireDriver(san.SanISCSIDriver):
|
||||||
if len(presets) > 0:
|
if len(presets) > 0:
|
||||||
if len(presets) > 1:
|
if len(presets) > 1:
|
||||||
LOG.warning(_LW('More than one valid preset was '
|
LOG.warning(_LW('More than one valid preset was '
|
||||||
'detected, using %s') % presets[0])
|
'detected, using %s'), presets[0])
|
||||||
qos = self.sf_qos_dict[presets[0]]
|
qos = self.sf_qos_dict[presets[0]]
|
||||||
else:
|
else:
|
||||||
# look for explicit settings
|
# look for explicit settings
|
||||||
|
@ -499,9 +500,9 @@ class SolidFireDriver(san.SanISCSIDriver):
|
||||||
if uuid in v['name'] or uuid in alt_id:
|
if uuid in v['name'] or uuid in alt_id:
|
||||||
found_count += 1
|
found_count += 1
|
||||||
sf_volref = v
|
sf_volref = v
|
||||||
LOG.debug("Mapped SolidFire volumeID %s "
|
LOG.debug("Mapped SolidFire volumeID %(volume_id)s "
|
||||||
"to cinder ID %s.",
|
"to cinder ID %(uuid)s.",
|
||||||
v['volumeID'], uuid)
|
{'volume_id': v['volumeID'], 'uuid': uuid})
|
||||||
|
|
||||||
if found_count == 0:
|
if found_count == 0:
|
||||||
# NOTE(jdg): Previously we would raise here, but there are cases
|
# NOTE(jdg): Previously we would raise here, but there are cases
|
||||||
|
@ -510,7 +511,7 @@ class SolidFireDriver(san.SanISCSIDriver):
|
||||||
LOG.error(_LE("Volume %s, not found on SF Cluster."), uuid)
|
LOG.error(_LE("Volume %s, not found on SF Cluster."), uuid)
|
||||||
|
|
||||||
if found_count > 1:
|
if found_count > 1:
|
||||||
LOG.error(_LE("Found %(count)s volumes mapped to id: %(uuid)s.") %
|
LOG.error(_LE("Found %(count)s volumes mapped to id: %(uuid)s."),
|
||||||
{'count': found_count,
|
{'count': found_count,
|
||||||
'uuid': uuid})
|
'uuid': uuid})
|
||||||
raise exception.DuplicateSfVolumeNames(vol_name=uuid)
|
raise exception.DuplicateSfVolumeNames(vol_name=uuid)
|
||||||
|
@ -749,7 +750,7 @@ class SolidFireDriver(san.SanISCSIDriver):
|
||||||
if sfaccount is None:
|
if sfaccount is None:
|
||||||
LOG.error(_LE("Account for Volume ID %s was not found on "
|
LOG.error(_LE("Account for Volume ID %s was not found on "
|
||||||
"the SolidFire Cluster while attempting "
|
"the SolidFire Cluster while attempting "
|
||||||
"delete_volume operation!") % volume['id'])
|
"delete_volume operation!"), volume['id'])
|
||||||
LOG.error(_LE("This usually means the volume was never "
|
LOG.error(_LE("This usually means the volume was never "
|
||||||
"successfully created."))
|
"successfully created."))
|
||||||
return
|
return
|
||||||
|
@ -1030,8 +1031,8 @@ class SolidFireDriver(san.SanISCSIDriver):
|
||||||
sfid = external_ref.get('source-id', None)
|
sfid = external_ref.get('source-id', None)
|
||||||
sfname = external_ref.get('name', None)
|
sfname = external_ref.get('name', None)
|
||||||
if sfid is None:
|
if sfid is None:
|
||||||
raise exception.SolidFireAPIException("Manage existing volume "
|
raise exception.SolidFireAPIException(_("Manage existing volume "
|
||||||
"requires 'source-id'.")
|
"requires 'source-id'."))
|
||||||
|
|
||||||
# First get the volume on the SF cluster (MUST be active)
|
# First get the volume on the SF cluster (MUST be active)
|
||||||
params = {'startVolumeID': sfid,
|
params = {'startVolumeID': sfid,
|
||||||
|
@ -1086,8 +1087,8 @@ class SolidFireDriver(san.SanISCSIDriver):
|
||||||
|
|
||||||
sfid = external_ref.get('source-id', None)
|
sfid = external_ref.get('source-id', None)
|
||||||
if sfid is None:
|
if sfid is None:
|
||||||
raise exception.SolidFireAPIException("Manage existing get size "
|
raise exception.SolidFireAPIException(_("Manage existing get size "
|
||||||
"requires 'id'.")
|
"requires 'id'."))
|
||||||
|
|
||||||
params = {'startVolumeID': int(sfid),
|
params = {'startVolumeID': int(sfid),
|
||||||
'limit': 1}
|
'limit': 1}
|
||||||
|
@ -1105,9 +1106,9 @@ class SolidFireDriver(san.SanISCSIDriver):
|
||||||
if sfaccount is None:
|
if sfaccount is None:
|
||||||
LOG.error(_LE("Account for Volume ID %s was not found on "
|
LOG.error(_LE("Account for Volume ID %s was not found on "
|
||||||
"the SolidFire Cluster while attempting "
|
"the SolidFire Cluster while attempting "
|
||||||
"unmanage operation!") % volume['id'])
|
"unmanage operation!"), volume['id'])
|
||||||
raise exception.SolidFireAPIException("Failed to find account "
|
raise exception.SolidFireAPIException(_("Failed to find account "
|
||||||
"for volume.")
|
"for volume."))
|
||||||
|
|
||||||
params = {'accountID': sfaccount['accountID']}
|
params = {'accountID': sfaccount['accountID']}
|
||||||
sf_vol = self._get_sf_volume(volume['id'], params)
|
sf_vol = self._get_sf_volume(volume['id'], params)
|
||||||
|
|
|
@ -87,9 +87,9 @@ class retry(object):
|
||||||
for attempt in xrange(self._count):
|
for attempt in xrange(self._count):
|
||||||
if attempt != 0:
|
if attempt != 0:
|
||||||
LOG.warning(_LW('Retrying failed call to %(func)s, '
|
LOG.warning(_LW('Retrying failed call to %(func)s, '
|
||||||
'attempt %(attempt)i.')
|
'attempt %(attempt)i.'),
|
||||||
% {'func': func_name,
|
{'func': func_name,
|
||||||
'attempt': attempt})
|
'attempt': attempt})
|
||||||
try:
|
try:
|
||||||
return fun(*args, **kwargs)
|
return fun(*args, **kwargs)
|
||||||
except self._exceptions:
|
except self._exceptions:
|
||||||
|
@ -127,9 +127,9 @@ class LVM(lvm.LVM):
|
||||||
run_as_root=True)
|
run_as_root=True)
|
||||||
except putils.ProcessExecutionError as err:
|
except putils.ProcessExecutionError as err:
|
||||||
LOG.exception(_LE('Error activating Volume Group'))
|
LOG.exception(_LE('Error activating Volume Group'))
|
||||||
LOG.error(_LE('Cmd :%s') % err.cmd)
|
LOG.error(_LE('Cmd :%s'), err.cmd)
|
||||||
LOG.error(_LE('StdOut :%s') % err.stdout)
|
LOG.error(_LE('StdOut :%s'), err.stdout)
|
||||||
LOG.error(_LE('StdErr :%s') % err.stderr)
|
LOG.error(_LE('StdErr :%s'), err.stderr)
|
||||||
raise
|
raise
|
||||||
|
|
||||||
def deactivate_vg(self):
|
def deactivate_vg(self):
|
||||||
|
@ -147,9 +147,9 @@ class LVM(lvm.LVM):
|
||||||
run_as_root=True)
|
run_as_root=True)
|
||||||
except putils.ProcessExecutionError as err:
|
except putils.ProcessExecutionError as err:
|
||||||
LOG.exception(_LE('Error deactivating Volume Group'))
|
LOG.exception(_LE('Error deactivating Volume Group'))
|
||||||
LOG.error(_LE('Cmd :%s') % err.cmd)
|
LOG.error(_LE('Cmd :%s'), err.cmd)
|
||||||
LOG.error(_LE('StdOut :%s') % err.stdout)
|
LOG.error(_LE('StdOut :%s'), err.stdout)
|
||||||
LOG.error(_LE('StdErr :%s') % err.stderr)
|
LOG.error(_LE('StdErr :%s'), err.stderr)
|
||||||
raise
|
raise
|
||||||
|
|
||||||
def destroy_vg(self):
|
def destroy_vg(self):
|
||||||
|
@ -165,9 +165,9 @@ class LVM(lvm.LVM):
|
||||||
run_as_root=True)
|
run_as_root=True)
|
||||||
except putils.ProcessExecutionError as err:
|
except putils.ProcessExecutionError as err:
|
||||||
LOG.exception(_LE('Error destroying Volume Group'))
|
LOG.exception(_LE('Error destroying Volume Group'))
|
||||||
LOG.error(_LE('Cmd :%s') % err.cmd)
|
LOG.error(_LE('Cmd :%s'), err.cmd)
|
||||||
LOG.error(_LE('StdOut :%s') % err.stdout)
|
LOG.error(_LE('StdOut :%s'), err.stdout)
|
||||||
LOG.error(_LE('StdErr :%s') % err.stderr)
|
LOG.error(_LE('StdErr :%s'), err.stderr)
|
||||||
raise
|
raise
|
||||||
|
|
||||||
def pv_resize(self, pv_name, new_size_str):
|
def pv_resize(self, pv_name, new_size_str):
|
||||||
|
@ -183,9 +183,9 @@ class LVM(lvm.LVM):
|
||||||
run_as_root=True)
|
run_as_root=True)
|
||||||
except putils.ProcessExecutionError as err:
|
except putils.ProcessExecutionError as err:
|
||||||
LOG.exception(_LE('Error resizing Physical Volume'))
|
LOG.exception(_LE('Error resizing Physical Volume'))
|
||||||
LOG.error(_LE('Cmd :%s') % err.cmd)
|
LOG.error(_LE('Cmd :%s'), err.cmd)
|
||||||
LOG.error(_LE('StdOut :%s') % err.stdout)
|
LOG.error(_LE('StdOut :%s'), err.stdout)
|
||||||
LOG.error(_LE('StdErr :%s') % err.stderr)
|
LOG.error(_LE('StdErr :%s'), err.stderr)
|
||||||
raise
|
raise
|
||||||
|
|
||||||
def extend_thin_pool(self):
|
def extend_thin_pool(self):
|
||||||
|
@ -209,9 +209,9 @@ class LVM(lvm.LVM):
|
||||||
run_as_root=True)
|
run_as_root=True)
|
||||||
except putils.ProcessExecutionError as err:
|
except putils.ProcessExecutionError as err:
|
||||||
LOG.exception(_LE('Error extending thin provisioning pool'))
|
LOG.exception(_LE('Error extending thin provisioning pool'))
|
||||||
LOG.error(_LE('Cmd :%s') % err.cmd)
|
LOG.error(_LE('Cmd :%s'), err.cmd)
|
||||||
LOG.error(_LE('StdOut :%s') % err.stdout)
|
LOG.error(_LE('StdOut :%s'), err.stdout)
|
||||||
LOG.error(_LE('StdErr :%s') % err.stderr)
|
LOG.error(_LE('StdErr :%s'), err.stderr)
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
@ -577,7 +577,8 @@ class SRBDriver(driver.VolumeDriver):
|
||||||
def _attach_file(self, volume):
|
def _attach_file(self, volume):
|
||||||
name = self._get_volname(volume)
|
name = self._get_volname(volume)
|
||||||
devname = self._device_name(volume)
|
devname = self._device_name(volume)
|
||||||
LOG.debug('Attaching volume %s as %s', name, devname)
|
LOG.debug('Attaching volume %(name)s as %(devname)s',
|
||||||
|
{'name': name, 'devname': devname})
|
||||||
|
|
||||||
count = self._get_attached_count(volume)
|
count = self._get_attached_count(volume)
|
||||||
if count == 0:
|
if count == 0:
|
||||||
|
@ -621,8 +622,8 @@ class SRBDriver(driver.VolumeDriver):
|
||||||
self._do_deactivate(volume, vg)
|
self._do_deactivate(volume, vg)
|
||||||
except putils.ProcessExecutionError:
|
except putils.ProcessExecutionError:
|
||||||
LOG.warning(_LW('All attempts to recover failed detach '
|
LOG.warning(_LW('All attempts to recover failed detach '
|
||||||
'of %(volume)s failed.')
|
'of %(volume)s failed.'),
|
||||||
% {'volume': volname})
|
{'volume': volname})
|
||||||
|
|
||||||
@lockutils.synchronized('devices', 'cinder-srb-')
|
@lockutils.synchronized('devices', 'cinder-srb-')
|
||||||
def _detach_file(self, volume):
|
def _detach_file(self, volume):
|
||||||
|
@ -634,9 +635,8 @@ class SRBDriver(driver.VolumeDriver):
|
||||||
count = self._get_attached_count(volume)
|
count = self._get_attached_count(volume)
|
||||||
if count > 1:
|
if count > 1:
|
||||||
LOG.info(_LI('Reference count of %(volume)s is %(count)d, '
|
LOG.info(_LI('Reference count of %(volume)s is %(count)d, '
|
||||||
'not detaching.')
|
'not detaching.'),
|
||||||
% {'volume': volume['name'],
|
{'volume': volume['name'], 'count': count})
|
||||||
'count': count})
|
|
||||||
return
|
return
|
||||||
|
|
||||||
message = (_('Could not detach volume %(vol)s from device %(dev)s.')
|
message = (_('Could not detach volume %(vol)s from device %(dev)s.')
|
||||||
|
@ -649,18 +649,15 @@ class SRBDriver(driver.VolumeDriver):
|
||||||
if vg is not None:
|
if vg is not None:
|
||||||
self._do_deactivate(volume, vg)
|
self._do_deactivate(volume, vg)
|
||||||
except putils.ProcessExecutionError:
|
except putils.ProcessExecutionError:
|
||||||
msg = _LE('Could not deactivate volume groupe %s')\
|
LOG.error(_LE('Could not deactivate volume group %s'),
|
||||||
% (self._get_volname(volume))
|
self._get_volname(volume))
|
||||||
LOG.error(msg)
|
|
||||||
raise
|
raise
|
||||||
|
|
||||||
try:
|
try:
|
||||||
self._do_detach(volume, vg=vg)
|
self._do_detach(volume, vg=vg)
|
||||||
except putils.ProcessExecutionError:
|
except putils.ProcessExecutionError:
|
||||||
msg = _LE('Could not detach volume '
|
LOG.error(_LE('Could not detach volume %(vol)s from device '
|
||||||
'%(vol)s from device %(dev)s.') \
|
'%(dev)s.'), {'vol': name, 'dev': devname})
|
||||||
% {'vol': name, 'dev': devname}
|
|
||||||
LOG.error(msg)
|
|
||||||
raise
|
raise
|
||||||
|
|
||||||
self._decrement_attached_count(volume)
|
self._decrement_attached_count(volume)
|
||||||
|
@ -748,8 +745,8 @@ class SRBDriver(driver.VolumeDriver):
|
||||||
self._destroy_lvm(volume)
|
self._destroy_lvm(volume)
|
||||||
self._detach_file(volume)
|
self._detach_file(volume)
|
||||||
|
|
||||||
LOG.debug('Deleting volume %s, attached=%s',
|
LOG.debug('Deleting volume %(volume_name)s, attached=%(attached)s',
|
||||||
volume['name'], attached)
|
{'volume_name': volume['name'], 'attached': attached})
|
||||||
|
|
||||||
self._destroy_file(volume)
|
self._destroy_file(volume)
|
||||||
|
|
||||||
|
|
|
@ -168,7 +168,7 @@ class V6000Common(object):
|
||||||
"""
|
"""
|
||||||
lun_type = '0'
|
lun_type = '0'
|
||||||
|
|
||||||
LOG.debug("Creating LUN %(name)s, %(size)s GB." %
|
LOG.debug("Creating LUN %(name)s, %(size)s GB.",
|
||||||
{'name': volume['name'], 'size': volume['size']})
|
{'name': volume['name'], 'size': volume['size']})
|
||||||
|
|
||||||
if self.config.san_thin_provision:
|
if self.config.san_thin_provision:
|
||||||
|
@ -188,7 +188,7 @@ class V6000Common(object):
|
||||||
LOG.debug("Lun %s already exists, continuing.", volume['id'])
|
LOG.debug("Lun %s already exists, continuing.", volume['id'])
|
||||||
|
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.warn(_LW("Lun create for %s failed!"), volume['id'])
|
LOG.warning(_LW("Lun create for %s failed!"), volume['id'])
|
||||||
raise
|
raise
|
||||||
|
|
||||||
@utils.synchronized('vmem-lun')
|
@utils.synchronized('vmem-lun')
|
||||||
|
@ -213,8 +213,8 @@ class V6000Common(object):
|
||||||
LOG.debug("Lun %s already deleted, continuing.", volume['id'])
|
LOG.debug("Lun %s already deleted, continuing.", volume['id'])
|
||||||
|
|
||||||
except exception.ViolinBackendErrExists:
|
except exception.ViolinBackendErrExists:
|
||||||
LOG.warn(_LW("Lun %s has dependent snapshots, skipping."),
|
LOG.warning(_LW("Lun %s has dependent snapshots, skipping."),
|
||||||
volume['id'])
|
volume['id'])
|
||||||
raise exception.VolumeIsBusy(volume_name=volume['id'])
|
raise exception.VolumeIsBusy(volume_name=volume['id'])
|
||||||
|
|
||||||
except Exception:
|
except Exception:
|
||||||
|
@ -232,7 +232,7 @@ class V6000Common(object):
|
||||||
volume -- volume object provided by the Manager
|
volume -- volume object provided by the Manager
|
||||||
new_size -- new (increased) size in GB to be applied
|
new_size -- new (increased) size in GB to be applied
|
||||||
"""
|
"""
|
||||||
LOG.debug("Extending lun %(id)s, from %(size)s to %(new_size)s GB." %
|
LOG.debug("Extending lun %(id)s, from %(size)s to %(new_size)s GB.",
|
||||||
{'id': volume['id'], 'size': volume['size'],
|
{'id': volume['id'], 'size': volume['size'],
|
||||||
'new_size': new_size})
|
'new_size': new_size})
|
||||||
|
|
||||||
|
|
|
@ -70,7 +70,7 @@ class V6000FCDriver(driver.FibreChannelDriver):
|
||||||
self.common = v6000_common.V6000Common(self.configuration)
|
self.common = v6000_common.V6000Common(self.configuration)
|
||||||
self.lookup_service = fczm_utils.create_lookup_service()
|
self.lookup_service = fczm_utils.create_lookup_service()
|
||||||
|
|
||||||
LOG.info(_LI("Initialized driver %(name)s version: %(vers)s.") %
|
LOG.info(_LI("Initialized driver %(name)s version: %(vers)s."),
|
||||||
{'name': self.__class__.__name__, 'vers': self.VERSION})
|
{'name': self.__class__.__name__, 'vers': self.VERSION})
|
||||||
|
|
||||||
def do_setup(self, context):
|
def do_setup(self, context):
|
||||||
|
@ -169,8 +169,8 @@ class V6000FCDriver(driver.FibreChannelDriver):
|
||||||
properties['target_lun'] = lun_id
|
properties['target_lun'] = lun_id
|
||||||
properties['initiator_target_map'] = init_targ_map
|
properties['initiator_target_map'] = init_targ_map
|
||||||
|
|
||||||
LOG.debug("Return FC data for zone addition: %(properties)s."
|
LOG.debug("Return FC data for zone addition: %(properties)s.",
|
||||||
% {'properties': properties})
|
{'properties': properties})
|
||||||
|
|
||||||
return {'driver_volume_type': 'fibre_channel', 'data': properties}
|
return {'driver_volume_type': 'fibre_channel', 'data': properties}
|
||||||
|
|
||||||
|
@ -193,8 +193,8 @@ class V6000FCDriver(driver.FibreChannelDriver):
|
||||||
properties['target_wwn'] = target_wwns
|
properties['target_wwn'] = target_wwns
|
||||||
properties['initiator_target_map'] = init_targ_map
|
properties['initiator_target_map'] = init_targ_map
|
||||||
|
|
||||||
LOG.debug("Return FC data for zone deletion: %(properties)s."
|
LOG.debug("Return FC data for zone deletion: %(properties)s.",
|
||||||
% {'properties': properties})
|
{'properties': properties})
|
||||||
|
|
||||||
return {'driver_volume_type': 'fibre_channel', 'data': properties}
|
return {'driver_volume_type': 'fibre_channel', 'data': properties}
|
||||||
|
|
||||||
|
@ -231,7 +231,7 @@ class V6000FCDriver(driver.FibreChannelDriver):
|
||||||
else:
|
else:
|
||||||
raise exception.Error(_("No initiators found, cannot proceed"))
|
raise exception.Error(_("No initiators found, cannot proceed"))
|
||||||
|
|
||||||
LOG.debug("Exporting lun %s." % volume['id'])
|
LOG.debug("Exporting lun %s.", volume['id'])
|
||||||
|
|
||||||
try:
|
try:
|
||||||
self.common._send_cmd_and_verify(
|
self.common._send_cmd_and_verify(
|
||||||
|
@ -361,7 +361,7 @@ class V6000FCDriver(driver.FibreChannelDriver):
|
||||||
v = self.common.vip
|
v = self.common.vip
|
||||||
wwpns = self._convert_wwns_openstack_to_vmem(connector['wwpns'])
|
wwpns = self._convert_wwns_openstack_to_vmem(connector['wwpns'])
|
||||||
|
|
||||||
LOG.debug("Adding initiators %(wwpns)s to igroup %(igroup)s." %
|
LOG.debug("Adding initiators %(wwpns)s to igroup %(igroup)s.",
|
||||||
{'wwpns': wwpns, 'igroup': igroup})
|
{'wwpns': wwpns, 'igroup': igroup})
|
||||||
|
|
||||||
resp = v.igroup.add_initiators(igroup, wwpns)
|
resp = v.igroup.add_initiators(igroup, wwpns)
|
||||||
|
@ -439,14 +439,14 @@ class V6000FCDriver(driver.FibreChannelDriver):
|
||||||
if bn1 in resp:
|
if bn1 in resp:
|
||||||
total_gb = resp[bn1] / units.Gi
|
total_gb = resp[bn1] / units.Gi
|
||||||
else:
|
else:
|
||||||
LOG.warn(_LW("Failed to receive update for total_gb stat!"))
|
LOG.warning(_LW("Failed to receive update for total_gb stat!"))
|
||||||
if 'total_capacity_gb' in self.stats:
|
if 'total_capacity_gb' in self.stats:
|
||||||
total_gb = self.stats['total_capacity_gb']
|
total_gb = self.stats['total_capacity_gb']
|
||||||
|
|
||||||
if bn2 in resp:
|
if bn2 in resp:
|
||||||
free_gb = resp[bn2] / units.Gi
|
free_gb = resp[bn2] / units.Gi
|
||||||
else:
|
else:
|
||||||
LOG.warn(_LW("Failed to receive update for free_gb stat!"))
|
LOG.warning(_LW("Failed to receive update for free_gb stat!"))
|
||||||
if 'free_capacity_gb' in self.stats:
|
if 'free_capacity_gb' in self.stats:
|
||||||
free_gb = self.stats['free_capacity_gb']
|
free_gb = self.stats['free_capacity_gb']
|
||||||
|
|
||||||
|
@ -461,7 +461,7 @@ class V6000FCDriver(driver.FibreChannelDriver):
|
||||||
data['free_capacity_gb'] = free_gb
|
data['free_capacity_gb'] = free_gb
|
||||||
|
|
||||||
for i in data:
|
for i in data:
|
||||||
LOG.debug("stat update: %(name)s=%(data)s." %
|
LOG.debug("stat update: %(name)s=%(data)s.",
|
||||||
{'name': i, 'data': data[i]})
|
{'name': i, 'data': data[i]})
|
||||||
self.stats = data
|
self.stats = data
|
||||||
|
|
||||||
|
|
|
@ -71,7 +71,7 @@ class V6000ISCSIDriver(driver.ISCSIDriver):
|
||||||
self.configuration.append_config_values(san.san_opts)
|
self.configuration.append_config_values(san.san_opts)
|
||||||
self.common = v6000_common.V6000Common(self.configuration)
|
self.common = v6000_common.V6000Common(self.configuration)
|
||||||
|
|
||||||
LOG.info(_LI("Initialized driver %(name)s version: %(vers)s.") %
|
LOG.info(_LI("Initialized driver %(name)s version: %(vers)s."),
|
||||||
{'name': self.__class__.__name__, 'vers': self.VERSION})
|
{'name': self.__class__.__name__, 'vers': self.VERSION})
|
||||||
|
|
||||||
def do_setup(self, context):
|
def do_setup(self, context):
|
||||||
|
@ -309,7 +309,7 @@ class V6000ISCSIDriver(driver.ISCSIDriver):
|
||||||
|
|
||||||
target_name = self._get_short_name(volume['id'])
|
target_name = self._get_short_name(volume['id'])
|
||||||
|
|
||||||
LOG.debug("Exporting lun %s." % volume['id'])
|
LOG.debug("Exporting lun %s.", volume['id'])
|
||||||
|
|
||||||
try:
|
try:
|
||||||
self.common._send_cmd_and_verify(
|
self.common._send_cmd_and_verify(
|
||||||
|
@ -468,14 +468,14 @@ class V6000ISCSIDriver(driver.ISCSIDriver):
|
||||||
if bn1 in resp:
|
if bn1 in resp:
|
||||||
total_gb = resp[bn1] / units.Gi
|
total_gb = resp[bn1] / units.Gi
|
||||||
else:
|
else:
|
||||||
LOG.warn(_LW("Failed to receive update for total_gb stat!"))
|
LOG.warning(_LW("Failed to receive update for total_gb stat!"))
|
||||||
if 'total_capacity_gb' in self.stats:
|
if 'total_capacity_gb' in self.stats:
|
||||||
total_gb = self.stats['total_capacity_gb']
|
total_gb = self.stats['total_capacity_gb']
|
||||||
|
|
||||||
if bn2 in resp:
|
if bn2 in resp:
|
||||||
free_gb = resp[bn2] / units.Gi
|
free_gb = resp[bn2] / units.Gi
|
||||||
else:
|
else:
|
||||||
LOG.warn(_LW("Failed to receive update for free_gb stat!"))
|
LOG.warning(_LW("Failed to receive update for free_gb stat!"))
|
||||||
if 'free_capacity_gb' in self.stats:
|
if 'free_capacity_gb' in self.stats:
|
||||||
free_gb = self.stats['free_capacity_gb']
|
free_gb = self.stats['free_capacity_gb']
|
||||||
|
|
||||||
|
@ -490,7 +490,7 @@ class V6000ISCSIDriver(driver.ISCSIDriver):
|
||||||
data['free_capacity_gb'] = free_gb
|
data['free_capacity_gb'] = free_gb
|
||||||
|
|
||||||
for i in data:
|
for i in data:
|
||||||
LOG.debug("stat update: %(name)s=%(data)s." %
|
LOG.debug("stat update: %(name)s=%(data)s.",
|
||||||
{'name': i, 'data': data[i]})
|
{'name': i, 'data': data[i]})
|
||||||
|
|
||||||
self.stats = data
|
self.stats = data
|
||||||
|
@ -563,7 +563,7 @@ class V6000ISCSIDriver(driver.ISCSIDriver):
|
||||||
if ret_dict:
|
if ret_dict:
|
||||||
hostname = ret_dict.items()[0][1]
|
hostname = ret_dict.items()[0][1]
|
||||||
else:
|
else:
|
||||||
LOG.debug("Unable to fetch gateway hostname for %s." % mg_to_query)
|
LOG.debug("Unable to fetch gateway hostname for %s.", mg_to_query)
|
||||||
|
|
||||||
return hostname
|
return hostname
|
||||||
|
|
||||||
|
|
|
@ -223,8 +223,8 @@ class DatastoreSelector(object):
|
||||||
except exceptions.VimException:
|
except exceptions.VimException:
|
||||||
# TODO(vbala) volumeops.get_dss_rp shouldn't throw VimException
|
# TODO(vbala) volumeops.get_dss_rp shouldn't throw VimException
|
||||||
# for empty datastore list.
|
# for empty datastore list.
|
||||||
LOG.warn(_LW("Unable to fetch datastores connected "
|
LOG.warning(_LW("Unable to fetch datastores connected "
|
||||||
"to host %s."), host_ref, exc_info=True)
|
"to host %s."), host_ref, exc_info=True)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if not datastores:
|
if not datastores:
|
||||||
|
|
|
@ -201,10 +201,10 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
|
||||||
VERSION = '1.4.0'
|
VERSION = '1.4.0'
|
||||||
|
|
||||||
def _do_deprecation_warning(self):
|
def _do_deprecation_warning(self):
|
||||||
LOG.warn(_LW('The VMware ESX VMDK driver is now deprecated '
|
LOG.warning(_LW('The VMware ESX VMDK driver is now deprecated '
|
||||||
'and will be removed in the Juno release. The VMware '
|
'and will be removed in the Juno release. The VMware '
|
||||||
'vCenter VMDK driver will remain and continue to be '
|
'vCenter VMDK driver will remain and continue to be '
|
||||||
'supported.'))
|
'supported.'))
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
super(VMwareEsxVmdkDriver, self).__init__(*args, **kwargs)
|
super(VMwareEsxVmdkDriver, self).__init__(*args, **kwargs)
|
||||||
|
@ -477,9 +477,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
|
||||||
LOG.error(msg, storage_profile)
|
LOG.error(msg, storage_profile)
|
||||||
raise exceptions.VimException(msg % storage_profile)
|
raise exceptions.VimException(msg % storage_profile)
|
||||||
elif storage_profile:
|
elif storage_profile:
|
||||||
LOG.warn(_LW("Ignoring storage profile %s requirement for this "
|
LOG.warning(_LW("Ignoring storage profile %s requirement for this "
|
||||||
"volume since policy based placement is "
|
"volume since policy based placement is "
|
||||||
"disabled."), storage_profile)
|
"disabled."), storage_profile)
|
||||||
|
|
||||||
size_bytes = volume['size'] * units.Gi
|
size_bytes = volume['size'] * units.Gi
|
||||||
datastore_summary = self._select_datastore_summary(size_bytes,
|
datastore_summary = self._select_datastore_summary(size_bytes,
|
||||||
|
@ -639,8 +639,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
|
||||||
if not backing:
|
if not backing:
|
||||||
# Create a backing in case it does not exist. It is a bad use
|
# Create a backing in case it does not exist. It is a bad use
|
||||||
# case to boot from an empty volume.
|
# case to boot from an empty volume.
|
||||||
LOG.warn(_LW("Trying to boot from an empty volume: %s."),
|
LOG.warning(_LW("Trying to boot from an empty volume: %s."),
|
||||||
volume['name'])
|
volume['name'])
|
||||||
# Create backing
|
# Create backing
|
||||||
backing = self._create_backing(volume)
|
backing = self._create_backing(volume)
|
||||||
|
|
||||||
|
@ -912,10 +912,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
|
||||||
self.volumeops.delete_vmdk_file(
|
self.volumeops.delete_vmdk_file(
|
||||||
descriptor_ds_file_path, dc_ref)
|
descriptor_ds_file_path, dc_ref)
|
||||||
except exceptions.VimException:
|
except exceptions.VimException:
|
||||||
LOG.warn(_LW("Error occurred while deleting temporary "
|
LOG.warning(_LW("Error occurred while deleting temporary "
|
||||||
"disk: %s."),
|
"disk: %s."),
|
||||||
descriptor_ds_file_path,
|
descriptor_ds_file_path, exc_info=True)
|
||||||
exc_info=True)
|
|
||||||
|
|
||||||
def _copy_temp_virtual_disk(self, src_dc_ref, src_path, dest_dc_ref,
|
def _copy_temp_virtual_disk(self, src_dc_ref, src_path, dest_dc_ref,
|
||||||
dest_path):
|
dest_path):
|
||||||
|
@ -1044,10 +1043,10 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
|
||||||
self.volumeops.delete_file(
|
self.volumeops.delete_file(
|
||||||
path.get_descriptor_ds_file_path(), dc_ref)
|
path.get_descriptor_ds_file_path(), dc_ref)
|
||||||
except exceptions.VimException:
|
except exceptions.VimException:
|
||||||
LOG.warn(_LW("Error occurred while deleting "
|
LOG.warning(_LW("Error occurred while deleting "
|
||||||
"descriptor: %s."),
|
"descriptor: %s."),
|
||||||
path.get_descriptor_ds_file_path(),
|
path.get_descriptor_ds_file_path(),
|
||||||
exc_info=True)
|
exc_info=True)
|
||||||
|
|
||||||
if dest_path != path:
|
if dest_path != path:
|
||||||
# Copy temporary disk to given destination.
|
# Copy temporary disk to given destination.
|
||||||
|
@ -1077,9 +1076,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
|
||||||
try:
|
try:
|
||||||
self.volumeops.delete_backing(backing)
|
self.volumeops.delete_backing(backing)
|
||||||
except exceptions.VimException:
|
except exceptions.VimException:
|
||||||
LOG.warn(_LW("Error occurred while deleting backing: %s."),
|
LOG.warning(_LW("Error occurred while deleting backing: %s."),
|
||||||
backing,
|
backing, exc_info=True)
|
||||||
exc_info=True)
|
|
||||||
|
|
||||||
def _create_volume_from_non_stream_optimized_image(
|
def _create_volume_from_non_stream_optimized_image(
|
||||||
self, context, volume, image_service, image_id,
|
self, context, volume, image_service, image_id,
|
||||||
|
@ -1438,8 +1436,8 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
|
||||||
"""
|
"""
|
||||||
# Can't attempt retype if the volume is in use.
|
# Can't attempt retype if the volume is in use.
|
||||||
if self._in_use(volume):
|
if self._in_use(volume):
|
||||||
LOG.warn(_LW("Volume: %s is in use, can't retype."),
|
LOG.warning(_LW("Volume: %s is in use, can't retype."),
|
||||||
volume['name'])
|
volume['name'])
|
||||||
return False
|
return False
|
||||||
|
|
||||||
# If the backing doesn't exist, retype is NOP.
|
# If the backing doesn't exist, retype is NOP.
|
||||||
|
@ -1507,9 +1505,9 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
|
||||||
best_candidate = self.ds_sel.select_datastore(req)
|
best_candidate = self.ds_sel.select_datastore(req)
|
||||||
if not best_candidate:
|
if not best_candidate:
|
||||||
# No candidate datastores; can't retype.
|
# No candidate datastores; can't retype.
|
||||||
LOG.warn(_LW("There are no datastores matching new "
|
LOG.warning(_LW("There are no datastores matching new "
|
||||||
"requirements; can't retype volume: %s."),
|
"requirements; can't retype volume: %s."),
|
||||||
volume['name'])
|
volume['name'])
|
||||||
return False
|
return False
|
||||||
|
|
||||||
(host, rp, summary) = best_candidate
|
(host, rp, summary) = best_candidate
|
||||||
|
@ -1559,12 +1557,13 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
|
||||||
self.volumeops.rename_backing(backing,
|
self.volumeops.rename_backing(backing,
|
||||||
volume['name'])
|
volume['name'])
|
||||||
except exceptions.VimException:
|
except exceptions.VimException:
|
||||||
LOG.warn(_LW("Changing backing: %(backing)s "
|
LOG.warning(_LW("Changing backing: "
|
||||||
"name from %(new_name)s to "
|
"%(backing)s name from "
|
||||||
"%(old_name)s failed."),
|
"%(new_name)s to %(old_name)s "
|
||||||
{'backing': backing,
|
"failed."),
|
||||||
'new_name': tmp_name,
|
{'backing': backing,
|
||||||
'old_name': volume['name']})
|
'new_name': tmp_name,
|
||||||
|
'old_name': volume['name']})
|
||||||
|
|
||||||
# Update the backing's storage profile if needed.
|
# Update the backing's storage profile if needed.
|
||||||
if need_profile_change:
|
if need_profile_change:
|
||||||
|
@ -1802,12 +1801,12 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
|
||||||
self.volumeops.rename_backing(backing,
|
self.volumeops.rename_backing(backing,
|
||||||
volume['name'])
|
volume['name'])
|
||||||
except exceptions.VimException:
|
except exceptions.VimException:
|
||||||
LOG.warn(_LW("Cannot undo volume rename; old name "
|
LOG.warning(_LW("Cannot undo volume rename; old "
|
||||||
"was %(old_name)s and new name is "
|
"name was %(old_name)s and new "
|
||||||
"%(new_name)s."),
|
"name is %(new_name)s."),
|
||||||
{'old_name': volume['name'],
|
{'old_name': volume['name'],
|
||||||
'new_name': tmp_backing_name},
|
'new_name': tmp_backing_name},
|
||||||
exc_info=True)
|
exc_info=True)
|
||||||
finally:
|
finally:
|
||||||
# Delete the temporary backing.
|
# Delete the temporary backing.
|
||||||
self._delete_temp_backing(src)
|
self._delete_temp_backing(src)
|
||||||
|
|
|
@ -93,11 +93,10 @@ class WindowsRemoteFsClient(remotefs.RemoteFsClient):
|
||||||
|
|
||||||
if len(mappings) > 0:
|
if len(mappings) > 0:
|
||||||
if os.path.exists(smbfs_share):
|
if os.path.exists(smbfs_share):
|
||||||
LOG.debug('Share already mounted: %s' % smbfs_share)
|
LOG.debug('Share already mounted: %s', smbfs_share)
|
||||||
return True
|
return True
|
||||||
else:
|
else:
|
||||||
LOG.debug('Share exists but is unavailable: %s '
|
LOG.debug('Share exists but is unavailable: %s ', smbfs_share)
|
||||||
% smbfs_share)
|
|
||||||
for mapping in mappings:
|
for mapping in mappings:
|
||||||
# Due to a bug in the WMI module, getting the output of
|
# Due to a bug in the WMI module, getting the output of
|
||||||
# methods returning None will raise an AttributeError
|
# methods returning None will raise an AttributeError
|
||||||
|
@ -115,7 +114,7 @@ class WindowsRemoteFsClient(remotefs.RemoteFsClient):
|
||||||
options.get('pass'))
|
options.get('pass'))
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.info(_LI('Mounting share: %s') % smbfs_share)
|
LOG.info(_LI('Mounting share: %s'), smbfs_share)
|
||||||
self.smb_conn.Msft_SmbMapping.Create(**smb_opts)
|
self.smb_conn.Msft_SmbMapping.Create(**smb_opts)
|
||||||
except wmi.x_wmi as exc:
|
except wmi.x_wmi as exc:
|
||||||
err_msg = (_(
|
err_msg = (_(
|
||||||
|
@ -123,7 +122,7 @@ class WindowsRemoteFsClient(remotefs.RemoteFsClient):
|
||||||
'WMI exception: %(wmi_exc)s'
|
'WMI exception: %(wmi_exc)s'
|
||||||
'Options: %(options)s') % {'smbfs_share': smbfs_share,
|
'Options: %(options)s') % {'smbfs_share': smbfs_share,
|
||||||
'options': smb_opts,
|
'options': smb_opts,
|
||||||
'wmi_exc': exc})
|
'wmi_exc': six.text_type(exc)})
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg)
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
|
|
||||||
def get_capacity_info(self, smbfs_share):
|
def get_capacity_info(self, smbfs_share):
|
||||||
|
@ -137,7 +136,7 @@ class WindowsRemoteFsClient(remotefs.RemoteFsClient):
|
||||||
ctypes.pointer(total_bytes),
|
ctypes.pointer(total_bytes),
|
||||||
ctypes.pointer(free_bytes))
|
ctypes.pointer(free_bytes))
|
||||||
if retcode == 0:
|
if retcode == 0:
|
||||||
LOG.error(_LE("Could not get share %s capacity info.") %
|
LOG.error(_LE("Could not get share %s capacity info."),
|
||||||
smbfs_share)
|
smbfs_share)
|
||||||
return 0, 0
|
return 0, 0
|
||||||
return total_bytes.value, free_bytes.value
|
return total_bytes.value, free_bytes.value
|
||||||
|
|
|
@ -23,7 +23,7 @@ from oslo_log import log as logging
|
||||||
from oslo_utils import units
|
from oslo_utils import units
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _
|
from cinder.i18n import _, _LI
|
||||||
from cinder.image import image_utils
|
from cinder.image import image_utils
|
||||||
from cinder.openstack.common import fileutils
|
from cinder.openstack.common import fileutils
|
||||||
from cinder import utils
|
from cinder import utils
|
||||||
|
@ -103,8 +103,10 @@ class WindowsSmbfsDriver(smbfs.SmbfsDriver):
|
||||||
smbfs_share)
|
smbfs_share)
|
||||||
total_allocated = self._get_total_allocated(smbfs_share)
|
total_allocated = self._get_total_allocated(smbfs_share)
|
||||||
return_value = [total_size, total_available, total_allocated]
|
return_value = [total_size, total_available, total_allocated]
|
||||||
LOG.info('Smb share %s Total size %s Total allocated %s'
|
LOG.info(_LI('Smb share %(share)s Total size %(size)s '
|
||||||
% (smbfs_share, total_size, total_allocated))
|
'Total allocated %(allocated)s'),
|
||||||
|
{'share': smbfs_share, 'size': total_size,
|
||||||
|
'allocated': total_allocated})
|
||||||
return [float(x) for x in return_value]
|
return [float(x) for x in return_value]
|
||||||
|
|
||||||
def _get_total_allocated(self, smbfs_share):
|
def _get_total_allocated(self, smbfs_share):
|
||||||
|
@ -223,7 +225,7 @@ class WindowsSmbfsDriver(smbfs.SmbfsDriver):
|
||||||
"""Copy data from snapshot to destination volume."""
|
"""Copy data from snapshot to destination volume."""
|
||||||
|
|
||||||
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
|
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
|
||||||
"volume_size: %(size)s" %
|
"volume_size: %(size)s",
|
||||||
{'snap': snapshot['id'],
|
{'snap': snapshot['id'],
|
||||||
'vol': volume['id'],
|
'vol': volume['id'],
|
||||||
'size': snapshot['volume_size']})
|
'size': snapshot['volume_size']})
|
||||||
|
|
|
@ -21,6 +21,7 @@ import os
|
||||||
|
|
||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
|
import six
|
||||||
|
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder.i18n import _, _LI
|
from cinder.i18n import _, _LI
|
||||||
|
@ -54,9 +55,10 @@ class WindowsUtils(object):
|
||||||
listen = wt_portal.Listen
|
listen = wt_portal.Listen
|
||||||
except wmi.x_wmi as exc:
|
except wmi.x_wmi as exc:
|
||||||
err_msg = (_('check_for_setup_error: the state of the WT Portal '
|
err_msg = (_('check_for_setup_error: the state of the WT Portal '
|
||||||
'could not be verified. WMI exception: %s'))
|
'could not be verified. WMI exception: %s')
|
||||||
LOG.error(err_msg % exc)
|
% six.text_type(exc))
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg % exc)
|
LOG.error(err_msg)
|
||||||
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
|
|
||||||
if not listen:
|
if not listen:
|
||||||
err_msg = (_('check_for_setup_error: there is no ISCSI traffic '
|
err_msg = (_('check_for_setup_error: there is no ISCSI traffic '
|
||||||
|
@ -70,9 +72,10 @@ class WindowsUtils(object):
|
||||||
wt_portal = self._conn_wmi.WT_Portal()[0]
|
wt_portal = self._conn_wmi.WT_Portal()[0]
|
||||||
except wmi.x_wmi as exc:
|
except wmi.x_wmi as exc:
|
||||||
err_msg = (_('get_host_information: the state of the WT Portal '
|
err_msg = (_('get_host_information: the state of the WT Portal '
|
||||||
'could not be verified. WMI exception: %s'))
|
'could not be verified. WMI exception: %s')
|
||||||
LOG.error(err_msg % exc)
|
% six.text_type(exc))
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg % exc)
|
LOG.error(err_msg)
|
||||||
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
(address, port) = (wt_portal.Address, wt_portal.Port)
|
(address, port) = (wt_portal.Address, wt_portal.Port)
|
||||||
# Getting the host information
|
# Getting the host information
|
||||||
try:
|
try:
|
||||||
|
@ -80,8 +83,9 @@ class WindowsUtils(object):
|
||||||
host = hosts[0]
|
host = hosts[0]
|
||||||
except wmi.x_wmi as exc:
|
except wmi.x_wmi as exc:
|
||||||
err_msg = (_('get_host_information: the ISCSI target information '
|
err_msg = (_('get_host_information: the ISCSI target information '
|
||||||
'could not be retrieved. WMI exception: %s'))
|
'could not be retrieved. WMI exception: %s')
|
||||||
LOG.error(err_msg % exc)
|
% six.text_type(exc))
|
||||||
|
LOG.error(err_msg)
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg)
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
|
|
||||||
properties = {}
|
properties = {}
|
||||||
|
@ -118,7 +122,7 @@ class WindowsUtils(object):
|
||||||
'target name: %(target)s could not be established. '
|
'target name: %(target)s could not be established. '
|
||||||
'WMI exception: %(wmi_exc)s') %
|
'WMI exception: %(wmi_exc)s') %
|
||||||
{'init': initiator_name, 'target': target_name,
|
{'init': initiator_name, 'target': target_name,
|
||||||
'wmi_exc': exc})
|
'wmi_exc': six.text_type(exc)})
|
||||||
LOG.error(err_msg)
|
LOG.error(err_msg)
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg)
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
|
|
||||||
|
@ -133,9 +137,9 @@ class WindowsUtils(object):
|
||||||
except wmi.x_wmi as exc:
|
except wmi.x_wmi as exc:
|
||||||
err_msg = (_(
|
err_msg = (_(
|
||||||
'delete_iscsi_target: error when deleting the iscsi target '
|
'delete_iscsi_target: error when deleting the iscsi target '
|
||||||
'associated with target name: %(target)s . '
|
'associated with target name: %(target)s . WMI '
|
||||||
'WMI exception: %(wmi_exc)s') % {'target': target_name,
|
'exception: %(wmi_exc)s') % {'target': target_name,
|
||||||
'wmi_exc': exc})
|
'wmi_exc': six.text_type(exc)})
|
||||||
LOG.error(err_msg)
|
LOG.error(err_msg)
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg)
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
|
|
||||||
|
@ -154,7 +158,8 @@ class WindowsUtils(object):
|
||||||
err_msg = (_(
|
err_msg = (_(
|
||||||
'create_volume: error when creating the volume name: '
|
'create_volume: error when creating the volume name: '
|
||||||
'%(vol_name)s . WMI exception: '
|
'%(vol_name)s . WMI exception: '
|
||||||
'%(wmi_exc)s') % {'vol_name': vol_name, 'wmi_exc': exc})
|
'%(wmi_exc)s') % {'vol_name': vol_name,
|
||||||
|
'wmi_exc': six.text_type(exc)})
|
||||||
LOG.error(err_msg)
|
LOG.error(err_msg)
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg)
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
|
|
||||||
|
@ -167,7 +172,7 @@ class WindowsUtils(object):
|
||||||
err_msg = (_("Failed to import disk: %(vhd_path)s. "
|
err_msg = (_("Failed to import disk: %(vhd_path)s. "
|
||||||
"WMI exception: %(exc)s") %
|
"WMI exception: %(exc)s") %
|
||||||
{'vhd_path': vhd_path,
|
{'vhd_path': vhd_path,
|
||||||
'exc': exc})
|
'exc': six.text_type(exc)})
|
||||||
LOG.error(err_msg)
|
LOG.error(err_msg)
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg)
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
|
|
||||||
|
@ -180,7 +185,8 @@ class WindowsUtils(object):
|
||||||
err_msg = (_(
|
err_msg = (_(
|
||||||
'Error changing disk status: '
|
'Error changing disk status: '
|
||||||
'%(vol_name)s . WMI exception: '
|
'%(vol_name)s . WMI exception: '
|
||||||
'%(wmi_exc)s') % {'vol_name': vol_name, 'wmi_exc': exc})
|
'%(wmi_exc)s') % {'vol_name': vol_name,
|
||||||
|
'wmi_exc': six.text_type(exc)})
|
||||||
LOG.error(err_msg)
|
LOG.error(err_msg)
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg)
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
|
|
||||||
|
@ -190,7 +196,7 @@ class WindowsUtils(object):
|
||||||
disk = self._conn_wmi.WT_Disk(Description=vol_name)
|
disk = self._conn_wmi.WT_Disk(Description=vol_name)
|
||||||
if not disk:
|
if not disk:
|
||||||
LOG.debug('Skipping deleting disk %s as it does not '
|
LOG.debug('Skipping deleting disk %s as it does not '
|
||||||
'exist.' % vol_name)
|
'exist.', vol_name)
|
||||||
return
|
return
|
||||||
wt_disk = disk[0]
|
wt_disk = disk[0]
|
||||||
wt_disk.Delete_()
|
wt_disk.Delete_()
|
||||||
|
@ -203,7 +209,8 @@ class WindowsUtils(object):
|
||||||
err_msg = (_(
|
err_msg = (_(
|
||||||
'delete_volume: error when deleting the volume name: '
|
'delete_volume: error when deleting the volume name: '
|
||||||
'%(vol_name)s . WMI exception: '
|
'%(vol_name)s . WMI exception: '
|
||||||
'%(wmi_exc)s') % {'vol_name': vol_name, 'wmi_exc': exc})
|
'%(wmi_exc)s') % {'vol_name': vol_name,
|
||||||
|
'wmi_exc': six.text_type(exc)})
|
||||||
LOG.error(err_msg)
|
LOG.error(err_msg)
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg)
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
|
|
||||||
|
@ -223,7 +230,8 @@ class WindowsUtils(object):
|
||||||
err_msg = (_(
|
err_msg = (_(
|
||||||
'create_snapshot: error when creating the snapshot name: '
|
'create_snapshot: error when creating the snapshot name: '
|
||||||
'%(vol_name)s . WMI exception: '
|
'%(vol_name)s . WMI exception: '
|
||||||
'%(wmi_exc)s') % {'vol_name': snapshot_name, 'wmi_exc': exc})
|
'%(wmi_exc)s') % {'vol_name': snapshot_name,
|
||||||
|
'wmi_exc': six.text_type(exc)})
|
||||||
LOG.error(err_msg)
|
LOG.error(err_msg)
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg)
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
|
|
||||||
|
@ -248,10 +256,10 @@ class WindowsUtils(object):
|
||||||
except wmi.x_wmi as exc:
|
except wmi.x_wmi as exc:
|
||||||
err_msg = (_(
|
err_msg = (_(
|
||||||
'create_volume_from_snapshot: error when creating the volume '
|
'create_volume_from_snapshot: error when creating the volume '
|
||||||
'name: %(vol_name)s from snapshot name: %(snap_name)s. '
|
'name: %(vol_name)s from snapshot name: %(snap_name)s. WMI '
|
||||||
'WMI exception: %(wmi_exc)s') % {'vol_name': vol_name,
|
'exception: %(wmi_exc)s') % {'vol_name': vol_name,
|
||||||
'snap_name': snap_name,
|
'snap_name': snap_name,
|
||||||
'wmi_exc': exc})
|
'wmi_exc': six.text_type(exc)})
|
||||||
LOG.error(err_msg)
|
LOG.error(err_msg)
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg)
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
|
|
||||||
|
@ -264,7 +272,8 @@ class WindowsUtils(object):
|
||||||
err_msg = (_(
|
err_msg = (_(
|
||||||
'delete_snapshot: error when deleting the snapshot name: '
|
'delete_snapshot: error when deleting the snapshot name: '
|
||||||
'%(snap_name)s . WMI exception: '
|
'%(snap_name)s . WMI exception: '
|
||||||
'%(wmi_exc)s') % {'snap_name': snap_name, 'wmi_exc': exc})
|
'%(wmi_exc)s') % {'snap_name': snap_name,
|
||||||
|
'wmi_exc': six.text_type(exc)})
|
||||||
LOG.error(err_msg)
|
LOG.error(err_msg)
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg)
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
|
|
||||||
|
@ -278,7 +287,8 @@ class WindowsUtils(object):
|
||||||
err_msg = (_(
|
err_msg = (_(
|
||||||
'create_iscsi_target: error when creating iscsi target: '
|
'create_iscsi_target: error when creating iscsi target: '
|
||||||
'%(tar_name)s . WMI exception: '
|
'%(tar_name)s . WMI exception: '
|
||||||
'%(wmi_exc)s') % {'tar_name': target_name, 'wmi_exc': exc})
|
'%(wmi_exc)s') % {'tar_name': target_name,
|
||||||
|
'wmi_exc': six.text_type(exc)})
|
||||||
LOG.error(err_msg)
|
LOG.error(err_msg)
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg)
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
else:
|
else:
|
||||||
|
@ -291,7 +301,7 @@ class WindowsUtils(object):
|
||||||
host = self._conn_wmi.WT_Host(HostName=target_name)
|
host = self._conn_wmi.WT_Host(HostName=target_name)
|
||||||
if not host:
|
if not host:
|
||||||
LOG.debug('Skipping removing target %s as it does not '
|
LOG.debug('Skipping removing target %s as it does not '
|
||||||
'exist.' % target_name)
|
'exist.', target_name)
|
||||||
return
|
return
|
||||||
wt_host = host[0]
|
wt_host = host[0]
|
||||||
wt_host.RemoveAllWTDisks()
|
wt_host.RemoveAllWTDisks()
|
||||||
|
@ -300,7 +310,8 @@ class WindowsUtils(object):
|
||||||
err_msg = (_(
|
err_msg = (_(
|
||||||
'remove_iscsi_target: error when deleting iscsi target: '
|
'remove_iscsi_target: error when deleting iscsi target: '
|
||||||
'%(tar_name)s . WMI exception: '
|
'%(tar_name)s . WMI exception: '
|
||||||
'%(wmi_exc)s') % {'tar_name': target_name, 'wmi_exc': exc})
|
'%(wmi_exc)s') % {'tar_name': target_name,
|
||||||
|
'wmi_exc': six.text_type(exc)})
|
||||||
LOG.error(err_msg)
|
LOG.error(err_msg)
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg)
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
|
|
||||||
|
@ -314,10 +325,10 @@ class WindowsUtils(object):
|
||||||
except wmi.x_wmi as exc:
|
except wmi.x_wmi as exc:
|
||||||
err_msg = (_(
|
err_msg = (_(
|
||||||
'add_disk_to_target: error adding disk associated to volume : '
|
'add_disk_to_target: error adding disk associated to volume : '
|
||||||
'%(vol_name)s to the target name: %(tar_name)s '
|
'%(vol_name)s to the target name: %(tar_name)s . WMI '
|
||||||
'. WMI exception: %(wmi_exc)s') % {'tar_name': target_name,
|
'exception: %(wmi_exc)s') % {'tar_name': target_name,
|
||||||
'vol_name': vol_name,
|
'vol_name': vol_name,
|
||||||
'wmi_exc': exc})
|
'wmi_exc': six.text_type(exc)})
|
||||||
LOG.error(err_msg)
|
LOG.error(err_msg)
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg)
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
|
|
||||||
|
@ -370,16 +381,16 @@ class WindowsUtils(object):
|
||||||
wt_disk.Extend(additional_size)
|
wt_disk.Extend(additional_size)
|
||||||
except wmi.x_wmi as exc:
|
except wmi.x_wmi as exc:
|
||||||
err_msg = (_(
|
err_msg = (_(
|
||||||
'extend: error when extending the volume: %(vol_name)s '
|
'extend: error when extending the volume: %(vol_name)s .WMI '
|
||||||
'.WMI exception: %(wmi_exc)s') % {'vol_name': vol_name,
|
'exception: %(wmi_exc)s') % {'vol_name': vol_name,
|
||||||
'wmi_exc': exc})
|
'wmi_exc': six.text_type(exc)})
|
||||||
LOG.error(err_msg)
|
LOG.error(err_msg)
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg)
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
|
|
||||||
def local_path(self, volume, format=None):
|
def local_path(self, volume, format=None):
|
||||||
base_vhd_folder = CONF.windows_iscsi_lun_path
|
base_vhd_folder = CONF.windows_iscsi_lun_path
|
||||||
if not os.path.exists(base_vhd_folder):
|
if not os.path.exists(base_vhd_folder):
|
||||||
LOG.debug('Creating folder: %s' % base_vhd_folder)
|
LOG.debug('Creating folder: %s', base_vhd_folder)
|
||||||
os.makedirs(base_vhd_folder)
|
os.makedirs(base_vhd_folder)
|
||||||
if not format:
|
if not format:
|
||||||
format = self.get_supported_format()
|
format = self.get_supported_format()
|
||||||
|
|
|
@ -90,18 +90,15 @@ class XIOISEDriver(object):
|
||||||
LOG.debug("XIOISEDriver check_for_setup_error called.")
|
LOG.debug("XIOISEDriver check_for_setup_error called.")
|
||||||
# The san_ip must always be set
|
# The san_ip must always be set
|
||||||
if self.configuration.san_ip == "":
|
if self.configuration.san_ip == "":
|
||||||
msg = _LE("san ip must be configured!")
|
LOG.error(_LE("san ip must be configured!"))
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# The san_login must always be set
|
# The san_login must always be set
|
||||||
if self.configuration.san_login == "":
|
if self.configuration.san_login == "":
|
||||||
msg = _LE("san_login must be configured!")
|
LOG.error(_LE("san_login must be configured!"))
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# The san_password must always be set
|
# The san_password must always be set
|
||||||
if self.configuration.san_password == "":
|
if self.configuration.san_password == "":
|
||||||
msg = _LE("san_password must be configured!")
|
LOG.error(_LE("san_password must be configured!"))
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
return
|
return
|
||||||
|
|
||||||
|
@ -118,8 +115,7 @@ class XIOISEDriver(object):
|
||||||
if status != 200:
|
if status != 200:
|
||||||
# unsuccessful - this is fatal as we need the global id
|
# unsuccessful - this is fatal as we need the global id
|
||||||
# to build REST requests.
|
# to build REST requests.
|
||||||
msg = _LE("Array query failed - No response (%d)!") % status
|
LOG.error(_LE("Array query failed - No response (%d)!"), status)
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# Successfully fetched QUERY info. Parse out globalid along with
|
# Successfully fetched QUERY info. Parse out globalid along with
|
||||||
# ipaddress for Controller 1 and Controller 2. We assign primary
|
# ipaddress for Controller 1 and Controller 2. We assign primary
|
||||||
|
@ -134,8 +130,7 @@ class XIOISEDriver(object):
|
||||||
self.configuration.ise_qos = False
|
self.configuration.ise_qos = False
|
||||||
capabilities = xml_tree.find('capabilities')
|
capabilities = xml_tree.find('capabilities')
|
||||||
if capabilities is None:
|
if capabilities is None:
|
||||||
msg = _LE("Array query failed. No capabilities in response!")
|
LOG.error(_LE("Array query failed. No capabilities in response!"))
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
for node in capabilities:
|
for node in capabilities:
|
||||||
if node.tag != 'capability':
|
if node.tag != 'capability':
|
||||||
|
@ -153,22 +148,19 @@ class XIOISEDriver(object):
|
||||||
support['thin-clones'] = True
|
support['thin-clones'] = True
|
||||||
# Make sure ISE support necessary features
|
# Make sure ISE support necessary features
|
||||||
if not support['clones']:
|
if not support['clones']:
|
||||||
msg = _LE("ISE FW version is not compatible with Openstack!")
|
LOG.error(_LE("ISE FW version is not compatible with Openstack!"))
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# set up thin provisioning support
|
# set up thin provisioning support
|
||||||
self.configuration.san_thin_provision = support['thin-clones']
|
self.configuration.san_thin_provision = support['thin-clones']
|
||||||
# Fill in global id, primary and secondary ip addresses
|
# Fill in global id, primary and secondary ip addresses
|
||||||
globalid = xml_tree.find('globalid')
|
globalid = xml_tree.find('globalid')
|
||||||
if globalid is None:
|
if globalid is None:
|
||||||
msg = _LE("Array query failed. No global id in XML response!")
|
LOG.error(_LE("Array query failed. No global id in XML response!"))
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
self.ise_globalid = globalid.text
|
self.ise_globalid = globalid.text
|
||||||
controllers = xml_tree.find('controllers')
|
controllers = xml_tree.find('controllers')
|
||||||
if controllers is None:
|
if controllers is None:
|
||||||
msg = _LE("Array query failed. No controllers in response!")
|
LOG.error(_LE("Array query failed. No controllers in response!"))
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
for node in controllers:
|
for node in controllers:
|
||||||
if node.tag != 'controller':
|
if node.tag != 'controller':
|
||||||
|
@ -207,8 +199,7 @@ class XIOISEDriver(object):
|
||||||
# this call will populate globalid
|
# this call will populate globalid
|
||||||
self._send_query()
|
self._send_query()
|
||||||
if self.ise_globalid is None:
|
if self.ise_globalid is None:
|
||||||
msg = _LE("ISE globalid not set!")
|
LOG.error(_LE("ISE globalid not set!"))
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
return self.ise_globalid
|
return self.ise_globalid
|
||||||
|
|
||||||
|
@ -219,8 +210,7 @@ class XIOISEDriver(object):
|
||||||
self.ise_primary_ip = self.configuration.san_ip
|
self.ise_primary_ip = self.configuration.san_ip
|
||||||
if self.ise_primary_ip == '':
|
if self.ise_primary_ip == '':
|
||||||
# No IP - fatal.
|
# No IP - fatal.
|
||||||
msg = _LE("Primary IP must be set!")
|
LOG.error(_LE("Primary IP must be set!"))
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
return self.ise_primary_ip
|
return self.ise_primary_ip
|
||||||
|
|
||||||
|
@ -346,7 +336,8 @@ class XIOISEDriver(object):
|
||||||
def _call_loop(loop_args):
|
def _call_loop(loop_args):
|
||||||
remaining = loop_args['retries']
|
remaining = loop_args['retries']
|
||||||
args = loop_args['args']
|
args = loop_args['args']
|
||||||
LOG.debug("In call loop (%d) %s", remaining, args)
|
LOG.debug("In call loop (%(remaining)d) %(args)s",
|
||||||
|
{'remaining': remaining, 'args': args})
|
||||||
(remaining, response) = loop_args['func'](args, remaining)
|
(remaining, response) = loop_args['func'](args, remaining)
|
||||||
if remaining == 0:
|
if remaining == 0:
|
||||||
# We are done - let our caller handle response
|
# We are done - let our caller handle response
|
||||||
|
@ -383,7 +374,8 @@ class XIOISEDriver(object):
|
||||||
# successful, the request flag for a new QUERY will be set. The QUERY
|
# successful, the request flag for a new QUERY will be set. The QUERY
|
||||||
# will be sent on next connection attempt to figure out which
|
# will be sent on next connection attempt to figure out which
|
||||||
# controller is primary in case it has changed.
|
# controller is primary in case it has changed.
|
||||||
LOG.debug("Connect: %s %s %s", method, url, body)
|
LOG.debug("Connect: %(method)s %(url)s %(body)s",
|
||||||
|
{'method': method, 'url': url, 'body': body})
|
||||||
using_secondary = 0
|
using_secondary = 0
|
||||||
response = {}
|
response = {}
|
||||||
response['status'] = 0
|
response['status'] = 0
|
||||||
|
@ -414,9 +406,8 @@ class XIOISEDriver(object):
|
||||||
if secondary_ip is '':
|
if secondary_ip is '':
|
||||||
# if secondary is not setup yet, then assert
|
# if secondary is not setup yet, then assert
|
||||||
# connection on primary and secondary ip failed
|
# connection on primary and secondary ip failed
|
||||||
msg = (_LE("Connection to %s failed and no secondary!") %
|
LOG.error(_LE("Connection to %s failed and no secondary!"),
|
||||||
primary_ip)
|
primary_ip)
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# swap primary for secondary ip in URL
|
# swap primary for secondary ip in URL
|
||||||
url = string.replace(url, primary_ip, secondary_ip)
|
url = string.replace(url, primary_ip, secondary_ip)
|
||||||
|
@ -426,9 +417,8 @@ class XIOISEDriver(object):
|
||||||
# connection failed on both IPs - break out of the loop
|
# connection failed on both IPs - break out of the loop
|
||||||
break
|
break
|
||||||
# connection on primary and secondary ip failed
|
# connection on primary and secondary ip failed
|
||||||
msg = (_LE("Could not connect to %(primary)s or %(secondary)s!") %
|
LOG.error(_LE("Could not connect to %(primary)s or %(secondary)s!"),
|
||||||
{'primary': primary_ip, 'secondary': secondary_ip})
|
{'primary': primary_ip, 'secondary': secondary_ip})
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
|
|
||||||
def _param_string(self, params):
|
def _param_string(self, params):
|
||||||
|
@ -470,8 +460,7 @@ class XIOISEDriver(object):
|
||||||
resp = self._send_cmd('GET', url, {})
|
resp = self._send_cmd('GET', url, {})
|
||||||
status = resp['status']
|
status = resp['status']
|
||||||
if status != 200:
|
if status != 200:
|
||||||
msg = _LW("IOnetworks GET failed (%d)") % status
|
LOG.warning(_LW("IOnetworks GET failed (%d)"), status)
|
||||||
LOG.warning(msg)
|
|
||||||
return chap
|
return chap
|
||||||
# Got a good response. Parse out CHAP info. First check if CHAP is
|
# Got a good response. Parse out CHAP info. First check if CHAP is
|
||||||
# enabled and if so parse out username and password.
|
# enabled and if so parse out username and password.
|
||||||
|
@ -501,8 +490,7 @@ class XIOISEDriver(object):
|
||||||
status = resp['status']
|
status = resp['status']
|
||||||
if status != 200:
|
if status != 200:
|
||||||
# Not good. Throw an exception.
|
# Not good. Throw an exception.
|
||||||
msg = _LE("Controller GET failed (%d)") % status
|
LOG.error(_LE("Controller GET failed (%d)"), status)
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# Good response. Parse out IQN that matches iscsi_ip_address
|
# Good response. Parse out IQN that matches iscsi_ip_address
|
||||||
# passed in from cinder.conf. IQN is 'hidden' in globalid field.
|
# passed in from cinder.conf. IQN is 'hidden' in globalid field.
|
||||||
|
@ -527,8 +515,7 @@ class XIOISEDriver(object):
|
||||||
if target_iqn != '':
|
if target_iqn != '':
|
||||||
return target_iqn
|
return target_iqn
|
||||||
# Did not find a matching IQN. Upsetting.
|
# Did not find a matching IQN. Upsetting.
|
||||||
msg = _LE("Failed to get IQN!")
|
LOG.error(_LE("Failed to get IQN!"))
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
|
|
||||||
def find_target_wwns(self):
|
def find_target_wwns(self):
|
||||||
|
@ -541,8 +528,7 @@ class XIOISEDriver(object):
|
||||||
status = resp['status']
|
status = resp['status']
|
||||||
if status != 200:
|
if status != 200:
|
||||||
# Not good. Throw an exception.
|
# Not good. Throw an exception.
|
||||||
msg = _LE("Controller GET failed (%d)") % status
|
LOG.error(_LE("Controller GET failed (%d)"), status)
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# Good response. Parse out globalid (WWN) of endpoint that matches
|
# Good response. Parse out globalid (WWN) of endpoint that matches
|
||||||
# protocol and type (array).
|
# protocol and type (array).
|
||||||
|
@ -569,8 +555,8 @@ class XIOISEDriver(object):
|
||||||
status = resp['status']
|
status = resp['status']
|
||||||
if status != 200:
|
if status != 200:
|
||||||
# Not good. Throw an exception.
|
# Not good. Throw an exception.
|
||||||
msg = _LE("Failed to get allocation information (%d)!") % status
|
LOG.error(_LE("Failed to get allocation information (%d)!"),
|
||||||
LOG.error(msg)
|
status)
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# Good response. Parse out LUN.
|
# Good response. Parse out LUN.
|
||||||
xml_tree = etree.fromstring(resp['content'])
|
xml_tree = etree.fromstring(resp['content'])
|
||||||
|
@ -580,8 +566,7 @@ class XIOISEDriver(object):
|
||||||
if luntag is not None:
|
if luntag is not None:
|
||||||
return luntag.text
|
return luntag.text
|
||||||
# Did not find LUN. Throw an exception.
|
# Did not find LUN. Throw an exception.
|
||||||
msg = _LE("Failed to get LUN information!")
|
LOG.error(_LE("Failed to get LUN information!"))
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
|
|
||||||
def _get_volume_info(self, vol_name):
|
def _get_volume_info(self, vol_name):
|
||||||
|
@ -600,24 +585,21 @@ class XIOISEDriver(object):
|
||||||
url = '/storage/arrays/%s/volumes' % (self._get_ise_globalid())
|
url = '/storage/arrays/%s/volumes' % (self._get_ise_globalid())
|
||||||
resp = self._send_cmd('GET', url, {'name': vol_name})
|
resp = self._send_cmd('GET', url, {'name': vol_name})
|
||||||
if resp['status'] != 200:
|
if resp['status'] != 200:
|
||||||
msg = (_LW("Could not get status for %(name)s (%(status)d).") %
|
LOG.warning(_LW("Could not get status for %(name)s (%(status)d)."),
|
||||||
{'name': vol_name, 'status': resp['status']})
|
{'name': vol_name, 'status': resp['status']})
|
||||||
LOG.warning(msg)
|
|
||||||
return vol_info
|
return vol_info
|
||||||
# Good response. Parse down to Volume tag in list of one.
|
# Good response. Parse down to Volume tag in list of one.
|
||||||
root = etree.fromstring(resp['content'])
|
root = etree.fromstring(resp['content'])
|
||||||
volume_node = root.find('volume')
|
volume_node = root.find('volume')
|
||||||
if volume_node is None:
|
if volume_node is None:
|
||||||
msg = _LW("No volume node in XML content.")
|
LOG.warning(_LW("No volume node in XML content."))
|
||||||
LOG.warning(msg)
|
|
||||||
return vol_info
|
return vol_info
|
||||||
# Location can be found as an attribute in the volume node tag.
|
# Location can be found as an attribute in the volume node tag.
|
||||||
vol_info['location'] = volume_node.attrib['self']
|
vol_info['location'] = volume_node.attrib['self']
|
||||||
# Find status tag
|
# Find status tag
|
||||||
status = volume_node.find('status')
|
status = volume_node.find('status')
|
||||||
if status is None:
|
if status is None:
|
||||||
msg = _LW("No status payload for volume %s.") % vol_name
|
LOG.warning(_LW("No status payload for volume %s."), vol_name)
|
||||||
LOG.warning(msg)
|
|
||||||
return vol_info
|
return vol_info
|
||||||
# Fill in value and string from status tag attributes.
|
# Fill in value and string from status tag attributes.
|
||||||
vol_info['value'] = status.attrib['value']
|
vol_info['value'] = status.attrib['value']
|
||||||
|
@ -642,9 +624,8 @@ class XIOISEDriver(object):
|
||||||
resp = self._send_cmd('GET', url, {'name': volume['name'],
|
resp = self._send_cmd('GET', url, {'name': volume['name'],
|
||||||
'hostname': hostname})
|
'hostname': hostname})
|
||||||
if resp['status'] != 200:
|
if resp['status'] != 200:
|
||||||
msg = (_LE("Could not GET allocation information (%d)!") %
|
LOG.error(_LE("Could not GET allocation information (%d)!"),
|
||||||
resp['status'])
|
resp['status'])
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# Good response. Find the allocation based on volume name.
|
# Good response. Find the allocation based on volume name.
|
||||||
allocation_tree = etree.fromstring(resp['content'])
|
allocation_tree = etree.fromstring(resp['content'])
|
||||||
|
@ -706,13 +687,11 @@ class XIOISEDriver(object):
|
||||||
if status == 201:
|
if status == 201:
|
||||||
LOG.info(_LI("Volume %s presented."), volume['name'])
|
LOG.info(_LI("Volume %s presented."), volume['name'])
|
||||||
elif status == 409:
|
elif status == 409:
|
||||||
msg = (_LW("Volume %(name)s already presented (%(status)d)!") %
|
LOG.warning(_LW("Volume %(name)s already presented (%(status)d)!"),
|
||||||
{'name': volume['name'], 'status': status})
|
{'name': volume['name'], 'status': status})
|
||||||
LOG.warning(msg)
|
|
||||||
else:
|
else:
|
||||||
msg = (_LE("Failed to present volume %(name)s (%(status)d)!") %
|
LOG.error(_LE("Failed to present volume %(name)s (%(status)d)!"),
|
||||||
{'name': volume['name'], 'status': status})
|
{'name': volume['name'], 'status': status})
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# Fetch LUN. In theory the LUN should be what caller requested.
|
# Fetch LUN. In theory the LUN should be what caller requested.
|
||||||
# We try to use shortcut as location comes back in Location header.
|
# We try to use shortcut as location comes back in Location header.
|
||||||
|
@ -725,8 +704,9 @@ class XIOISEDriver(object):
|
||||||
if location != '':
|
if location != '':
|
||||||
target_lun = self._find_target_lun(location)
|
target_lun = self._find_target_lun(location)
|
||||||
# Success. Return target LUN.
|
# Success. Return target LUN.
|
||||||
LOG.debug("Volume %s presented: %s %s",
|
LOG.debug("Volume %(volume)s presented: %(host)s %(lun)s",
|
||||||
volume['name'], hostname, target_lun)
|
{'volume': volume['name'], 'host': hostname,
|
||||||
|
'lun': target_lun})
|
||||||
return target_lun
|
return target_lun
|
||||||
|
|
||||||
def find_allocations(self, hostname):
|
def find_allocations(self, hostname):
|
||||||
|
@ -736,10 +716,9 @@ class XIOISEDriver(object):
|
||||||
resp = self._send_cmd('GET', url, {'hostname': hostname})
|
resp = self._send_cmd('GET', url, {'hostname': hostname})
|
||||||
status = resp['status']
|
status = resp['status']
|
||||||
if status != 200:
|
if status != 200:
|
||||||
msg = (_LE("Failed to get allocation information: "
|
LOG.error(_LE("Failed to get allocation information: "
|
||||||
"%(host)s (%(status)d)!") %
|
"%(host)s (%(status)d)!"),
|
||||||
{'host': hostname, 'status': status})
|
{'host': hostname, 'status': status})
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# Good response. Count the number of allocations.
|
# Good response. Count the number of allocations.
|
||||||
allocation_tree = etree.fromstring(resp['content'])
|
allocation_tree = etree.fromstring(resp['content'])
|
||||||
|
@ -771,8 +750,7 @@ class XIOISEDriver(object):
|
||||||
resp = self._send_cmd('GET', url, params)
|
resp = self._send_cmd('GET', url, params)
|
||||||
status = resp['status']
|
status = resp['status']
|
||||||
if resp['status'] != 200:
|
if resp['status'] != 200:
|
||||||
msg = _LE("Could not find any hosts (%s)") % status
|
LOG.error(_LE("Could not find any hosts (%s)"), status)
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# Good response. Try to match up a host based on end point string.
|
# Good response. Try to match up a host based on end point string.
|
||||||
host_tree = etree.fromstring(resp['content'])
|
host_tree = etree.fromstring(resp['content'])
|
||||||
|
@ -820,7 +798,8 @@ class XIOISEDriver(object):
|
||||||
else:
|
else:
|
||||||
endpoint_str = endpoints
|
endpoint_str = endpoints
|
||||||
# Log host creation.
|
# Log host creation.
|
||||||
LOG.debug("Create host %s; %s", hostname, endpoint_str)
|
LOG.debug("Create host %(host)s; %(endpoint)s",
|
||||||
|
{'host': hostname, 'endpoint': endpoint_str})
|
||||||
# Issue REST call to create host entry of Openstack type.
|
# Issue REST call to create host entry of Openstack type.
|
||||||
params = {}
|
params = {}
|
||||||
params = {'name': hostname, 'endpoint': endpoint_str,
|
params = {'name': hostname, 'endpoint': endpoint_str,
|
||||||
|
@ -829,8 +808,7 @@ class XIOISEDriver(object):
|
||||||
resp = self._send_cmd('POST', url, params)
|
resp = self._send_cmd('POST', url, params)
|
||||||
status = resp['status']
|
status = resp['status']
|
||||||
if status != 201 and status != 409:
|
if status != 201 and status != 409:
|
||||||
msg = _LE("POST for host create failed (%s)!") % status
|
LOG.error(_LE("POST for host create failed (%s)!"), status)
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# Successfully created host entry. Return host name.
|
# Successfully created host entry. Return host name.
|
||||||
return hostname
|
return hostname
|
||||||
|
@ -857,8 +835,7 @@ class XIOISEDriver(object):
|
||||||
if vol_info['value'] == '0':
|
if vol_info['value'] == '0':
|
||||||
LOG.debug('Source volume %s ready.', volume_name)
|
LOG.debug('Source volume %s ready.', volume_name)
|
||||||
else:
|
else:
|
||||||
msg = _LE("Source volume %s not ready!") % volume_name
|
LOG.error(_LE("Source volume %s not ready!"), volume_name)
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# Prepare snapshot
|
# Prepare snapshot
|
||||||
# get extra_specs and qos specs from source volume
|
# get extra_specs and qos specs from source volume
|
||||||
|
@ -866,7 +843,8 @@ class XIOISEDriver(object):
|
||||||
ctxt = context.get_admin_context()
|
ctxt = context.get_admin_context()
|
||||||
type_id = volume['volume_type_id']
|
type_id = volume['volume_type_id']
|
||||||
extra_specs = self._get_extra_specs(ctxt, type_id)
|
extra_specs = self._get_extra_specs(ctxt, type_id)
|
||||||
LOG.debug("Volume %s extra_specs %s", volume['name'], extra_specs)
|
LOG.debug("Volume %(volume_name)s extra_specs %(extra_specs)s",
|
||||||
|
{'volume_name': volume['name'], 'extra_specs': extra_specs})
|
||||||
qos = self._get_qos_specs(ctxt, type_id)
|
qos = self._get_qos_specs(ctxt, type_id)
|
||||||
# Wait until snapshot/clone is prepared.
|
# Wait until snapshot/clone is prepared.
|
||||||
args['method'] = 'POST'
|
args['method'] = 'POST'
|
||||||
|
@ -883,8 +861,7 @@ class XIOISEDriver(object):
|
||||||
args, retries)
|
args, retries)
|
||||||
if resp['status'] != 202:
|
if resp['status'] != 202:
|
||||||
# clone prepare failed - bummer
|
# clone prepare failed - bummer
|
||||||
msg = _LE("Prepare clone failed for %s.") % clone['name']
|
LOG.error(_LE("Prepare clone failed for %s."), clone['name'])
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# clone prepare request accepted
|
# clone prepare request accepted
|
||||||
# make sure not to continue until clone prepared
|
# make sure not to continue until clone prepared
|
||||||
|
@ -896,16 +873,14 @@ class XIOISEDriver(object):
|
||||||
if PREPARED_STATUS in clone_info['details']:
|
if PREPARED_STATUS in clone_info['details']:
|
||||||
LOG.debug('Clone %s prepared.', clone['name'])
|
LOG.debug('Clone %s prepared.', clone['name'])
|
||||||
else:
|
else:
|
||||||
msg = (_LE("Clone %s not in prepared state!") % clone['name'])
|
LOG.error(_LE("Clone %s not in prepared state!"), clone['name'])
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# Clone prepared, now commit the create
|
# Clone prepared, now commit the create
|
||||||
resp = self._send_cmd('PUT', clone_info['location'],
|
resp = self._send_cmd('PUT', clone_info['location'],
|
||||||
{clone_type: 'true'})
|
{clone_type: 'true'})
|
||||||
if resp['status'] != 201:
|
if resp['status'] != 201:
|
||||||
msg = (_LE("Commit clone failed: %(name)s (%(status)d)!") %
|
LOG.error(_LE("Commit clone failed: %(name)s (%(status)d)!"),
|
||||||
{'name': clone['name'], 'status': resp['status']})
|
{'name': clone['name'], 'status': resp['status']})
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# Clone create request accepted. Make sure not to return until clone
|
# Clone create request accepted. Make sure not to return until clone
|
||||||
# operational.
|
# operational.
|
||||||
|
@ -915,11 +890,9 @@ class XIOISEDriver(object):
|
||||||
clone_info = self._wait_for_completion(self._help_wait_for_status,
|
clone_info = self._wait_for_completion(self._help_wait_for_status,
|
||||||
args, retries)
|
args, retries)
|
||||||
if OPERATIONAL_STATUS in clone_info['string']:
|
if OPERATIONAL_STATUS in clone_info['string']:
|
||||||
msg = _LI("Clone %s created."), clone['name']
|
LOG.info(_LI("Clone %s created."), clone['name'])
|
||||||
LOG.info(msg)
|
|
||||||
else:
|
else:
|
||||||
msg = _LE("Commit failed for %s!") % clone['name']
|
LOG.error(_LE("Commit failed for %s!"), clone['name'])
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
return
|
return
|
||||||
|
|
||||||
|
@ -983,8 +956,7 @@ class XIOISEDriver(object):
|
||||||
status = resp['status']
|
status = resp['status']
|
||||||
if status != 200:
|
if status != 200:
|
||||||
# Request failed. Return what we have, which isn't much.
|
# Request failed. Return what we have, which isn't much.
|
||||||
msg = _LW("Could not get pool information (%s)!") % status
|
LOG.warning(_LW("Could not get pool information (%s)!"), status)
|
||||||
LOG.warning(msg)
|
|
||||||
return (pools, vol_cnt)
|
return (pools, vol_cnt)
|
||||||
# Parse out available (free) and used. Add them up to get total.
|
# Parse out available (free) and used. Add them up to get total.
|
||||||
xml_tree = etree.fromstring(resp['content'])
|
xml_tree = etree.fromstring(resp['content'])
|
||||||
|
@ -1085,9 +1057,9 @@ class XIOISEDriver(object):
|
||||||
"""Get volume stats."""
|
"""Get volume stats."""
|
||||||
if refresh:
|
if refresh:
|
||||||
self._vol_stats = self._update_volume_stats()
|
self._vol_stats = self._update_volume_stats()
|
||||||
LOG.debug("ISE get_volume_stats (total, free): %s, %s",
|
LOG.debug("ISE get_volume_stats (total, free): %(total)s, %(free)s",
|
||||||
self._vol_stats['total_capacity_gb'],
|
{'total': self._vol_stats['total_capacity_gb'],
|
||||||
self._vol_stats['free_capacity_gb'])
|
'free': self._vol_stats['free_capacity_gb']})
|
||||||
return self._vol_stats
|
return self._vol_stats
|
||||||
|
|
||||||
def _get_extra_specs(self, ctxt, type_id):
|
def _get_extra_specs(self, ctxt, type_id):
|
||||||
|
@ -1158,7 +1130,8 @@ class XIOISEDriver(object):
|
||||||
ctxt = context.get_admin_context()
|
ctxt = context.get_admin_context()
|
||||||
type_id = volume['volume_type_id']
|
type_id = volume['volume_type_id']
|
||||||
extra_specs = self._get_extra_specs(ctxt, type_id)
|
extra_specs = self._get_extra_specs(ctxt, type_id)
|
||||||
LOG.debug("Volume %s extra_specs %s", volume['name'], extra_specs)
|
LOG.debug("Volume %(volume_name)s extra_specs %(extra_specs)s",
|
||||||
|
{'volume_name': volume['name'], 'extra_specs': extra_specs})
|
||||||
qos = self._get_qos_specs(ctxt, type_id)
|
qos = self._get_qos_specs(ctxt, type_id)
|
||||||
# Make create call
|
# Make create call
|
||||||
url = '/storage/arrays/%s/volumes' % (self._get_ise_globalid())
|
url = '/storage/arrays/%s/volumes' % (self._get_ise_globalid())
|
||||||
|
@ -1173,9 +1146,8 @@ class XIOISEDriver(object):
|
||||||
'IOPSmax': qos['maxIOPS'],
|
'IOPSmax': qos['maxIOPS'],
|
||||||
'IOPSburst': qos['burstIOPS']})
|
'IOPSburst': qos['burstIOPS']})
|
||||||
if resp['status'] != 201:
|
if resp['status'] != 201:
|
||||||
msg = (_LE("Failed to create volume: %(name)s (%(status)s)") %
|
LOG.error(_LE("Failed to create volume: %(name)s (%(status)s)"),
|
||||||
{'name': volume['name'], 'status': resp['status']})
|
{'name': volume['name'], 'status': resp['status']})
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# Good response. Make sure volume is in operational state before
|
# Good response. Make sure volume is in operational state before
|
||||||
# returning. Volume creation completes asynchronously.
|
# returning. Volume creation completes asynchronously.
|
||||||
|
@ -1187,11 +1159,9 @@ class XIOISEDriver(object):
|
||||||
args, retries)
|
args, retries)
|
||||||
if OPERATIONAL_STATUS in vol_info['string']:
|
if OPERATIONAL_STATUS in vol_info['string']:
|
||||||
# Ready.
|
# Ready.
|
||||||
msg = _LI("Volume %s created"), volume['name']
|
LOG.info(_LI("Volume %s created"), volume['name'])
|
||||||
LOG.info(msg)
|
|
||||||
else:
|
else:
|
||||||
msg = _LE("Failed to create volume %s.") % volume['name']
|
LOG.error(_LE("Failed to create volume %s."), volume['name'])
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
return
|
return
|
||||||
|
|
||||||
|
@ -1223,8 +1193,7 @@ class XIOISEDriver(object):
|
||||||
# in response. Used for DELETE call below.
|
# in response. Used for DELETE call below.
|
||||||
vol_info = self._get_volume_info(volume['name'])
|
vol_info = self._get_volume_info(volume['name'])
|
||||||
if vol_info['location'] == '':
|
if vol_info['location'] == '':
|
||||||
msg = _LW("Delete volume: %s not found!") % volume['name']
|
LOG.warning(_LW("Delete volume: %s not found!"), volume['name'])
|
||||||
LOG.warning(msg)
|
|
||||||
return
|
return
|
||||||
# Make DELETE call.
|
# Make DELETE call.
|
||||||
args = {}
|
args = {}
|
||||||
|
@ -1235,8 +1204,7 @@ class XIOISEDriver(object):
|
||||||
retries = self.configuration.ise_completion_retries
|
retries = self.configuration.ise_completion_retries
|
||||||
resp = self._wait_for_completion(self._help_call_method, args, retries)
|
resp = self._wait_for_completion(self._help_call_method, args, retries)
|
||||||
if resp['status'] == 204:
|
if resp['status'] == 204:
|
||||||
msg = (_LI("Volume %s deleted."), volume['name'])
|
LOG.info(_LI("Volume %s deleted."), volume['name'])
|
||||||
LOG.info(msg)
|
|
||||||
return
|
return
|
||||||
|
|
||||||
def delete_volume(self, volume):
|
def delete_volume(self, volume):
|
||||||
|
@ -1255,8 +1223,7 @@ class XIOISEDriver(object):
|
||||||
# in response. Used for PUT call below.
|
# in response. Used for PUT call below.
|
||||||
vol_info = self._get_volume_info(volume['name'])
|
vol_info = self._get_volume_info(volume['name'])
|
||||||
if vol_info['location'] == '':
|
if vol_info['location'] == '':
|
||||||
msg = _LE("modify volume: %s does not exist!") % volume['name']
|
LOG.error(_LE("modify volume: %s does not exist!"), volume['name'])
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# Make modify volume REST call using PUT.
|
# Make modify volume REST call using PUT.
|
||||||
# Location from above is used as identifier.
|
# Location from above is used as identifier.
|
||||||
|
@ -1265,9 +1232,8 @@ class XIOISEDriver(object):
|
||||||
if status == 201:
|
if status == 201:
|
||||||
LOG.debug("Volume %s modified.", volume['name'])
|
LOG.debug("Volume %s modified.", volume['name'])
|
||||||
return True
|
return True
|
||||||
msg = (_LE("Modify volume PUT failed: %(name)s (%(status)d).") %
|
LOG.error(_LE("Modify volume PUT failed: %(name)s (%(status)d)."),
|
||||||
{'name': volume['name'], 'status': status})
|
{'name': volume['name'], 'status': status})
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
|
|
||||||
def extend_volume(self, volume, new_size):
|
def extend_volume(self, volume, new_size):
|
||||||
|
@ -1275,9 +1241,8 @@ class XIOISEDriver(object):
|
||||||
LOG.debug("extend_volume called")
|
LOG.debug("extend_volume called")
|
||||||
ret = self._modify_volume(volume, {'size': new_size})
|
ret = self._modify_volume(volume, {'size': new_size})
|
||||||
if ret is True:
|
if ret is True:
|
||||||
msg = (_LI("volume %(name)s extended to %(size)d."),
|
LOG.info(_LI("volume %(name)s extended to %(size)d."),
|
||||||
{'name': volume['name'], 'size': new_size})
|
{'name': volume['name'], 'size': new_size})
|
||||||
LOG.info(msg)
|
|
||||||
return
|
return
|
||||||
|
|
||||||
def retype(self, ctxt, volume, new_type, diff, host):
|
def retype(self, ctxt, volume, new_type, diff, host):
|
||||||
|
@ -1288,16 +1253,14 @@ class XIOISEDriver(object):
|
||||||
'IOPSmax': qos['maxIOPS'],
|
'IOPSmax': qos['maxIOPS'],
|
||||||
'IOPSburst': qos['burstIOPS']})
|
'IOPSburst': qos['burstIOPS']})
|
||||||
if ret is True:
|
if ret is True:
|
||||||
msg = _LI("Volume %s retyped."), volume['name']
|
LOG.info(_LI("Volume %s retyped."), volume['name'])
|
||||||
LOG.info(msg)
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def manage_existing(self, volume, ise_volume_ref):
|
def manage_existing(self, volume, ise_volume_ref):
|
||||||
"""Convert an existing ISE volume to a Cinder volume."""
|
"""Convert an existing ISE volume to a Cinder volume."""
|
||||||
LOG.debug("X-IO manage_existing called")
|
LOG.debug("X-IO manage_existing called")
|
||||||
if 'source-name' not in ise_volume_ref:
|
if 'source-name' not in ise_volume_ref:
|
||||||
msg = _LE("manage_existing: No source-name in ref!")
|
LOG.error(_LE("manage_existing: No source-name in ref!"))
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# copy the source-name to 'name' for modify volume use
|
# copy the source-name to 'name' for modify volume use
|
||||||
ise_volume_ref['name'] = ise_volume_ref['source-name']
|
ise_volume_ref['name'] = ise_volume_ref['source-name']
|
||||||
|
@ -1309,24 +1272,21 @@ class XIOISEDriver(object):
|
||||||
'IOPSmax': qos['maxIOPS'],
|
'IOPSmax': qos['maxIOPS'],
|
||||||
'IOPSburst': qos['burstIOPS']})
|
'IOPSburst': qos['burstIOPS']})
|
||||||
if ret is True:
|
if ret is True:
|
||||||
msg = _LI("Volume %s converted."), ise_volume_ref['name']
|
LOG.info(_LI("Volume %s converted."), ise_volume_ref['name'])
|
||||||
LOG.info(msg)
|
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
def manage_existing_get_size(self, volume, ise_volume_ref):
|
def manage_existing_get_size(self, volume, ise_volume_ref):
|
||||||
"""Get size of an existing ISE volume."""
|
"""Get size of an existing ISE volume."""
|
||||||
LOG.debug("X-IO manage_existing_get_size called")
|
LOG.debug("X-IO manage_existing_get_size called")
|
||||||
if 'source-name' not in ise_volume_ref:
|
if 'source-name' not in ise_volume_ref:
|
||||||
msg = _LE("manage_existing_get_size: No source-name in ref!")
|
LOG.error(_LE("manage_existing_get_size: No source-name in ref!"))
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
ref_name = ise_volume_ref['source-name']
|
ref_name = ise_volume_ref['source-name']
|
||||||
# get volume status including size
|
# get volume status including size
|
||||||
vol_info = self._get_volume_info(ref_name)
|
vol_info = self._get_volume_info(ref_name)
|
||||||
if vol_info['location'] == '':
|
if vol_info['location'] == '':
|
||||||
msg = (_LE("manage_existing_get_size: %s does not exist!") %
|
LOG.error(_LE("manage_existing_get_size: %s does not exist!"),
|
||||||
ref_name)
|
ref_name)
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
return int(vol_info['size'])
|
return int(vol_info['size'])
|
||||||
|
|
||||||
|
@ -1335,8 +1295,8 @@ class XIOISEDriver(object):
|
||||||
LOG.debug("X-IO unmanage called")
|
LOG.debug("X-IO unmanage called")
|
||||||
vol_info = self._get_volume_info(volume['name'])
|
vol_info = self._get_volume_info(volume['name'])
|
||||||
if vol_info['location'] == '':
|
if vol_info['location'] == '':
|
||||||
msg = _LE("unmanage: Volume %s does not exist!") % volume['name']
|
LOG.error(_LE("unmanage: Volume %s does not exist!"),
|
||||||
LOG.error(msg)
|
volume['name'])
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# This is a noop. ISE does not store any Cinder specific information.
|
# This is a noop. ISE does not store any Cinder specific information.
|
||||||
|
|
||||||
|
@ -1354,8 +1314,7 @@ class XIOISEDriver(object):
|
||||||
host = self._find_host(endpoints)
|
host = self._find_host(endpoints)
|
||||||
if host['name'] == '':
|
if host['name'] == '':
|
||||||
# host still not found, this is fatal.
|
# host still not found, this is fatal.
|
||||||
msg = _LE("Host could not be found!")
|
LOG.error(_LE("Host could not be found!"))
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
elif string.upper(host['type']) != 'OPENSTACK':
|
elif string.upper(host['type']) != 'OPENSTACK':
|
||||||
# Make sure host type is marked as Openstack host
|
# Make sure host type is marked as Openstack host
|
||||||
|
@ -1363,8 +1322,7 @@ class XIOISEDriver(object):
|
||||||
resp = self._send_cmd('PUT', host['locator'], params)
|
resp = self._send_cmd('PUT', host['locator'], params)
|
||||||
status = resp['status']
|
status = resp['status']
|
||||||
if status != 201 and status != 409:
|
if status != 201 and status != 409:
|
||||||
msg = _LE("Host PUT failed (%s).") % status
|
LOG.error(_LE("Host PUT failed (%s)."), status)
|
||||||
LOG.error(msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# We have a host object.
|
# We have a host object.
|
||||||
target_lun = ''
|
target_lun = ''
|
||||||
|
@ -1422,8 +1380,7 @@ class XIOISEISCSIDriver(driver.ISCSIDriver):
|
||||||
|
|
||||||
# The iscsi_ip_address must always be set.
|
# The iscsi_ip_address must always be set.
|
||||||
if self.configuration.iscsi_ip_address == '':
|
if self.configuration.iscsi_ip_address == '':
|
||||||
err_msg = _LE("iscsi_ip_address must be set!")
|
LOG.error(_LE("iscsi_ip_address must be set!"))
|
||||||
LOG.error(err_msg)
|
|
||||||
RaiseXIODriverException()
|
RaiseXIODriverException()
|
||||||
# Setup common driver
|
# Setup common driver
|
||||||
self.driver = XIOISEDriver(configuration=self.configuration)
|
self.driver = XIOISEDriver(configuration=self.configuration)
|
||||||
|
|
|
@ -92,8 +92,8 @@ class RestResult(object):
|
||||||
self.status = self.error.code
|
self.status = self.error.code
|
||||||
self.data = httplib.responses[self.status]
|
self.data = httplib.responses[self.status]
|
||||||
|
|
||||||
LOG.debug('Response code: %s' % self.status)
|
LOG.debug('Response code: %s', self.status)
|
||||||
LOG.debug('Response data: %s' % self.data)
|
LOG.debug('Response data: %s', self.data)
|
||||||
|
|
||||||
def get_header(self, name):
|
def get_header(self, name):
|
||||||
"""Get an HTTP header with the given name from the results
|
"""Get an HTTP header with the given name from the results
|
||||||
|
@ -177,7 +177,7 @@ class RestClientURL(object):
|
||||||
self.headers['x-auth-session'] = \
|
self.headers['x-auth-session'] = \
|
||||||
result.get_header('x-auth-session')
|
result.get_header('x-auth-session')
|
||||||
self.do_logout = True
|
self.do_logout = True
|
||||||
LOG.info(_LI('ZFSSA version: %s') %
|
LOG.info(_LI('ZFSSA version: %s'),
|
||||||
result.get_header('x-zfssa-version'))
|
result.get_header('x-zfssa-version'))
|
||||||
|
|
||||||
elif result.status == httplib.NOT_FOUND:
|
elif result.status == httplib.NOT_FOUND:
|
||||||
|
@ -268,35 +268,33 @@ class RestClientURL(object):
|
||||||
retry = 0
|
retry = 0
|
||||||
response = None
|
response = None
|
||||||
|
|
||||||
LOG.debug('Request: %s %s' % (request, zfssaurl))
|
LOG.debug('Request: %s %s', (request, zfssaurl))
|
||||||
LOG.debug('Out headers: %s' % out_hdrs)
|
LOG.debug('Out headers: %s', out_hdrs)
|
||||||
if body and body != '':
|
if body and body != '':
|
||||||
LOG.debug('Body: %s' % body)
|
LOG.debug('Body: %s', body)
|
||||||
|
|
||||||
while retry < maxreqretries:
|
while retry < maxreqretries:
|
||||||
try:
|
try:
|
||||||
response = urllib2.urlopen(req, timeout=self.timeout)
|
response = urllib2.urlopen(req, timeout=self.timeout)
|
||||||
except urllib2.HTTPError as err:
|
except urllib2.HTTPError as err:
|
||||||
if err.code == httplib.NOT_FOUND:
|
if err.code == httplib.NOT_FOUND:
|
||||||
LOG.debug('REST Not Found: %s' % err.code)
|
LOG.debug('REST Not Found: %s', err.code)
|
||||||
else:
|
else:
|
||||||
LOG.error(_LE('REST Not Available: %s') % err.code)
|
LOG.error(_LE('REST Not Available: %s'), err.code)
|
||||||
|
|
||||||
if err.code == httplib.SERVICE_UNAVAILABLE and \
|
if err.code == httplib.SERVICE_UNAVAILABLE and \
|
||||||
retry < maxreqretries:
|
retry < maxreqretries:
|
||||||
retry += 1
|
retry += 1
|
||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
LOG.error(_LE('Server Busy retry request: %s') % retry)
|
LOG.error(_LE('Server Busy retry request: %s'), retry)
|
||||||
continue
|
continue
|
||||||
if (err.code == httplib.UNAUTHORIZED or
|
if (err.code == httplib.UNAUTHORIZED or
|
||||||
err.code == httplib.INTERNAL_SERVER_ERROR) and \
|
err.code == httplib.INTERNAL_SERVER_ERROR) and \
|
||||||
'/access/v1' not in zfssaurl:
|
'/access/v1' not in zfssaurl:
|
||||||
try:
|
try:
|
||||||
LOG.error(_LE('Authorizing request: '
|
LOG.error(_LE('Authorizing request: %(zfssaurl)s '
|
||||||
'%(zfssaurl)s'
|
'retry: %(retry)d .'),
|
||||||
'retry: %(retry)d .')
|
{'zfssaurl': zfssaurl, 'retry': retry})
|
||||||
% {'zfssaurl': zfssaurl,
|
|
||||||
'retry': retry})
|
|
||||||
self._authorize()
|
self._authorize()
|
||||||
req.add_header('x-auth-session',
|
req.add_header('x-auth-session',
|
||||||
self.headers['x-auth-session'])
|
self.headers['x-auth-session'])
|
||||||
|
@ -309,7 +307,7 @@ class RestClientURL(object):
|
||||||
return RestResult(err=err)
|
return RestResult(err=err)
|
||||||
|
|
||||||
except urllib2.URLError as err:
|
except urllib2.URLError as err:
|
||||||
LOG.error(_LE('URLError: %s') % err.reason)
|
LOG.error(_LE('URLError: %s'), err.reason)
|
||||||
raise RestClientError(-1, name="ERR_URLError",
|
raise RestClientError(-1, name="ERR_URLError",
|
||||||
message=err.reason)
|
message=err.reason)
|
||||||
|
|
||||||
|
|
|
@ -81,27 +81,24 @@ class ZFSSAWebDAVClient(object):
|
||||||
|
|
||||||
request.get_method = lambda: method
|
request.get_method = lambda: method
|
||||||
|
|
||||||
LOG.debug('Sending WebDAV request:%s %s %s' % (method, src_url,
|
LOG.debug('Sending WebDAV request:%(method)s %(src)s %(des)s',
|
||||||
dst_url))
|
{'method': method, 'src': src_url, 'des': dst_url})
|
||||||
|
|
||||||
while retry < maxretries:
|
while retry < maxretries:
|
||||||
try:
|
try:
|
||||||
response = urllib2.urlopen(request, timeout=None)
|
response = urllib2.urlopen(request, timeout=None)
|
||||||
except urllib2.HTTPError as err:
|
except urllib2.HTTPError as err:
|
||||||
LOG.error(_LE('WebDAV returned with %(code)s error during '
|
LOG.error(_LE('WebDAV returned with %(code)s error during '
|
||||||
'%(method)s call.')
|
'%(method)s call.'),
|
||||||
% {'code': err.code,
|
{'code': err.code, 'method': method})
|
||||||
'method': method})
|
|
||||||
|
|
||||||
if err.code == httplib.INTERNAL_SERVER_ERROR:
|
if err.code == httplib.INTERNAL_SERVER_ERROR:
|
||||||
exception_msg = (_('WebDAV operation failed with '
|
LOG.error(_LE('WebDAV operation failed with error code: '
|
||||||
'error code: %(code)s '
|
'%(code)s reason: %(reason)s Retry attempt '
|
||||||
'reason: %(reason)s '
|
'%(retry)s in progress.'),
|
||||||
'Retry attempt %(retry)s in progress.')
|
{'code': err.code,
|
||||||
% {'code': err.code,
|
'reason': err.reason,
|
||||||
'reason': err.reason,
|
'retry': retry})
|
||||||
'retry': retry})
|
|
||||||
LOG.error(exception_msg)
|
|
||||||
if retry < maxretries:
|
if retry < maxretries:
|
||||||
retry += 1
|
retry += 1
|
||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
|
|
|
@ -146,8 +146,8 @@ class ZFSSANFSDriver(nfs.NfsDriver):
|
||||||
https_path = 'https://' + lcfg.zfssa_data_ip + ':' + https_port + \
|
https_path = 'https://' + lcfg.zfssa_data_ip + ':' + https_port + \
|
||||||
'/shares' + mountpoint
|
'/shares' + mountpoint
|
||||||
|
|
||||||
LOG.debug('NFS mount path: %s' % self.mount_path)
|
LOG.debug('NFS mount path: %s', self.mount_path)
|
||||||
LOG.debug('WebDAV path to the share: %s' % https_path)
|
LOG.debug('WebDAV path to the share: %s', https_path)
|
||||||
|
|
||||||
self.shares = {}
|
self.shares = {}
|
||||||
mnt_opts = self.configuration.zfssa_nfs_mount_options
|
mnt_opts = self.configuration.zfssa_nfs_mount_options
|
||||||
|
@ -167,10 +167,10 @@ class ZFSSANFSDriver(nfs.NfsDriver):
|
||||||
try:
|
try:
|
||||||
self._ensure_share_mounted(self.mount_path)
|
self._ensure_share_mounted(self.mount_path)
|
||||||
except Exception as exc:
|
except Exception as exc:
|
||||||
LOG.error(_LE('Exception during mounting %s.') % exc)
|
LOG.error(_LE('Exception during mounting %s.'), exc)
|
||||||
|
|
||||||
self._mounted_shares = [self.mount_path]
|
self._mounted_shares = [self.mount_path]
|
||||||
LOG.debug('Available shares %s' % self._mounted_shares)
|
LOG.debug('Available shares %s', self._mounted_shares)
|
||||||
|
|
||||||
def check_for_setup_error(self):
|
def check_for_setup_error(self):
|
||||||
"""Check that driver can login.
|
"""Check that driver can login.
|
||||||
|
@ -203,7 +203,7 @@ class ZFSSANFSDriver(nfs.NfsDriver):
|
||||||
snapshot['name'])
|
snapshot['name'])
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.debug('Error thrown during snapshot: %s creation' %
|
LOG.debug('Error thrown during snapshot: %s creation',
|
||||||
snapshot['name'])
|
snapshot['name'])
|
||||||
finally:
|
finally:
|
||||||
self.zfssa.delete_snapshot(lcfg.zfssa_nfs_pool,
|
self.zfssa.delete_snapshot(lcfg.zfssa_nfs_pool,
|
||||||
|
@ -232,17 +232,15 @@ class ZFSSANFSDriver(nfs.NfsDriver):
|
||||||
self.extend_volume(volume, volume['size'])
|
self.extend_volume(volume, volume['size'])
|
||||||
except Exception:
|
except Exception:
|
||||||
vol_path = self.local_path(volume)
|
vol_path = self.local_path(volume)
|
||||||
exception_msg = (_('Error in extending volume size: '
|
|
||||||
'Volume: %(volume)s '
|
|
||||||
'Vol_Size: %(vol_size)d with '
|
|
||||||
'Snapshot: %(snapshot)s '
|
|
||||||
'Snap_Size: %(snap_size)d')
|
|
||||||
% {'volume': volume['name'],
|
|
||||||
'vol_size': volume['size'],
|
|
||||||
'snapshot': snapshot['name'],
|
|
||||||
'snap_size': snapshot['volume_size']})
|
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(exception_msg)
|
LOG.error(_LE('Error in extending volume size: Volume: '
|
||||||
|
'%(volume)s Vol_Size: %(vol_size)d with '
|
||||||
|
'Snapshot: %(snapshot)s Snap_Size: '
|
||||||
|
'%(snap_size)d'),
|
||||||
|
{'volume': volume['name'],
|
||||||
|
'vol_size': volume['size'],
|
||||||
|
'snapshot': snapshot['name'],
|
||||||
|
'snap_size': snapshot['volume_size']})
|
||||||
self._execute('rm', '-f', vol_path, run_as_root=True)
|
self._execute('rm', '-f', vol_path, run_as_root=True)
|
||||||
|
|
||||||
return {'provider_location': volume['provider_location']}
|
return {'provider_location': volume['provider_location']}
|
||||||
|
|
|
@ -89,12 +89,9 @@ class ZFSSAApi(object):
|
||||||
val = json.loads(ret.data)
|
val = json.loads(ret.data)
|
||||||
|
|
||||||
if not self._is_pool_owned(val):
|
if not self._is_pool_owned(val):
|
||||||
exception_msg = (_('Error Pool ownership: '
|
LOG.error(_LE('Error Pool ownership: Pool %(pool)s is not owned '
|
||||||
'Pool %(pool)s is not owned '
|
'by %(host)s.'),
|
||||||
'by %(host)s.')
|
{'pool': pool, 'host': self.host})
|
||||||
% {'pool': pool,
|
|
||||||
'host': self.host})
|
|
||||||
LOG.error(exception_msg)
|
|
||||||
raise exception.InvalidInput(reason=pool)
|
raise exception.InvalidInput(reason=pool)
|
||||||
|
|
||||||
avail = val['pool']['usage']['available']
|
avail = val['pool']['usage']['available']
|
||||||
|
@ -464,20 +461,16 @@ class ZFSSAApi(object):
|
||||||
|
|
||||||
ret = self.rclient.put(svc, arg)
|
ret = self.rclient.put(svc, arg)
|
||||||
if ret.status != restclient.Status.ACCEPTED:
|
if ret.status != restclient.Status.ACCEPTED:
|
||||||
exception_msg = (_('Error Setting '
|
LOG.error(_LE('Error Setting Volume: %(lun)s to InitiatorGroup: '
|
||||||
'Volume: %(lun)s to '
|
'%(initiatorgroup)s Pool: %(pool)s Project: '
|
||||||
'InitiatorGroup: %(initiatorgroup)s '
|
'%(project)s Return code: %(ret.status)d Message: '
|
||||||
'Pool: %(pool)s '
|
'%(ret.data)s.'),
|
||||||
'Project: %(project)s '
|
{'lun': lun,
|
||||||
'Return code: %(ret.status)d '
|
'initiatorgroup': initiatorgroup,
|
||||||
'Message: %(ret.data)s.')
|
'pool': pool,
|
||||||
% {'lun': lun,
|
'project': project,
|
||||||
'initiatorgroup': initiatorgroup,
|
'ret.status': ret.status,
|
||||||
'pool': pool,
|
'ret.data': ret.data})
|
||||||
'project': project,
|
|
||||||
'ret.status': ret.status,
|
|
||||||
'ret.data': ret.data})
|
|
||||||
LOG.error(exception_msg)
|
|
||||||
|
|
||||||
def delete_lun(self, pool, project, lun):
|
def delete_lun(self, pool, project, lun):
|
||||||
"""delete iscsi lun."""
|
"""delete iscsi lun."""
|
||||||
|
@ -486,18 +479,14 @@ class ZFSSAApi(object):
|
||||||
|
|
||||||
ret = self.rclient.delete(svc)
|
ret = self.rclient.delete(svc)
|
||||||
if ret.status != restclient.Status.NO_CONTENT:
|
if ret.status != restclient.Status.NO_CONTENT:
|
||||||
exception_msg = (_('Error Deleting '
|
LOG.error(_LE('Error Deleting Volume: %(lun)s to Pool: %(pool)s '
|
||||||
'Volume: %(lun)s to '
|
'Project: %(project)s Return code: %(ret.status)d '
|
||||||
'Pool: %(pool)s '
|
'Message: %(ret.data)s.'),
|
||||||
'Project: %(project)s '
|
{'lun': lun,
|
||||||
'Return code: %(ret.status)d '
|
'pool': pool,
|
||||||
'Message: %(ret.data)s.')
|
'project': project,
|
||||||
% {'lun': lun,
|
'ret.status': ret.status,
|
||||||
'pool': pool,
|
'ret.data': ret.data})
|
||||||
'project': project,
|
|
||||||
'ret.status': ret.status,
|
|
||||||
'ret.data': ret.data})
|
|
||||||
LOG.error(exception_msg)
|
|
||||||
|
|
||||||
def create_snapshot(self, pool, project, lun, snapshot):
|
def create_snapshot(self, pool, project, lun, snapshot):
|
||||||
"""create snapshot."""
|
"""create snapshot."""
|
||||||
|
@ -633,9 +622,9 @@ class ZFSSAApi(object):
|
||||||
svc = "/api/san/v1/iscsi/initiator-groups"
|
svc = "/api/san/v1/iscsi/initiator-groups"
|
||||||
ret = self.rclient.get(svc)
|
ret = self.rclient.get(svc)
|
||||||
if ret.status != restclient.Status.OK:
|
if ret.status != restclient.Status.OK:
|
||||||
LOG.error(_LE('Error getting initiator groups.'))
|
msg = _('Error getting initiator groups.')
|
||||||
exception_msg = (_('Error getting initiator groups.'))
|
LOG.error(msg)
|
||||||
raise exception.VolumeBackendAPIException(data=exception_msg)
|
raise exception.VolumeBackendAPIException(data=msg)
|
||||||
val = json.loads(ret.data)
|
val = json.loads(ret.data)
|
||||||
for initiator_group in val['groups']:
|
for initiator_group in val['groups']:
|
||||||
if initiator in initiator_group['initiators']:
|
if initiator in initiator_group['initiators']:
|
||||||
|
@ -762,7 +751,8 @@ class ZFSSANfsApi(ZFSSAApi):
|
||||||
LOG.error(exception_msg)
|
LOG.error(exception_msg)
|
||||||
raise exception.VolumeBackendAPIException(data=exception_msg)
|
raise exception.VolumeBackendAPIException(data=exception_msg)
|
||||||
data = json.loads(ret.data)['service']
|
data = json.loads(ret.data)['service']
|
||||||
LOG.debug('%s service state: %s' % (service, data))
|
LOG.debug('%(service)s service state: %(data)s',
|
||||||
|
{'service': service, 'data': data})
|
||||||
|
|
||||||
status = 'online' if state == 'enable' else 'disabled'
|
status = 'online' if state == 'enable' else 'disabled'
|
||||||
|
|
||||||
|
@ -833,9 +823,9 @@ class ZFSSANfsApi(ZFSSAApi):
|
||||||
raise exception.VolumeBackendAPIException(data=exception_msg)
|
raise exception.VolumeBackendAPIException(data=exception_msg)
|
||||||
data = json.loads(ret.data)['service']
|
data = json.loads(ret.data)['service']
|
||||||
LOG.debug('Modify %(service)s service '
|
LOG.debug('Modify %(service)s service '
|
||||||
'return data: %(data)s'
|
'return data: %(data)s',
|
||||||
% {'service': service,
|
{'service': service,
|
||||||
'data': data})
|
'data': data})
|
||||||
|
|
||||||
def create_share(self, pool, project, share, args):
|
def create_share(self, pool, project, share, args):
|
||||||
"""Create a share in the specified pool and project"""
|
"""Create a share in the specified pool and project"""
|
||||||
|
|
|
@ -209,7 +209,7 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
|
|
||||||
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
|
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
|
||||||
context.get_admin_context())
|
context.get_admin_context())
|
||||||
LOG.debug("Cinder Volume DB check: vol_db_empty=%s" % vol_db_empty)
|
LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)
|
||||||
|
|
||||||
self.driver = importutils.import_object(
|
self.driver = importutils.import_object(
|
||||||
volume_driver,
|
volume_driver,
|
||||||
|
@ -226,7 +226,7 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
self.extra_capabilities = {}
|
self.extra_capabilities = {}
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error("Invalid JSON: %s" %
|
LOG.error(_LE("Invalid JSON: %s"),
|
||||||
self.driver.configuration.extra_capabilities)
|
self.driver.configuration.extra_capabilities)
|
||||||
|
|
||||||
def _add_to_threadpool(self, func, *args, **kwargs):
|
def _add_to_threadpool(self, func, *args, **kwargs):
|
||||||
|
@ -241,10 +241,9 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
# knowledge and update the DB.
|
# knowledge and update the DB.
|
||||||
try:
|
try:
|
||||||
pool = self.driver.get_pool(volume)
|
pool = self.driver.get_pool(volume)
|
||||||
except Exception as err:
|
except Exception:
|
||||||
LOG.error(_LE('Fetch volume pool name failed.'),
|
LOG.exception(_LE('Fetch volume pool name failed.'),
|
||||||
resource=volume)
|
resource=volume)
|
||||||
LOG.exception(err)
|
|
||||||
return
|
return
|
||||||
|
|
||||||
if pool:
|
if pool:
|
||||||
|
@ -295,17 +294,16 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
|
|
||||||
ctxt = context.get_admin_context()
|
ctxt = context.get_admin_context()
|
||||||
|
|
||||||
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)") %
|
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)"),
|
||||||
{'driver_name': self.driver.__class__.__name__,
|
{'driver_name': self.driver.__class__.__name__,
|
||||||
'version': self.driver.get_version()})
|
'version': self.driver.get_version()})
|
||||||
try:
|
try:
|
||||||
self.driver.do_setup(ctxt)
|
self.driver.do_setup(ctxt)
|
||||||
self.driver.check_for_setup_error()
|
self.driver.check_for_setup_error()
|
||||||
except Exception as ex:
|
except Exception:
|
||||||
LOG.error(_LE("Failed to initialize driver."),
|
LOG.exception(_LE("Failed to initialize driver."),
|
||||||
resource={'type': 'driver',
|
resource={'type': 'driver',
|
||||||
'id': self.__class__.__name__})
|
'id': self.__class__.__name__})
|
||||||
LOG.exception(ex)
|
|
||||||
# we don't want to continue since we failed
|
# we don't want to continue since we failed
|
||||||
# to initialize the driver correctly.
|
# to initialize the driver correctly.
|
||||||
return
|
return
|
||||||
|
@ -325,11 +323,10 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
try:
|
try:
|
||||||
if volume['status'] in ['in-use']:
|
if volume['status'] in ['in-use']:
|
||||||
self.driver.ensure_export(ctxt, volume)
|
self.driver.ensure_export(ctxt, volume)
|
||||||
except Exception as export_ex:
|
except Exception:
|
||||||
LOG.error(_LE("Failed to re-export volume, "
|
LOG.exception(_LE("Failed to re-export volume, "
|
||||||
"setting to ERROR."),
|
"setting to ERROR."),
|
||||||
resource=volume)
|
resource=volume)
|
||||||
LOG.exception(export_ex)
|
|
||||||
self.db.volume_update(ctxt,
|
self.db.volume_update(ctxt,
|
||||||
volume['id'],
|
volume['id'],
|
||||||
{'status': 'error'})
|
{'status': 'error'})
|
||||||
|
@ -357,10 +354,9 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
self.db.snapshot_update(ctxt,
|
self.db.snapshot_update(ctxt,
|
||||||
snapshot['id'],
|
snapshot['id'],
|
||||||
{'status': 'error'})
|
{'status': 'error'})
|
||||||
except Exception as ex:
|
except Exception:
|
||||||
LOG.error(_LE("Error during re-export on driver init."),
|
LOG.exception(_LE("Error during re-export on driver init."),
|
||||||
resource=volume)
|
resource=volume)
|
||||||
LOG.exception(ex)
|
|
||||||
return
|
return
|
||||||
|
|
||||||
self.driver.set_throttle()
|
self.driver.set_throttle()
|
||||||
|
@ -431,9 +427,7 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
cgsnapshot_id=cgsnapshot_id)
|
cgsnapshot_id=cgsnapshot_id)
|
||||||
except Exception:
|
except Exception:
|
||||||
msg = _("Create manager volume flow failed.")
|
msg = _("Create manager volume flow failed.")
|
||||||
LOG.exception((msg),
|
LOG.exception(msg, resource={'type': 'volume', 'id': volume_id})
|
||||||
resource={'type': 'volume',
|
|
||||||
'id': volume_id})
|
|
||||||
raise exception.CinderException(msg)
|
raise exception.CinderException(msg)
|
||||||
|
|
||||||
if snapshot_id is not None:
|
if snapshot_id is not None:
|
||||||
|
@ -525,7 +519,7 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
if volume_ref['attach_status'] == "attached":
|
if volume_ref['attach_status'] == "attached":
|
||||||
# Volume is still attached, need to detach first
|
# Volume is still attached, need to detach first
|
||||||
raise exception.VolumeAttached(volume_id=volume_id)
|
raise exception.VolumeAttached(volume_id=volume_id)
|
||||||
if (vol_utils.extract_host(volume_ref['host']) != self.host):
|
if vol_utils.extract_host(volume_ref['host']) != self.host:
|
||||||
raise exception.InvalidVolume(
|
raise exception.InvalidVolume(
|
||||||
reason=_("volume is not local to this node"))
|
reason=_("volume is not local to this node"))
|
||||||
|
|
||||||
|
@ -673,7 +667,7 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
{'volume_id': volume_id}, resource=snapshot)
|
{'volume_id': volume_id}, resource=snapshot)
|
||||||
snapshot.status = 'error'
|
snapshot.status = 'error'
|
||||||
snapshot.save(context)
|
snapshot.save(context)
|
||||||
raise exception.MetadataCopyFailure(reason=ex)
|
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
|
||||||
|
|
||||||
snapshot.status = 'available'
|
snapshot.status = 'available'
|
||||||
snapshot.progress = '100%'
|
snapshot.progress = '100%'
|
||||||
|
@ -759,13 +753,13 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
if volume['status'] == 'attaching':
|
if volume['status'] == 'attaching':
|
||||||
if (volume_metadata.get('attached_mode') and
|
if (volume_metadata.get('attached_mode') and
|
||||||
volume_metadata.get('attached_mode') != mode):
|
volume_metadata.get('attached_mode') != mode):
|
||||||
msg = _("being attached by different mode")
|
raise exception.InvalidVolume(
|
||||||
raise exception.InvalidVolume(reason=msg)
|
reason=_("being attached by different mode"))
|
||||||
|
|
||||||
if (volume['status'] == 'in-use' and not volume['multiattach']
|
if (volume['status'] == 'in-use' and not volume['multiattach']
|
||||||
and not volume['migration_status']):
|
and not volume['migration_status']):
|
||||||
msg = _("volume is already attached")
|
raise exception.InvalidVolume(
|
||||||
raise exception.InvalidVolume(reason=msg)
|
reason=_("volume is already attached"))
|
||||||
|
|
||||||
attachment = None
|
attachment = None
|
||||||
host_name_sanitized = utils.sanitize_hostname(
|
host_name_sanitized = utils.sanitize_hostname(
|
||||||
|
@ -915,7 +909,8 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
LOG.exception(_LE("Detach volume failed, due to "
|
LOG.exception(_LE("Detach volume failed, due to "
|
||||||
"remove-export failure."),
|
"remove-export failure."),
|
||||||
resource=volume)
|
resource=volume)
|
||||||
raise exception.RemoveExportException(volume=volume_id, reason=ex)
|
raise exception.RemoveExportException(volume=volume_id,
|
||||||
|
reason=six.text_type(ex))
|
||||||
|
|
||||||
self._notify_about_volume_usage(context, volume, "detach.end")
|
self._notify_about_volume_usage(context, volume, "detach.end")
|
||||||
LOG.info(_LI("Detach volume completed successfully."), resource=volume)
|
LOG.info(_LI("Detach volume completed successfully."), resource=volume)
|
||||||
|
@ -1063,10 +1058,10 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
try:
|
try:
|
||||||
self.driver.validate_connector(connector)
|
self.driver.validate_connector(connector)
|
||||||
except exception.InvalidConnectorException as err:
|
except exception.InvalidConnectorException as err:
|
||||||
raise exception.InvalidInput(reason=err)
|
raise exception.InvalidInput(reason=six.text_type(err))
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
err_msg = (_("Validate volume connection failed "
|
err_msg = (_("Validate volume connection failed "
|
||||||
"(error: %(err))."), {'err': six.text_type(err)})
|
"(error: %(err)).") % {'err': six.text_type(err)})
|
||||||
LOG.error(err_msg, resource=volume)
|
LOG.error(err_msg, resource=volume)
|
||||||
raise exception.VolumeBackendAPIException(data=err_msg)
|
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||||
|
|
||||||
|
@ -1085,7 +1080,7 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
model_update)
|
model_update)
|
||||||
except exception.CinderException as ex:
|
except exception.CinderException as ex:
|
||||||
LOG.exception(_LE("Model update failed."), resource=volume)
|
LOG.exception(_LE("Model update failed."), resource=volume)
|
||||||
raise exception.ExportFailure(reason=ex)
|
raise exception.ExportFailure(reason=six.text_type(ex))
|
||||||
|
|
||||||
initiator_data = self._get_driver_initiator_data(context, connector)
|
initiator_data = self._get_driver_initiator_data(context, connector)
|
||||||
try:
|
try:
|
||||||
|
@ -1098,7 +1093,7 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
connector)
|
connector)
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
err_msg = (_("Driver initialize connection failed "
|
err_msg = (_("Driver initialize connection failed "
|
||||||
"(error: %(err)s)."), {'err': six.text_type(err)})
|
"(error: %(err)s).") % {'err': six.text_type(err)})
|
||||||
LOG.error(err_msg, resource=volume)
|
LOG.error(err_msg, resource=volume)
|
||||||
|
|
||||||
self.driver.remove_export(context.elevated(), volume)
|
self.driver.remove_export(context.elevated(), volume)
|
||||||
|
@ -1271,9 +1266,8 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
new_volume['id'])
|
new_volume['id'])
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
msg = _LE("Failed to copy volume %(vol1)s to %(vol2)s")
|
LOG.error(_LE("Failed to copy volume %(vol1)s to %(vol2)s"),
|
||||||
LOG.error(msg, {'vol1': volume['id'],
|
{'vol1': volume['id'], 'vol2': new_volume['id']})
|
||||||
'vol2': new_volume['id']})
|
|
||||||
self._clean_temporary_volume(ctxt, volume['id'],
|
self._clean_temporary_volume(ctxt, volume['id'],
|
||||||
new_volume['id'])
|
new_volume['id'])
|
||||||
|
|
||||||
|
@ -1337,9 +1331,9 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
self.db.volume_update(ctxt, volume_id,
|
self.db.volume_update(ctxt, volume_id,
|
||||||
{'migration_status': 'error'})
|
{'migration_status': 'error'})
|
||||||
|
|
||||||
msg = _("migrate_volume_completion: completing migration for "
|
LOG.debug("migrate_volume_completion: completing migration for "
|
||||||
"volume %(vol1)s (temporary volume %(vol2)s")
|
"volume %(vol1)s (temporary volume %(vol2)s",
|
||||||
LOG.debug(msg % {'vol1': volume_id, 'vol2': new_volume_id})
|
{'vol1': volume_id, 'vol2': new_volume_id})
|
||||||
volume = self.db.volume_get(ctxt, volume_id)
|
volume = self.db.volume_get(ctxt, volume_id)
|
||||||
new_volume = self.db.volume_get(ctxt, new_volume_id)
|
new_volume = self.db.volume_get(ctxt, new_volume_id)
|
||||||
rpcapi = volume_rpcapi.VolumeAPI()
|
rpcapi = volume_rpcapi.VolumeAPI()
|
||||||
|
@ -1347,10 +1341,9 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
orig_volume_status = self._get_original_status(volume)
|
orig_volume_status = self._get_original_status(volume)
|
||||||
|
|
||||||
if error:
|
if error:
|
||||||
msg = _("migrate_volume_completion is cleaning up an error "
|
LOG.info(_LI("migrate_volume_completion is cleaning up an error "
|
||||||
"for volume %(vol1)s (temporary volume %(vol2)s")
|
"for volume %(vol1)s (temporary volume %(vol2)s"),
|
||||||
LOG.info(msg % {'vol1': volume['id'],
|
{'vol1': volume['id'], 'vol2': new_volume['id']})
|
||||||
'vol2': new_volume['id']})
|
|
||||||
rpcapi.delete_volume(ctxt, new_volume)
|
rpcapi.delete_volume(ctxt, new_volume)
|
||||||
updates = {'migration_status': None, 'status': orig_volume_status}
|
updates = {'migration_status': None, 'status': orig_volume_status}
|
||||||
self.db.volume_update(ctxt, volume_id, updates)
|
self.db.volume_update(ctxt, volume_id, updates)
|
||||||
|
@ -1367,8 +1360,8 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
self.detach_volume(ctxt, volume_id, attachment['id'])
|
self.detach_volume(ctxt, volume_id, attachment['id'])
|
||||||
self.delete_volume(ctxt, volume_id)
|
self.delete_volume(ctxt, volume_id)
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
msg = _LE("Delete migration source volume failed: %(err)s")
|
LOG.error(_LE("Delete migration source volume failed: %(err)s"),
|
||||||
LOG.error(msg, {'err': six.text_type(ex)}, resource=volume)
|
{'err': ex}, resource=volume)
|
||||||
|
|
||||||
# Give driver (new_volume) a chance to update things as needed
|
# Give driver (new_volume) a chance to update things as needed
|
||||||
# Note this needs to go through rpc to the host of the new volume
|
# Note this needs to go through rpc to the host of the new volume
|
||||||
|
@ -1688,12 +1681,11 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
|
|
||||||
if retyped:
|
if retyped:
|
||||||
LOG.info(_LI("Volume %s: retyped successfully"), volume_id)
|
LOG.info(_LI("Volume %s: retyped successfully"), volume_id)
|
||||||
except Exception as ex:
|
except Exception:
|
||||||
retyped = False
|
retyped = False
|
||||||
LOG.error(_LE("Volume %s: driver error when trying to retype, "
|
LOG.exception(_LE("Volume %s: driver error when trying to "
|
||||||
"falling back to generic mechanism."),
|
"retype, falling back to generic "
|
||||||
volume_ref['id'])
|
"mechanism."), volume_ref['id'])
|
||||||
LOG.exception(ex)
|
|
||||||
|
|
||||||
# We could not change the type, so we need to migrate the volume, where
|
# We could not change the type, so we need to migrate the volume, where
|
||||||
# the destination volume will be of the new type
|
# the destination volume will be of the new type
|
||||||
|
@ -1758,11 +1750,9 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
volume_id,
|
volume_id,
|
||||||
ref)
|
ref)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Failed to create manage_existing flow."),
|
msg = _("Failed to create manage_existing flow.")
|
||||||
resource={'type': 'volume',
|
LOG.exception(msg, resource={'type': 'volume', 'id': volume_id})
|
||||||
'id': volume_id})
|
raise exception.CinderException(msg)
|
||||||
raise exception.CinderException(
|
|
||||||
_("Failed to create manage existing flow."))
|
|
||||||
|
|
||||||
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
|
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
|
||||||
flow_engine.run()
|
flow_engine.run()
|
||||||
|
@ -2072,7 +2062,7 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
except exception.CinderException as ex:
|
except exception.CinderException as ex:
|
||||||
LOG.error(_LE("Failed to update %(volume_id)s"
|
LOG.error(_LE("Failed to update %(volume_id)s"
|
||||||
" metadata using the provided snapshot"
|
" metadata using the provided snapshot"
|
||||||
" %(snapshot_id)s metadata.") %
|
" %(snapshot_id)s metadata."),
|
||||||
{'volume_id': vol['id'],
|
{'volume_id': vol['id'],
|
||||||
'snapshot_id': vol['snapshot_id']})
|
'snapshot_id': vol['snapshot_id']})
|
||||||
self.db.volume_update(context, vol['id'],
|
self.db.volume_update(context, vol['id'],
|
||||||
|
@ -2080,7 +2070,7 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
if group_id:
|
if group_id:
|
||||||
self.db.consistencygroup_update(
|
self.db.consistencygroup_update(
|
||||||
context, group_id, {'status': 'error'})
|
context, group_id, {'status': 'error'})
|
||||||
raise exception.MetadataCopyFailure(reason=ex)
|
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
|
||||||
|
|
||||||
self.db.volume_update(context, vol['id'], update)
|
self.db.volume_update(context, vol['id'], update)
|
||||||
|
|
||||||
|
@ -2423,13 +2413,14 @@ class VolumeManager(manager.SchedulerDependentManager):
|
||||||
except exception.CinderException as ex:
|
except exception.CinderException as ex:
|
||||||
LOG.error(_LE("Failed updating %(snapshot_id)s"
|
LOG.error(_LE("Failed updating %(snapshot_id)s"
|
||||||
" metadata using the provided volumes"
|
" metadata using the provided volumes"
|
||||||
" %(volume_id)s metadata") %
|
" %(volume_id)s metadata"),
|
||||||
{'volume_id': volume_id,
|
{'volume_id': volume_id,
|
||||||
'snapshot_id': snapshot_id})
|
'snapshot_id': snapshot_id})
|
||||||
self.db.snapshot_update(context,
|
self.db.snapshot_update(context,
|
||||||
snapshot['id'],
|
snapshot['id'],
|
||||||
{'status': 'error'})
|
{'status': 'error'})
|
||||||
raise exception.MetadataCopyFailure(reason=ex)
|
raise exception.MetadataCopyFailure(
|
||||||
|
reason=six.text_type(ex))
|
||||||
|
|
||||||
self.db.snapshot_update(context,
|
self.db.snapshot_update(context,
|
||||||
snapshot['id'], {'status': 'available',
|
snapshot['id'], {'status': 'available',
|
||||||
|
|
Loading…
Reference in New Issue