VMAX driver - Retype, replacing SMI-S with REST

In VMAX driver version 3.0, SMI-S has been replaced with Unisphere
REST. This patch ports the retype functionality from SMI-S to REST.
See the original change https://review.openstack.org/#/c/397862/
for more details.

Change-Id: If39be8c8038734b585353d3d714607d3bce25da1
Partially-Implements: blueprint vmax-rest
Helen Walsh 2017-04-13 21:41:21 +01:00
parent f6d9fbadb2
commit 2f08c8dea3
6 changed files with 421 additions and 0 deletions
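
Retype validates the requested target against the Cinder host string, whose
pool section this driver packs as 'SLO+Workload+SRP+SerialNumber' (see the
test data and _is_valid_for_storage_assisted_migration below). A minimal
sketch of that parsing (the helper name is illustrative, not part of the
driver):

def parse_pool_info(host_string):
    """Split a Cinder host string into its pool components.

    Illustrative only; mirrors the parsing done in
    _is_valid_for_storage_assisted_migration.
    """
    # e.g. 'HostX@Backend#Silver+OLTP+SRP_1+000197800123'
    # The pool section follows the '#' separator.
    pool = host_string.split('#')[1]
    slo, workload, srp, serial = pool.split('+')
    return slo, workload, srp, serial

print(parse_pool_info('HostX@Backend#Silver+OLTP+SRP_1+000197800123'))
# ('Silver', 'OLTP', 'SRP_1', '000197800123')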


@@ -61,6 +61,7 @@ class VMAXCommonData(object):
default_sg_no_slo = 'OS-no_SLO-SG'
failed_resource = 'OS-failed-resource'
fake_host = 'HostX@Backend#Diamond+DSS+SRP_1+000197800123'
new_host = 'HostX@Backend#Silver+OLTP+SRP_1+000197800123'
version = '3.0.0'
volume_wwn = '600000345'
@@ -2198,6 +2199,21 @@ class VMAXProvisionTest(test.TestCase):
self.assertTrue(valid_slo)
self.assertFalse(valid_workload)
def test_get_slo_workload_settings_from_storage_group(self):
ref_settings = "Diamond+DSS"
sg_slo_settings = (
self.provision.get_slo_workload_settings_from_storage_group(
self.data.array, self.data.defaultstoragegroup_name))
self.assertEqual(ref_settings, sg_slo_settings)
# No workload
with mock.patch.object(self.provision.rest, 'get_storage_group',
return_value={'slo': 'Silver'}):
ref_settings2 = "Silver+NONE"
sg_slo_settings2 = (
self.provision.get_slo_workload_settings_from_storage_group(
self.data.array, 'no_workload_sg'))
self.assertEqual(ref_settings2, sg_slo_settings2)
class VMAXCommonTest(test.TestCase):
def setUp(self):
@@ -3091,6 +3107,149 @@ class VMAXCommonTest(test.TestCase):
self.common.unmanage(volume)
self.rest.rename_volume.assert_not_called()
@mock.patch.object(common.VMAXCommon,
'_slo_workload_migration')
def test_retype(self, mock_migrate):
device_id = self.data.volume_details[0]['volumeId']
volume_name = self.data.test_volume['name']
extra_specs = self.data.extra_specs_intervals_set
extra_specs['port_group_name'] = self.data.port_group_name_f
volume = self.data.test_volume
host = {'host': self.data.new_host}
self.common.retype(volume, host)
mock_migrate.assert_called_once_with(
device_id, volume, host, volume_name, extra_specs)
mock_migrate.reset_mock()
with mock.patch.object(
self.common, '_find_device_on_array', return_value=None):
self.common.retype(volume, host)
mock_migrate.assert_not_called()
def test_slo_workload_migration_valid(self):
device_id = self.data.volume_details[0]['volumeId']
volume_name = self.data.test_volume['name']
extra_specs = self.data.extra_specs
volume = self.data.test_volume
host = {'host': self.data.new_host}
with mock.patch.object(self.common, '_migrate_volume'):
self.common._slo_workload_migration(
device_id, volume, host, volume_name, extra_specs)
self.common._migrate_volume.assert_called_once_with(
extra_specs[utils.ARRAY], device_id,
extra_specs[utils.SRP], 'Silver',
'OLTP', volume_name, extra_specs)
def test_slo_workload_migration_not_valid(self):
device_id = self.data.volume_details[0]['volumeId']
volume_name = self.data.test_volume['name']
extra_specs = self.data.extra_specs
volume = self.data.test_volume
host = {'host': self.data.new_host}
with mock.patch.object(self.common,
'_is_valid_for_storage_assisted_migration',
return_value=(False, 'Silver', 'OLTP')):
migrate_status = self.common._slo_workload_migration(
device_id, volume, host, volume_name, extra_specs)
self.assertFalse(migrate_status)
def test_slo_workload_migration_same_hosts(self):
device_id = self.data.volume_details[0]['volumeId']
volume_name = self.data.test_volume['name']
extra_specs = self.data.extra_specs
volume = self.data.test_volume
host = {'host': self.data.fake_host}
migrate_status = self.common._slo_workload_migration(
device_id, volume, host, volume_name, extra_specs)
self.assertFalse(migrate_status)
@mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members')
def test_migrate_volume_success(self, mock_remove):
with mock.patch.object(self.rest, 'is_volume_in_storagegroup',
return_value=True):
device_id = self.data.volume_details[0]['volumeId']
volume_name = self.data.test_volume['name']
extra_specs = self.data.extra_specs
migrate_status = self.common._migrate_volume(
self.data.array, device_id, self.data.srp, self.data.slo,
self.data.workload, volume_name, extra_specs)
self.assertTrue(migrate_status)
mock_remove.assert_called_once_with(
self.data.array, device_id, None, extra_specs, False)
mock_remove.reset_mock()
with mock.patch.object(
self.rest, 'get_storage_groups_from_volume',
return_value=[]):
migrate_status = self.common._migrate_volume(
self.data.array, device_id, self.data.srp, self.data.slo,
self.data.workload, volume_name, extra_specs)
self.assertTrue(migrate_status)
mock_remove.assert_not_called()
@mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members')
def test_migrate_volume_failed_get_new_sg_failed(self, mock_remove):
device_id = self.data.volume_details[0]['volumeId']
volume_name = self.data.test_volume['name']
extra_specs = self.data.extra_specs
with mock.patch.object(
self.masking, 'get_or_create_default_storage_group',
side_effect=exception.VolumeBackendAPIException):
migrate_status = self.common._migrate_volume(
self.data.array, device_id, self.data.srp, self.data.slo,
self.data.workload, volume_name, extra_specs)
self.assertFalse(migrate_status)
def test_migrate_volume_failed_vol_not_added(self):
device_id = self.data.volume_details[0]['volumeId']
volume_name = self.data.test_volume['name']
extra_specs = self.data.extra_specs
with mock.patch.object(
self.rest, 'is_volume_in_storagegroup',
return_value=False):
migrate_status = self.common._migrate_volume(
self.data.array, device_id, self.data.srp, self.data.slo,
self.data.workload, volume_name, extra_specs)
self.assertFalse(migrate_status)
def test_is_valid_for_storage_assisted_migration_true(self):
device_id = self.data.volume_details[0]['volumeId']
host = {'host': self.data.new_host}
volume_name = self.data.test_volume['name']
ref_return = (True, 'Silver', 'OLTP')
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array, self.data.srp, volume_name)
self.assertEqual(ref_return, return_val)
# No current sgs found
with mock.patch.object(self.rest, 'get_storage_groups_from_volume',
return_value=None):
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array, self.data.srp, volume_name)
self.assertEqual(ref_return, return_val)
def test_is_valid_for_storage_assisted_migration_false(self):
device_id = self.data.volume_details[0]['volumeId']
volume_name = self.data.test_volume['name']
ref_return = (False, None, None)
# IndexError
host = {'host': 'HostX@Backend#Silver+SRP_1+000197800123'}
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array, self.data.srp, volume_name)
self.assertEqual(ref_return, return_val)
# Wrong array
host2 = {'host': 'HostX@Backend#Silver+OLTP+SRP_1+00012345678'}
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host2, self.data.array, self.data.srp, volume_name)
self.assertEqual(ref_return, return_val)
# Wrong srp
host3 = {'host': 'HostX@Backend#Silver+OLTP+SRP_2+000197800123'}
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host3, self.data.array, self.data.srp, volume_name)
self.assertEqual(ref_return, return_val)
# Already in correct sg
host4 = {'host': self.data.fake_host}
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host4, self.data.array, self.data.srp, volume_name)
self.assertEqual(ref_return, return_val)
class VMAXFCTest(test.TestCase):
def setUp(self):
@@ -3302,6 +3461,14 @@ class VMAXFCTest(test.TestCase):
self.common.unmanage.assert_called_once_with(
self.data.test_volume)
def test_retype(self):
host = {'host': self.data.new_host}
with mock.patch.object(self.common, 'retype',
return_value=True):
self.driver.retype({}, self.data.test_volume, '', '', host)
self.common.retype.assert_called_once_with(
self.data.test_volume, host)
class VMAXISCSITest(test.TestCase):
def setUp(self):
@@ -3524,6 +3691,14 @@ class VMAXISCSITest(test.TestCase):
self.common.unmanage.assert_called_once_with(
self.data.test_volume)
def test_retype(self):
host = {'host': self.data.new_host}
with mock.patch.object(self.common, 'retype',
return_value=True):
self.driver.retype({}, self.data.test_volume, '', '', host)
self.common.retype.assert_called_once_with(
self.data.test_volume, host)
class VMAXMaskingTest(test.TestCase):
def setUp(self):


@@ -1527,3 +1527,188 @@ class VMAXCommon(object):
# Rename the volume to volumeId, thus remove the 'OS-' prefix.
self.rest.rename_volume(
extra_specs[utils.ARRAY], device_id, volume_id)
def retype(self, volume, host):
"""Migrate volume to another host using retype.
:param volume: the volume object including the volume_type_id
:param host: The host dict holding the relevant target (destination)
information
:returns: boolean -- True if retype succeeded, False if error
"""
volume_name = volume.name
LOG.info("Migrating Volume %(volume)s via retype.",
{'volume': volume_name})
extra_specs = self._initial_setup(volume)
device_id = self._find_device_on_array(volume, extra_specs)
if device_id is None:
LOG.error("Volume %(name)s not found on the array. "
"No volume to migrate using retype.",
{'name': volume_name})
return False
return self._slo_workload_migration(device_id, volume, host,
volume_name, extra_specs)
def _slo_workload_migration(self, device_id, volume, host,
volume_name, extra_specs):
"""Migrate from SLO/Workload combination to another.
:param device_id: the volume device id
:param volume: the volume object
:param host: the host dict
:param volume_name: the name of the volume
:param extra_specs: extra specifications
:returns: boolean -- True if migration succeeded, False if error.
"""
is_valid, target_slo, target_workload = (
self._is_valid_for_storage_assisted_migration(
device_id, host, extra_specs[utils.ARRAY],
extra_specs[utils.SRP], volume_name))
if not is_valid:
LOG.error(
"Volume %(name)s is not suitable for storage "
"assisted migration using retype.",
{'name': volume_name})
return False
if volume.host != host['host']:
LOG.debug(
"Retype Volume %(name)s from source host %(sourceHost)s "
"to target host %(targetHost)s. ",
{'name': volume_name,
'sourceHost': volume.host,
'targetHost': host['host']})
return self._migrate_volume(
extra_specs[utils.ARRAY], device_id,
extra_specs[utils.SRP], target_slo,
target_workload, volume_name, extra_specs)
return False
def _migrate_volume(
self, array, device_id, srp, target_slo,
target_workload, volume_name, extra_specs):
"""Migrate from one slo/workload combination to another.
This requires moving the volume from its current SG to a
new or existing SG that has the target attributes.
:param array: the array serial number
:param device_id: the device number
:param srp: the storage resource pool
:param target_slo: the target service level
:param target_workload: the target workload
:param volume_name: the volume name
:param extra_specs: the extra specifications
:returns: boolean -- True if migration succeeded, False if error
"""
storagegroups = self.rest.get_storage_groups_from_volume(
array, device_id)
if not storagegroups:
LOG.warning(
"Volume : %(volume_name)s does not currently "
"belong to any storage groups.",
{'volume_name': volume_name})
else:
self.masking.remove_and_reset_members(
array, device_id, None, extra_specs, False)
try:
target_sg_name = self.masking.get_or_create_default_storage_group(
array, srp, target_slo, target_workload, extra_specs)
except Exception as e:
LOG.error("Failed to get or create storage group. "
"Exception received was %(e)s.", {'e': e})
return False
self.masking.add_volume_to_storage_group(
array, device_id, target_sg_name, volume_name, extra_specs)
# Check that it has been added.
vol_check = self.rest.is_volume_in_storagegroup(
array, device_id, target_sg_name)
if not vol_check:
LOG.error(
"Volume: %(volume_name)s has not been "
"added to target storage group %(storageGroup)s.",
{'volume_name': volume_name,
'storageGroup': target_sg_name})
return False
return True
def _is_valid_for_storage_assisted_migration(
self, device_id, host, source_array,
source_srp, volume_name):
"""Check if volume is suitable for storage assisted (pool) migration.
:param device_id: the volume device id
:param host: the host dict
:param source_array: the volume's current array serial number
:param source_srp: the volume's current pool name
:param volume_name: the name of the volume to be migrated
:returns: boolean -- True/False
:returns: string -- targetSlo
:returns: string -- targetWorkload
"""
false_ret = (False, None, None)
host_info = host['host']
LOG.debug("Target host is : %(info)s.", {'info': host_info})
try:
info_detail = host_info.split('#')
pool_details = info_detail[1].split('+')
target_slo = pool_details[0]
target_workload = pool_details[1]
target_srp = pool_details[2]
target_array_serial = pool_details[3]
except IndexError:
LOG.error("Error parsing array, pool, SLO and workload.")
return false_ret
if target_array_serial not in source_array:
LOG.error(
"The source array: %(source_array)s does not "
"match the target array: %(target_array)s - "
"skipping storage-assisted migration.",
{'source_array': source_array,
'target_array': target_array_serial})
return false_ret
if target_srp not in source_srp:
LOG.error(
"Only SLO/workload migration within the same SRP Pool is "
"supported in this version. The source pool: "
"%(source_pool_name)s does not match the target array: "
"%(target_pool)s. Skipping storage-assisted migration.",
{'source_pool_name': source_srp,
'target_pool': target_srp})
return false_ret
found_storage_group_list = self.rest.get_storage_groups_from_volume(
source_array, device_id)
if not found_storage_group_list:
LOG.warning("Volume: %(volume_name)s does not currently "
"belong to any storage groups.",
{'volume_name': volume_name})
else:
for found_storage_group_name in found_storage_group_list:
emc_fast_setting = (
self.provision.
get_slo_workload_settings_from_storage_group(
source_array, found_storage_group_name))
target_combination = ("%(targetSlo)s+%(targetWorkload)s"
% {'targetSlo': target_slo,
'targetWorkload': target_workload})
if target_combination in emc_fast_setting:
LOG.warning(
"No action required. Volume: %(volume_name)s is "
"already part of slo/workload combination: "
"%(targetCombination)s.",
{'volume_name': volume_name,
'targetCombination': target_combination})
return false_ret
return True, target_slo, target_workload
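
The final guard above compares the requested 'SLO+Workload' pair against the
settings of every storage group the volume currently belongs to. A minimal
standalone sketch of that check, assuming settings strings like 'Diamond+DSS'
as produced by get_slo_workload_settings_from_storage_group (the function
name here is illustrative):

def is_noop_retype(target_slo, target_workload, current_sg_settings):
    # current_sg_settings: one 'slo+workload' string per storage group
    # the volume is currently in, e.g. ['Diamond+DSS'].
    target_combination = '%(slo)s+%(workload)s' % {
        'slo': target_slo, 'workload': target_workload}
    return any(target_combination in settings
               for settings in current_sg_settings)

# Already at Diamond+DSS, so no migration is required:
assert is_noop_retype('Diamond', 'DSS', ['Diamond+DSS'])
# Silver+OLTP is a real change, so migration proceeds:
assert not is_noop_retype('Silver', 'OLTP', ['Diamond+DSS'])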


@@ -76,6 +76,7 @@ class VMAXFCDriver(driver.FibreChannelDriver):
- Volume replication 2.1 (bp add-vmax-replication)
- rename and restructure driver (bp vmax-rename-dell-emc)
3.0.0 - REST based driver
- Retype (storage-assisted migration)
"""
VERSION = "3.0.0"
@@ -423,3 +424,17 @@ class VMAXFCDriver(driver.FibreChannelDriver):
Leave the volume intact on the backend array.
"""
return self.common.unmanage(volume)
def retype(self, ctxt, volume, new_type, diff, host):
"""Migrate volume to another host using retype.
:param ctxt: context
:param volume: the volume object including the volume_type_id
:param new_type: the new volume type.
:param diff: difference between old and new volume types.
Unused in driver.
:param host: the host dict holding the relevant
target (destination) information
:returns: boolean -- True if retype succeeded, False if error
"""
return self.common.retype(volume, host)


@@ -81,6 +81,7 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
- Volume replication 2.1 (bp add-vmax-replication)
- rename and restructure driver (bp vmax-rename-dell-emc)
3.0.0 - REST based driver
- Retype (storage-assisted migration)
"""
VERSION = "3.0.0"
@@ -367,3 +368,17 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
Leave the volume intact on the backend array.
"""
return self.common.unmanage(volume)
def retype(self, ctxt, volume, new_type, diff, host):
"""Migrate volume to another host using retype.
:param ctxt: context
:param volume: the volume object including the volume_type_id
:param new_type: the new volume type.
:param diff: difference between old and new volume types.
Unused in driver.
:param host: the host dict holding the relevant
target (destination) information
:returns: boolean -- True if retype succeeded, False if error
"""
return self.common.retype(volume, host)


@@ -18,6 +18,8 @@ import time
from oslo_log import log as logging
from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.dell_emc.vmax import utils
LOG = logging.getLogger(__name__)
@@ -368,3 +370,28 @@ class VMAXProvision(object):
{'workload': workload, 'valid_workloads': valid_workloads})
return is_valid_slo, is_valid_workload
def get_slo_workload_settings_from_storage_group(
self, array, sg_name):
"""Get slo and workload settings from a storage group.
:param array: the array serial number
:param sg_name: the storage group name
:returns: string -- the storage group's 'slo+workload' settings
"""
slo = 'NONE'
workload = 'NONE'
storage_group = self.rest.get_storage_group(array, sg_name)
if storage_group:
try:
slo = storage_group['slo']
workload = storage_group['workload']
except KeyError:
pass
else:
exception_message = (_(
"Could not retrieve storage group %(sg_name)%. ") %
{'sg_name': sg_name})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(data=exception_message)
return '%(slo)s+%(workload)s' % {'slo': slo, 'workload': workload}
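
The string this helper returns is what the retype validation above matches
against. A minimal sketch of the same mapping for an already-fetched storage
group dict (illustrative; unlike the driver method, it does not raise when
the group lookup fails):

def slo_workload_string(storage_group):
    # Missing keys fall back to 'NONE', matching the driver's defaults.
    slo = storage_group.get('slo', 'NONE')
    workload = storage_group.get('workload', 'NONE')
    return '%(slo)s+%(workload)s' % {'slo': slo, 'workload': workload}

print(slo_workload_string({'slo': 'Diamond', 'workload': 'DSS'}))  # Diamond+DSS
print(slo_workload_string({'slo': 'Silver'}))                      # Silver+NONE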


@@ -0,0 +1,4 @@
---
features:
- |
Add retype functionality to VMAX driver version 3.0.