VMAX driver - list manageable volumes and snapshots

VMAX support for listing manageable volumes and snapshots.

Change-Id: I73cf7940fbeb4a450fd0719a5477c82aefde8c9a
Implements: blueprint vmax-list-manage-existing
commit d05a7a10dc (parent 106cf3cbf0)
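
The change adds the list-manageable plumbing at three levels: VMAXRest fetches the verbose ("private") volume list from Unisphere, VMAXCommon filters and formats it, and the FC/iSCSI drivers expose the standard get_manageable_volumes/get_manageable_snapshots entry points. Below is a minimal sketch (illustrative only, not part of this change) of how the new common-layer calls are exercised with a mocked REST response, in the same way as the unit tests added here. Once a backend reports these resources, operators would typically browse the candidates with the cinder CLI (for example `cinder manageable-list <host>` and `cinder snapshot-manageable-list <host>`, available from API microversion 3.8) before running manage-existing.

# Minimal sketch (not part of this change): exercising the new common-layer
# entry points with a mocked REST response, mirroring the unit tests below.
from unittest import mock

def list_manageable(common, fake_private_vols):
    # The common layer asks the REST layer for the verbose (private) volume
    # list, then filters and formats it for the manage-existing API.
    with mock.patch.object(common.rest, 'get_private_volume_list',
                           return_value=fake_private_vols):
        vols = common.get_manageable_volumes(
            marker=None, limit=None, offset=None,
            sort_keys=None, sort_dirs=None)
        snaps = common.get_manageable_snapshots(
            marker=None, limit=None, offset=None,
            sort_keys=None, sort_dirs=None)
    return vols, snaps
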
@@ -683,6 +683,232 @@ class VMAXCommonData(object):

    headroom = {"headroom": [{"headroomCapacity": 20348.29}]}

    private_vol_rest_response_single = {
        "id": "f3aab01c-a5a8-4fb4-af2b-16ae1c46dc9e_0", "count": 1,
        "expirationTime": 1521650650793, "maxPageSize": 1000,
        "resultList": {"to": 1, "from": 1, "result": [
            {"volumeHeader": {
                "capGB": 1.0, "capMB": 1026.0, "volumeId": "00001",
                "status": "Ready", "configuration": "TDEV"}}]}}
    private_vol_rest_response_none = {
        "id": "f3aab01c-a5a8-4fb4-af2b-16ae1c46dc9e_0", "count": 0,
        "expirationTime": 1521650650793, "maxPageSize": 1000,
        "resultList": {"to": 0, "from": 0, "result": []}}
    private_vol_rest_response_iterator_first = {
        "id": "f3aab01c-a5a8-4fb4-af2b-16ae1c46dc9e_0", "count": 1500,
        "expirationTime": 1521650650793, "maxPageSize": 1000,
        "resultList": {"to": 1, "from": 1, "result": [
            {"volumeHeader": {
                "capGB": 1.0, "capMB": 1026.0, "volumeId": "00002",
                "status": "Ready", "configuration": "TDEV"}}]}}
    private_vol_rest_response_iterator_second = {
        "to": 2000, "from": 1001, "result": [
            {"volumeHeader": {
                "capGB": 1.0, "capMB": 1026.0, "volumeId": "00001",
                "status": "Ready", "configuration": "TDEV"}}]}
    rest_iterator_resonse_one = {
        "to": 1000, "from": 1, "result": [
            {"volumeHeader": {
                "capGB": 1.0, "capMB": 1026.0, "volumeId": "00001",
                "status": "Ready", "configuration": "TDEV"}}]}
    rest_iterator_resonse_two = {
        "to": 1500, "from": 1001, "result": [
            {"volumeHeader": {
                "capGB": 1.0, "capMB": 1026.0, "volumeId": "00002",
                "status": "Ready", "configuration": "TDEV"}}]}

    # COMMON.PY
    priv_vol_func_response_single = [
        {"volumeHeader": {
            "private": False, "capGB": 1.0, "capMB": 1026.0,
            "serviceState": "Normal", "emulationType": "FBA",
            "volumeId": "00001", "status": "Ready", "mapped": False,
            "numStorageGroups": 0, "reservationInfo": {"reserved": False},
            "encapsulated": False, "formattedName": "00001",
            "system_resource": False, "numSymDevMaskingViews": 0,
            "nameModifier": "", "configuration": "TDEV"},
         "maskingInfo": {"masked": False},
         "rdfInfo": {
             "dynamicRDF": False, "RDF": False,
             "concurrentRDF": False,
             "getDynamicRDFCapability": "RDF1_Capable", "RDFA": False},
         "timeFinderInfo": {
             "mirror": False, "snapVXTgt": False,
             "cloneTarget": False, "cloneSrc": False,
             "snapVXSrc": True, "snapVXSession": [
                 {"srcSnapshotGenInfo": [
                     {"snapshotHeader": {
                         "timestamp": 1512763278000, "expired": False,
                         "secured": False, "snapshotName": "testSnap1",
                         "device": "00001", "generation": 0, "timeToLive": 0
                     }}]}]}}]

    priv_vol_func_response_multi = [
        {"volumeHeader": {
            "private": False, "capGB": 100.0, "capMB": 102400.0,
            "serviceState": "Normal", "emulationType": "FBA",
            "volumeId": "00001", "status": "Ready", "numStorageGroups": 0,
            "reservationInfo": {"reserved": False}, "mapped": False,
            "encapsulated": False, "formattedName": "00001",
            "system_resource": False, "numSymDevMaskingViews": 0,
            "nameModifier": "", "configuration": "TDEV"},
         "rdfInfo": {
             "dynamicRDF": False, "RDF": False,
             "concurrentRDF": False,
             "getDynamicRDFCapability": "RDF1_Capable", "RDFA": False},
         "maskingInfo": {"masked": False},
         "timeFinderInfo": {
             "mirror": False, "snapVXTgt": False,
             "cloneTarget": False, "cloneSrc": False,
             "snapVXSrc": True, "snapVXSession": [
                 {"srcSnapshotGenInfo": [
                     {"snapshotHeader": {
                         "timestamp": 1512763278000, "expired": False,
                         "secured": False, "snapshotName": "testSnap1",
                         "device": "00001", "generation": 0, "timeToLive": 0
                     }}]}]}},
        {"volumeHeader": {
            "private": False, "capGB": 200.0, "capMB": 204800.0,
            "serviceState": "Normal", "emulationType": "FBA",
            "volumeId": "00002", "status": "Ready", "numStorageGroups": 0,
            "reservationInfo": {"reserved": False}, "mapped": False,
            "encapsulated": False, "formattedName": "00002",
            "system_resource": False, "numSymDevMaskingViews": 0,
            "nameModifier": "", "configuration": "TDEV"},
         "rdfInfo": {
             "dynamicRDF": False, "RDF": False,
             "concurrentRDF": False,
             "getDynamicRDFCapability": "RDF1_Capable", "RDFA": False},
         "maskingInfo": {"masked": False},
         "timeFinderInfo": {
             "mirror": False, "snapVXTgt": False,
             "cloneTarget": False, "cloneSrc": False,
             "snapVXSrc": True, "snapVXSession": [
                 {"srcSnapshotGenInfo": [
                     {"snapshotHeader": {
                         "timestamp": 1512763278000, "expired": False,
                         "secured": False, "snapshotName": "testSnap2",
                         "device": "00002", "generation": 0, "timeToLive": 0
                     }}]}]}},
        {"volumeHeader": {
            "private": False, "capGB": 300.0, "capMB": 307200.0,
            "serviceState": "Normal", "emulationType": "FBA",
            "volumeId": "00003", "status": "Ready", "numStorageGroups": 0,
            "reservationInfo": {"reserved": False}, "mapped": False,
            "encapsulated": False, "formattedName": "00003",
            "system_resource": False, "numSymDevMaskingViews": 0,
            "nameModifier": "", "configuration": "TDEV"},
         "rdfInfo": {
             "dynamicRDF": False, "RDF": False,
             "concurrentRDF": False,
             "getDynamicRDFCapability": "RDF1_Capable", "RDFA": False},
         "maskingInfo": {"masked": False},
         "timeFinderInfo": {
             "mirror": False, "snapVXTgt": False,
             "cloneTarget": False, "cloneSrc": False,
             "snapVXSrc": True, "snapVXSession": [
                 {"srcSnapshotGenInfo": [
                     {"snapshotHeader": {
                         "timestamp": 1512763278000, "expired": False,
                         "secured": False, "snapshotName": "testSnap3",
                         "device": "00003", "generation": 0, "timeToLive": 0
                     }}]}]}},
        {"volumeHeader": {
            "private": False, "capGB": 400.0, "capMB": 409600.0,
            "serviceState": "Normal", "emulationType": "FBA",
            "volumeId": "00004", "status": "Ready", "numStorageGroups": 0,
            "reservationInfo": {"reserved": False}, "mapped": False,
            "encapsulated": False, "formattedName": "00004",
            "system_resource": False, "numSymDevMaskingViews": 0,
            "nameModifier": "", "configuration": "TDEV"},
         "rdfInfo": {
             "dynamicRDF": False, "RDF": False,
             "concurrentRDF": False,
             "getDynamicRDFCapability": "RDF1_Capable", "RDFA": False},
         "maskingInfo": {"masked": False},
         "timeFinderInfo": {
             "mirror": False, "snapVXTgt": False,
             "cloneTarget": False, "cloneSrc": False,
             "snapVXSrc": True, "snapVXSession": [
                 {"srcSnapshotGenInfo": [
                     {"snapshotHeader": {
                         "timestamp": 1512763278000, "expired": False,
                         "secured": False, "snapshotName": "testSnap4",
                         "device": "00004", "generation": 0, "timeToLive": 0
                     }}]}]}}]

    priv_vol_func_response_multi_invalid = [
        {"volumeHeader": {
            "private": False, "capGB": 1.0, "capMB": 10.0,
            "serviceState": "Normal", "emulationType": "FBA",
            "volumeId": "00001", "status": "Ready", "mapped": False,
            "numStorageGroups": 0, "reservationInfo": {"reserved": False},
            "encapsulated": False, "formattedName": "00001",
            "system_resource": False, "numSymDevMaskingViews": 0,
            "nameModifier": "", "configuration": "TDEV"},
         "maskingInfo": {"masked": False},
         "rdfInfo": {
             "dynamicRDF": False, "RDF": False,
             "concurrentRDF": False,
             "getDynamicRDFCapability": "RDF1_Capable", "RDFA": False},
         "timeFinderInfo": {"snapVXTgt": False, "snapVXSrc": False}},
        {"volumeHeader": {
            "private": False, "capGB": 1.0, "capMB": 1026.0,
            "serviceState": "Normal", "emulationType": "FBA",
            "volumeId": "00002", "status": "Ready", "mapped": False,
            "numStorageGroups": 0, "reservationInfo": {"reserved": False},
            "encapsulated": False, "formattedName": "00002",
            "system_resource": False, "numSymDevMaskingViews": 1,
            "nameModifier": "", "configuration": "TDEV"},
         "maskingInfo": {"masked": False},
         "rdfInfo": {
             "dynamicRDF": False, "RDF": False,
             "concurrentRDF": False,
             "getDynamicRDFCapability": "RDF1_Capable", "RDFA": False},
         "timeFinderInfo": {"snapVXTgt": False, "snapVXSrc": False}},
        {"volumeHeader": {
            "private": False, "capGB": 1.0, "capMB": 1026.0,
            "serviceState": "Normal", "emulationType": "CKD",
            "volumeId": "00003", "status": "Ready", "mapped": False,
            "numStorageGroups": 0, "reservationInfo": {"reserved": False},
            "encapsulated": False, "formattedName": "00003",
            "system_resource": False, "numSymDevMaskingViews": 0,
            "nameModifier": "", "configuration": "TDEV"},
         "maskingInfo": {"masked": False},
         "rdfInfo": {
             "dynamicRDF": False, "RDF": False,
             "concurrentRDF": False,
             "getDynamicRDFCapability": "RDF1_Capable", "RDFA": False},
         "timeFinderInfo": {"snapVXTgt": False, "snapVXSrc": False}},
        {"volumeHeader": {
            "private": False, "capGB": 1.0, "capMB": 1026.0,
            "serviceState": "Normal", "emulationType": "FBA",
            "volumeId": "00004", "status": "Ready", "mapped": False,
            "numStorageGroups": 0, "reservationInfo": {"reserved": False},
            "encapsulated": False, "formattedName": "00004",
            "system_resource": False, "numSymDevMaskingViews": 0,
            "nameModifier": "", "configuration": "TDEV"},
         "maskingInfo": {"masked": False},
         "rdfInfo": {
             "dynamicRDF": False, "RDF": False,
             "concurrentRDF": False,
             "getDynamicRDFCapability": "RDF1_Capable", "RDFA": False},
         "timeFinderInfo": {"snapVXTgt": True, "snapVXSrc": False}},
        {"volumeHeader": {
            "private": False, "capGB": 1.0, "capMB": 1026.0,
            "serviceState": "Normal", "emulationType": "FBA",
            "volumeId": "00005", "status": "Ready", "mapped": False,
            "numStorageGroups": 0, "reservationInfo": {"reserved": False},
            "encapsulated": False, "formattedName": "00005",
            "system_resource": False, "numSymDevMaskingViews": 0,
            "nameModifier": "OS-vol", "configuration": "TDEV"},
         "maskingInfo": {"masked": False},
         "rdfInfo": {
             "dynamicRDF": False, "RDF": False,
             "concurrentRDF": False,
             "getDynamicRDFCapability": "RDF1_Capable", "RDFA": False},
         "timeFinderInfo": {"snapVXTgt": False, "snapVXSrc": False}}]


class FakeLookupService(object):
    def get_device_mapping_from_network(self, initiator_wwns, target_wwns):
@@ -1543,6 +1769,22 @@ class VMAXUtilsTest(test.TestCase):
        self.assertFalse(self.utils.change_multiattach(
            extra_specs_ma_false, extra_specs_ma_false))

    def test_is_volume_manageable(self):
        for volume in self.data.priv_vol_func_response_multi:
            self.assertTrue(
                self.utils.is_volume_manageable(volume))
        for volume in self.data.priv_vol_func_response_multi_invalid:
            self.assertFalse(
                self.utils.is_volume_manageable(volume))

    def test_is_snapshot_manageable(self):
        for volume in self.data.priv_vol_func_response_multi:
            self.assertTrue(
                self.utils.is_snapshot_manageable(volume))
        for volume in self.data.priv_vol_func_response_multi_invalid:
            self.assertFalse(
                self.utils.is_snapshot_manageable(volume))


class VMAXRestTest(test.TestCase):
    def setUp(self):
@@ -2940,6 +3182,68 @@ class VMAXRestTest(test.TestCase):
            rename=True, new_snap_name=new_snap_backend_name)
        mock_modify.assert_called_once()

    def test_get_private_volume_list_pass(self):
        array_id = self.data.array
        response = [{"volumeHeader": {
            "capGB": 1.0, "capMB": 1026.0, "volumeId": "00001",
            "status": "Ready", "configuration": "TDEV"}}]

        with mock.patch.object(
                self.rest, 'get_resource',
                return_value=self.data.private_vol_rest_response_single):
            volume = self.rest.get_private_volume_list(array_id)
            self.assertEqual(response, volume)

    def test_get_private_volume_list_none(self):
        array_id = self.data.array
        response = []
        with mock.patch.object(
                self.rest, 'get_resource', return_value=
                VMAXCommonData.private_vol_rest_response_none):
            vol_list = self.rest.get_private_volume_list(array_id)
            self.assertEqual(response, vol_list)

    @mock.patch.object(
        rest.VMAXRest, 'get_iterator_page_list', return_value=
        VMAXCommonData.private_vol_rest_response_iterator_second['result'])
    @mock.patch.object(
        rest.VMAXRest, 'get_resource', return_value=
        VMAXCommonData.private_vol_rest_response_iterator_first)
    def test_get_private_volume_list_iterator(self, mock_get_resource,
                                              mock_iterator):
        array_id = self.data.array
        response = [
            {"volumeHeader": {
                "capGB": 1.0, "capMB": 1026.0, "volumeId": "00002",
                "status": "Ready", "configuration": "TDEV"}},
            {"volumeHeader": {
                "capGB": 1.0, "capMB": 1026.0, "volumeId": "00001",
                "status": "Ready", "configuration": "TDEV"}}]
        volume = self.rest.get_private_volume_list(array_id)
        self.assertEqual(response, volume)

    def test_get_iterator_list(self):
        with mock.patch.object(
                self.rest, '_get_request', side_effect=[
                    self.data.rest_iterator_resonse_one,
                    self.data.rest_iterator_resonse_two]):

            expected_response = [
                {"volumeHeader": {
                    "capGB": 1.0, "capMB": 1026.0, "volumeId": "00001",
                    "status": "Ready", "configuration": "TDEV"}},
                {"volumeHeader": {
                    "capGB": 1.0, "capMB": 1026.0, "volumeId": "00002",
                    "status": "Ready", "configuration": "TDEV"}}]
            iterator_id = 'test_iterator_id'
            result_count = 1500
            start_position = 1
            end_position = 1000

            actual_response = self.rest.get_iterator_page_list(
                iterator_id, result_count, start_position, end_position)
            self.assertEqual(expected_response, actual_response)


class VMAXProvisionTest(test.TestCase):
    def setUp(self):
@@ -5179,6 +5483,116 @@ class VMAXCommonTest(test.TestCase):
        initiator_check = self.common._get_initiator_check_flag()
        self.assertTrue(initiator_check)

    def test_get_manageable_volumes_success(self):
        marker = limit = offset = sort_keys = sort_dirs = None
        with mock.patch.object(
                self.rest, 'get_private_volume_list',
                return_value=self.data.priv_vol_func_response_single):
            vols_lists = self.common.get_manageable_volumes(
                marker, limit, offset, sort_keys, sort_dirs)
            expected_response = [
                {'reference': {'source-id': '00001'}, 'safe_to_manage': True,
                 'size': 1.0, 'reason_not_safe': None, 'cinder_id': None,
                 'extra_info': {'config': 'TDEV', 'emulation': 'FBA'}}]
            self.assertEqual(vols_lists, expected_response)

    def test_get_manageable_volumes_filters_set(self):
        marker, limit, offset = '00002', 2, 1
        sort_keys, sort_dirs = 'size', 'desc'
        with mock.patch.object(
                self.rest, 'get_private_volume_list',
                return_value=self.data.priv_vol_func_response_multi):
            vols_lists = self.common.get_manageable_volumes(
                marker, limit, offset, sort_keys, sort_dirs)
            expected_response = [
                {'reference': {'source-id': '00003'}, 'safe_to_manage': True,
                 'size': 300, 'reason_not_safe': None, 'cinder_id': None,
                 'extra_info': {'config': 'TDEV', 'emulation': 'FBA'}},
                {'reference': {'source-id': '00004'}, 'safe_to_manage': True,
                 'size': 400, 'reason_not_safe': None, 'cinder_id': None,
                 'extra_info': {'config': 'TDEV', 'emulation': 'FBA'}}]
            self.assertEqual(vols_lists, expected_response)

    def test_get_manageable_volumes_fail_no_vols(self):
        marker = limit = offset = sort_keys = sort_dirs = None
        with mock.patch.object(
                self.rest, 'get_private_volume_list',
                return_value=[]):
            expected_response = []
            vol_list = self.common.get_manageable_volumes(
                marker, limit, offset, sort_keys, sort_dirs)
            self.assertEqual(vol_list, expected_response)

    def test_get_manageable_volumes_fail_no_valid_vols(self):
        marker = limit = offset = sort_keys = sort_dirs = None
        with mock.patch.object(
                self.rest, 'get_private_volume_list',
                return_value=self.data.priv_vol_func_response_multi_invalid):
            expected_response = []
            vol_list = self.common.get_manageable_volumes(
                marker, limit, offset, sort_keys, sort_dirs)
            self.assertEqual(vol_list, expected_response)

    def test_get_manageable_snapshots_success(self):
        marker = limit = offset = sort_keys = sort_dirs = None
        with mock.patch.object(
                self.rest, 'get_private_volume_list',
                return_value=self.data.priv_vol_func_response_single):
            snap_list = self.common.get_manageable_snapshots(
                marker, limit, offset, sort_keys, sort_dirs)
            expected_response = [{
                'reference': {'source-name': 'testSnap1'},
                'safe_to_manage': True, 'size': 1,
                'reason_not_safe': None, 'cinder_id': None,
                'extra_info': {
                    'generation': 0, 'secured': False, 'timeToLive': 'N/A',
                    'timestamp': '2017/12/08, 20:01:18'},
                'source_reference': {'source-id': '00001'}}]
            self.assertEqual(snap_list, expected_response)

    def test_get_manageable_snapshots_filters_set(self):
        marker, limit, offset = 'testSnap2', 2, 1
        sort_keys, sort_dirs = 'size', 'desc'
        with mock.patch.object(
                self.rest, 'get_private_volume_list',
                return_value=self.data.priv_vol_func_response_multi):
            vols_lists = self.common.get_manageable_snapshots(
                marker, limit, offset, sort_keys, sort_dirs)
            expected_response = [
                {'reference': {'source-name': 'testSnap3'},
                 'safe_to_manage': True, 'size': 300, 'reason_not_safe': None,
                 'cinder_id': None, 'extra_info': {
                     'generation': 0, 'secured': False, 'timeToLive': 'N/A',
                     'timestamp': '2017/12/08, 20:01:18'},
                 'source_reference': {'source-id': '00003'}},
                {'reference': {'source-name': 'testSnap4'},
                 'safe_to_manage': True, 'size': 400, 'reason_not_safe': None,
                 'cinder_id': None, 'extra_info': {
                     'generation': 0, 'secured': False, 'timeToLive': 'N/A',
                     'timestamp': '2017/12/08, 20:01:18'},
                 'source_reference': {'source-id': '00004'}}]
            self.assertEqual(vols_lists, expected_response)

    def test_get_manageable_snapshots_fail_no_snaps(self):
        marker = limit = offset = sort_keys = sort_dirs = None
        with mock.patch.object(
                self.rest, 'get_private_volume_list',
                return_value=[]):
            expected_response = []
            vols_lists = self.common.get_manageable_snapshots(
                marker, limit, offset, sort_keys, sort_dirs)
            self.assertEqual(vols_lists, expected_response)

    def test_get_manageable_snapshots_fail_no_valid_snaps(self):
        marker = limit = offset = sort_keys = sort_dirs = None
        with mock.patch.object(
                self.rest, 'get_private_volume_list',
                return_value=self.data.priv_vol_func_response_multi_invalid):
            expected_response = []
            vols_lists = self.common.get_manageable_snapshots(
                marker, limit, offset, sort_keys, sort_dirs)
            self.assertEqual(vols_lists, expected_response)


class VMAXFCTest(test.TestCase):
    def setUp(self):

@@ -15,9 +15,11 @@

import ast
from copy import deepcopy
import math
import os.path
import random
import sys
import time

from oslo_config import cfg
from oslo_log import log as logging
@@ -2140,6 +2142,201 @@ class VMAXCommon(object):
            "OpenStack but still remains on VMAX source "
            "%(array_id)s", {'snap_name': snap_name, 'array_id': array})

    def get_manageable_volumes(self, marker, limit, offset, sort_keys,
                               sort_dirs):
        """Lists all manageable volumes.

        :param marker: Begin returning volumes that appear later in the volume
                       list than that represented by this reference. This
                       reference should be json like. Default=None.
        :param limit: Maximum number of volumes to return. Default=None.
        :param offset: Number of volumes to skip after marker. Default=None.
        :param sort_keys: Key to sort by, sort by size or reference. Valid
                          keys: size, reference. Default=None.
        :param sort_dirs: Direction to sort by. Valid dirs: asc, desc.
                          Default=None.
        :return: List of dicts containing all volumes valid for management
        """
        valid_vols = []
        manageable_vols = []
        array = self.pool_info['arrays_info'][0]["SerialNumber"]
        LOG.info("Listing manageable volumes for array %(array_id)s", {
            'array_id': array})
        volumes = self.rest.get_private_volume_list(array)

        # No volumes returned from VMAX
        if not volumes:
            LOG.warning("There were no volumes found on the backend VMAX. "
                        "You need to create some volumes before they can be "
                        "managed into Cinder.")
            return manageable_vols

        for device in volumes:
            # Determine if volume is valid for management
            if self.utils.is_volume_manageable(device):
                valid_vols.append(device['volumeHeader'])

        # For all valid vols, extract relevant data for Cinder response
        for vol in valid_vols:
            volume_dict = {'reference': {'source-id': vol['volumeId']},
                           'safe_to_manage': True,
                           'size': int(math.ceil(vol['capGB'])),
                           'reason_not_safe': None, 'cinder_id': None,
                           'extra_info': {
                               'config': vol['configuration'],
                               'emulation': vol['emulationType']}}
            manageable_vols.append(volume_dict)

        # If volume list is populated, perform filtering on user params
        if len(manageable_vols) > 0:
            # If sort keys selected, determine if by size or reference, and
            # direction of sort
            if sort_keys:
                reverse = False
                if sort_dirs:
                    if 'desc' in sort_dirs[0]:
                        reverse = True
                if sort_keys[0] == 'size':
                    manageable_vols = sorted(manageable_vols,
                                             key=lambda k: k['size'],
                                             reverse=reverse)
                if sort_keys[0] == 'reference':
                    manageable_vols = sorted(manageable_vols,
                                             key=lambda k: k['reference'][
                                                 'source-id'],
                                             reverse=reverse)

            # If marker provided, return only manageable volumes after marker
            if marker:
                vol_index = None
                for vol in manageable_vols:
                    if vol['reference']['source-id'] == marker:
                        vol_index = manageable_vols.index(vol)
                if vol_index is not None:
                    manageable_vols = manageable_vols[vol_index:]
                else:
                    msg = _("Volume marker not found, please check supplied "
                            "device ID and try again.")
                    raise exception.VolumeBackendAPIException(msg)

            # If offset or limit provided, offset or limit result list
            if offset:
                manageable_vols = manageable_vols[offset:]
            if limit:
                manageable_vols = manageable_vols[:limit]

        return manageable_vols

    def get_manageable_snapshots(self, marker, limit, offset, sort_keys,
                                 sort_dirs):
        """Lists all manageable snapshots.

        :param marker: Begin returning volumes that appear later in the volume
                       list than that represented by this reference. This
                       reference should be json like. Default=None.
        :param limit: Maximum number of volumes to return. Default=None.
        :param offset: Number of volumes to skip after marker. Default=None.
        :param sort_keys: Key to sort by, sort by size or reference.
                          Valid keys: size, reference. Default=None.
        :param sort_dirs: Direction to sort by. Valid dirs: asc, desc.
                          Default=None.
        :return: List of dicts containing all snapshots valid for management
        """
        manageable_snaps = []
        array = self.pool_info['arrays_info'][0]["SerialNumber"]
        LOG.info("Listing manageable snapshots for array %(array_id)s", {
            'array_id': array})
        volumes = self.rest.get_private_volume_list(array)

        # No volumes returned from VMAX
        if not volumes:
            LOG.warning("There were no volumes found on the backend VMAX. "
                        "You need to create some volumes before snapshots "
                        "can be created and managed into Cinder.")
            return manageable_snaps

        for device in volumes:
            # Determine if volume is valid for management
            if self.utils.is_snapshot_manageable(device):
                # Snapshot valid, extract relevant snap info
                snap_info = device['timeFinderInfo']['snapVXSession'][0][
                    'srcSnapshotGenInfo'][0]['snapshotHeader']
                # Convert timestamp to human readable format
                human_timestamp = time.strftime(
                    "%Y/%m/%d, %H:%M:%S", time.localtime(
                        float(six.text_type(
                            snap_info['timestamp'])[:-3])))
                # If TTL is set, convert value to human readable format
                if int(snap_info['timeToLive']) > 0:
                    human_ttl_timestamp = time.strftime(
                        "%Y/%m/%d, %H:%M:%S", time.localtime(
                            float(six.text_type(
                                snap_info['timeToLive']))))
                else:
                    human_ttl_timestamp = 'N/A'

                # For all valid snaps, extract relevant data for Cinder
                # response
                snap_dict = {
                    'reference': {
                        'source-name': snap_info['snapshotName']},
                    'safe_to_manage': True,
                    'size': int(
                        math.ceil(device['volumeHeader']['capGB'])),
                    'reason_not_safe': None, 'cinder_id': None,
                    'extra_info': {
                        'generation': snap_info['generation'],
                        'secured': snap_info['secured'],
                        'timeToLive': human_ttl_timestamp,
                        'timestamp': human_timestamp},
                    'source_reference': {'source-id': snap_info['device']}}
                manageable_snaps.append(snap_dict)

        # If snapshot list is populated, perform filtering on user params
        if len(manageable_snaps) > 0:
            # Order snapshots by source deviceID and not snapshot name
            manageable_snaps = sorted(
                manageable_snaps,
                key=lambda k: k['source_reference']['source-id'])
            # If sort keys selected, determine if by size or reference, and
            # direction of sort
            if sort_keys:
                reverse = False
                if sort_dirs:
                    if 'desc' in sort_dirs[0]:
                        reverse = True
                if sort_keys[0] == 'size':
                    manageable_snaps = sorted(manageable_snaps,
                                              key=lambda k: k['size'],
                                              reverse=reverse)
                if sort_keys[0] == 'reference':
                    manageable_snaps = sorted(manageable_snaps,
                                              key=lambda k: k['reference'][
                                                  'source-name'],
                                              reverse=reverse)

            # If marker provided, return only snapshots after marker
            if marker:
                snap_index = None
                for snap in manageable_snaps:
                    if snap['reference']['source-name'] == marker:
                        snap_index = manageable_snaps.index(snap)
                if snap_index is not None:
                    manageable_snaps = manageable_snaps[snap_index:]
                else:
                    msg = (_("Snapshot marker %(marker)s not found, marker "
                             "provided must be a valid VMAX snapshot ID") %
                           {'marker': marker})
                    raise exception.VolumeBackendAPIException(msg)

            # If offset or limit provided, offset or limit result list
            if offset:
                manageable_snaps = manageable_snaps[offset:]
            if limit:
                manageable_snaps = manageable_snaps[:limit]

        return manageable_snaps

    def retype(self, volume, new_type, host):
        """Migrate volume to another host using retype.
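
Within VMAXCommon the user-supplied filters are applied in a fixed order once the candidate list has been built: optional sort (by size or reference), then marker, then offset, then limit. A small standalone sketch (illustrative only, not part of the patch) of how marker, offset and limit compose, using the same device IDs as the unit tests above:

# Illustrative only: marker, then offset, then limit, as in
# VMAXCommon.get_manageable_volumes.
vols = [{'reference': {'source-id': dev}, 'size': size}
        for dev, size in [('00001', 100), ('00002', 200),
                          ('00003', 300), ('00004', 400)]]

marker, offset, limit = '00002', 1, 2
idx = next(i for i, vol in enumerate(vols)
           if vol['reference']['source-id'] == marker)
filtered = vols[idx:][offset:][:limit]
# filtered -> devices 00003 and 00004, matching the expected result in
# test_get_manageable_volumes_filters_set.
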
@@ -93,6 +93,8 @@ class VMAXFCDriver(san.SanDriver, driver.FibreChannelDriver):
        3.2.0 - Support for retyping replicated volumes (bp
                vmax-retype-replicated-volumes)
              - Support for multiattach volumes (bp vmax-allow-multi-attach)
              - Support for list manageable volumes and snapshots
                (bp/vmax-list-manage-existing)
    """

    VERSION = "3.2.0"
@@ -521,6 +523,40 @@ class VMAXFCDriver(san.SanDriver, driver.FibreChannelDriver):
        """
        self.common.unmanage_snapshot(snapshot)

    def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
                               sort_keys, sort_dirs):
        """Lists all manageable volumes.

        :param cinder_volumes: List of currently managed Cinder volumes.
                               Unused in driver.
        :param marker: Begin returning volumes that appear later in the volume
                       list than that represented by this reference.
        :param limit: Maximum number of volumes to return. Default=1000.
        :param offset: Number of volumes to skip after marker.
        :param sort_keys: Results sort key. Valid keys: size, reference.
        :param sort_dirs: Results sort direction. Valid dirs: asc, desc.
        :return: List of dicts containing all manageable volumes.
        """
        return self.common.get_manageable_volumes(marker, limit, offset,
                                                  sort_keys, sort_dirs)

    def get_manageable_snapshots(self, cinder_snapshots, marker, limit,
                                 offset, sort_keys, sort_dirs):
        """Lists all manageable snapshots.

        :param cinder_snapshots: List of currently managed Cinder snapshots.
                                 Unused in driver.
        :param marker: Begin returning volumes that appear later in the
                       snapshot list than that represented by this reference.
        :param limit: Maximum number of snapshots to return. Default=1000.
        :param offset: Number of snapshots to skip after marker.
        :param sort_keys: Results sort key. Valid keys: size, reference.
        :param sort_dirs: Results sort direction. Valid dirs: asc, desc.
        :return: List of dicts containing all manageable snapshots.
        """
        return self.common.get_manageable_snapshots(marker, limit, offset,
                                                    sort_keys, sort_dirs)

    def retype(self, ctxt, volume, new_type, diff, host):
        """Migrate volume to another host using retype.

@@ -98,6 +98,8 @@ class VMAXISCSIDriver(san.SanISCSIDriver):
        3.2.0 - Support for retyping replicated volumes (bp
                vmax-retype-replicated-volumes)
              - Support for multiattach volumes (bp vmax-allow-multi-attach)
              - Support for list manageable volumes and snapshots
                (bp/vmax-list-manage-existing)
    """

    VERSION = "3.2.0"
@@ -440,6 +442,40 @@ class VMAXISCSIDriver(san.SanISCSIDriver):
        """
        self.common.unmanage_snapshot(snapshot)

    def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
                               sort_keys, sort_dirs):
        """Lists all manageable volumes.

        :param cinder_volumes: List of currently managed Cinder volumes.
                               Unused in driver.
        :param marker: Begin returning volumes that appear later in the volume
                       list than that represented by this reference.
        :param limit: Maximum number of volumes to return. Default=1000.
        :param offset: Number of volumes to skip after marker.
        :param sort_keys: Results sort key. Valid keys: size, reference.
        :param sort_dirs: Results sort direction. Valid dirs: asc, desc.
        :return: List of dicts containing all manageable volumes.
        """
        return self.common.get_manageable_volumes(marker, limit, offset,
                                                  sort_keys, sort_dirs)

    def get_manageable_snapshots(self, cinder_snapshots, marker, limit,
                                 offset, sort_keys, sort_dirs):
        """Lists all manageable snapshots.

        :param cinder_snapshots: List of currently managed Cinder snapshots.
                                 Unused in driver.
        :param marker: Begin returning volumes that appear later in the
                       snapshot list than that represented by this reference.
        :param limit: Maximum number of snapshots to return. Default=1000.
        :param offset: Number of snapshots to skip after marker.
        :param sort_keys: Results sort key. Valid keys: size, reference.
        :param sort_dirs: Results sort direction. Valid dirs: asc, desc.
        :return: List of dicts containing all manageable snapshots.
        """
        return self.common.get_manageable_snapshots(marker, limit, offset,
                                                    sort_keys, sort_dirs)

    def retype(self, ctxt, volume, new_type, diff, host):
        """Migrate volume to another host using retype.

@@ -1011,6 +1011,72 @@ class VMAXRest(object):
                pass
        return device_ids

    def get_private_volume_list(self, array, params=None):
        """Retrieve list with volume details.

        :param array: the array serial number
        :param params: filter parameters
        :returns: list -- dicts with volume information
        """
        volumes = []
        volume_info = self.get_resource(
            array, SLOPROVISIONING, 'volume', params=params,
            private='/private')
        try:
            volumes = volume_info['resultList']['result']
            iterator_id = volume_info['id']
            volume_count = volume_info['count']
            max_page_size = volume_info['maxPageSize']
            start_position = volume_info['resultList']['from']
            end_position = volume_info['resultList']['to']
        except (KeyError, TypeError):
            return volumes

        if volume_count > max_page_size:
            LOG.info("More entries exist in the result list, retrieving "
                     "remainder of results from iterator.")

            start_position += 1000
            end_position += 1000
            iterator_response = self.get_iterator_page_list(
                iterator_id, volume_count, start_position, end_position)

            volumes += iterator_response

        return volumes

    def get_iterator_page_list(self, iterator_id, result_count,
                               start_position, end_position):
        """Iterate through response if more than one page available.

        :param iterator_id: the iterator ID
        :param result_count: the amount of results in the iterator
        :param start_position: position to begin iterator from
        :param end_position: position to stop iterator
        :return: list -- merged results from multiple pages
        """
        iterator_result = []
        has_more_entries = True

        while has_more_entries:
            if start_position <= result_count <= end_position:
                end_position = result_count
                has_more_entries = False

            params = {'to': end_position, 'from': start_position}
            target_uri = ('/common/Iterator/%(iterator_id)s/page' % {
                'iterator_id': iterator_id})
            iterator_response = self._get_request(target_uri, 'iterator',
                                                  params)
            try:
                iterator_result += iterator_response['result']
                start_position += 1000
                end_position += 1000
            except (KeyError, TypeError):
                pass

        return iterator_result

    def _modify_volume(self, array, device_id, payload):
        """Modify a volume (PUT operation).
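
Unisphere caps a single response at maxPageSize entries (1000 in the test data), so get_private_volume_list takes the first page from the initial response and get_iterator_page_list walks the iterator in 1000-entry windows until result_count is covered. A standalone sketch of that window arithmetic (illustrative only, not part of the patch):

# Standalone sketch (not part of the patch) of the paging arithmetic used by
# get_iterator_page_list: the window advances in 1000-entry steps until it
# covers result_count. Numbers mirror the iterator unit test (1500 results).
def page_windows(result_count, start=1, end=1000, step=1000):
    windows = []
    more = True
    while more:
        if start <= result_count <= end:
            end = result_count
            more = False
        windows.append((start, end))
        start += step
        end += step
    return windows

print(page_windows(1500))  # [(1, 1000), (1001, 1500)]
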
@@ -879,3 +879,81 @@ class VMAXUtils(object):
        is_tgt_multiattach = vol_utils.is_replicated_str(
            new_type_extra_specs.get('multiattach'))
        return is_src_multiattach != is_tgt_multiattach

    @staticmethod
    def is_volume_manageable(source_vol):
        """Check if a volume with verbose description is valid for management.

        :param source_vol: the verbose volume dict
        :return: bool True/False
        """
        vol_head = source_vol['volumeHeader']

        # VMAX disk geometry uses cylinders, so volume sizes are matched to
        # the nearest full cylinder size: 1GB = 547cyl = 1026MB
        if vol_head['capMB'] < 1026 or not vol_head['capGB'].is_integer():
            return False

        if (vol_head['numSymDevMaskingViews'] > 0 or
                vol_head['mapped'] is True or
                source_vol['maskingInfo']['masked'] is True):
            return False

        if (vol_head['status'] != 'Ready' or
                vol_head['serviceState'] != 'Normal' or
                vol_head['emulationType'] != 'FBA' or
                vol_head['configuration'] != 'TDEV' or
                vol_head['system_resource'] is True or
                vol_head['private'] is True or
                vol_head['encapsulated'] is True or
                vol_head['reservationInfo']['reserved'] is True):
            return False

        for key, value in source_vol['rdfInfo'].items():
            if value is True:
                return False

        if source_vol['timeFinderInfo']['snapVXTgt'] is True:
            return False

        if vol_head['nameModifier'][0:3] == 'OS-':
            return False

        return True

    @staticmethod
    def is_snapshot_manageable(source_vol):
        """Check if a volume with snapshot description is valid for management.

        :param source_vol: the verbose volume dict
        :return: bool True/False
        """
        vol_head = source_vol['volumeHeader']

        if not source_vol['timeFinderInfo']['snapVXSrc']:
            return False

        # VMAX disk geometry uses cylinders, so volume sizes are matched to
        # the nearest full cylinder size: 1GB = 547cyl = 1026MB
        if (vol_head['capMB'] < 1026 or
                not vol_head['capGB'].is_integer()):
            return False

        if (vol_head['emulationType'] != 'FBA' or
                vol_head['configuration'] != 'TDEV' or
                vol_head['private'] is True or
                vol_head['system_resource'] is True):
            return False

        snap_gen_info = (source_vol['timeFinderInfo']['snapVXSession'][0][
            'srcSnapshotGenInfo'][0]['snapshotHeader'])

        if (snap_gen_info['snapshotName'][0:3] == 'OS-' or
                snap_gen_info['snapshotName'][0:5] == 'temp-'):
            return False

        if (snap_gen_info['expired'] is True
                or snap_gen_info['generation'] > 0):
            return False

        return True
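
The size rule in is_volume_manageable and is_snapshot_manageable comes from VMAX cylinder-based provisioning: 1 GB corresponds to 547 cylinders, i.e. 1026 MB, so a candidate must report at least 1026 MB and a whole number of GB. A quick numeric check (illustrative only, not part of the patch):

# Quick numeric check (illustrative, not part of the patch) of the cylinder
# rule used by is_volume_manageable: 1 GB maps to 547 cylinders = 1026 MB,
# so manageable volumes must be at least 1026 MB and a whole number of GB.
def passes_size_check(cap_gb, cap_mb):
    return cap_mb >= 1026 and float(cap_gb).is_integer()

print(passes_size_check(1.0, 1026.0))  # True  - smallest manageable size
print(passes_size_check(1.0, 10.0))    # False - too small (invalid test vol)
print(passes_size_check(1.5, 1539.0))  # False - not a whole number of GB
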
@@ -0,0 +1,4 @@
---
features:
  - Dell EMC VMAX driver has added support for listing manageable
    volumes and snapshots.