Dell PowerVault: Fix "cinder manageable-list"
The Dell PowerVault ME driver claims support for importing volumes, and although it does support "cinder manage", some related functions were missing, so it did not support "cinder manageable-list" or the corresponding snapshot operations.

Partial-Bug: #1922255
Depends-On: https://review.opendev.org/c/openstack/cinder/+/809968
Change-Id: I73958099b32e44e7e4875d0eba0e2c0096a12252
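With this in place the operator workflow becomes the standard manage/unmanage one. A rough sketch of the commands involved (the host@backend#pool string is a placeholder, and 'foo'/'snap' are names taken from the test data below, not output of this patch):

    # list volumes and snapshots the backend could import
    cinder manageable-list myhost@powervault#A
    cinder snapshot-manageable-list myhost@powervault#A

    # import one of them by its array-side name
    cinder manage myhost@powervault#A foo
    cinder snapshot-manage <parent-volume> snap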
@@ -110,6 +110,109 @@ response_ports = '''<RESPONSE>
<PROPERTY name="status">Up</PROPERTY></OBJECT>
</RESPONSE>'''

# mccli -x array show-volumes | egrep \
# '(RESPONSE|OBJECT|volume-name|volume-type|serial-number|wwn| \
# storage-pool-name|size-numeric|volume-parent)'
response_vols = '''
<RESPONSE VERSION="L100" REQUEST="show volumes">
<OBJECT basetype="volumes" name="volume" oid="1" format="rows">
<PROPERTY name="storage-pool-name" type="string">A</PROPERTY>
<PROPERTY name="volume-name" type="string">bar</PROPERTY>
<PROPERTY name="size-numeric" type="uint64">2097152</PROPERTY>
<PROPERTY name="total-size-numeric" type="uint64">2097152</PROPERTY>
<PROPERTY name="allocated-size-numeric" type="uint64">0</PROPERTY>
<PROPERTY name="serial-number" key="true"
type="string">00c0ff53a30500000323416101000000</PROPERTY>
<PROPERTY name="read-ahead-size-numeric" type="uint32">-1</PROPERTY>
<PROPERTY name="volume-type" type="string">base</PROPERTY>
<PROPERTY name="volume-type-numeric" type="uint32">15</PROPERTY>
<PROPERTY name="volume-parent" type="string"></PROPERTY>
<PROPERTY name="wwn"
type="string">600C0FF00053A3050323416101000000</PROPERTY>
</OBJECT>
<OBJECT basetype="volumes" name="volume" oid="2" format="rows">
<PROPERTY name="storage-pool-name" type="string">A</PROPERTY>
<PROPERTY name="volume-name" type="string">foo</PROPERTY>
<PROPERTY name="size-numeric" type="uint64">8388608</PROPERTY>
<PROPERTY name="total-size-numeric" type="uint64">8388608</PROPERTY>
<PROPERTY name="allocated-size-numeric" type="uint64">0</PROPERTY>
<PROPERTY name="serial-number" key="true"
type="string">00c0ff53a3050000df513e6101000000</PROPERTY>
<PROPERTY name="read-ahead-size-numeric" type="uint32">-1</PROPERTY>
<PROPERTY name="volume-type" type="string">base</PROPERTY>
<PROPERTY name="volume-type-numeric" type="uint32">15</PROPERTY>
<PROPERTY name="volume-parent" type="string"></PROPERTY>
<PROPERTY name="wwn"
type="string">600C0FF00053A305DF513E6101000000</PROPERTY>
</OBJECT>
<OBJECT basetype="volumes" name="volume" oid="5" format="rows">
<PROPERTY name="storage-pool-name" type="string">A</PROPERTY>
<PROPERTY name="volume-name" type="string">snap</PROPERTY>
<PROPERTY name="size-numeric" type="uint64">2097152</PROPERTY>
<PROPERTY name="total-size-numeric" type="uint64">2097152</PROPERTY>
<PROPERTY name="allocated-size-numeric" type="uint64">0</PROPERTY>
<PROPERTY name="serial-number" key="true"
type="string">00c0ff53a3050000fbc5416101000000</PROPERTY>
<PROPERTY name="read-ahead-size-numeric" type="uint32">-1</PROPERTY>
<PROPERTY name="volume-type" type="string">snapshot</PROPERTY>
<PROPERTY name="volume-type-numeric" type="uint32">13</PROPERTY>
<PROPERTY name="volume-parent"
type="string">00c0ff53a30500000323416101000000</PROPERTY>
<PROPERTY name="wwn"
type="string">600C0FF00053A305FBC5416101000000</PROPERTY>
</OBJECT>
<OBJECT basetype="volumes" name="volume" oid="6" format="rows">
<PROPERTY name="storage-pool-name" type="string">A</PROPERTY>
<PROPERTY name="volume-name" type="string">vqoINx4UbS-Cno3gDq1V</PROPERTY>
<PROPERTY name="size-numeric" type="uint64">2097152</PROPERTY>
<PROPERTY name="total-size-numeric" type="uint64">2097152</PROPERTY>
<PROPERTY name="allocated-size-numeric" type="uint64">0</PROPERTY>
<PROPERTY name="serial-number" key="true"
type="string">00c0ff53a305000024c6416101000000</PROPERTY>
<PROPERTY name="read-ahead-size-numeric" type="uint32">-1</PROPERTY>
<PROPERTY name="volume-type" type="string">base</PROPERTY>
<PROPERTY name="volume-type-numeric" type="uint32">15</PROPERTY>
<PROPERTY name="volume-parent" type="string"></PROPERTY>
<PROPERTY name="wwn"
type="string">600C0FF00053A30524C6416101000000</PROPERTY>
</OBJECT>
<OBJECT basetype="status" name="status" oid="7">
</OBJECT>
</RESPONSE>
'''

response_maps = '''
<RESPONSE VERSION="L100" REQUEST="show maps">
<OBJECT basetype="volume-view" name="volume-view" oid="1" format="labeled">
<PROPERTY name="volume-serial" key="true"
type="string">00c0ff53a30500000323416101000000</PROPERTY>
<PROPERTY name="volume-name" type="string">bar</PROPERTY>
</OBJECT>
</RESPONSE>
'''

# The two XML samples above will produce the following result from
# get_manageable_volumes():
#
# [{'cinder_id': None,
#   'extra_info': None,
#   'reason_not_safe': 'volume in use',
#   'reference': {'source-name': 'bar'},
#   'safe_to_manage': False,
#   'size': 1},
#  {'cinder_id': 'aa820dc7-851b-4be0-a7a3-7803ab555495',
#   'extra_info': None,
#   'reason_not_safe': 'already managed',
#   'reference': {'source-name': 'vqoINx4UbS-Cno3gDq1V'},
#   'safe_to_manage': False,
#   'size': 1},
#  {'cinder_id': None,
#   'extra_info': None,
#   'reason_not_safe': None,
#   'reference': {'source-name': 'foo'},
#   'safe_to_manage': True,
#   'size': 4}]

response_ports_linear = response_ports % {'ip': 'primary-ip-address'}
response_ports_virtual = response_ports % {'ip': 'ip-address'}

@@ -354,6 +457,16 @@ class FakeConfiguration2(FakeConfiguration1):
    use_chap_auth = None


class fake(dict):
    def __init__(self, *args, **kwargs):
        for d in args:
            self.update(d)
        self.update(kwargs)

    def __getattr__(self, attr):
        return self[attr]
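The fake helper above is a dict that also exposes its keys as attributes, so the tests can hand plain dicts to driver code that expects Cinder objects (for example snapshot.id). A minimal illustration, using the id that appears in the test data:

    snap = fake(id='aa820dc7-851b-4be0-a7a3-7803ab555495')
    assert snap['id'] == snap.id   # __getattr__ falls back to dict lookup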


class TestFCSeagateCommon(test.TestCase):
    def setUp(self):
        super(TestFCSeagateCommon, self).setUp()

@@ -612,9 +725,8 @@ class TestFCSeagateCommon(test.TestCase):
                                 {'capabilities': {}})
        self.assertEqual((False, None), ret)

-    @mock.patch.object(STXCommon, '_get_vol_name')
     @mock.patch.object(STXClient, 'modify_volume_name')
-    def test_manage_existing(self, mock_modify, mock_volume):
+    def test_manage_existing(self, mock_modify):
        existing_ref = {'source-name': 'xxxx'}
        mock_modify.side_effect = [stx_exception.RequestError, None]
        self.assertRaises(exception.Invalid, self.common.manage_existing,
@@ -632,6 +744,82 @@ class TestFCSeagateCommon(test.TestCase):
        ret = self.common.manage_existing_get_size(None, existing_ref)
        self.assertEqual(1, ret)

    @mock.patch.object(STXClient, 'modify_volume_name')
    @mock.patch.object(STXClient, '_request')
    def test_manage_existing_snapshot(self, mock_response, mock_modify):
        fake_snap = fake(test_snap)
        mock_response.side_effect = [etree.XML(response_maps),
                                     etree.XML(response_vols)]
        snap_ref = {'source-name': 'snap'}
        ret = self.common.manage_existing_snapshot(fake_snap, snap_ref)
        self.assertIsNone(ret)
        newname = self.common._get_snap_name(test_snap['id'])
        mock_modify.assert_called_with('snap', newname)

    @mock.patch.object(STXClient, 'get_volume_size')
    def test_manage_existing_snapshot_get_size(self, mock_volume):
        existing_ref = {'source-name': 'xxxx'}
        mock_volume.side_effect = [stx_exception.RequestError, 1]
        self.assertRaises(exception.Invalid,
                          self.common.manage_existing_get_size,
                          None, existing_ref)
        ret = self.common.manage_existing_snapshot_get_size(None, existing_ref)
        self.assertEqual(1, ret)

    @mock.patch.object(STXClient, '_request')
    def test_get_manageable_volumes(self, mock_response):
        mock_response.side_effect = [etree.XML(response_maps),
                                     etree.XML(response_vols)]
        cinder_volumes = [fake(id='aa820dc7-851b-4be0-a7a3-7803ab555495')]
        marker = None
        limit = 1000
        offset = 0
        sort_keys = ['size']
        sort_dirs = ['asc']
        ret = self.common.get_manageable_volumes(cinder_volumes,
                                                 marker, limit, offset,
                                                 sort_keys, sort_dirs)
        # We expect to get back 3 volumes: one manageable,
        # one already managed by Cinder, and one mapped (hence unmanageable)
        self.assertEqual(len(ret), 3)
        reasons_not_seen = {'volume in use', 'already managed'}
        manageable_vols = 0
        for vol in ret:
            if vol['reason_not_safe']:
                reasons_not_seen.discard(vol['reason_not_safe'])
            self.assertGreaterEqual(len(vol['reference']['source-name']), 3)
            if vol['safe_to_manage']:
                manageable_vols += 1
                self.assertIsNone(vol.get('cinder_id'))
            else:
                self.assertIsNotNone(vol['reason_not_safe'])

        self.assertEqual(0, len(reasons_not_seen))
        self.assertEqual(1, manageable_vols)

    @mock.patch.object(STXClient, '_request')
    def test_get_manageable_snapshots(self, mock_response):
        mock_response.side_effect = [etree.XML(response_maps),
                                     etree.XML(response_vols)]
        cinder_volumes = [fake(id='aa820dc7-851b-4be0-a7a3-7803ab555495')]
        marker = None
        limit = 1000
        offset = 0
        sort_keys = ['size']
        sort_dirs = ['asc']
        ret = self.common.get_manageable_snapshots(cinder_volumes,
                                                   marker, limit, offset,
                                                   sort_keys, sort_dirs)
        self.assertEqual(ret, [{
            'cinder_id': None,
            'extra_info': None,
            'reason_not_safe': None,
            'reference': {'source-name': 'snap'},
            'safe_to_manage': True,
            'size': 1,
            'source_reference': {'source-name': 'bar'}
        }])


class TestISCSISeagateCommon(TestFCSeagateCommon):
    def setUp(self):
@@ -703,3 +703,62 @@ class STXClient(object):
        LOG.debug("Array firmware is %s (%s%d)\n",
                  s, self._fw_type, self._fw_rev)
        return s

    def get_volumes(self, filter_type=None):
        """Get a list of volumes and snapshots"""

        serial_number_to_name = {}
        result = {}

        # first get volume mappings so we note which volumes are mapped
        mapping_list = self._request("/show/maps").xpath(
            "//OBJECT[@name='volume-view']")
        maps = {}
        for m in mapping_list:
            for el in m:
                if el.attrib['name'] == 'volume-name':
                    maps[el.text] = 1

        volume_list = self._request("/show/volumes").xpath(
            "//OBJECT[@name='volume']")
        for v in volume_list:
            vol = {}
            for el in v:
                key = el.attrib['name']
                value = el.text
                vol[key] = value

            name = vol['volume-name']
            type = vol['volume-type']
            if type == 'base':
                type = 'volume'
            sn = vol['serial-number']
            wwn = vol['wwn']
            pool = vol['storage-pool-name']
            # size-numeric counts 512-byte blocks; convert it to whole GiB
            size = int((int(vol['size-numeric']) * 512) / 2**30)
            mapped = name in maps
            parent_sn = vol['volume-parent']

            serial_number_to_name[sn] = name

            if filter_type:
                if type != filter_type:
                    continue

            result[name] = {
                'name': name,  # 32-byte array volume name
                'type': type,  # 'volume' or 'snapshot'
                'size': size,  # size in GiB (int)
                'serial': sn,  # serial number
                'wwn': wwn,  # world wide name
                'pool': pool,  # storage pool name
                'mapped': mapped,  # is mapped?
                'parent_serial': parent_sn,  # parent serial number, or None
            }

        # Now that we've seen all the volumes, we can map the parent serial
        # number to a name.
        for v in result.values():
            v['parent'] = serial_number_to_name.get(v['parent_serial'], None)

        return result
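For reference, fed the response_maps/response_vols samples from the test module above, get_volumes() would build an entry for the 'snap' row roughly like the sketch below (values transcribed from the sample XML; 2097152 blocks of 512 bytes is 1 GiB, and the parent serial resolves to the volume 'bar'):

    result['snap'] = {
        'name': 'snap',
        'type': 'snapshot',
        'size': 1,
        'serial': '00c0ff53a3050000fbc5416101000000',
        'wwn': '600C0FF00053A305FBC5416101000000',
        'pool': 'A',
        'mapped': False,   # only 'bar' appears in the show-maps sample
        'parent_serial': '00c0ff53a30500000323416101000000',
        'parent': 'bar',
    }
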
@@ -127,6 +127,10 @@ class STXCommon(object):
        snapshot_name = self._encode_name(snapshot_id)
        return "s%s" % snapshot_name

    def _get_backend_volume_name(self, id, type='volume'):
        name = self._encode_name(id)
        return "%s%s" % (type[0], name)

    def _encode_name(self, name):
        """Get converted array volume name.

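These helpers encode a Cinder id and prefix it with the first letter of the resource type, so a volume is stored on the array as 'v' + encoded id and a snapshot as 's' + encoded id; that convention is what lets _get_manageable_vols() later in this change match backend names back to Cinder ids. A sketch, with illustrative variable names:

    vol_name = common._get_backend_volume_name(volume_id)                 # 'v...'
    snap_name = common._get_backend_volume_name(snapshot_id, 'snapshot')  # 's...'
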
@@ -509,12 +513,34 @@ class STXCommon(object):
        finally:
            self.client_logout()

    def manage_existing_snapshot(self, snapshot, existing_ref):
        """Import an existing snapshot into Cinder."""

        old_snap_name = existing_ref['source-name']
        new_snap_name = self._get_snap_name(snapshot.id)
        LOG.info("Renaming existing snapshot %(old_name)s to "
                 "%(new_name)s", {"old_name": old_snap_name,
                                  "new_name": new_snap_name})

        self.client_login()
        try:
            self.client.modify_volume_name(old_snap_name,
                                           new_snap_name)
        except stx_exception.RequestError as ex:
            LOG.exception("Error managing existing snapshot.")
            raise exception.Invalid(ex)
        finally:
            self.client_logout()

        return None

    def manage_existing_get_size(self, volume, existing_ref):
        """Return size of volume to be managed by manage_existing.

        existing_ref is a dictionary of the form:
        {'source-name': <name of the volume>}
        """

        target_vol_name = existing_ref['source-name']

        self.client_login()
@@ -526,3 +552,69 @@ class STXCommon(object):
            raise exception.Invalid(ex)
        finally:
            self.client_logout()

    def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
        """Return size of volume to be managed by manage_existing."""
        return self.manage_existing_get_size(snapshot, existing_ref)

    def _get_manageable_vols(self, cinder_resources, resource_type,
                             marker, limit, offset, sort_keys,
                             sort_dirs):
        """List volumes or snapshots on the backend."""

        # We can't translate a backend volume name into a Cinder id
        # directly, so we create a map to do it.
        volume_name_to_id = {}
        for resource in cinder_resources:
            key = self._get_backend_volume_name(resource['id'], resource_type)
            value = resource['id']
            volume_name_to_id[key] = value

        self.client_login()
        try:
            vols = self.client.get_volumes(filter_type=resource_type)
        except stx_exception.RequestError as ex:
            LOG.exception("Error getting manageable volumes.")
            raise exception.Invalid(ex)
        finally:
            self.client_logout()

        entries = []
        for vol in vols.values():
            vol_info = {'reference': {'source-name': vol['name']},
                        'size': vol['size'],
                        'cinder_id': None,
                        'extra_info': None}

            potential_id = volume_name_to_id.get(vol['name'])
            if potential_id:
                vol_info['safe_to_manage'] = False
                vol_info['reason_not_safe'] = 'already managed'
                vol_info['cinder_id'] = potential_id
            elif vol['mapped']:
                vol_info['safe_to_manage'] = False
                vol_info['reason_not_safe'] = '%s in use' % resource_type
            else:
                vol_info['safe_to_manage'] = True
                vol_info['reason_not_safe'] = None

            if resource_type == 'snapshot':
                origin = vol['parent']
                vol_info['source_reference'] = {'source-name': origin}

            entries.append(vol_info)

        return volume_utils.paginate_entries_list(entries, marker, limit,
                                                  offset, sort_keys, sort_dirs)

    def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
                               sort_keys, sort_dirs):
        return self._get_manageable_vols(cinder_volumes, 'volume',
                                         marker, limit,
                                         offset, sort_keys, sort_dirs)

    def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,
                                 sort_keys, sort_dirs):
        return self._get_manageable_vols(cinder_snapshots, 'snapshot',
                                         marker, limit,
                                         offset, sort_keys, sort_dirs)

@@ -180,5 +180,27 @@ class STXFCDriver(cinder.volume.driver.FibreChannelDriver):
    def manage_existing_get_size(self, volume, existing_ref):
        return self.common.manage_existing_get_size(volume, existing_ref)

    def manage_existing_snapshot(self, snapshot, existing_ref):
        return self.common.manage_existing_snapshot(snapshot, existing_ref)

    def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
        return self.common.manage_existing_snapshot_get_size(snapshot,
                                                             existing_ref)

    def unmanage(self, volume):
        pass

    def unmanage_snapshot(self, snapshot):
        pass

    def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
                               sort_keys, sort_dirs):
        return self.common.get_manageable_volumes(cinder_volumes,
                                                  marker, limit, offset,
                                                  sort_keys, sort_dirs)

    def get_manageable_snapshots(self, cinder_snapshots, marker, limit,
                                 offset, sort_keys, sort_dirs):
        return self.common.get_manageable_snapshots(cinder_snapshots,
                                                    marker, limit, offset,
                                                    sort_keys, sort_dirs)

@@ -206,5 +206,27 @@ class STXISCSIDriver(cinder.volume.driver.ISCSIDriver):
    def manage_existing_get_size(self, volume, existing_ref):
        return self.common.manage_existing_get_size(volume, existing_ref)

    def manage_existing_snapshot(self, snapshot, existing_ref):
        return self.common.manage_existing_snapshot(snapshot, existing_ref)

    def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
        return self.common.manage_existing_snapshot_get_size(snapshot,
                                                             existing_ref)

    def unmanage(self, volume):
        pass

    def unmanage_snapshot(self, snapshot):
        pass

    def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
                               sort_keys, sort_dirs):
        return self.common.get_manageable_volumes(cinder_volumes,
                                                  marker, limit, offset,
                                                  sort_keys, sort_dirs)

    def get_manageable_snapshots(self, cinder_snapshots, marker, limit,
                                 offset, sort_keys, sort_dirs):
        return self.common.get_manageable_snapshots(cinder_snapshots,
                                                    marker, limit, offset,
                                                    sort_keys, sort_dirs)

@@ -0,0 +1,6 @@
---
fixes:
  - |
    DellEMC PowerVault ME Series FC/iSCSI driver
    `bug #1922255 <https://bugs.launchpad.net/cinder/+bug/1922255>`_:
    Implement missing support for 'cinder manageable-list'.