NEC driver: implement manage/unmanage functions.

Implement manage/unmanage volume and manage/unmanage snapshot
functions for the NEC volume driver.

Change-Id: Ied1591768979f3d20c58c96160a0edf067e60522
Implements: blueprint nec-manage-unmanage
This commit is contained in:
Shunei Shiono 2017-12-06 15:37:44 +09:00
parent de4b1f1354
commit db7d054d33
5 changed files with 416 additions and 3 deletions

View File

@ -187,8 +187,8 @@ xml_out = '''
<UNIT name="LD Name">4T7JpyqI3UuPlKeT9D3VQF</UNIT>
<UNIT name="LD Capacity">6442450944</UNIT>
<UNIT name="Pool No.(h)">0001</UNIT>
<UNIT name="Purpose">RPL</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
<UNIT name="Purpose">(invalid attribute)</UNIT>
<UNIT name="RPL Attribute">SV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
@ -303,6 +303,14 @@ xml_out = '''
<SECTION name="Path List">
<UNIT name="Path">1000-0090-FAA0-786A</UNIT>
</SECTION>
<SECTION name="LUN/LD List">
<UNIT name="LUN(h)">0000</UNIT>
<UNIT name="LDN(h)">0005</UNIT>
</SECTION>
<SECTION name="LUN/LD List">
<UNIT name="LUN(h)">0001</UNIT>
<UNIT name="LDN(h)">0006</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="LD Set(iSCSI)">
<SECTION name="LD Set(iSCSI) Information">
@ -1163,3 +1171,182 @@ class Migrate_test(volume_helper.MStorageDSVDriver, test.TestCase):
self.newvol, 'available')
self.assertIsNone(update_data['_name_id'])
self.assertIsNone(update_data['provider_location'])
class ManageUnmanage_test(volume_helper.MStorageDSVDriver, test.TestCase):
    """Tests for manage/unmanage volume support in the NEC M-Series driver."""

    # Skip creation of the ismview directory: setUp must not touch the
    # filesystem.
    @mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
                '_create_ismview_dir', new=mock.Mock())
    def setUp(self):
        super(ManageUnmanage_test, self).setUp()
        self._set_config(conf.Configuration(None), 'dummy', 'dummy')
        self.do_setup(None)
        # Pool 0 is the volume pool; pool 1 is the backup (snapshot) pool.
        self._properties['pool_pools'] = {0}
        self._properties['pool_backup_pools'] = {1}

    def test_is_manageable_volume(self):
        # Manageable: plain IV, or BV regardless of Purpose, in pool 0.
        ld_ok_iv = {'pool_num': 0, 'RPL Attribute': 'IV', 'Purpose': '---'}
        ld_ok_bv = {'pool_num': 0, 'RPL Attribute': 'BV', 'Purpose': 'INV'}
        # Not manageable: wrong pool, replication roles (MV/RV/SV), or an
        # IV with a purpose set.
        ld_ng_pool = {'pool_num': 1, 'RPL Attribute': 'IV', 'Purpose': '---'}
        ld_ng_rpl1 = {'pool_num': 0, 'RPL Attribute': 'MV', 'Purpose': 'INV'}
        ld_ng_rpl2 = {'pool_num': 0, 'RPL Attribute': 'RV', 'Purpose': 'INV'}
        ld_ng_rpl3 = {'pool_num': 0, 'RPL Attribute': 'SV', 'Purpose': 'INV'}
        ld_ng_purp = {'pool_num': 0, 'RPL Attribute': 'IV', 'Purpose': 'INV'}
        self.assertTrue(self._is_manageable_volume(ld_ok_iv))
        self.assertTrue(self._is_manageable_volume(ld_ok_bv))
        self.assertFalse(self._is_manageable_volume(ld_ng_pool))
        self.assertFalse(self._is_manageable_volume(ld_ng_rpl1))
        self.assertFalse(self._is_manageable_volume(ld_ng_rpl2))
        self.assertFalse(self._is_manageable_volume(ld_ng_rpl3))
        self.assertFalse(self._is_manageable_volume(ld_ng_purp))

    @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
                'view_all', patch_view_all)
    def test_get_manageable_volumes(self):
        # No volumes known to cinder: listing sorted descending by reference.
        current_volumes = []
        volumes = self.get_manageable_volumes(current_volumes, None,
                                              100, 0, ['reference'], ['dec'])
        self.assertEqual('LX:287RbQoP7VdwR1WsPC2fZT',
                         volumes[2]['reference']['source-name'])
        # Same listing sorted ascending.
        current_volumes = []
        volumes = self.get_manageable_volumes(current_volumes, None,
                                              100, 0, ['reference'], ['asc'])
        self.assertEqual(' :2000000991020012000A',
                         volumes[0]['reference']['source-name'])
        self.assertEqual(10, len(volumes))

        # An LD whose name maps to an existing cinder id must be flagged
        # as not safe to manage.
        volume = {'id': '46045673-41e7-44a7-9333-02f07feab04b'}
        current_volumes = []
        current_volumes.append(volume)
        volumes = self.get_manageable_volumes(current_volumes, None,
                                              100, 0, ['reference'], ['dec'])
        self.assertFalse(volumes[2]['safe_to_manage'])
        self.assertFalse(volumes[3]['safe_to_manage'])
        self.assertTrue(volumes[4]['safe_to_manage'])

    @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
                'view_all', patch_view_all)
    def test_manage_existing(self):
        # Capture the backend rename instead of executing it.
        mock_rename = mock.Mock()
        self._cli.changeldname = mock_rename
        self.newvol = DummyVolume()
        self.newvol.id = "46045673-41e7-44a7-9333-02f07feab04b"

        current_volumes = []
        volumes = self.get_manageable_volumes(current_volumes, None,
                                              100, 0, ['reference'], ['dec'])
        # Success path: the LD is renamed to the cinder-derived name.
        self.manage_existing(self.newvol, volumes[4]['reference'])
        self._cli.changeldname.assert_called_once_with(
            None,
            'LX:287RbQoP7VdwR1WsPC2fZT',
            ' :20000009910200140009')
        # An LD attached to an LD set is in-use and must be rejected.
        with self.assertRaisesRegex(exception.ManageExistingInvalidReference,
                                    'Specified resource is already in-use.'):
            self.manage_existing(self.newvol, volumes[3]['reference'])
        # An LD in the backup pool cannot be managed as a volume.
        volume = {'source-name': 'LX:yEUHrXa5AHMjOZZLb93eP'}
        with self.assertRaisesRegex(exception.ManageExistingVolumeTypeMismatch,
                                    'Volume type is unmatched.'):
            self.manage_existing(self.newvol, volume)

    @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
                'view_all', patch_view_all)
    def test_manage_existing_get_size(self):
        self.newvol = DummyVolume()
        self.newvol.id = "46045673-41e7-44a7-9333-02f07feab04b"
        current_volumes = []
        volumes = self.get_manageable_volumes(current_volumes, None,
                                              100, 0, ['reference'], ['dec'])
        size_in_gb = self.manage_existing_get_size(self.newvol,
                                                   volumes[3]['reference'])
        self.assertEqual(10, size_in_gb)
class ManageUnmanage_Snap_test(volume_helper.MStorageDSVDriver, test.TestCase):
    """Tests for manage/unmanage snapshot support in the NEC M-Series driver."""

    # Skip creation of the ismview directory: setUp must not touch the
    # filesystem.
    @mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
                '_create_ismview_dir', new=mock.Mock())
    def setUp(self):
        super(ManageUnmanage_Snap_test, self).setUp()
        self._set_config(conf.Configuration(None), 'dummy', 'dummy')
        self.do_setup(None)
        # Pool 0 is the volume pool; pool 1 is the backup (snapshot) pool.
        self._properties['pool_pools'] = {0}
        self._properties['pool_backup_pools'] = {1}

    def test_is_manageable_snapshot(self):
        # Manageable: SV in the backup pool, any Purpose.
        ld_ok_sv1 = {'pool_num': 1, 'RPL Attribute': 'SV', 'Purpose': 'INV'}
        ld_ok_sv2 = {'pool_num': 1, 'RPL Attribute': 'SV', 'Purpose': '---'}
        # Not manageable: SV outside the backup pool, or any non-SV role.
        ld_ng_pool = {'pool_num': 0, 'RPL Attribute': 'SV', 'Purpose': 'INV'}
        ld_ng_rpl1 = {'pool_num': 1, 'RPL Attribute': 'MV', 'Purpose': 'INV'}
        ld_ng_rpl2 = {'pool_num': 1, 'RPL Attribute': 'RV', 'Purpose': 'INV'}
        ld_ng_rpl3 = {'pool_num': 1, 'RPL Attribute': 'IV', 'Purpose': '---'}
        ld_ng_rpl4 = {'pool_num': 1, 'RPL Attribute': 'BV', 'Purpose': 'INV'}
        self.assertTrue(self._is_manageable_snapshot(ld_ok_sv1))
        self.assertTrue(self._is_manageable_snapshot(ld_ok_sv2))
        self.assertFalse(self._is_manageable_snapshot(ld_ng_pool))
        self.assertFalse(self._is_manageable_snapshot(ld_ng_rpl1))
        self.assertFalse(self._is_manageable_snapshot(ld_ng_rpl2))
        self.assertFalse(self._is_manageable_snapshot(ld_ng_rpl3))
        self.assertFalse(self._is_manageable_snapshot(ld_ng_rpl4))

    @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
                'view_all', patch_view_all)
    def test_get_manageable_snapshots(self):
        # Stub the BV lookup so no CLI is invoked.
        mock_getbvname = mock.Mock()
        self._cli.get_bvname = mock_getbvname
        self._cli.get_bvname.return_value = "yEUHrXa5AHMjOZZLb93eP"
        current_snapshots = []
        volumes = self.get_manageable_snapshots(current_snapshots, None,
                                                100, 0, ['reference'], ['asc'])
        self.assertEqual('LX:4T7JpyqI3UuPlKeT9D3VQF',
                         volumes[0]['reference']['source-name'])

    @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
                'view_all', patch_view_all)
    def test_manage_existing_snapshot(self):
        # Capture the backend rename instead of executing it.
        mock_rename = mock.Mock()
        self._cli.changeldname = mock_rename
        self.newsnap = DummyVolume()
        self.newsnap.id = "46045673-41e7-44a7-9333-02f07feab04b"
        self.newsnap.volume_id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
        mock_getbvname = mock.Mock()
        self._cli.get_bvname = mock_getbvname
        self._cli.get_bvname.return_value = "yEUHrXa5AHMjOZZLb93eP"

        current_snapshots = []
        snaps = self.get_manageable_snapshots(current_snapshots, None,
                                              100, 0, ['reference'], ['asc'])
        # Success path: the SV is renamed to the cinder-derived name.
        self.manage_existing_snapshot(self.newsnap, snaps[0]['reference'])
        self._cli.changeldname.assert_called_once_with(
            None,
            'LX:287RbQoP7VdwR1WsPC2fZT',
            'LX:4T7JpyqI3UuPlKeT9D3VQF')

        # Snapshot whose volume_id does not match the SV's base volume.
        self.newsnap.volume_id = "AAAAAAAA"
        with self.assertRaisesRegex(exception.ManageExistingInvalidReference,
                                    'Snapshot source is unmatch.'):
            self.manage_existing_snapshot(self.newsnap, snaps[0]['reference'])

        # SV living outside the backup pool is a type mismatch.
        self._cli.get_bvname.return_value = "2000000991020012000C"
        self.newsnap.volume_id = "00046058-d38e-7f60-67b7-59ed6422520c"
        snap = {'source-name': ' :2000000991020012000B'}
        with self.assertRaisesRegex(exception.ManageExistingVolumeTypeMismatch,
                                    'Volume type is unmatched.'):
            self.manage_existing_snapshot(self.newsnap, snap)

    @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
                'view_all', patch_view_all)
    def test_manage_existing_snapshot_get_size(self):
        self.newsnap = DummyVolume()
        self.newsnap.id = "46045673-41e7-44a7-9333-02f07feab04b"
        mock_getbvname = mock.Mock()
        self._cli.get_bvname = mock_getbvname
        self._cli.get_bvname.return_value = "yEUHrXa5AHMjOZZLb93eP"
        current_snapshots = []
        snaps = self.get_manageable_snapshots(current_snapshots, None,
                                              100, 0, ['reference'], ['asc'])
        size_in_gb = self.manage_existing_snapshot_get_size(
            self.newsnap,
            snaps[0]['reference'])
        self.assertEqual(6, size_in_gb)

View File

@ -554,6 +554,17 @@ class MStorageISMCLI(object):
LOG.debug('snap/state:%s.', query_status)
return query_status
def get_bvname(self, svname):
    """Return the base volume (BV) name backing the given snapshot volume.

    Queries the storage with iSMsc_query for the SV (the 3-character
    type prefix, e.g. 'LX:', is stripped from *svname* first) and
    filters the summary output down to the 'LD Name' line.

    :param svname: prefixed SV name, e.g. 'LX:4T7JpyqI3UuPlKeT9D3VQF'
    :returns: the BV's LD name as a stripped string
    """
    # NOTE(review): the pipeline uses bash-only constructs ([[ ]] and
    # `builtin`) — assumes self._execute runs the command under bash;
    # TODO confirm.
    cmd = ('iSMsc_query -sv %s -svflg ld -summary | '
           'while builtin read line;do '
           'if [[ "$line" =~ "LD Name" ]]; '
           'then builtin echo "$line";fi;done'
           % svname[3:])
    out, err, status = self._execute(cmd)
    # Assumes the BV name occupies fixed columns 15..38 of the matched
    # 'LD Name' line — TODO confirm against iSMsc_query output format.
    query_status = out[15:39].strip()
    return query_status
def set_io_limit(self, ldname, specs, force_delete=True):
if specs['upperlimit'] is not None:
upper = int(specs['upperlimit'], 10)

View File

@ -876,6 +876,15 @@ class MStorageVolumeCommon(object):
else:
specs['upperreport'] = None
def check_accesscontrol(self, ldsets, ld):
    """Check Logical disk is in-use or not.

    :param ldsets: mapping of LD set name to LD set info dicts; each
        info dict carries the member LDs under the 'lds' key
    :param ld: logical-disk info dict carrying its number under 'ldn'
    :returns: True if the LD belongs to any LD set, else False
    """
    ldn = ld['ldn']
    return any(ldn in ldset['lds'] for ldset in ldsets.values())
def validates_number(self, value):
    """Return True if *value* looks like a plain decimal number.

    Accepts an optional sign and an optional fractional part; the
    negative lookahead rejects a signed zero ('+0', '-0', '-000').
    Non-string inputs are stringified before matching.

    :param value: candidate number (any type; converted via '%s')
    :returns: bool
    """
    # bool() replaces the legacy `... and True or False` idiom, which
    # obscured the intent without changing the result.
    return bool(re.match(
        r'^(?![-+]0+$)[-+]?([1-9][0-9]*)?[0-9](\.[0-9]+)?$',
        '%s' % value))

View File

@ -15,6 +15,7 @@
# under the License.
import random
import re
import traceback
from oslo_log import log as logging
@ -26,6 +27,7 @@ from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.nec import cli
from cinder.volume.drivers.nec import volume_common
from cinder.volume import utils as volutils
LOG = logging.getLogger(__name__)
@ -1450,6 +1452,206 @@ class MStorageDriver(volume_common.MStorageVolumeCommon):
self._cli.unbind(ldname)
LOG.debug('LD unbound. Name=%s.', ldname)
def _is_manageable_volume(self, ld):
if ld['RPL Attribute'] == '---':
return False
if ld['Purpose'] != '---' and 'BV' not in ld['RPL Attribute']:
return False
if ld['pool_num'] not in self._properties['pool_pools']:
return False
return True
def _is_manageable_snapshot(self, ld):
if ld['RPL Attribute'] == '---':
return False
if 'SV' not in ld['RPL Attribute']:
return False
if ld['pool_num'] not in self._properties['pool_backup_pools']:
return False
return True
def _reference_to_ldname(self, resource_type, volume, existing_ref):
    """Translate a manage-existing reference into backend LD names.

    :param resource_type: 'volume' or 'snapshot'; selects which naming
        format from self._properties applies
    :param volume: cinder volume/snapshot object (its id yields the
        name the LD should end up with)
    :param existing_ref: dict whose 'source-name' identifies the LD
    :returns: tuple (id_name, ref_name) — the cinder-derived LD name
        and the LD name resolved from the reference
    """
    fmt = (self._properties['ld_name_format']
           if resource_type == 'volume'
           else self._properties['ld_backupname_format'])
    id_name = self.get_ldname(volume.id, fmt)
    ref_name = existing_ref['source-name']
    # A reference may carry a raw UUID instead of an LD name; if so,
    # derive the LD name from the UUID with the same format.
    uuid_match = re.search(
        r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}',
        ref_name)
    if uuid_match:
        ref_name = self.get_ldname(uuid_match.group(0), fmt)
    return id_name, ref_name
def _get_manageable_resources(self, resource_type, cinder_volumes, marker,
                              limit, offset, sort_keys, sort_dirs):
    """Build the paginated list of backend LDs manageable by Cinder.

    :param resource_type: 'volume' or 'snapshot'; selects the
        eligibility predicate applied to each LD
    :param cinder_volumes: resources already known to cinder (dicts
        with at least an 'id' key)
    :param marker/limit/offset/sort_keys/sort_dirs: pagination
        parameters, passed through to volutils.paginate_entries_list
    :returns: paginated list of manageable-resource dicts
    """
    entries = []
    xml = self._cli.view_all(self._properties['ismview_path'])
    pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
        self.configs(xml))
    cinder_ids = [resource['id'] for resource in cinder_volumes]

    for ld in lds.values():
        # Skip LDs that fail the per-type eligibility check.
        if ((resource_type == 'volume' and
             not self._is_manageable_volume(ld)) or
                (resource_type == 'snapshot' and
                 not self._is_manageable_snapshot(ld))):
            continue

        ld_info = {'reference': {'source-name': ld['ldname']},
                   'size': ld['ld_capacity'],
                   'cinder_id': None,
                   'extra_info': None}

        # ldname[3:] drops the 3-character type prefix before mapping
        # the name back to a candidate cinder id.
        potential_id = volume_common.convert_to_id(ld['ldname'][3:])
        if potential_id in cinder_ids:
            # Already tracked by cinder.
            ld_info['safe_to_manage'] = False
            ld_info['reason_not_safe'] = 'already managed'
            ld_info['cinder_id'] = potential_id
        elif self.check_accesscontrol(ldsets, ld):
            # Attached to an LD set, i.e. exported to a host.
            ld_info['safe_to_manage'] = False
            ld_info['reason_not_safe'] = '%s in use' % resource_type
        else:
            ld_info['safe_to_manage'] = True
            ld_info['reason_not_safe'] = None

        if resource_type == 'snapshot':
            # Snapshots also report their base volume as the source.
            bvname = self._cli.get_bvname(ld['ldname'])
            bv_id = volume_common.convert_to_id(bvname)
            ld_info['source_reference'] = {'source-name': bv_id}

        entries.append(ld_info)

    return volutils.paginate_entries_list(entries, marker, limit, offset,
                                          sort_keys, sort_dirs)
def _manage_existing_get_size(self, resource_type, volume, existing_ref):
    """Resolve the referenced LD and return its capacity.

    :param resource_type: 'volume' or 'snapshot'
    :param volume: cinder volume/snapshot object
    :param existing_ref: dict that must contain 'source-name'
    :returns: LD capacity as reported by the backend
    :raises exception.ManageExistingInvalidReference: when the
        reference is malformed or names a nonexistent LD
    """
    if 'source-name' not in existing_ref:
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref,
            reason=_('Reference must contain source-name element.'))

    xml = self._cli.view_all(self._properties['ismview_path'])
    pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
        self.configs(xml))
    id_name, ref_name = self._reference_to_ldname(
        resource_type, volume, existing_ref)
    if ref_name not in lds:
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref,
            reason=_('Specified resource does not exist.'))
    return lds[ref_name]['ld_capacity']
def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
                           sort_keys, sort_dirs):
    """List volumes on the backend available for management by Cinder.

    Thin wrapper delegating to _get_manageable_resources with the
    'volume' eligibility rules.
    """
    LOG.debug('get_manageable_volumes Start.')
    return self._get_manageable_resources(
        'volume', cinder_volumes, marker, limit, offset,
        sort_keys, sort_dirs)
def manage_existing(self, volume, existing_ref):
    """Brings an existing backend storage object under Cinder management.

    Rename the backend storage object so that it matches the
    volume['name'] which is how drivers traditionally map between a
    cinder volume and the associated backend storage object.

    :param volume: cinder volume taking over the backend LD
    :param existing_ref: dict whose 'source-name' identifies the LD
    :raises exception.ManageExistingInvalidReference: when the LD does
        not exist or is already attached to an LD set
    :raises exception.ManageExistingVolumeTypeMismatch: when the LD is
        not in a configured volume pool
    """
    LOG.debug('manage_existing Start.')

    xml = self._cli.view_all(self._properties['ismview_path'])
    pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
        self.configs(xml))

    newname, oldname = self._reference_to_ldname('volume',
                                                 volume,
                                                 existing_ref)
    # Guard: a reference to a nonexistent LD previously surfaced as a
    # bare KeyError on lds[oldname]; report it as an invalid reference.
    if oldname not in lds:
        reason = _('Specified resource does not exist.')
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref, reason=reason)
    if self.check_accesscontrol(ldsets, lds[oldname]):
        reason = _('Specified resource is already in-use.')
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref, reason=reason)
    if lds[oldname]['pool_num'] not in self._properties['pool_pools']:
        reason = _('Volume type is unmatched.')
        raise exception.ManageExistingVolumeTypeMismatch(
            existing_ref=existing_ref, reason=reason)

    try:
        self._cli.changeldname(None, newname, oldname)
    except exception.CinderException as e:
        # Best effort: a rename failure is logged, not propagated.
        LOG.warning('Unable to manage existing volume '
                    '(reference = %(ref)s), (%(exception)s)',
                    {'ref': existing_ref['source-name'], 'exception': e})
    return
def manage_existing_get_size(self, volume, existing_ref):
    """Return size of volume to be managed by manage_existing."""
    LOG.debug('manage_existing_get_size Start.')
    return self._manage_existing_get_size(
        'volume', volume, existing_ref)
def unmanage(self, volume):
    """Removes the specified volume from Cinder management.

    The LD itself is left untouched on the backend; nothing to do.
    """
def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,
                             sort_keys, sort_dirs):
    """List snapshots on the backend available for management by Cinder.

    Thin wrapper delegating to _get_manageable_resources with the
    'snapshot' eligibility rules.
    """
    LOG.debug('get_manageable_snapshots Start.')
    return self._get_manageable_resources(
        'snapshot', cinder_snapshots, marker, limit, offset,
        sort_keys, sort_dirs)
def manage_existing_snapshot(self, snapshot, existing_ref):
    """Brings an existing backend storage object under Cinder management.

    Rename the backend storage object so that it matches the
    snapshot['name'] which is how drivers traditionally map between a
    cinder snapshot and the associated backend storage object.

    :param snapshot: cinder snapshot taking over the backend SV
    :param existing_ref: dict whose 'source-name' identifies the SV
    :raises exception.ManageExistingInvalidReference: when the SV does
        not exist or its base volume differs from snapshot.volume_id
    :raises exception.ManageExistingVolumeTypeMismatch: when the SV is
        not in a configured backup pool
    """
    # Fix: the message previously read 'manage_existing_snapshots',
    # which names no method in this driver.
    LOG.debug('manage_existing_snapshot Start.')

    xml = self._cli.view_all(self._properties['ismview_path'])
    pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
        self.configs(xml))

    newname, oldname = self._reference_to_ldname('snapshot',
                                                 snapshot,
                                                 existing_ref)
    # Guard: a reference to a nonexistent LD previously surfaced as a
    # bare KeyError on lds[oldname]; report it as an invalid reference.
    if oldname not in lds:
        reason = _('Specified resource does not exist.')
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref, reason=reason)
    # The referenced SV must actually derive from the volume the cinder
    # snapshot points at.
    param_source = self.get_ldname(snapshot.volume_id,
                                   self._properties['ld_name_format'])
    ref_source = self._cli.get_bvname(oldname)
    if param_source[3:] != ref_source:
        reason = _('Snapshot source is unmatched.')
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref, reason=reason)
    if (lds[oldname]['pool_num']
            not in self._properties['pool_backup_pools']):
        reason = _('Volume type is unmatched.')
        raise exception.ManageExistingVolumeTypeMismatch(
            existing_ref=existing_ref, reason=reason)

    try:
        self._cli.changeldname(None, newname, oldname)
    except exception.CinderException as e:
        # Best effort: a rename failure is logged, not propagated.
        LOG.warning('Unable to manage existing snapshot '
                    '(reference = %(ref)s), (%(exception)s)',
                    {'ref': existing_ref['source-name'], 'exception': e})
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
    """Return size of snapshot to be managed by manage_existing."""
    LOG.debug('manage_existing_snapshot_get_size Start.')
    return self._manage_existing_get_size(
        'snapshot', snapshot, existing_ref)
def unmanage_snapshot(self, snapshot):
    """Removes the specified snapshot from Cinder management.

    The SV itself is left untouched on the backend; nothing to do.
    """
class MStorageDSVDriver(MStorageDriver):
"""M-Series Storage Snapshot helper class."""
@ -1522,7 +1724,7 @@ class MStorageDSVDriver(MStorageDriver):
ldname = self.get_ldname(snapshot.volume_id,
self._properties['ld_name_format'])
if ldname not in lds:
LOG.debug('LD(MV) `%s` already unbound?', ldname)
LOG.debug('LD(BV) `%s` already unbound?', ldname)
return
# get SV name.

View File

@ -0,0 +1,4 @@
---
features:
  - Added support for the manage/unmanage volume and manage/unmanage
    snapshot operations in the NEC volume driver.