DS8K driver: specify pool and lss in extra-specs

The DS8K driver needs two new properties in extra-specs, so that
users can specify a pool, an LSS, or both, to allocate volumes in
their expected area.

The driver also needs to verify the pool and LSS that the user
sets, to guard against an invalid pool or LSS.
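
For illustration, a minimal sketch of a volume type carrying the
two new extra-specs, mirroring the unit tests in this change
(ctxt and the ID values are placeholders):

    from cinder.volume import volume_types

    # both properties accept comma-separated IDs; either may be omitted
    vol_type = volume_types.create(ctxt, 'VOL_TYPE', {
        'drivers:storage_pool_ids': 'P0',  # extent pool ID(s)
        'drivers:storage_lss_ids': '20'    # LSS ID(s), each within 00-FF
    })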

DocImpact
Implements: blueprint specify-pool-lss
Change-Id: Ibd98fa3246118aa6ad5d86ecbfe46ae78de87716
Jia Min 2017-02-19 22:32:49 -07:00
parent c7f15a4fa5
commit b401355c6f
5 changed files with 612 additions and 199 deletions


@@ -50,6 +50,8 @@ TEST_LUN_ID = '00'
TEST_POOLS_STR = 'P0,P1'
TEST_POOL_ID_1 = 'P0'
TEST_POOL_ID_2 = 'P1'
TEST_POOL_NAME_1 = 'OPENSTACK_DEV_0'
TEST_POOL_NAME_2 = 'OPENSTACK_DEV_1'
TEST_SOURCE_DS8K_IP = '1.1.1.1'
TEST_TARGET_DS8K_IP = '2.2.2.2'
TEST_SOURCE_WWNN = '5000000000FFC111'
@@ -67,6 +69,7 @@ TEST_PPRC_PATH_ID_2 = (TEST_TARGET_WWNN + "_" + TEST_LSS_ID_1 + ":" +
TEST_SOURCE_WWNN + "_" + TEST_LSS_ID_1)
TEST_ECKD_VOLUME_ID = '1001'
TEST_ECKD_POOL_ID = 'P10'
TEST_ECKD_POOL_NAME = 'OPENSTACK_DEV_10'
TEST_LCU_ID = '10'
TEST_ECKD_PPRC_PATH_ID = (TEST_SOURCE_WWNN + "_" + TEST_LCU_ID + ":" +
TEST_TARGET_WWNN + "_" + TEST_LCU_ID)
@@ -426,7 +429,7 @@ FAKE_GET_POOL_RESPONSE_1 = {
[
{
"id": TEST_POOL_ID_1,
"name": "P0_OpenStack",
"name": TEST_POOL_NAME_1,
"node": "0",
"stgtype": "fb",
"cap": "10737418240",
@@ -448,7 +451,7 @@ FAKE_GET_POOL_RESPONSE_2 = {
[
{
"id": TEST_POOL_ID_2,
"name": "P1_OpenStack",
"name": TEST_POOL_NAME_2,
"node": "1",
"stgtype": "fb",
"cap": "10737418240",
@@ -470,7 +473,7 @@ FAKE_GET_ECKD_POOL_RESPONSE = {
[
{
"id": TEST_ECKD_POOL_ID,
"name": "P10_OpenStack",
"name": TEST_ECKD_POOL_NAME,
"node": "0",
"stgtype": "ckd",
"cap": "10737418240",
@@ -1237,7 +1240,7 @@ class DS8KProxyTest(test.TestCase):
"""create volume should choose biggest pool."""
self.configuration.san_clustername = TEST_POOLS_STR
cmn_helper = FakeDS8KCommonHelper(self.configuration, None)
pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, None)
pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, set())
self.assertEqual(TEST_POOL_ID_1, pool_id)
@mock.patch.object(helper.DS8KCommonHelper, 'get_all_lss')
@@ -1251,7 +1254,7 @@ class DS8KProxyTest(test.TestCase):
"configvols": "0"
}]
cmn_helper = FakeDS8KCommonHelper(self.configuration, None)
pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, None)
pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, set())
self.assertNotEqual(TEST_LSS_ID_1, lss_id)
@mock.patch.object(helper.DS8KCommonHelper, 'get_all_lss')
@@ -1266,7 +1269,7 @@ class DS8KProxyTest(test.TestCase):
"configvols": "0"
}]
cmn_helper = FakeDS8KCommonHelper(self.configuration, None)
pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, None)
pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, set())
self.assertEqual(TEST_LSS_ID_2, lss_id)
@mock.patch.object(helper.DS8KCommonHelper, 'get_all_lss')
@@ -1296,7 +1299,7 @@ class DS8KProxyTest(test.TestCase):
}
]
cmn_helper = FakeDS8KCommonHelper(self.configuration, None)
pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, None)
pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, set())
self.assertEqual(TEST_LSS_ID_2, lss_id)
@mock.patch.object(helper.DS8KCommonHelper, 'get_all_lss')
@@ -1312,7 +1315,7 @@ class DS8KProxyTest(test.TestCase):
"configvols": "256"
}]
cmn_helper = FakeDS8KCommonHelper(self.configuration, None)
pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, None)
pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, set())
self.assertTrue(mock_find_lss.called)
@mock.patch.object(helper.DS8KCommonHelper, '_find_lss')
@@ -1488,6 +1491,68 @@ class DS8KProxyTest(test.TestCase):
ast.literal_eval(vol['provider_location'])['vol_hex_id'])
self.assertEqual('050 FB 520UV', vol['metadata']['data_type'])
def test_create_volume_when_specify_area(self):
"""create volume and put it in specific pool and lss."""
self.driver = FakeDS8KProxy(self.storage_info, self.logger,
self.exception, self)
self.driver.setup(self.ctxt)
vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {
'drivers:storage_pool_ids': TEST_POOL_ID_1,
'drivers:storage_lss_ids': TEST_LSS_ID_1
})
volume = self._create_volume(volume_type_id=vol_type.id)
lun = ds8kproxy.Lun(volume)
pool, lss = self.driver._find_pool_lss_pair_from_spec(lun, set())
self.assertEqual(TEST_POOL_ID_1, pool)
self.assertEqual(TEST_LSS_ID_1, lss)
def test_create_volume_only_specify_lss(self):
"""create volume and put it in specific lss."""
self.driver = FakeDS8KProxy(self.storage_info, self.logger,
self.exception, self)
self.driver.setup(self.ctxt)
vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {
'drivers:storage_lss_ids': TEST_LSS_ID_1
})
volume = self._create_volume(volume_type_id=vol_type.id)
lun = ds8kproxy.Lun(volume)
pool, lss = self.driver._find_pool_lss_pair_from_spec(lun, set())
# if no pool is specified, choose from pools set in the configuration file.
self.assertTrue(pool in self.configuration.san_clustername.split(','))
self.assertEqual(TEST_LSS_ID_1, lss)
def test_create_volume_only_specify_pool(self):
"""create volume and put it in specific pool."""
self.driver = FakeDS8KProxy(self.storage_info, self.logger,
self.exception, self)
self.driver.setup(self.ctxt)
vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {
'drivers:storage_pool_ids': TEST_POOL_ID_1
})
volume = self._create_volume(volume_type_id=vol_type.id)
lun = ds8kproxy.Lun(volume)
pool, lss = self.driver._find_pool_lss_pair_from_spec(lun, set())
self.assertEqual(TEST_POOL_ID_1, pool)
def test_create_volume_but_specify_wrong_lss_id(self):
"""create volume, but specify a wrong lss id."""
self.driver = FakeDS8KProxy(self.storage_info, self.logger,
self.exception, self)
self.driver.setup(self.ctxt)
vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {
'drivers:storage_pool_ids': TEST_POOL_ID_1,
'drivers:storage_lss_ids': '100'
})
volume = self._create_volume(volume_type_id=vol_type.id)
lun = ds8kproxy.Lun(volume)
self.assertRaises(exception.InvalidParameterValue,
self.driver._find_pool_lss_pair_from_spec,
lun, set())
@mock.patch.object(helper.DS8KCommonHelper, '_create_lun')
def test_create_eckd_volume(self, mock_create_lun):
"""create volume which type is ECKD."""
@@ -2185,8 +2250,8 @@ class DS8KProxyTest(test.TestCase):
@mock.patch.object(eventlet, 'sleep')
@mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy')
def test_retype_from_thin_and_replicated_to_thick(self, mock_get_flashcopy,
mock_sleep):
def test_retype_thin_replicated_vol_to_thick_vol(self, mock_get_flashcopy,
mock_sleep):
"""retype from thin-provision and replicated to thick-provision."""
self.configuration.replication_device = [TEST_REPLICATION_DEVICE]
self.driver = FakeDS8KProxy(self.storage_info, self.logger,
@@ -2217,7 +2282,10 @@ class DS8KProxyTest(test.TestCase):
self.ctxt, volume, new_type, diff, host)
self.assertTrue(retyped)
def test_retype_replicated_volume_from_thin_to_thick(self):
@mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy')
@mock.patch.object(eventlet, 'sleep')
def test_retype_replicated_volume_from_thin_to_thick(self, mock_sleep,
mock_get_flashcopy):
"""retype replicated volume from thin-provision to thick-provision."""
self.configuration.replication_device = [TEST_REPLICATION_DEVICE]
self.driver = FakeDS8KProxy(self.storage_info, self.logger,
@@ -2243,9 +2311,136 @@ class DS8KProxyTest(test.TestCase):
provider_location=location,
replication_driver_data=data)
self.assertRaises(exception.CinderException, self.driver.retype,
mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}]
retyped, retype_model_update = self.driver.retype(
self.ctxt, volume, new_type, diff, host)
self.assertTrue(retyped)
@mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy')
@mock.patch.object(helper.DS8KCommonHelper, 'get_lun_pool')
@mock.patch.object(eventlet, 'sleep')
def test_retype_thin_vol_to_thick_vol_in_specific_area(
self, mock_sleep, mock_get_lun_pool, mock_get_flashcopy):
"""retype thin volume to thick volume located in specific area."""
self.driver = FakeDS8KProxy(self.storage_info, self.logger,
self.exception, self)
self.driver.setup(self.ctxt)
new_type = {}
diff = {
'encryption': {},
'qos_specs': {},
'extra_specs': {
'drivers:thin_provision': ('True', 'False'),
'drivers:storage_pool_ids': (None, TEST_POOL_ID_1),
'drivers:storage_lss_ids': (None, TEST_LSS_ID_1)
}
}
host = None
vol_type = volume_types.create(self.ctxt, 'VOL_TYPE',
{'drivers:thin_provision': 'False'})
location = six.text_type({'vol_hex_id': '0400'})
volume = self._create_volume(volume_type_id=vol_type.id,
provider_location=location)
mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}]
mock_get_lun_pool.return_value = {'id': TEST_POOL_ID_1}
retyped, retype_model_update = self.driver.retype(
self.ctxt, volume, new_type, diff, host)
location = ast.literal_eval(retype_model_update['provider_location'])
self.assertEqual(TEST_LSS_ID_1, location['vol_hex_id'][:2])
self.assertTrue(retyped)
@mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy')
@mock.patch.object(helper.DS8KCommonHelper, 'get_lun_pool')
@mock.patch.object(eventlet, 'sleep')
def test_retype_replicated_vol_to_vol_in_specific_area(
self, mock_sleep, mock_get_lun_pool, mock_get_flashcopy):
"""retype replicated volume to a specific area."""
self.configuration.replication_device = [TEST_REPLICATION_DEVICE]
self.driver = FakeDS8KProxy(self.storage_info, self.logger,
self.exception, self)
self.driver.setup(self.ctxt)
new_type = {}
diff = {
'encryption': {},
'qos_specs': {},
'extra_specs': {
'replication_enabled': ('<is> True', '<is> True'),
'drivers:storage_pool_ids': (None, TEST_POOL_ID_1),
'drivers:storage_lss_ids': (None, TEST_LSS_ID_1)
}
}
host = None
vol_type = volume_types.create(self.ctxt, 'VOL_TYPE',
{'replication_enabled': '<is> True'})
location = six.text_type({'vol_hex_id': '0400'})
volume = self._create_volume(volume_type_id=vol_type.id,
provider_location=location)
mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}]
mock_get_lun_pool.return_value = {'id': TEST_POOL_ID_1}
retyped, retype_model_update = self.driver.retype(
self.ctxt, volume, new_type, diff, host)
location = ast.literal_eval(retype_model_update['provider_location'])
self.assertEqual(TEST_LSS_ID_1, location['vol_hex_id'][:2])
self.assertTrue(retyped)
def test_retype_vol_in_specific_area_to_another_area(self):
"""retype volume from a specific area to another area."""
self.driver = FakeDS8KProxy(self.storage_info, self.logger,
self.exception, self)
self.driver.setup(self.ctxt)
new_type = {}
diff = {
'encryption': {},
'qos_specs': {},
'extra_specs': {
'drivers:storage_pool_ids': (TEST_POOL_ID_1, TEST_POOL_ID_2),
'drivers:storage_lss_ids': (TEST_LSS_ID_1, TEST_LSS_ID_2)
}
}
host = None
vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {
'drivers:storage_pool_ids': TEST_POOL_ID_1,
'drivers:storage_lss_ids': TEST_LSS_ID_1})
location = six.text_type({'vol_hex_id': TEST_VOLUME_ID})
volume = self._create_volume(volume_type_id=vol_type.id,
provider_location=location)
self.assertRaises(exception.VolumeDriverException,
self.driver.retype,
self.ctxt, volume, new_type, diff, host)
def test_migrate_replicated_volume(self):
"""migrate replicated volume should be failed."""
self.driver = FakeDS8KProxy(self.storage_info, self.logger,
self.exception, self)
self.driver.setup(self.ctxt)
self.driver._update_stats()
vol_type = volume_types.create(self.ctxt, 'VOL_TYPE',
{'replication_enabled': '<is> True'})
location = six.text_type({'vol_hex_id': TEST_VOLUME_ID})
data = json.dumps(
{TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}})
volume = self._create_volume(volume_type_id=vol_type.id,
provider_location=location,
replication_driver_data=data)
backend = {
'host': 'host@backend#pool_id',
'capabilities': {
'extent_pools': TEST_POOL_ID_1,
'serial_number': TEST_SOURCE_SYSTEM_UNIT,
'vendor_name': 'IBM',
'storage_protocol': 'fibre_channel'
}
}
self.assertRaises(exception.VolumeDriverException,
self.driver.migrate_volume,
self.ctxt, volume, backend)
def test_migrate_and_try_pools_in_same_rank(self):
"""migrate volume and try pool in same rank."""
self.driver = FakeDS8KProxy(self.storage_info, self.logger,


@@ -14,6 +14,7 @@
# under the License.
#
import collections
import copy
import distutils.version as dist_version # pylint: disable=E0611
import eventlet
import math
@@ -70,6 +71,7 @@ class DS8KCommonHelper(object):
self._storage_pools = None
self._disable_thin_provision = False
self._connection_type = self._get_value('connection_type')
self._existing_lss = None
self.backend = {}
self.setup()
@@ -107,9 +109,10 @@ class DS8KCommonHelper(object):
self._get_storage_information()
self._check_host_type()
self.backend['pools_str'] = self._get_value('san_clustername')
self._storage_pools = self.get_pools()
self.verify_pools(self._storage_pools)
self._get_lss_ids_for_cg()
self._verify_version()
self._verify_pools()
def update_client(self):
self._client.close()
@@ -202,7 +205,7 @@ class DS8KCommonHelper(object):
% {'invalid': self.backend['rest_version'],
'valid': self.VALID_REST_VERSION_5_7_MIN}))
def _verify_pools(self):
def verify_pools(self, storage_pools):
if self._connection_type == storage.XIV_CONNECTION_TYPE_FC:
ptype = 'fb'
elif self._connection_type == storage.XIV_CONNECTION_TYPE_FC_ECKD:
@@ -210,30 +213,30 @@ class DS8KCommonHelper(object):
else:
raise exception.InvalidParameterValue(
err=_('Param [connection_type] is invalid.'))
self._storage_pools = self.get_pools()
for pid, p in self._storage_pools.items():
if p['stgtype'] != ptype:
for pid, pool in storage_pools.items():
if pool['stgtype'] != ptype:
LOG.error('The stgtype of pool %(pool)s is %(ptype)s.',
{'pool': pid, 'ptype': p['stgtype']})
{'pool': pid, 'ptype': pool['stgtype']})
raise exception.InvalidParameterValue(
err='Param [san_clustername] is invalid.')
@proxy.logger
def get_pools(self, new_pools=None):
if new_pools is None:
pools_str = self.backend['pools_str']
def get_pools(self, specific_pools=None):
if specific_pools:
pools_str = specific_pools.replace(' ', '').upper().split(',')
else:
pools_str = new_pools
pools_str = pools_str.replace(' ', '').upper().split(',')
pools_str = self.backend['pools_str'].replace(
' ', '').upper().split(',')
pools = self._get_pools(pools_str)
unsorted_pools = self._format_pools(pools)
storage_pools = collections.OrderedDict(sorted(
unsorted_pools, key=lambda i: i[1]['capavail'], reverse=True))
if new_pools is None:
self._storage_pools = storage_pools
return storage_pools
@proxy.logger
def update_storage_pools(self, storage_pools):
self._storage_pools = storage_pools
def _format_pools(self, pools):
return ((p['id'], {
'name': p['name'],
@@ -243,6 +246,34 @@ class DS8KCommonHelper(object):
'capavail': int(p['capavail'])
}) for p in pools)
def verify_lss_ids(self, specified_lss_ids):
if not specified_lss_ids:
return None
lss_ids = specified_lss_ids.upper().replace(' ', '').split(',')
# verify LSS IDs.
for lss_id in lss_ids:
if int(lss_id, 16) > 255:
raise exception.InvalidParameterValue(
_('LSS %s should be within 00-FF.') % lss_id)
# verify address group
self._existing_lss = self.get_all_lss()
ckd_addrgrps = set(int(lss['id'], 16) // 16 for lss in
self._existing_lss if lss['type'] == 'ckd')
fb_addrgrps = set((int(lss, 16) // 16) for lss in lss_ids)
intersection = ckd_addrgrps & fb_addrgrps
if intersection:
raise exception.VolumeDriverException(
message=_('LSSes in the address group %s are reserved '
'for CKD volumes') % list(intersection))
# verify whether the specified LSSs have been reserved for
# consistency group or not.
if self.backend['lss_ids_for_cg']:
for lss_id in lss_ids:
if lss_id in self.backend['lss_ids_for_cg']:
raise exception.InvalidParameterValue(
_('LSS %s has been reserved for CG.') % lss_id)
return lss_ids
@proxy.logger
def find_pool_lss_pair(self, pool, find_new_pid, excluded_lss):
if pool:
@@ -259,35 +290,75 @@ class DS8KCommonHelper(object):
return self.find_biggest_pool_and_lss(excluded_lss)
@proxy.logger
def find_biggest_pool_and_lss(self, excluded_lss):
def find_biggest_pool_and_lss(self, excluded_lss, specified_pool_lss=None):
if specified_pool_lss:
# pool and lss should be verified every time a user creates a volume
# or snapshot, because they can be changed in extra-specs at any time.
specified_pool_ids, specified_lss_ids = specified_pool_lss
storage_pools = self.get_pools(specified_pool_ids)
self.verify_pools(storage_pools)
storage_lss = self.verify_lss_ids(specified_lss_ids)
else:
storage_pools, storage_lss = self._storage_pools, None
# pools are ordered by capacity
for pool_id, pool in self._storage_pools.items():
lss = self._find_lss(pool['node'], excluded_lss)
for pool_id, pool in storage_pools.items():
lss = self._find_lss(pool['node'], excluded_lss, storage_lss)
if lss:
return pool_id, lss
raise restclient.LssIDExhaustError(
message=_("All LSS/LCU IDs for configured pools are exhausted."))
@proxy.logger
def _find_lss(self, node, excluded_lss):
fileds = ['id', 'type', 'addrgrp', 'group', 'configvols']
existing_lss = self.get_all_lss(fileds)
LOG.info("existing LSS IDs are: %s.",
def _find_lss(self, node, excluded_lss, specified_lss_ids=None):
if specified_lss_ids:
existing_lss = self._existing_lss
else:
existing_lss = self.get_all_lss()
LOG.info("Existing LSS IDs are: %s.",
','.join([lss['id'] for lss in existing_lss]))
existing_lss_cg, nonexistent_lss_cg = (
self._classify_lss_for_cg(existing_lss))
saved_existing_lss = copy.copy(existing_lss)
# exclude LSSs that are full.
if excluded_lss:
existing_lss = [lss for lss in existing_lss
if lss['id'] not in excluded_lss]
# exclude LSSs that are reserved for CG.
candidates = [lss for lss in existing_lss
if lss['id'] not in existing_lss_cg]
lss = self._find_from_existing_lss(node, candidates)
if not lss:
lss = self._find_from_nonexistent_lss(node, existing_lss,
nonexistent_lss_cg)
existing_lss = [lss for lss in existing_lss
if lss['id'] not in excluded_lss]
if not existing_lss:
LOG.info("All LSSs are full.")
return None
# the user specified LSSs in extra-specs.
if specified_lss_ids:
specified_lss_ids = [lss for lss in specified_lss_ids
if lss not in excluded_lss]
if specified_lss_ids:
existing_lss = [lss for lss in existing_lss
if lss['id'] in specified_lss_ids]
nonexistent_lss_ids = (set(specified_lss_ids) -
set(lss['id'] for lss in existing_lss))
lss = None
for lss_id in nonexistent_lss_ids:
if int(lss_id, 16) % 2 == node:
lss = lss_id
break
if not lss:
lss = self._find_from_existing_lss(
node, existing_lss, True)
else:
LOG.info("All appropriate LSSs specified are full.")
return None
else:
# exclude LSSs that are reserved for CG.
if self.backend['lss_ids_for_cg']:
existing_lss_cg, nonexistent_lss_cg = (
self._classify_lss_for_cg(existing_lss))
existing_lss = [lss for lss in existing_lss
if lss['id'] not in existing_lss_cg]
else:
existing_lss_cg = set()
nonexistent_lss_cg = set()
lss = self._find_from_existing_lss(node, existing_lss)
if not lss:
lss = self._find_from_nonexistent_lss(node, saved_existing_lss,
nonexistent_lss_cg)
return lss
def _classify_lss_for_cg(self, existing_lss):
@@ -296,12 +367,13 @@ class DS8KCommonHelper(object):
nonexistent_lss_cg = self.backend['lss_ids_for_cg'] - existing_lss_cg
return existing_lss_cg, nonexistent_lss_cg
def _find_from_existing_lss(self, node, existing_lss):
# exclude LSSs that are used by PPRC paths.
lss_in_pprc = self.get_lss_in_pprc_paths()
if lss_in_pprc:
existing_lss = [lss for lss in existing_lss
if lss['id'] not in lss_in_pprc]
def _find_from_existing_lss(self, node, existing_lss, ignore_pprc=False):
if not ignore_pprc:
# exclude LSSs that are used by PPRC paths.
lss_in_pprc = self.get_lss_in_pprc_paths()
if lss_in_pprc:
existing_lss = [lss for lss in existing_lss
if lss['id'] not in lss_in_pprc]
# exclude wrong type of LSSs and those that are not in expected node.
existing_lss = [lss for lss in existing_lss if lss['type'] == 'fb'
and int(lss['group']) == node]
@@ -317,18 +389,18 @@ class DS8KCommonHelper(object):
return lss_id
def _find_from_nonexistent_lss(self, node, existing_lss, lss_cg=None):
addrgrps = set(int(lss['addrgrp'], 16) for lss in existing_lss if
lss['type'] == 'ckd' and int(lss['group']) == node)
fulllss = set(int(lss['id'], 16) for lss in existing_lss if
lss['type'] == 'fb' and int(lss['group']) == node)
cglss = set(int(lss, 16) for lss in lss_cg) if lss_cg else set()
ckd_addrgrps = set(int(lss['id'], 16) // 16 for lss in existing_lss if
lss['type'] == 'ckd' and int(lss['group']) == node)
full_lss = set(int(lss['id'], 16) for lss in existing_lss if
lss['type'] == 'fb' and int(lss['group']) == node)
cg_lss = set(int(lss, 16) for lss in lss_cg) if lss_cg else set()
# look for an available lss from nonexistent lss
lss_id = None
for lss in range(node, LSS_SLOTS, 2):
addrgrp = lss // 16
if (addrgrp not in addrgrps and
lss not in fulllss and
lss not in cglss):
if (addrgrp not in ckd_addrgrps and
lss not in full_lss and
lss not in cg_lss):
lss_id = ("%02x" % lss).upper()
break
LOG.info('_find_from_nonexistent_lss: choose %s.', lss_id)
@@ -705,14 +777,17 @@ class DS8KCommonHelper(object):
self._client.send(
'POST', '/cs/flashcopies/unfreeze', {"lss_ids": lss_ids})
def get_all_lss(self, fields):
def get_all_lss(self, fields=None):
fields = (fields if fields else
['id', 'type', 'group', 'configvols'])
return self._client.fetchall('GET', '/lss', fields=fields)
def lun_exists(self, lun_id):
return self._client.statusok('GET', '/volumes/%s' % lun_id)
def get_lun(self, lun_id):
return self._client.fetchone('GET', '/volumes/%s' % lun_id)
def get_lun_pool(self, lun_id):
return self._client.fetchone(
'GET', '/volumes/%s' % lun_id, fields=['pool'])['pool']
def change_lun(self, lun_id, param):
self._client.send('PUT', '/volumes/%s' % lun_id, param)
@@ -795,8 +870,7 @@ class DS8KReplicationSourceHelper(DS8KCommonHelper):
@proxy.logger
def _find_lss_for_type_replication(self, node, excluded_lss):
# prefer to choose non-existing one first.
fileds = ['id', 'type', 'addrgrp', 'group', 'configvols']
existing_lss = self.get_all_lss(fileds)
existing_lss = self.get_all_lss()
LOG.info("existing LSS IDs are %s",
','.join([lss['id'] for lss in existing_lss]))
existing_lss_cg, nonexistent_lss_cg = (
@@ -825,8 +899,9 @@ class DS8KReplicationTargetHelper(DS8KReplicationSourceHelper):
self._check_host_type()
self.backend['pools_str'] = self._get_value(
'san_clustername').replace('_', ',')
self._storage_pools = self.get_pools()
self.verify_pools(self._storage_pools)
self._verify_version()
self._verify_pools()
def _get_replication_information(self):
port_pairs = []
@@ -845,8 +920,7 @@ class DS8KReplicationTargetHelper(DS8KReplicationSourceHelper):
@proxy.logger
def _find_lss_for_type_replication(self, node, excluded_lss):
# prefer to choose non-existing one first.
fileds = ['id', 'type', 'addrgrp', 'group', 'configvols']
existing_lss = self.get_all_lss(fileds)
existing_lss = self.get_all_lss()
LOG.info("existing LSS IDs are %s",
','.join([lss['id'] for lss in existing_lss]))
lss_id = self._find_from_nonexistent_lss(node, existing_lss)
@@ -927,11 +1001,12 @@ class DS8KECKDHelper(DS8KCommonHelper):
self._check_host_type()
self._get_lss_ids_for_cg()
self.backend['pools_str'] = self._get_value('san_clustername')
self._storage_pools = self.get_pools()
self.verify_pools(self._storage_pools)
ssid_prefix = self._get_value('ds8k_ssid_prefix')
self.backend['ssid_prefix'] = ssid_prefix if ssid_prefix else 'FF'
self.backend['device_mapping'] = self._check_and_verify_lcus()
self.backend['device_mapping'] = self._get_device_mapping()
self._verify_version()
self._verify_pools()
def _verify_version(self):
if self.backend['storage_version'] == self.INVALID_STORAGE_VERSION:
@@ -959,39 +1034,38 @@ class DS8KECKDHelper(DS8KCommonHelper):
in self.backend['rest_version'] else
self.VALID_REST_VERSION_5_8_MIN)}))
@proxy.logger
def _check_and_verify_lcus(self):
def _get_device_mapping(self):
map_str = self._get_value('ds8k_devadd_unitadd_mapping')
if not map_str:
raise exception.InvalidParameterValue(
err=_('Param [ds8k_devadd_unitadd_mapping] is not '
'provided, please provide the mapping between '
'IODevice address and unit address.'))
# verify the LCU
mappings = map_str.replace(' ', '').upper().split(';')
pairs = [m.split('-') for m in mappings]
dev_mapping = {p[1]: int(p[0], 16) for p in pairs}
for lcu in dev_mapping.keys():
self.verify_lss_ids(','.join([p[1] for p in pairs]))
return {p[1]: int(p[0], 16) for p in pairs}
@proxy.logger
def verify_lss_ids(self, specified_lcu_ids):
if not specified_lcu_ids:
return None
lcu_ids = specified_lcu_ids.upper().replace(' ', '').split(',')
# verify the LCU ID.
for lcu in lcu_ids:
if int(lcu, 16) > 255:
raise exception.InvalidParameterValue(
err=(_('LCU %s in param [ds8k_devadd_unitadd_mapping]'
'is invalid, it should be within 00-FF.') % lcu))
err=_('LCU %s should be within 00-FF.') % lcu)
# verify address group
all_lss = self.get_all_lss(['id', 'type'])
fb_lss = set(lss['id'] for lss in all_lss if lss['type'] == 'fb')
fb_addrgrp = set((int(lss, 16) // 16) for lss in fb_lss)
ckd_addrgrp = set((int(lcu, 16) // 16) for lcu in dev_mapping.keys())
intersection = ckd_addrgrp & fb_addrgrp
self._existing_lss = self.get_all_lss()
fb_addrgrps = set(int(lss['id'], 16) // 16 for lss in
self._existing_lss if lss['type'] == 'fb')
ckd_addrgrps = set((int(lcu, 16) // 16) for lcu in lcu_ids)
intersection = ckd_addrgrps & fb_addrgrps
if intersection:
raise exception.VolumeDriverException(
message=(_('LCUs which first digit is %s are invalid, they '
'are for FB volume.') % ', '.join(intersection)))
message=_('LCUs in the address group %s are reserved '
'for FB volumes') % list(intersection))
# create LCUs that don't exist
ckd_lss = set(lss['id'] for lss in all_lss if lss['type'] == 'ckd')
nonexistent_lcu = set(dev_mapping.keys()) - ckd_lss
nonexistent_lcu = set(lcu_ids) - set(
lss['id'] for lss in self._existing_lss if lss['type'] == 'ckd')
if nonexistent_lcu:
LOG.info('LCUs %s do not exist in DS8K, they will be '
'created.', ','.join(nonexistent_lcu))
@@ -1001,9 +1075,9 @@ class DS8KECKDHelper(DS8KCommonHelper):
except restclient.APIException as e:
raise exception.VolumeDriverException(
message=(_('Can not create lcu %(lcu)s, '
'Exception= %(e)s.')
'Exception = %(e)s.')
% {'lcu': lcu, 'e': six.text_type(e)}))
return dev_mapping
return lcu_ids
def _format_pools(self, pools):
return ((p['id'], {
@@ -1019,38 +1093,66 @@ class DS8KECKDHelper(DS8KCommonHelper):
return self.find_biggest_pool_and_lss(excluded_lss)
@proxy.logger
def _find_lss(self, node, excluded_lcu):
# all LCUs have existed, not like LSS
all_lss = self.get_all_lss(['id', 'type', 'group', 'configvols'])
existing_lcu = [lss for lss in all_lss if lss['type'] == 'ckd']
excluded_lcu = excluded_lcu or []
candidate_lcu = [lcu for lcu in existing_lcu if (
lcu['id'] in self.backend['device_mapping'].keys() and
lcu['id'] not in excluded_lcu and
lcu['group'] == str(node))]
def _find_lss(self, node, excluded_lcu, specified_lcu_ids=None):
# all LCUs already exist, unlike LSSs.
if specified_lcu_ids:
for lcu_id in specified_lcu_ids:
if lcu_id not in self.backend['device_mapping'].keys():
raise exception.InvalidParameterValue(
err=_("LCU %s is not in parameter "
"ds8k_devadd_unitadd_mapping, "
"Please specify LCU in it, otherwise "
"driver can not attach volume.") % lcu_id)
all_lss = self._existing_lss
else:
all_lss = self.get_all_lss()
existing_lcu = [lcu for lcu in all_lss if
lcu['type'] == 'ckd' and
lcu['id'] in self.backend['device_mapping'].keys() and
lcu['group'] == six.text_type(node)]
LOG.info("All appropriate LCUs are %s.",
','.join([lcu['id'] for lcu in existing_lcu]))
# exclude full LCUs.
if excluded_lcu:
existing_lcu = [lcu for lcu in existing_lcu if
lcu['id'] not in excluded_lcu]
if not existing_lcu:
LOG.info("All appropriate LCUs are full.")
return None
ignore_pprc = False
if specified_lcu_ids:
# the user specified LCUs in extra-specs.
existing_lcu = [lcu for lcu in existing_lcu
if lcu['id'] in specified_lcu_ids]
ignore_pprc = True
# exclude LCUs reserved for CG.
candidate_lcu = [lss for lss in candidate_lcu if lss['id']
not in self.backend['lss_ids_for_cg']]
if not candidate_lcu:
existing_lcu = [lcu for lcu in existing_lcu if lcu['id']
not in self.backend['lss_ids_for_cg']]
if not existing_lcu:
LOG.info("All appropriate LCUs have been reserved for "
"for consistency group.")
return None
# prefer to use LCU that is not in PPRC path first.
lcu_pprc = self.get_lss_in_pprc_paths() & set(
self.backend['device_mapping'].keys())
if lcu_pprc:
lcu_non_pprc = [
lcu for lcu in candidate_lcu if lcu['id'] not in lcu_pprc]
if lcu_non_pprc:
candidate_lcu = lcu_non_pprc
if not ignore_pprc:
# prefer to use LCU that is not in PPRC path first.
lcu_pprc = self.get_lss_in_pprc_paths() & set(
self.backend['device_mapping'].keys())
if lcu_pprc:
lcu_non_pprc = [
lcu for lcu in existing_lcu if lcu['id'] not in lcu_pprc]
if lcu_non_pprc:
existing_lcu = lcu_non_pprc
# get the lcu which has max number of empty slots
# return LCU which has max number of empty slots.
emptiest_lcu = sorted(
candidate_lcu, key=lambda i: int(i['configvols']))[0]
existing_lcu, key=lambda i: int(i['configvols']))[0]
if int(emptiest_lcu['configvols']) == LSS_VOL_SLOTS:
return None
return emptiest_lcu['id']
else:
return emptiest_lcu['id']
def _create_lcu(self, ssid_prefix, lcu):
self._client.send('POST', '/lss', {
@@ -1098,11 +1200,12 @@ class DS8KReplicationTargetECKDHelper(DS8KECKDHelper,
self._check_host_type()
self.backend['pools_str'] = self._get_value(
'san_clustername').replace('_', ',')
self._storage_pools = self.get_pools()
self.verify_pools(self._storage_pools)
ssid_prefix = self._get_value('ds8k_ssid_prefix')
self.backend['ssid_prefix'] = ssid_prefix if ssid_prefix else 'FF'
self.backend['device_mapping'] = self._check_and_verify_lcus()
self.backend['device_mapping'] = self._get_device_mapping()
self._verify_version()
self._verify_pools()
def create_lun(self, lun):
volData = {


@@ -95,7 +95,9 @@ EXTRA_SPECS_DEFAULTS = {
'thin': True,
'replication_enabled': False,
'consistency': False,
'os400': ''
'os400': '',
'storage_pool_ids': '',
'storage_lss_ids': ''
}
ds8k_opts = [
@@ -125,23 +127,35 @@ CONF.register_opts(ds8k_opts, group=configuration.SHARED_CONF_GROUP)
class Lun(object):
"""provide volume information for driver from volume db object."""
"""provide volume information for driver from volume db object.
Version history:
.. code-block:: none
1.0.0 - initial revision.
2.1.0 - Added support for specifying pool and lss, and improved the code.
"""
VERSION = "2.1.0"
class FakeLun(object):
def __init__(self, lun, **overrides):
self.size = lun.size
self.os_id = 'fake_os_id'
self.os_id = lun.os_id
self.cinder_name = lun.cinder_name
self.is_snapshot = lun.is_snapshot
self.ds_name = lun.ds_name
self.ds_id = None
self.ds_id = lun.ds_id
self.type_thin = lun.type_thin
self.type_os400 = lun.type_os400
self.data_type = lun.data_type
self.type_replication = lun.type_replication
self.group = lun.group
if not self.is_snapshot and self.type_replication:
self.specified_pool = lun.specified_pool
self.specified_lss = lun.specified_lss
if not self.is_snapshot:
self.replica_ds_name = lun.replica_ds_name
self.replication_driver_data = (
lun.replication_driver_data.copy())
@@ -149,6 +163,7 @@ class Lun(object):
self.pool_lss_pair = lun.pool_lss_pair
def update_volume(self, lun):
lun.data_type = self.data_type
volume_update = lun.get_volume_update()
volume_update['provider_location'] = six.text_type({
'vol_hex_id': self.ds_id})
@@ -157,6 +172,9 @@ class Lun(object):
self.replication_driver_data)
volume_update['metadata']['replication'] = six.text_type(
self.replication_driver_data)
else:
volume_update.pop('replication_driver_data', None)
volume_update['metadata'].pop('replication', None)
volume_update['metadata']['vol_hex_id'] = self.ds_id
return volume_update
@@ -169,11 +187,19 @@ class Lun(object):
).strip().upper()
self.type_thin = self.specs.get(
'drivers:thin_provision', '%s' % EXTRA_SPECS_DEFAULTS['thin']
).upper() == 'True'.upper()
).upper() == 'TRUE'
self.type_replication = self.specs.get(
'replication_enabled',
'<is> %s' % EXTRA_SPECS_DEFAULTS['replication_enabled']
).upper() == strings.METADATA_IS_TRUE
self.specified_pool = self.specs.get(
'drivers:storage_pool_ids',
EXTRA_SPECS_DEFAULTS['storage_pool_ids']
)
self.specified_lss = self.specs.get(
'drivers:storage_lss_ids',
EXTRA_SPECS_DEFAULTS['storage_lss_ids']
)
if volume.provider_location:
provider_location = ast.literal_eval(volume.provider_location)
@@ -386,6 +412,7 @@ class DS8KProxy(proxy.IBMStorageProxy):
'pools exist on the storage.')
LOG.error(msg)
raise exception.CinderException(message=msg)
self._helper.update_storage_pools(storage_pools)
else:
raise exception.VolumeDriverException(
message=(_('Backend %s is not initialized.')
@@ -419,24 +446,34 @@ class DS8KProxy(proxy.IBMStorageProxy):
@proxy.logger
def _create_lun_helper(self, lun, pool=None, find_new_pid=True):
# DS8K supports ECKD ESE volume from 8.1
connection_type = self._helper.get_connection_type()
if connection_type == storage.XIV_CONNECTION_TYPE_FC_ECKD:
thin_provision = self._helper.get_thin_provision()
if lun.type_thin and thin_provision:
if lun.type_thin:
if self._helper.get_thin_provision():
msg = (_("Backend %s can not support ECKD ESE volume.")
% self._helper.backend['storage_unit'])
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
if lun.type_replication:
msg = _("The primary or the secondary storage "
"can not support ECKD ESE volume.")
else:
msg = _("Backend can not support ECKD ESE volume.")
LOG.error(msg)
raise restclient.APIException(message=msg)
target_helper = self._replication._target_helper
# PPRC can not copy from ESE volume to standard volume
# or vice versa.
if target_helper.get_thin_provision():
msg = (_("Secondary storage %s can not support ECKD "
"ESE volume.")
% target_helper.backend['storage_unit'])
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
# There is a time gap between finding an available LSS slot and
# the lun actually occupying it.
excluded_lss = set()
while True:
try:
if lun.group and lun.group.consisgroup_enabled:
if lun.specified_pool or lun.specified_lss:
lun.pool_lss_pair = {
'source': self._find_pool_lss_pair_from_spec(
lun, excluded_lss)}
elif lun.group and lun.group.consisgroup_enabled:
lun.pool_lss_pair = {
'source': self._find_pool_lss_pair_for_cg(
lun, excluded_lss)}
@@ -455,6 +492,17 @@ class DS8KProxy(proxy.IBMStorageProxy):
lun.pool_lss_pair['source'][1])
excluded_lss.add(lun.pool_lss_pair['source'][1])
def _find_pool_lss_pair_from_spec(self, lun, excluded_lss):
if lun.group and lun.group.consisgroup_enabled:
msg = _("No support for specifying pool or lss for "
"volumes that belong to consistency group.")
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
pool, lss = self._helper.find_biggest_pool_and_lss(
excluded_lss, (lun.specified_pool, lun.specified_lss))
return (pool, lss)
@coordination.synchronized('{self.prefix}-consistency-group')
def _find_pool_lss_pair_for_cg(self, lun, excluded_lss):
lss_in_cache = self.consisgroup_cache.get(lun.group.id, set())
@@ -640,7 +688,7 @@ class DS8KProxy(proxy.IBMStorageProxy):
self._replication.extend_replica(lun, param)
self._replication.create_pprc_pairs(lun)
else:
raise exception.CinderException(
raise exception.VolumeDriverException(
message=(_("The volume %s has been failed over, it is "
"not suggested to extend it.") % lun.ds_id))
else:
@@ -674,6 +722,11 @@ class DS8KProxy(proxy.IBMStorageProxy):
# volume not allowed to get here if cg or repl
# should probably check volume['status'] in ['available', 'in-use'],
# especially for flashcopy
lun = Lun(volume)
if lun.type_replication:
raise exception.VolumeDriverException(
message=_('Driver does not support migrating a replicated '
'volume; it can be done via retype.'))
stats = self.meta['stat']
if backend['capabilities']['vendor_name'] != stats['vendor_name']:
raise exception.VolumeDriverException(_(
@@ -684,8 +737,7 @@ class DS8KProxy(proxy.IBMStorageProxy):
new_pools = self._helper.get_pools(
backend['capabilities']['extent_pools'])
lun = Lun(volume)
cur_pool_id = self._helper.get_lun(lun.ds_id)['pool']['id']
cur_pool_id = self._helper.get_lun_pool(lun.ds_id)['id']
cur_node = self._helper.get_storage_pools()[cur_pool_id]['node']
# try pools in same rank
@@ -703,7 +755,6 @@ class DS8KProxy(proxy.IBMStorageProxy):
try:
new_lun = lun.shallow_copy()
self._create_lun_helper(new_lun, pid, False)
lun.data_type = new_lun.data_type
self._clone_lun(lun, new_lun)
volume_update = new_lun.update_volume(lun)
try:
@@ -729,70 +780,114 @@ class DS8KProxy(proxy.IBMStorageProxy):
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
def _get_volume_type(key, value):
def _check_extra_specs(key, value=None):
extra_specs = diff.get('extra_specs')
specific_type = extra_specs.get(key) if extra_specs else None
old_type = None
new_type = None
if specific_type:
old_type = (True if str(specific_type[0]).upper() == value
else False)
new_type = (True if str(specific_type[1]).upper() == value
else False)
else:
old_type = None
new_type = None
old_type, new_type = specific_type
if value:
old_type = (True if old_type and old_type.upper() == value
else False)
new_type = (True if new_type and new_type.upper() == value
else False)
return old_type, new_type
def _convert_thin_and_thick(lun, new_type):
lun = Lun(volume)
# check whether the user specified pool or lss
old_specified_pool, new_specified_pool = _check_extra_specs(
'drivers:storage_pool_ids')
old_specified_lss, new_specified_lss = _check_extra_specs(
'drivers:storage_lss_ids')
# check thin or thick
old_type_thick, new_type_thick = _check_extra_specs(
'drivers:thin_provision', 'FALSE')
# check replication capability
old_type_replication, new_type_replication = _check_extra_specs(
'replication_enabled', strings.METADATA_IS_TRUE)
# start retype; note that the order here is important
# because of the rollback problem if retype fails.
new_props = {}
if old_type_thick != new_type_thick:
new_props['type_thin'] = not new_type_thick
if (old_specified_pool == new_specified_pool and
old_specified_lss == new_specified_lss):
LOG.info("Same pool and lss.")
elif ((old_specified_pool or old_specified_lss) and
(new_specified_pool or new_specified_lss)):
raise exception.VolumeDriverException(
message=_("Retype does not support to move volume from "
"specified pool or lss to another specified "
"pool or lss."))
elif ((old_specified_pool is None and new_specified_pool) or
(old_specified_lss is None and new_specified_lss)):
storage_pools = self._helper.get_pools(new_specified_pool)
self._helper.verify_pools(storage_pools)
storage_lss = self._helper.verify_lss_ids(new_specified_lss)
vol_pool = self._helper.get_lun_pool(lun.ds_id)['id']
vol_lss = lun.ds_id[:2].upper()
# even if the old volume is in the specified LSS, when it needs
# to be changed from thin to thick or vice versa, the driver
# needs to make sure the new volume will be created in the
# specified LSS.
if ((storage_lss and vol_lss not in storage_lss) or
new_props.get('type_thin')):
new_props['specified_pool'] = new_specified_pool
new_props['specified_lss'] = new_specified_lss
elif vol_pool not in storage_pools.keys():
vol_node = int(vol_lss, 16) % 2
new_pool_id = None
for pool_id, pool in storage_pools.items():
if vol_node == pool['node']:
new_pool_id = pool_id
break
if new_pool_id:
self._helper.change_lun(lun.ds_id, {'pool': new_pool_id})
else:
raise exception.VolumeDriverException(
message=_("Can not change the pool volume allocated."))
new_lun = None
if new_props:
new_lun = lun.shallow_copy()
new_lun.type_thin = new_type
self._create_lun_helper(new_lun)
for key, value in new_props.items():
setattr(new_lun, key, value)
self._clone_lun(lun, new_lun)
volume_update = None
if new_lun:
# if the new lun meets all requirements of retype successfully,
# exceptions that happen during cleanup can be ignored.
if new_type_replication:
new_lun.type_replication = True
new_lun = self._replication.enable_replication(new_lun, True)
elif old_type_replication:
new_lun.type_replication = False
try:
self._replication.delete_replica(lun)
except Exception:
pass
try:
self._helper.delete_lun(lun)
except Exception:
pass
lun.ds_id = new_lun.ds_id
lun.data_type = new_lun.data_type
lun.type_thin = new_type
return lun
lun = Lun(volume)
# check thin or thick
old_type_thin, new_type_thin = _get_volume_type(
'drivers:thin_provision', 'True'.upper())
# check replication capability
old_type_replication, new_type_replication = _get_volume_type(
'replication_enabled', strings.METADATA_IS_TRUE)
# start retype
if old_type_thin != new_type_thin:
if old_type_replication:
if not new_type_replication:
lun = self._replication.delete_replica(lun)
lun = _convert_thin_and_thick(lun, new_type_thin)
else:
raise exception.CinderException(
message=(_("The volume %s is in replication "
"relationship, it is not supported to "
"retype from thin to thick or vice "
"versa.") % lun.ds_id))
else:
lun = _convert_thin_and_thick(lun, new_type_thin)
if new_type_replication:
lun.type_replication = True
lun = self._replication.enable_replication(lun)
volume_update = new_lun.update_volume(lun)
else:
# if the driver does not create a new lun, don't delete the source
# lun when enabling replication or deleting the replica fails.
if not old_type_replication and new_type_replication:
lun.type_replication = True
lun = self._replication.enable_replication(lun)
elif old_type_replication and not new_type_replication:
lun = self._replication.delete_replica(lun)
lun.type_replication = False
return True, lun.get_volume_update()
volume_update = lun.get_volume_update()
return True, volume_update
@proxy._trace_time
@proxy.logger
@@ -935,7 +1030,7 @@ class DS8KProxy(proxy.IBMStorageProxy):
new_lun = self._clone_lun_for_group(group, lun)
self._helper.delete_lun(lun)
volume_update = new_lun.update_volume(lun)
volume_update['id'] = lun.os_id
volume_update['id'] = new_lun.os_id
add_volumes_update.append(volume_update)
return add_volumes_update
@@ -943,7 +1038,6 @@ class DS8KProxy(proxy.IBMStorageProxy):
lun.group = Group(group)
new_lun = lun.shallow_copy()
new_lun.type_replication = False
self._create_lun_helper(new_lun)
self._clone_lun(lun, new_lun)
return new_lun
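
For context, a hedged sketch of the diff argument that the new
_check_extra_specs helper in retype() consumes; the shape mirrors the
dictionaries built in the tests above, and each extra-spec maps to an
(old, new) tuple:

    diff = {
        'encryption': {},
        'qos_specs': {},
        'extra_specs': {
            'drivers:thin_provision': ('True', 'False'),
            'drivers:storage_pool_ids': (None, 'P0'),
            'drivers:storage_lss_ids': (None, '20')
        }
    }
    # _check_extra_specs('drivers:thin_provision', 'FALSE') would
    # return (False, True): the old type was thin, the new is thick.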


@@ -374,7 +374,18 @@ class MetroMirrorManager(object):
class Replication(object):
"""Metro Mirror and Global Mirror will be used by it."""
"""Metro Mirror and Global Mirror will be used by it.
Version history:
.. code-block:: none
1.0.0 - initial revision.
2.1.0 - Ignore exceptions during cleanup when creating or deleting
a replica fails.
"""
VERSION = "2.1.0"
def __init__(self, source_helper, target_device):
self._source_helper = source_helper
@@ -405,13 +416,6 @@ class Replication(object):
"%(secondary)s")
% {'primary': src_conn_type,
'secondary': tgt_conn_type}))
# PPRC can not copy from ESE volume to standard volume or vice versus.
if src_conn_type == storage.XIV_CONNECTION_TYPE_FC_ECKD:
src_thin = self._source_helper.get_thin_provision()
tgt_thin = self._target_helper.get_thin_provision()
if src_thin != tgt_thin:
self._source_helper.disable_thin_provision()
self._target_helper.disable_thin_provision()
def check_physical_links(self):
self._mm_manager.check_physical_links()
@@ -480,19 +484,31 @@ class Replication(object):
self._mm_manager.create_pprc_pairs(lun)
except restclient.APIException:
with excutils.save_and_reraise_exception():
self.delete_replica(lun)
if delete_source:
self._source_helper.delete_lun(lun)
try:
self.delete_replica(lun)
if delete_source:
self._source_helper.delete_lun(lun)
except restclient.APIException as ex:
LOG.info("Failed to cleanup replicated volume %(id)s, "
"Exception: %(ex)s.",
{'id': lun.ds_id, 'ex': ex})
lun.replication_status = 'enabled'
return lun
@proxy.logger
def delete_replica(self, lun):
def delete_replica(self, lun, delete_source=False):
if lun.ds_id is not None:
try:
self._mm_manager.delete_pprc_pairs(lun)
self._delete_replica(lun)
except restclient.APIException as e:
if delete_source:
try:
self._source_helper.delete_lun(lun)
except restclient.APIException as ex:
LOG.info("Failed to delete source volume %(id)s, "
"Exception: %(ex)s.",
{'id': lun.ds_id, 'ex': ex})
raise exception.VolumeDriverException(
message=(_('Failed to delete the target volume for '
'volume %(volume)s, Exception: %(ex)s.')


@@ -0,0 +1,5 @@
---
features:
- The DS8K driver adds two new properties to extra-specs so that
users can specify a pool, an LSS, or both, to allocate volumes
in their expected area.
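
As a hedged illustration of the address-group rule that the new
verify_lss_ids enforces (a standalone sketch, not part of this change;
the sample IDs are made up): sixteen LSSs share one address group, and
a group already holding CKD LSSs is rejected for FB volumes.

    existing_lss = [{'id': '10', 'type': 'ckd'}, {'id': '22', 'type': 'fb'}]
    specified_lss_ids = ['11', '23']  # hypothetical extra-specs input

    # address group = LSS ID (hex) // 16, as in the driver's check
    ckd_addrgrps = set(int(lss['id'], 16) // 16 for lss in existing_lss
                       if lss['type'] == 'ckd')
    fb_addrgrps = set(int(lss_id, 16) // 16 for lss_id in specified_lss_ids)
    print(ckd_addrgrps & fb_addrgrps)  # {1}: '11' clashes with CKD LSS '10'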