diff --git a/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py b/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py index 0c9175ea4bf..c0dede1821d 100644 --- a/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py +++ b/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py @@ -351,7 +351,11 @@ class StorwizeSVCManagementSimulator(object): 'noconsistgrp', 'global', 'access', - 'start' + 'start', + 'thin', + 'removehostmappings', + 'removefcmaps', + 'removercrelationships' ] one_param_args = [ 'chapsecret', @@ -389,6 +393,9 @@ class StorwizeSVCManagementSimulator(object): 'cycleperiodseconds', 'masterchange', 'auxchange', + 'pool', + 'site', + 'buffersize', ] no_or_one_param_args = [ 'autoexpand', @@ -508,17 +515,19 @@ class StorwizeSVCManagementSimulator(object): # Print mostly made-up stuff in the correct syntax def _cmd_lssystem(self, **kwargs): - rows = [None] * 3 + rows = [None] * 4 rows[0] = ['id', '0123456789ABCDEF'] rows[1] = ['name', 'storwize-svc-sim'] rows[2] = ['code_level', '7.2.0.0 (build 87.0.1311291000)'] + rows[3] = ['topology', ''] return self._print_info_cmd(rows=rows, **kwargs) def _cmd_lssystem_aux(self, **kwargs): - rows = [None] * 3 + rows = [None] * 4 rows[0] = ['id', 'ABCDEF0123456789'] rows[1] = ['name', 'aux-svc-sim'] rows[2] = ['code_level', '7.2.0.0 (build 87.0.1311291000)'] + rows[3] = ['topology', ''] return self._print_info_cmd(rows=rows, **kwargs) # Print mostly made-up stuff in the correct syntax, assume -bytes passed @@ -529,7 +538,7 @@ class StorwizeSVCManagementSimulator(object): 'vdisk_count', 'capacity', 'extent_size', 'free_capacity', 'virtual_capacity', 'used_capacity', 'real_capacity', 'overallocation', 'warning', - 'easy_tier', 'easy_tier_status']) + 'easy_tier', 'easy_tier_status', 'site_id']) for i in range(pool_num): row_data = [str(i + 1), self._flags['storwize_svc_volpool_name'][i], 'online', @@ -537,16 +546,24 @@ class StorwizeSVCManagementSimulator(object): '3573412790272', '256', '3529926246400', '1693247906775', '26843545600', '38203734097', '47', '80', 'auto', - 'inactive'] + 'inactive', ''] rows.append(row_data) rows.append([str(pool_num + 1), 'openstack2', 'online', '1', '0', '3573412790272', '256', '3529432325160', '1693247906775', '26843545600', - '38203734097', '47', '80', 'auto', 'inactive']) + '38203734097', '47', '80', 'auto', 'inactive', '']) rows.append([str(pool_num + 2), 'openstack3', 'online', '1', '0', '3573412790272', '128', '3529432325160', '1693247906775', '26843545600', - '38203734097', '47', '80', 'auto', 'inactive']) + '38203734097', '47', '80', 'auto', 'inactive', '']) + rows.append([str(pool_num + 3), 'hyperswap1', 'online', + '1', '0', '3573412790272', '256', + '3529432325160', '1693247906775', '26843545600', + '38203734097', '47', '80', 'auto', 'inactive', '1']) + rows.append([str(pool_num + 4), 'hyperswap2', 'online', + '1', '0', '3573412790272', '128', + '3529432325160', '1693247906775', '26843545600', + '38203734097', '47', '80', 'auto', 'inactive', '2']) if 'obj' not in kwargs: return self._print_info_cmd(rows=rows, **kwargs) else: @@ -560,8 +577,12 @@ class StorwizeSVCManagementSimulator(object): row = each_row break elif pool_name == 'openstack2': - row = rows[-2] + row = rows[-4] elif pool_name == 'openstack3': + row = rows[-4] + elif pool_name == 'hyperswap1': + row = rows[-2] + elif pool_name == 'hyperswap2': row = rows[-1] else: return self._errors['CMMVC5754E'] @@ -598,17 +619,17 @@ class StorwizeSVCManagementSimulator(object): 'IO_group_id', 'IO_group_name', 'config_node', 
'UPS_unique_id', 'hardware', 'iscsi_name', 'iscsi_alias', 'panel_name', 'enclosure_id', 'canister_id', - 'enclosure_serial_number'] + 'enclosure_serial_number', 'site_id'] rows[1] = ['1', 'node1', '', '123456789ABCDEF0', 'online', '0', 'io_grp0', 'yes', '123456789ABCDEF0', '100', 'iqn.1982-01.com.ibm:1234.sim.node1', '', '01-1', '1', '1', - '0123ABC'] - rows[2] = ['2', 'node2', '', '123456789ABCDEF1', 'online', '0', + '0123ABC', '1'] + rows[2] = ['2', 'node2', '', '123456789ABCDEF1', 'online', '1', 'io_grp0', 'no', '123456789ABCDEF1', '100', 'iqn.1982-01.com.ibm:1234.sim.node2', '', '01-2', '1', '2', - '0123ABC'] + '0123ABC', '2'] if self._next_cmd_error['lsnodecanister'] == 'header_mismatch': rows[0].pop(2) @@ -1169,6 +1190,10 @@ port_speed!N/A host_info['iscsi_names'] = [] host_info['wwpns'] = [] + if 'site' in kwargs: + host_info['site_name'] = kwargs['site'].strip('\'\"') + else: + host_info['site_name'] = '' out, err = self._add_port_to_host(host_info, **kwargs) if not len(err): self._hosts_list[host_name] = host_info @@ -1191,10 +1216,6 @@ port_speed!N/A # Change host properties def _cmd_chhost(self, **kwargs): - if 'chapsecret' not in kwargs: - return self._errors['CMMVC5707E'] - secret = kwargs['obj'].strip('\'\"') - if 'obj' not in kwargs: return self._errors['CMMVC5701E'] host_name = kwargs['obj'].strip('\'\"') @@ -1202,7 +1223,17 @@ port_speed!N/A if host_name not in self._hosts_list: return self._errors['CMMVC5753E'] - self._hosts_list[host_name]['chapsecret'] = secret + if 'chapsecret' in kwargs: + secret = kwargs['chapsecret'].strip('\'\"') + self._hosts_list[host_name]['chapsecret'] = secret + + if 'site' in kwargs: + site_name = kwargs['site'].strip('\'\"') + self._hosts_list[host_name]['site_name'] = site_name + + if 'chapsecret' not in kwargs and 'site' not in kwargs: + return self._errors['CMMVC5707E'] + return ('', '') # Remove a host @@ -1225,7 +1256,8 @@ port_speed!N/A def _cmd_lshost(self, **kwargs): if 'obj' not in kwargs: rows = [] - rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status']) + rows.append(['id', 'name', 'port_count', 'iogrp_count', + 'status', 'site_name']) found = False # Sort hosts by names to give predictable order for tests @@ -1236,7 +1268,7 @@ port_speed!N/A if (('filtervalue' not in kwargs) or (kwargs['filtervalue'] == filterstr)): rows.append([host['id'], host['host_name'], '1', '4', - 'offline']) + 'offline', host['site_name']]) found = True if found: return self._print_info_cmd(rows=rows, **kwargs) @@ -1263,6 +1295,7 @@ port_speed!N/A rows.append(['mask', '1111']) rows.append(['iogrp_count', '4']) rows.append(['status', 'online']) + rows.append(['site_name', host['site_name']]) for port in host['iscsi_names']: rows.append(['iscsi_name', port]) rows.append(['node_logged_in_count', '0']) @@ -1447,6 +1480,7 @@ port_speed!N/A fcmap_info['progress'] = '0' fcmap_info['autodelete'] = True if 'autodelete' in kwargs else False fcmap_info['status'] = 'idle_or_copied' + fcmap_info['rc_controlled'] = 'no' # Add fcmap to consistency group if 'consistgrp' in kwargs: @@ -1607,7 +1641,7 @@ port_speed!N/A source['name'], target['id'], target['name'], '', '', v['status'], v['progress'], v['copyrate'], '100', 'off', '', '', 'no', '', - 'no']) + v['rc_controlled']]) for d in to_delete: del self._fcmappings_list[d] @@ -1962,6 +1996,7 @@ port_speed!N/A host_info['id'] = self._find_unused_id(self._hosts_list) host_info['host_name'] = connector['host'] host_info['iscsi_names'] = [] + host_info['site_name'] = '' host_info['wwpns'] = [] if 'initiator' in 
connector: host_info['iscsi_names'].append(connector['initiator']) @@ -2331,7 +2366,8 @@ port_speed!N/A if 'cluster' not in kwargs: return self._errors['CMMVC5707E'] aux_cluster = kwargs['cluster'].strip('\'\"') - if aux_cluster != aux_sys['name']: + if (aux_cluster != aux_sys['name'] and + aux_cluster != master_sys['name']): return self._errors['CMMVC5754E'] rccg_info = {} @@ -2651,6 +2687,320 @@ port_speed!N/A msg = _("The copy should be primary or secondary") raise exception.InvalidInput(reason=msg) + def create_site_volume_and_fcmapping(self, kwargs, name, sitepool, + fcmapping=False, source=None): + + sitepool_id = self._get_mdiskgrp_id(sitepool) + site_volume_info = {} + site_volume_info['id'] = self._find_unused_id(self._volumes_list) + site_volume_info['uid'] = ('ABCDEF' * 3) + ( + '0' * 14) + site_volume_info['id'] + + site_volume_info['mdisk_grp_name'] = sitepool + site_volume_info['mdisk_grp_id'] = str(sitepool_id) + + if 'name' in kwargs or 'obj' in kwargs: + site_volume_info['name'] = name + else: + site_volume_info['name'] = name + site_volume_info['id'] + # Assume size and unit are given, store it in bytes + if "size" in kwargs: + capacity = int(kwargs['size']) + unit = kwargs['unit'] + site_volume_info['capacity'] = self._convert_units_bytes( + capacity, unit) + else: + site_volume_info['capacity'] = source['capacity'] + site_volume_info['IO_group_id'] = '0' + site_volume_info['IO_group_name'] = 'io_grp0' + site_volume_info['RC_name'] = '' + site_volume_info['RC_id'] = '' + + if 'buffersize' in kwargs: + site_volume_info['formatted'] = 'no' + # Fake numbers + site_volume_info['used_capacity'] = '786432' + site_volume_info['real_capacity'] = '21474816' + site_volume_info['free_capacity'] = '38219264' + if 'warning' in kwargs: + site_volume_info['warning'] = kwargs['warning'].rstrip('%') + else: + site_volume_info['warning'] = '80' + + if 'noautoexpand' in kwargs: + site_volume_info['autoexpand'] = 'off' + else: + site_volume_info['autoexpand'] = 'on' + + if 'compressed' in kwargs: + site_volume_info['compressed_copy'] = 'yes' + else: + site_volume_info['compressed_copy'] = 'no' + + if 'thin' in kwargs: + site_volume_info['formatted'] = 'no' + # Fake numbers + site_volume_info['used_capacity'] = '786432' + site_volume_info['real_capacity'] = '21474816' + site_volume_info['free_capacity'] = '38219264' + if 'grainsize' in kwargs: + site_volume_info['grainsize'] = kwargs['grainsize'] + else: + site_volume_info['grainsize'] = '32' + else: + site_volume_info['used_capacity'] = site_volume_info['capacity'] + site_volume_info['real_capacity'] = site_volume_info['capacity'] + site_volume_info['free_capacity'] = '0' + site_volume_info['warning'] = '' + site_volume_info['autoexpand'] = '' + site_volume_info['grainsize'] = '' + site_volume_info['compressed_copy'] = 'no' + site_volume_info['formatted'] = 'yes' + + vol_cp = {'id': '0', + 'status': 'online', + 'sync': 'yes', + 'primary': 'yes', + 'mdisk_grp_id': str(sitepool_id), + 'mdisk_grp_name': sitepool, + 'easy_tier': 'on', + 'compressed_copy': site_volume_info['compressed_copy']} + site_volume_info['copies'] = {'0': vol_cp} + + if site_volume_info['name'] in self._volumes_list: + return self._errors['CMMVC6035E'] + else: + self._volumes_list[site_volume_info['name']] = site_volume_info + + # create a flashcopy mapping for site volume and site flashcopy volume + if fcmapping: + site_fcmap_info = {} + site_fcmap_info['source'] = source['name'] + site_fcmap_info['target'] = site_volume_info['name'] + site_fcmap_info['id'] = 
self._find_unused_id(self._fcmappings_list) + site_fcmap_info['name'] = 'fcmap' + site_fcmap_info['id'] + site_fcmap_info['copyrate'] = '50' + site_fcmap_info['progress'] = '0' + site_fcmap_info['autodelete'] = (True if 'autodelete' in kwargs + else False) + site_fcmap_info['status'] = 'idle_or_copied' + site_fcmap_info['rc_controlled'] = 'yes' + + self._fcmappings_list[site_fcmap_info['id']] = site_fcmap_info + + return site_volume_info + + def _cmd_mkvolume(self, **kwargs): + pool = kwargs['pool'].strip('\'\"') + pool_split = pool.split(':') + if len(pool_split) != 2: + raise exception.InvalidInput( + reason=_('pool %s is invalid for hyperswap ' + 'volume') % kwargs['pool']) + else: + site1pool = pool_split[0] + site2pool = pool_split[1] + + if pool == kwargs['pool']: + raise exception.InvalidInput( + reason=_('pool missing quotes %s') % kwargs['pool']) + + if 'name' in kwargs: + site1name = kwargs['name'].strip('\'\"') + site1fcname = 'fcsite1' + kwargs['name'].strip('\'\"') + site2name = 'site2' + kwargs['name'].strip('\'\"') + site2fcname = 'fcsite2' + kwargs['name'].strip('\'\"') + else: + site1name = 'vdisk' + site1fcname = 'fcsite1vdisk' + site2name = 'site2vdisk' + site2fcname = 'fcsite2vdisk' + + # create hyperswap volume on site1 + site1_volume_info = self.create_site_volume_and_fcmapping( + kwargs, site1name, site1pool, False, None) + # create flashcopy volume on site1 + self.create_site_volume_and_fcmapping(kwargs, site1fcname, site1pool, + True, site1_volume_info) + # create hyperswap volume on site2 + site2_volume_info = self.create_site_volume_and_fcmapping( + kwargs, site2name, site2pool, False, site1_volume_info) + # create flashcopy volume on site2 + self.create_site_volume_and_fcmapping(kwargs, site2fcname, site2pool, + True, site2_volume_info) + + # Create remote copy for site1volume and site2volume + master_sys = self._system_list['storwize-svc-sim'] + aux_sys = self._system_list['storwize-svc-sim'] + rcrel_info = {} + rcrel_info['id'] = self._find_unused_id(self._rcrelationship_list) + rcrel_info['name'] = 'rcrel' + rcrel_info['id'] + rcrel_info['master_cluster_id'] = master_sys['id'] + rcrel_info['master_cluster_name'] = master_sys['name'] + rcrel_info['master_vdisk_id'] = site1_volume_info['id'] + rcrel_info['master_vdisk_name'] = site1_volume_info['name'] + rcrel_info['aux_cluster_id'] = aux_sys['id'] + rcrel_info['aux_cluster_name'] = aux_sys['name'] + rcrel_info['aux_vdisk_id'] = site2_volume_info['id'] + rcrel_info['aux_vdisk_name'] = site2_volume_info['name'] + rcrel_info['primary'] = 'master' + rcrel_info['consistency_group_id'] = '' + rcrel_info['consistency_group_name'] = '' + rcrel_info['state'] = 'inconsistent_stopped' + rcrel_info['bg_copy_priority'] = '50' + rcrel_info['progress'] = '0' + rcrel_info['freeze_time'] = '' + rcrel_info['status'] = 'online' + rcrel_info['sync'] = '' + rcrel_info['copy_type'] = 'activeactive' + rcrel_info['cycling_mode'] = '' + rcrel_info['cycle_period_seconds'] = '300' + rcrel_info['master_change_vdisk_id'] = '' + rcrel_info['master_change_vdisk_name'] = '' + rcrel_info['aux_change_vdisk_id'] = '' + rcrel_info['aux_change_vdisk_name'] = '' + + self._rcrelationship_list[rcrel_info['name']] = rcrel_info + site1_volume_info['RC_name'] = rcrel_info['name'] + site1_volume_info['RC_id'] = rcrel_info['id'] + site2_volume_info['RC_name'] = rcrel_info['name'] + site2_volume_info['RC_id'] = rcrel_info['id'] + return ('Hyperswap volume, id [%s], successfully created' % + (site1_volume_info['id']), '') + + def _cmd_addvolumecopy(self, 
**kwargs): + + if 'obj' not in kwargs: + return self._errors['CMMVC5701E'] + vol_name = kwargs['obj'].strip('\'\"') + site1_volume_info = self._volumes_list[vol_name] + site1pool = site1_volume_info['mdisk_grp_name'] + site2pool = kwargs['pool'].strip('\'\"') + site1fcname = 'fcsite1' + vol_name + site2name = 'site2' + vol_name + site2fcname = 'fcsite2' + vol_name + + # create flashcopy volume on site1 + self.create_site_volume_and_fcmapping(kwargs, site1fcname, site1pool, + True, site1_volume_info) + # create hyperswap volume on site2 + site2_volume_info = self.create_site_volume_and_fcmapping( + kwargs, site2name, site1pool, False, site1_volume_info) + # create flashcopy volume on site2 + self.create_site_volume_and_fcmapping(kwargs, site2fcname, site2pool, + True, site2_volume_info) + + # create remote copy for site1volume and site2volume + master_sys = self._system_list['storwize-svc-sim'] + aux_sys = self._system_list['storwize-svc-sim'] + rcrel_info = {} + rcrel_info['id'] = self._find_unused_id(self._rcrelationship_list) + rcrel_info['name'] = 'rcrel' + rcrel_info['id'] + rcrel_info['master_cluster_id'] = master_sys['id'] + rcrel_info['master_cluster_name'] = master_sys['name'] + rcrel_info['master_vdisk_id'] = site1_volume_info['id'] + rcrel_info['master_vdisk_name'] = site1_volume_info['name'] + rcrel_info['aux_cluster_id'] = aux_sys['id'] + rcrel_info['aux_cluster_name'] = aux_sys['name'] + rcrel_info['aux_vdisk_id'] = site2_volume_info['id'] + rcrel_info['aux_vdisk_name'] = site2_volume_info['name'] + rcrel_info['primary'] = 'master' + rcrel_info['consistency_group_id'] = '' + rcrel_info['consistency_group_name'] = '' + rcrel_info['state'] = 'inconsistent_stopped' + rcrel_info['bg_copy_priority'] = '50' + rcrel_info['progress'] = '0' + rcrel_info['freeze_time'] = '' + rcrel_info['status'] = 'online' + rcrel_info['sync'] = '' + rcrel_info['copy_type'] = 'activeactive' + rcrel_info['cycling_mode'] = '' + rcrel_info['cycle_period_seconds'] = '300' + rcrel_info['master_change_vdisk_id'] = '' + rcrel_info['master_change_vdisk_name'] = '' + rcrel_info['aux_change_vdisk_id'] = '' + rcrel_info['aux_change_vdisk_name'] = '' + + self._rcrelationship_list[rcrel_info['name']] = rcrel_info + site1_volume_info['RC_name'] = rcrel_info['name'] + site1_volume_info['RC_id'] = rcrel_info['id'] + site2_volume_info['RC_name'] = rcrel_info['name'] + site2_volume_info['RC_id'] = rcrel_info['id'] + return ('', '') + + def _cmd_rmvolumecopy(self, **kwargs): + + if 'obj' not in kwargs: + return self._errors['CMMVC5701E'] + vol_name = kwargs['obj'].strip('\'\"') + site1_volume_info = self._volumes_list[vol_name] + site2_volume_info = self._volumes_list['site2' + vol_name] + site1_volume_fc_info = self._volumes_list['fcsite1' + vol_name] + site2_volume_fc_info = self._volumes_list['fcsite2' + vol_name] + + del self._rcrelationship_list[self._volumes_list[vol_name]['RC_name']] + site1fcmap = None + site2fcmap = None + for fcmap in self._fcmappings_list.values(): + if ((fcmap['source'] == vol_name) and + (fcmap['target'] == 'fcsite1' + vol_name)): + site1fcmap = fcmap + continue + elif ((fcmap['source'] == 'site2' + vol_name) and + (fcmap['target'] == 'fcsite2' + vol_name)): + site2fcmap = fcmap + continue + + if site1fcmap: + del self._fcmappings_list[site1fcmap['id']] + del site1_volume_fc_info + if site2fcmap: + del self._fcmappings_list[site2fcmap['id']] + del site2_volume_fc_info + + del site2_volume_info + site1_volume_info['RC_name'] = '' + site1_volume_info['RC_id'] = '' + return ('', '') + + def 
_cmd_rmvolume(self, **kwargs): + removehostmappings = True if 'removehostmappings' in kwargs else False + + if 'obj' not in kwargs: + return self._errors['CMMVC5701E'] + vol_name = kwargs['obj'].strip('\'\"') + + if vol_name not in self._volumes_list: + return self._errors['CMMVC5753E'] + + site1fcmap = None + site2fcmap = None + for fcmap in self._fcmappings_list.values(): + if ((fcmap['source'] == vol_name) and + (fcmap['target'] == 'fcsite1' + vol_name)): + site1fcmap = fcmap + continue + elif ((fcmap['source'] == 'site2' + vol_name) and + (fcmap['target'] == 'fcsite2' + vol_name)): + site2fcmap = fcmap + continue + if site1fcmap: + del self._fcmappings_list[site1fcmap['id']] + if site2fcmap: + del self._fcmappings_list[site2fcmap['id']] + + if not removehostmappings: + for mapping in self._mappings_list.values(): + if mapping['vol'] == vol_name: + return self._errors['CMMVC5840E'] + + del self._rcrelationship_list[self._volumes_list[vol_name]['RC_name']] + del self._volumes_list[vol_name] + del self._volumes_list['fcsite1' + vol_name] + del self._volumes_list['site2' + vol_name] + del self._volumes_list['fcsite2' + vol_name] + return ('', '') + class StorwizeSVCISCSIFakeDriver(storwize_svc_iscsi.StorwizeSVCISCSIDriver): def __init__(self, *args, **kwargs): @@ -2840,6 +3190,76 @@ class StorwizeSVCISCSIDriverTestCase(test.TestCase): snapshot, connector) + def test_storwize_initialize_iscsi_connection_with_host_site(self): + connector = {'host': 'storwize-svc-host', + 'wwnns': ['20000090fa17311e', '20000090fa17311f'], + 'wwpns': ['ff00000000000000', 'ff00000000000001'], + 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} + + # host_site is None + volume0_iSCSI = self._create_volume() + vol_type_iSCSI_0 = volume_types.create(self.ctxt, 'iSCSI0', None) + volume0_iSCSI['volume_type_id'] = vol_type_iSCSI_0['id'] + self.iscsi_driver.initialize_connection(volume0_iSCSI, connector) + + # host_site is site1 + volume_iSCSI = self._create_volume() + extra_spec = {'drivers:volume_topology': 'hyperswap', + 'peer_pool': 'openstack1', + 'host_site': 'site1'} + vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec) + volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] + self.iscsi_driver.initialize_connection(volume_iSCSI, connector) + + # host_site is site2, different with site1. + volume1_iSCSI = self._create_volume() + extra_spec_1 = {'drivers:volume_topology': 'hyperswap', + 'peer_pool': 'openstack1', + 'host_site': 'site2'} + vol_type_iSCSI_1 = volume_types.create(self.ctxt, 'iSCSI1', + extra_spec_1) + volume1_iSCSI['volume_type_id'] = vol_type_iSCSI_1['id'] + self.assertRaises(exception.VolumeDriverException, + self.iscsi_driver.initialize_connection, + volume1_iSCSI, + connector) + + # host_site is None. 
+ volume2_iSCSI = self._create_volume() + vol_type_iSCSI_2 = volume_types.create(self.ctxt, 'iSCSI2', None) + volume2_iSCSI['volume_type_id'] = vol_type_iSCSI_2['id'] + self.iscsi_driver.initialize_connection(volume2_iSCSI, connector) + + # create new host with host_site, the host site should be update + connector2 = {'host': 'STORWIZE-SVC-HOST', + 'wwnns': ['30000090fa17311e', '30000090fa17311f'], + 'wwpns': ['ffff000000000000', 'ffff000000000001'], + 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1bbb'} + # attach hyperswap volume without host_site + volume3_iSCSI = self._create_volume() + extra_spec_3 = {'drivers:volume_topology': 'hyperswap', + 'peer_pool': 'openstack1'} + vol_type_iSCSI_3 = volume_types.create(self.ctxt, 'iSCSI3', + extra_spec_3) + volume3_iSCSI['volume_type_id'] = vol_type_iSCSI_3['id'] + with mock.patch.object(storwize_svc_common.StorwizeHelpers, + 'is_volume_hyperswap') as hyperswap: + hyperswap.return_value = True + self.assertRaises(exception.VolumeDriverException, + self.iscsi_driver.initialize_connection, + volume3_iSCSI, + connector2) + + # attach hyperswap volume with host_site + volume4_iSCSI = self._create_volume() + extra_spec_4 = {'drivers:volume_topology': 'hyperswap', + 'peer_pool': 'openstack1', + 'host_site': 'site2'} + vol_type_iSCSI_4 = volume_types.create(self.ctxt, 'iSCSI4', + extra_spec_4) + volume4_iSCSI['volume_type_id'] = vol_type_iSCSI_4['id'] + self.iscsi_driver.initialize_connection(volume4_iSCSI, connector2) + @mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_do_terminate_connection') @mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, @@ -3152,10 +3572,13 @@ class StorwizeSVCISCSIDriverTestCase(test.TestCase): # Test no preferred node if self.USESIM: - self.sim.error_injection('lsvdisk', 'no_pref_node') - self.assertRaises(exception.VolumeBackendAPIException, - self.iscsi_driver.initialize_connection, - volume1, self._connector) + with mock.patch.object(storwize_svc_common.StorwizeHelpers, + 'is_volume_hyperswap') as hyperswap: + hyperswap.return_value = False + self.sim.error_injection('lsvdisk', 'no_pref_node') + self.assertRaises(exception.VolumeBackendAPIException, + self.iscsi_driver.initialize_connection, + volume1, self._connector) # Initialize connection from the second volume to the host with no # preferred node set if in simulation mode, otherwise, just @@ -3376,7 +3799,7 @@ class StorwizeSVCFcDriverTestCase(test.TestCase): self.assertIsNotNone(host_name) def test_storwize_fc_connection_snapshot(self): - # create a iSCSI volume + # create a fc volume snapshot volume_fc = self._create_volume() snapshot = self._generate_snap_info(volume_fc.id) self.fc_driver.create_snapshot(snapshot) @@ -3596,6 +4019,73 @@ class StorwizeSVCFcDriverTestCase(test.TestCase): self.fc_driver.initialize_connection(volume_fc, connector) self.fc_driver.terminate_connection(volume_fc, connector) + def test_storwize_initialize_fc_connection_with_host_site(self): + connector = {'host': 'storwize-svc-host', + 'wwnns': ['20000090fa17311e', '20000090fa17311f'], + 'wwpns': ['ff00000000000000', 'ff00000000000001'], + 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} + + # host_site is None + volume0_fc = self._create_volume() + vol_type_fc_0 = volume_types.create(self.ctxt, 'FC0', None) + volume0_fc['volume_type_id'] = vol_type_fc_0['id'] + self.fc_driver.initialize_connection(volume0_fc, connector) + + # host_site is site1 + volume_fc = self._create_volume() + extra_spec = {'drivers:volume_topology': 'hyperswap', + 'peer_pool': 
'openstack1', + 'host_site': 'site1'} + vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec) + volume_fc['volume_type_id'] = vol_type_fc['id'] + self.fc_driver.initialize_connection(volume_fc, connector) + + # host_site is site2, different with site1. + volume1_fc = self._create_volume() + extra_spec_1 = {'drivers:volume_topology': 'hyperswap', + 'peer_pool': 'openstack1', + 'host_site': 'site2'} + vol_type_fc_1 = volume_types.create(self.ctxt, 'FC1', extra_spec_1) + volume1_fc['volume_type_id'] = vol_type_fc_1['id'] + self.assertRaises(exception.VolumeDriverException, + self.fc_driver.initialize_connection, + volume1_fc, + connector) + + # host_site is None. + volume2_fc = self._create_volume() + vol_type_fc_2 = volume_types.create(self.ctxt, 'FC2', None) + volume2_fc['volume_type_id'] = vol_type_fc_2['id'] + self.fc_driver.initialize_connection(volume2_fc, connector) + + # create new host with host_site + connector2 = {'host': 'STORWIZE-SVC-HOST', + 'wwnns': ['30000090fa17311e', '30000090fa17311f'], + 'wwpns': ['ffff000000000000', 'ffff000000000001'], + 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1bbb'} + # attach hyperswap volume without host_site + volume3_fc = self._create_volume() + extra_spec_3 = {'drivers:volume_topology': 'hyperswap', + 'peer_pool': 'openstack1'} + vol_type_fc_3 = volume_types.create(self.ctxt, 'FC3', extra_spec_3) + volume3_fc['volume_type_id'] = vol_type_fc_3['id'] + with mock.patch.object(storwize_svc_common.StorwizeHelpers, + 'is_volume_hyperswap') as is_volume_hyperswap: + is_volume_hyperswap.return_value = True + self.assertRaises(exception.VolumeDriverException, + self.fc_driver.initialize_connection, + volume3_fc, + connector2) + + # attach hyperswap volume with host_site + volume4_fc = self._create_volume() + extra_spec_4 = {'drivers:volume_topology': 'hyperswap', + 'peer_pool': 'openstack1', + 'host_site': 'site2'} + vol_type_fc_4 = volume_types.create(self.ctxt, 'FC4', extra_spec_4) + volume4_fc['volume_type_id'] = vol_type_fc_4['id'] + self.fc_driver.initialize_connection(volume4_fc, connector2) + @mock.patch.object(storwize_svc_fc.StorwizeSVCFCDriver, '_do_terminate_connection') @mock.patch.object(storwize_svc_fc.StorwizeSVCFCDriver, @@ -3905,10 +4395,13 @@ class StorwizeSVCFcDriverTestCase(test.TestCase): # Test no preferred node if self.USESIM: - self.sim.error_injection('lsvdisk', 'no_pref_node') - self.assertRaises(exception.VolumeBackendAPIException, - self.fc_driver.initialize_connection, - volume1, self._connector) + with mock.patch.object(storwize_svc_common.StorwizeHelpers, + 'is_volume_hyperswap') as hyperswap: + hyperswap.return_value = False + self.sim.error_injection('lsvdisk', 'no_pref_node') + self.assertRaises(exception.VolumeBackendAPIException, + self.fc_driver.initialize_connection, + volume1, self._connector) # Initialize connection from the second volume to the host with no # preferred node set if in simulation mode, otherwise, just @@ -4295,6 +4788,25 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase): vol_type = objects.VolumeType.get_by_id(self.ctxt, type_ref['id']) return vol_type + def _create_hyperswap_type(self, type_name): + spec = {'drivers:volume_topology': 'hyperswap', + 'peer_pool': 'hyperswap2', + 'host_site': 'site1'} + hyper_type = self._create_volume_type(spec, type_name) + return hyper_type + + def _create_hyperswap_volume(self, hyper_type, **kwargs): + pool = 'hyperswap1' + prop = {'host': 'openstack@svc#%s' % pool, + 'size': 1} + prop['volume_type_id'] = hyper_type.id + for p in prop.keys(): + 
if p not in kwargs: + kwargs[p] = prop[p] + vol = testutils.create_volume(self.ctxt, **kwargs) + self.driver.create_volume(vol) + return vol + def _generate_vol_info(self, vol_type=None, size=10): pool = _get_test_pool() prop = {'size': size, @@ -4402,6 +4914,9 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase): 'stretched_cluster': None, 'nofmtdisk': False, 'mirror_pool': None, + 'volume_topology': None, + 'peer_pool': None, + 'host_site': None, 'cycle_period_seconds': 300, } return opt @@ -4411,12 +4926,11 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase): '_get_vdisk_params') def test_storwize_svc_create_volume_with_qos(self, get_vdisk_params, add_vdisk_qos): - vol = testutils.create_volume(self.ctxt) fake_opts = self._get_default_opts() # If the qos is empty, chvdisk should not be called # for create_volume. get_vdisk_params.return_value = fake_opts - self.driver.create_volume(vol) + vol = self._create_volume() self._assert_vol_exists(vol['name'], True) self.assertFalse(add_vdisk_qos.called) self.driver.delete_volume(vol) @@ -5279,7 +5793,14 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase): 'update_vdisk_qos') def test_storwize_svc_retype_need_copy(self, update_vdisk_qos, disable_vdisk_qos): - self.driver.do_setup(None) + with mock.patch.object(storwize_svc_common.StorwizeHelpers, + 'get_system_info') as get_system_info: + fake_system_info = {'code_level': (7, 7, 0, 0), + 'topology': 'standard', + 'system_name': 'storwize-svc-sim', + 'system_id': '0123456789ABCDEF'} + get_system_info.return_value = fake_system_info + self.driver.do_setup(None) loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':openstack') cap = {'location_info': loc, 'extent_size': '128'} @@ -5476,6 +5997,18 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase): self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) + spec = {'drivers:volume_topology': 'hyperswap', + 'peer_pool': 'openstack1'} + vol_type_ref = volume_types.create(self.ctxt, 'hypertype', spec) + group = testutils.create_group( + self.ctxt, name='cggroup', + group_type_id=rccg_type.id, + volume_type_ids=[vol_type_ref['id']]) + + model_update = self.driver.create_group(self.ctxt, group) + self.assertEqual(fields.GroupStatus.ERROR, + model_update['status']) + @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') @@ -5485,7 +6018,7 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase): def test_storwize_delete_group(self, _del_rep_grp, is_grp_a_cg_rep_type, is_grp_a_cg_snapshot_type): is_grp_a_cg_snapshot_type.side_effect = [True, True, False, True] - is_grp_a_cg_rep_type.side_effect = [False, False] + is_grp_a_cg_rep_type.side_effect = [False, False, False, False] type_ref = volume_types.create(self.ctxt, 'testtype', None) group = testutils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, @@ -5514,7 +6047,8 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase): is_grp_a_cg_snapshot_type): """Test group update.""" is_grp_a_cg_snapshot_type.side_effect = [False, True, True, False] - is_grp_a_cg_rep_type.side_effect = [False, False, True, True] + is_grp_a_cg_rep_type.side_effect = [False, False, False, + False, True, True] group = mock.MagicMock() self.assertRaises(NotImplementedError, self.driver.update_group, self.ctxt, group, None, None) @@ -5611,7 +6145,19 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase): self.driver.create_group_from_src, self.ctxt, group, [vol1]) + 
hyper_specs = {'hyperswap_group_enabled': ' True'} + hyper_type_ref = group_types.create(self.ctxt, 'hypergroup', + hyper_specs) group = self._create_group_in_db(volume_type_ids=[type_ref.id], + group_type_id=hyper_type_ref.id) + vol1 = testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, + group_id=group.id) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_group_from_src, + self.ctxt, group, vol1, None, None, + None, None) + + group = self._create_group_in_db(volume_type_id=type_ref.id, group_type_id=cg_type_ref.id) # create volumes in db @@ -6409,6 +6955,7 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase): vol1) # Create volume with cg_snapshot group id will success. vol2 = testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, + host='openstack@svc#openstack', group_id=group2.id) self.driver.create_volume(vol2) @@ -6422,6 +6969,7 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase): # Create cloned volume with cg_snapshot group id will success. vol4 = testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, group_id=group2.id, + host='openstack@svc#openstack', source_volid=vol2.id) self.driver.create_cloned_volume(vol4, vol2) @@ -6437,9 +6985,673 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase): # Create volume from snapshot with cg_snapshot group id will success. vol6 = testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, group_id=group2.id, + host='openstack@svc#openstack', snapshot_id=snapshot.id) self.driver.create_volume_from_snapshot(vol6, snapshot) + @ mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsmdiskgrp') + def test_storwize_svc_select_iogrp_with_pool_site(self, lsmdiskgrp): + opts = {} + state = self.driver._state + lsmdiskgrp.side_effect = [{'site_id': ''}, + {'site_id': '1'}, + {'site_id': '2'}, + {'site_id': '2'}] + state['storage_nodes']['1']['site_id'] = '1' + state['storage_nodes']['1']['IO_group'] = '0' + state['storage_nodes']['2']['site_id'] = '1' + state['storage_nodes']['2']['IO_group'] = '1' + + pool = 'openstack2' + opts['iogrp'] = '0,1' + state['available_iogrps'] = [0, 1, 2, 3] + iog = self.driver._helpers.select_io_group(state, opts, pool) + self.assertEqual(0, iog) + + pool = 'openstack2' + opts['iogrp'] = '0,1' + state['available_iogrps'] = [0, 1, 2, 3] + iog = self.driver._helpers.select_io_group(state, opts, pool) + self.assertEqual(0, iog) + + pool = 'openstack3' + opts['iogrp'] = '0,1' + state['available_iogrps'] = [0, 1, 2, 3] + self.assertRaises(exception.InvalidInput, + self.driver._helpers.select_io_group, + state, opts, pool) + state['storage_nodes']['2']['site_id'] = '2' + + pool = 'openstack2' + opts['iogrp'] = '0,1' + state['available_iogrps'] = [0, 1, 2, 3] + iog = self.driver._helpers.select_io_group(state, opts, pool) + self.assertEqual(1, iog) + + # test hyperswap volume + def test_create_hyperswap_volume(self): + # create hyperswap volume on code_level less than 7.7.0.0 + spec = {'drivers:volume_topology': 'hyperswap', + 'peer_pool': 'openstack1'} + invalid_release_type = self._create_volume_type( + spec, 'invalid_release_type') + vol = self._generate_vol_info(invalid_release_type) + self.assertRaises(exception.InvalidInput, + self.driver.create_volume, vol) + + # create hyperswap on svc topology not 'hyperswap' + with mock.patch.object(storwize_svc_common.StorwizeHelpers, + 'get_system_info') as get_system_info: + fake_system_info = {'code_level': (7, 7, 0, 0), + 'topology': 'standard', + 'system_name': 'storwize-svc-sim', + 'system_id': 
'0123456789ABCDEF'}
+            get_system_info.return_value = fake_system_info
+            self.driver.do_setup(None)
+            spec = {'drivers:volume_topology': 'hyperswap',
+                    'peer_pool': 'openstack1'}
+            invalid_topo_type = self._create_volume_type(
+                spec, 'invalid_topo_type')
+            vol = self._generate_vol_info(invalid_topo_type)
+            self.assertRaises(exception.InvalidInput,
+                              self.driver.create_volume, vol)
+
+        with mock.patch.object(storwize_svc_common.StorwizeHelpers,
+                               'get_system_info') as get_system_info:
+            fake_system_info = {'code_level': (7, 7, 0, 0),
+                                'topology': 'hyperswap',
+                                'system_name': 'storwize-svc-sim',
+                                'system_id': '0123456789ABCDEF'}
+            get_system_info.return_value = fake_system_info
+            self.driver.do_setup(None)
+
+        # create hyperswap volume with invalid pool
+        spec = {'drivers:volume_topology': 'hyperswap',
+                'peer_pool': 'invalid_pool'}
+        invalid_pool_type = self._create_volume_type(spec,
+                                                     'invalid_pool_type')
+        vol = self._generate_vol_info(invalid_pool_type)
+        self.assertRaises(exception.InvalidInput,
+                          self.driver.create_volume, vol)
+
+        # create hyperswap volume with easytier off
+        spec = {'drivers:volume_topology': 'hyperswap',
+                'drivers:easytier': False}
+        easytier_type = self._create_volume_type(spec,
+                                                 'easytier_type')
+        vol = self._generate_vol_info(easytier_type)
+        self.assertRaises(exception.InvalidInput,
+                          self.driver.create_volume, vol)
+
+        # create hyperswap volume without peer_pool
+        spec = {'drivers:volume_topology': 'hyperswap'}
+        no_peerpool_type = self._create_volume_type(spec,
+                                                    'no_peerpool_type')
+        vol = self._generate_vol_info(no_peerpool_type)
+        self.assertRaises(exception.InvalidInput,
+                          self.driver.create_volume, vol)
+
+        # Create hyperswap volume, there is no site_id on peer_pool
+        spec = {'drivers:volume_topology': 'hyperswap',
+                'peer_pool': 'openstack'}
+        same_pool_type = self._create_volume_type(spec,
+                                                  'same_pool_type')
+        vol = self._generate_vol_info(same_pool_type)
+        self.assertRaises(exception.InvalidInput,
+                          self.driver.create_volume, vol)
+
+        # Create hyperswap volume, pool and peer pool are on the same site
+        spec = {'drivers:volume_topology': 'hyperswap',
+                'peer_pool': 'hyperswap1'}
+        same_site_type = self._create_volume_type(spec,
+                                                  'same_site_type')
+        vol = testutils.create_volume(self.ctxt,
+                                      host='openstack@svc#hyperswap1',
+                                      volume_type_id=same_site_type.id)
+        self.assertRaises(exception.InvalidInput,
+                          self.driver.create_volume, vol)
+
+        # create hyperswap volume with stretch cluster
+        spec = {'drivers:volume_topology': 'hyperswap',
+                'peer_pool': 'openstack1',
+                'mirror_pool': 'openstack1'}
+        invalid_vol_type = self._create_volume_type(spec,
+                                                    'invalid_hyperswap_type')
+        vol = self._generate_vol_info(invalid_vol_type)
+        self.assertRaises(exception.InvalidInput,
+                          self.driver.create_volume, vol)
+
+        # create hyperswap volume with replication
+        spec = {'drivers:volume_topology': 'hyperswap',
+                'peer_pool': 'openstack1',
+                'replication_enabled': ' True',
+                'replication_type': ' metro'}
+        invalid_vol_type = self._create_volume_type(spec,
+                                                    'invalid_hyperswap_type_2')
+        vol = self._generate_vol_info(invalid_vol_type)
+        self.assertRaises(exception.InvalidInput,
+                          self.driver.create_volume, vol)
+
+        hyper_type = self._create_hyperswap_type('test_hyperswap_type')
+        vol = self._create_hyperswap_volume(hyper_type)
+        self._assert_vol_exists(vol.name, True)
+        self._assert_vol_exists('site2' + vol.name, True)
+        self._assert_vol_exists('fcsite1' + vol.name, True)
+        self._assert_vol_exists('fcsite2' + vol.name, True)
+        is_volume_hyperswap = 
self.driver._helpers.is_volume_hyperswap( + vol.name) + self.assertEqual(is_volume_hyperswap, True) + self.driver.delete_volume(vol) + self._assert_vol_exists(vol.name, False) + self._assert_vol_exists('site2' + vol.name, False) + self._assert_vol_exists('fcsite1' + vol.name, False) + self._assert_vol_exists('fcsite2' + vol.name, False) + + def test_create_snapshot_to_hyperswap_volume(self): + with mock.patch.object(storwize_svc_common.StorwizeHelpers, + 'get_system_info') as get_system_info: + fake_system_info = {'code_level': (7, 7, 0, 0), + 'topology': 'hyperswap', + 'system_name': 'storwize-svc-sim', + 'system_id': '0123456789ABCDEF'} + get_system_info.return_value = fake_system_info + self.driver.do_setup(None) + + hyper_type = self._create_hyperswap_type('test_hyperswap_type') + vol = self._create_hyperswap_volume(hyper_type) + self._assert_vol_exists(vol.name, True) + + snap = testutils.create_snapshot(self.ctxt, vol.id) + self.assertRaises(exception.VolumeDriverException, + self.driver.create_snapshot, snap) + + self.driver.delete_volume(vol) + self._assert_vol_exists(vol.name, False) + + def test_create_cloned_hyperswap_volume(self): + with mock.patch.object(storwize_svc_common.StorwizeHelpers, + 'get_system_info') as get_system_info: + fake_system_info = {'code_level': (7, 7, 0, 0), + 'topology': 'hyperswap', + 'system_name': 'storwize-svc-sim', + 'system_id': '0123456789ABCDEF'} + get_system_info.return_value = fake_system_info + self.driver.do_setup(None) + + hyper_type = self._create_hyperswap_type('test_hyperswap_type') + vol = self._create_hyperswap_volume(hyper_type) + self._assert_vol_exists(vol.name, True) + + vol2 = testutils.create_volume(self.ctxt, + host = 'openstack@svc#hyperswap1', + volume_type_id = vol.volume_type_id) + with mock.patch.object(storwize_svc_common.StorwizeHelpers, + 'get_vdisk_attributes') as vdisk_attr: + vdisk_attr.return_value = None + self.assertRaises(exception.VolumeDriverException, + self.driver.create_cloned_volume, + vol2, vol) + self.driver.create_cloned_volume(vol2, vol) + self._assert_vol_exists(vol2.name, True) + self._assert_vol_exists('site2' + vol2.name, True) + self._assert_vol_exists('fcsite1' + vol2.name, True) + self._assert_vol_exists('fcsite2' + vol2.name, True) + is_volume_hyperswap = self.driver._helpers.is_volume_hyperswap( + vol2.name) + self.assertEqual(is_volume_hyperswap, True) + + self.driver.delete_volume(vol) + self._assert_vol_exists(vol.name, False) + self.driver.delete_volume(vol2) + self._assert_vol_exists(vol2.name, False) + + def test_extend_hyperswap_volume(self): + with mock.patch.object(storwize_svc_common.StorwizeHelpers, + 'get_system_info') as get_system_info: + fake_system_info = {'code_level': (7, 7, 0, 0), + 'topology': 'hyperswap', + 'system_name': 'storwize-svc-sim', + 'system_id': '0123456789ABCDEF'} + get_system_info.return_value = fake_system_info + self.driver.do_setup(None) + + hyper_type = self._create_hyperswap_type('test_hyperswap_type') + vol = self._create_hyperswap_volume(hyper_type) + self._assert_vol_exists(vol.name, True) + self.assertRaises(exception.InvalidInput, + self.driver.extend_volume, vol, '16') + + def test_migrate_hyperswap_volume(self): + with mock.patch.object(storwize_svc_common.StorwizeHelpers, + 'get_system_info') as get_system_info: + fake_system_info = {'code_level': (7, 7, 0, 0), + 'topology': 'hyperswap', + 'system_name': 'storwize-svc-sim', + 'system_id': '0123456789ABCDEF'} + get_system_info.return_value = fake_system_info + self.driver.do_setup(None) + + hyper_type 
= self._create_hyperswap_type('test_hyperswap_type') + vol = self._create_hyperswap_volume(hyper_type) + self._assert_vol_exists(vol.name, True) + + loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + + ':openstack2') + cap = {'location_info': loc, 'extent_size': '256'} + host = {'host': 'openstack@svc#openstack2', 'capabilities': cap} + ctxt = context.get_admin_context() + self.assertRaises(exception.InvalidInput, + self.driver.migrate_volume, ctxt, vol, host) + self._delete_volume(vol) + + def test_manage_existing_hyperswap_volume(self): + with mock.patch.object(storwize_svc_common.StorwizeHelpers, + 'get_system_info') as get_system_info: + fake_system_info = {'code_level': (7, 7, 0, 0), + 'topology': 'hyperswap', + 'system_name': 'storwize-svc-sim', + 'system_id': '0123456789ABCDEF'} + get_system_info.return_value = fake_system_info + self.driver.do_setup(None) + + hyperswap_vol_type = self._create_hyperswap_type('test_hyperswap_type') + hyper_volume = self._create_hyperswap_volume(hyperswap_vol_type) + self._assert_vol_exists(hyper_volume.name, True) + + spec1 = {} + non_hyper_type = self._create_volume_type(spec1, 'non_hyper_type') + non_hyper_volume = self._create_volume() + + # test volume is hyperswap volume but volume type is non-hyper type + new_volume = self._generate_vol_info() + + ref = {'source-name': hyper_volume['name']} + new_volume['volume_type_id'] = non_hyper_type['id'] + new_volume['volume_type'] = non_hyper_type + self.assertRaises(exception.ManageExistingVolumeTypeMismatch, + self.driver.manage_existing, new_volume, ref) + + # test volume is non hyperswap volume but volum type is hyper type + ref = {'source-name': non_hyper_volume['name']} + new_volume['volume_type_id'] = hyperswap_vol_type['id'] + new_volume['volume_type'] = hyperswap_vol_type + self.assertRaises(exception.ManageExistingVolumeTypeMismatch, + self.driver.manage_existing, new_volume, ref) + + # Test hyperswap volume peer_pool and backend peer_pool does not match + new_volume = testutils.create_volume(self.ctxt, + host='openstack@svc#hyperswap1') + spec = {'drivers:volume_topology': 'hyperswap', + 'peer_pool': 'hyperswap1'} + hyper_type_2 = self._create_volume_type(spec, 'hyper_type_2') + ref = {'source-name': hyper_volume['name']} + new_volume['volume_type_id'] = hyper_type_2['id'] + new_volume['volume_type'] = hyper_type_2 + self.assertRaises(exception.ManageExistingVolumeTypeMismatch, + self.driver.manage_existing, new_volume, ref) + + # test volume type match + uid_of_master = self._get_vdisk_uid(hyper_volume.name) + + new_volume = testutils.create_volume(self.ctxt, + host='openstack@svc#hyperswap1') + ref = {'source-name': hyper_volume['name']} + new_volume['volume_type_id'] = hyperswap_vol_type['id'] + new_volume['volume_type'] = hyperswap_vol_type + self.driver.manage_existing(new_volume, ref) + + # Check the uid of the volume which has been renamed. 
+ uid_of_master_volume = self._get_vdisk_uid(new_volume['name']) + self.assertEqual(uid_of_master, uid_of_master_volume) + + self.driver.delete_volume(hyper_volume) + + def test_retype_hyperswap_volume(self): + with mock.patch.object(storwize_svc_common.StorwizeHelpers, + 'get_system_info') as get_system_info: + fake_system_info = {'code_level': (7, 7, 0, 0), + 'topology': 'hyperswap', + 'system_name': 'storwize-svc-sim', + 'system_id': '0123456789ABCDEF'} + get_system_info.return_value = fake_system_info + self.driver.do_setup(None) + + hyperswap_vol_type = self._create_hyperswap_type('test_hyperswap_type') + + spec1 = {'drivers:iogrp': '0,1'} + non_hyper_type = self._create_volume_type(spec1, 'non_hyper_type') + + volume = testutils.create_volume(self.ctxt, + volume_type_id=non_hyper_type.id, + host='openstack@svc#hyperswap1') + self.driver.create_volume(volume) + host = {'host': 'openstack@svc#hyperswap1'} + + # Retype from non hyperswap volume type to + # hyperswap volume type without peer_pool + spec = {'drivers:volume_topology': 'hyperswap'} + hyper_type_no_peer = self._create_volume_type(spec, + 'hypertypenopeer') + diff, _equal = volume_types.volume_types_diff( + self.ctxt, non_hyper_type['id'], hyper_type_no_peer['id']) + self.assertRaises(exception.InvalidInput, self.driver.retype, + self.ctxt, volume, hyper_type_no_peer, diff, host) + + spec = {'drivers:volume_topology': 'hyperswap', + 'drivers:easytier': False} + easytier_type = self._create_volume_type(spec, + 'easytier_type') + diff, _equal = volume_types.volume_types_diff( + self.ctxt, non_hyper_type['id'], easytier_type['id']) + self.assertRaises(exception.InvalidInput, self.driver.retype, + self.ctxt, volume, easytier_type, diff, host) + + # retype from normal volume with snapshot to hyperswap volume + snap = testutils.create_snapshot(self.ctxt, volume.id) + self.driver.create_snapshot(snap) + diff, _equal = volume_types.volume_types_diff( + self.ctxt, non_hyper_type['id'], hyperswap_vol_type['id']) + self.assertRaises(exception.InvalidInput, self.driver.retype, + self.ctxt, volume, hyperswap_vol_type, + diff, host) + self.driver.delete_snapshot(snap) + + # Retype from non-hyperswap volume to hyperswap volume + diff, _equal = volume_types.volume_types_diff( + self.ctxt, non_hyper_type['id'], hyperswap_vol_type['id']) + self.driver.retype( + self.ctxt, volume, hyperswap_vol_type, diff, host) + volume['volume_type_id'] = hyperswap_vol_type['id'] + volume['volume_type'] = hyperswap_vol_type + self._assert_vol_exists(volume.name, True) + self._assert_vol_exists('site2' + volume.name, True) + self._assert_vol_exists('fcsite1' + volume.name, True) + self._assert_vol_exists('fcsite2' + volume.name, True) + is_volume_hyperswap = self.driver._helpers.is_volume_hyperswap( + volume.name) + self.assertEqual(is_volume_hyperswap, True) + + # Retype from hyperswap volume to non hyperswap volume---move site2 + diff, _equal = volume_types.volume_types_diff( + self.ctxt, hyperswap_vol_type['id'], non_hyper_type['id']) + self.driver.retype( + self.ctxt, volume, non_hyper_type, diff, host) + volume['volume_type_id'] = non_hyper_type['id'] + volume['volume_type'] = non_hyper_type + self.driver.delete_volume(volume) + + # Retype from hyperswap volume to non hyperswap volume---move site1 + host2 = {'host': 'openstack@svc#hyperswap2'} + volume = self._create_hyperswap_volume(hyperswap_vol_type) + diff, _equal = volume_types.volume_types_diff( + self.ctxt, hyperswap_vol_type['id'], non_hyper_type['id']) + self.driver.retype( + self.ctxt, volume, 
non_hyper_type, diff, host2) + volume['volume_type_id'] = non_hyper_type['id'] + volume['volume_type'] = non_hyper_type + self.driver.delete_volume(volume) + + # Retype a hyperswap volume to hyperswap volume with keys change + spec = {'drivers:volume_topology': 'hyperswap', + 'peer_pool': 'hyperswap2', + 'drivers:warning': '50'} + warning_type = self._create_volume_type(spec, + 'warning_type') + volume = self._create_hyperswap_volume(hyperswap_vol_type) + diff, _equal = volume_types.volume_types_diff( + self.ctxt, hyperswap_vol_type['id'], warning_type['id']) + self.driver.retype(self.ctxt, volume, warning_type, diff, host) + + def test_retype_hyperswap_volume_failure_case(self): + with mock.patch.object(storwize_svc_common.StorwizeHelpers, + 'get_system_info') as get_system_info: + fake_system_info = {'code_level': (7, 7, 0, 0), + 'topology': 'hyperswap', + 'system_name': 'storwize-svc-sim', + 'system_id': '0123456789ABCDEF'} + get_system_info.return_value = fake_system_info + self.driver.do_setup(None) + + hyperswap_vol_type = self._create_hyperswap_type('test_hyperswap_type') + host = {'host': 'openstack@svc#hyperswap1'} + # Retype a hyperswap volume to hyperswap volume with peer_pool changes + spec = {'drivers:volume_topology': 'hyperswap'} + peer_type = self._create_volume_type(spec, + 'peer_type') + volume = self._create_hyperswap_volume(hyperswap_vol_type) + self._assert_vol_exists(volume.name, True) + diff, _equal = volume_types.volume_types_diff( + self.ctxt, hyperswap_vol_type['id'], peer_type['id']) + self.assertRaises(exception.InvalidInput, + self.driver.retype, + self.ctxt, volume, peer_type, diff, + host) + + # Retype a hyperswap volume to hyperswap volume with iogrp changes + spec = {'drivers:volume_topology': 'hyperswap', + 'drivers:iogrp': '1'} + hyperswap_vol_type_2 = self._create_volume_type(spec, + 'hyperswap_type_2') + with mock.patch.object(storwize_svc_common.StorwizeHelpers, + 'select_io_group') as select_io_group: + select_io_group.return_value = {1} + diff, _equal = volume_types.volume_types_diff( + self.ctxt, hyperswap_vol_type['id'], + hyperswap_vol_type_2['id']) + + self.assertRaises(exception.InvalidInput, + self.driver.retype, + self.ctxt, volume, hyperswap_vol_type_2, diff, + host) + + host2 = {'host': 'openstack@svc#hyperswap2'} + # Retype a hyperswap volume to hyperswap volume with pool change + spec = {'drivers:volume_topology': 'hyperswap', + 'drivers:iogrp': '0,1'} + hyperswap_type_3 = self._create_volume_type(spec, + 'hyperswap_type_3') + diff, _equal = volume_types.volume_types_diff( + self.ctxt, hyperswap_vol_type['id'], hyperswap_type_3['id']) + self.assertRaises(exception.InvalidInput, self.driver.retype, + self.ctxt, volume, hyperswap_type_3, diff, host2) + + # Retype a hyperswap volume in-use + inuse_type = self._create_hyperswap_type('in-use_type') + volume.previous_status = 'in-use' + diff, _equal = volume_types.volume_types_diff( + self.ctxt, hyperswap_vol_type['id'], + inuse_type['id']) + self.assertRaises(exception.InvalidInput, + self.driver.retype, + self.ctxt, volume, inuse_type, diff, + host) + + # retype from hyperswap volume to replication volume + spec3 = {'replication_enabled': ' True', + 'replication_type': ' metro'} + replication_type = self._create_volume_type(spec3, + 'test_replication_type') + diff, _equal = volume_types.volume_types_diff( + self.ctxt, hyperswap_vol_type['id'], replication_type['id']) + self.assertRaises(exception.InvalidInput, self.driver.retype, + self.ctxt, volume, replication_type, diff, host) + + # retype 
from hyperswap volume to stretched cluster volume
+        spec4 = {'mirror_pool': 'openstack1'}
+        mirror_type = self._create_volume_type(spec4,
+                                               'test_mirror_type')
+        diff, _equal = volume_types.volume_types_diff(
+            self.ctxt, hyperswap_vol_type['id'], mirror_type['id'])
+        self.assertRaises(exception.InvalidInput, self.driver.retype,
+                          self.ctxt, volume, mirror_type, diff, host)
+
+        # retype from stretched cluster volume to hyperswap volume
+        host3 = {'host': 'openstack@svc#openstack'}
+        mirror_volume = self._create_volume(volume_type_id=mirror_type.id)
+        diff, _equal = volume_types.volume_types_diff(
+            self.ctxt, mirror_type['id'], hyperswap_vol_type['id'])
+        self.assertRaises(exception.InvalidInput, self.driver.retype,
+                          self.ctxt, mirror_volume, hyperswap_vol_type, diff,
+                          host3)
+
+    @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type')
+    def test_storwize_hyperswap_group_create(self, is_grp_a_cg_snapshot_type):
+        """Test group create."""
+        is_grp_a_cg_snapshot_type.side_effect = [False, False, False, False]
+        with mock.patch.object(storwize_svc_common.StorwizeHelpers,
+                               'get_system_info') as get_system_info:
+            fake_system_info = {'code_level': (7, 7, 0, 0),
+                                'topology': 'hyperswap',
+                                'system_name': 'storwize-svc-sim',
+                                'system_id': '0123456789ABCDEF'}
+            get_system_info.return_value = fake_system_info
+            self.driver.do_setup(None)
+
+        vol_type_ref = volume_types.create(self.ctxt, 'nonhypertype', None)
+        group_specs = {'hyperswap_group_enabled': ' True'}
+        group_type_ref = group_types.create(self.ctxt, 'testgroup',
+                                            group_specs)
+        group = testutils.create_group(self.ctxt,
+                                       group_type_id=group_type_ref['id'],
+                                       volume_type_ids=[vol_type_ref['id']])
+
+        # create hyperswap group with nonhyper volume type
+        model_update = self.driver.create_group(self.ctxt, group)
+        self.assertEqual(fields.GroupStatus.ERROR,
+                         model_update['status'])
+
+        # create hyperswap group with hyper volume type.
+        spec = {'drivers:volume_topology': 'hyperswap',
+                'peer_pool': 'openstack1'}
+        vol_type_ref = volume_types.create(self.ctxt, 'hypertype', spec)
+        hyper_group = testutils.create_group(
+            self.ctxt, name='hypergroup',
+            group_type_id=group_type_ref['id'],
+            volume_type_ids=[vol_type_ref['id']])
+
+        model_update = self.driver.create_group(self.ctxt, hyper_group)
+        self.assertEqual(fields.GroupStatus.AVAILABLE,
+                         model_update['status'])
+
+    @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type')
+    def test_storwize_hyperswap_group_delete(self, is_grp_a_cg_snapshot_type):
+        """Test group delete."""
+        is_grp_a_cg_snapshot_type.side_effect = [False, False, False]
+
+        with mock.patch.object(storwize_svc_common.StorwizeHelpers,
+                               'get_system_info') as get_system_info:
+            fake_system_info = {'code_level': (7, 7, 0, 0),
+                                'topology': 'hyperswap',
+                                'system_name': 'storwize-svc-sim',
+                                'system_id': '0123456789ABCDEF'}
+            get_system_info.return_value = fake_system_info
+            self.driver.do_setup(None)
+
+        group_specs = {'hyperswap_group_enabled': ' True'}
+        group_type_ref = group_types.create(self.ctxt, 'testgroup',
+                                            group_specs)
+
+        # create hyperswap group with hyper volume type.
+        vol_type_ref = self._create_hyperswap_type(
+            'hyper_type')
+        hyper_group = testutils.create_group(
+            self.ctxt, name='hypergroup',
+            group_type_id=group_type_ref['id'],
+            volume_type_ids=[vol_type_ref['id']])
+
+        model_update = self.driver.create_group(self.ctxt, hyper_group)
+        self.assertEqual(fields.GroupStatus.AVAILABLE,
+                         model_update['status'])
+
+        vol1 = self._create_hyperswap_volume(vol_type_ref)
+        vol2 = self._create_hyperswap_volume(vol_type_ref)
+        ctxt = context.get_admin_context()
+        self.db.volume_update(ctxt, vol1['id'], {'group_id': hyper_group.id})
+        self.db.volume_update(ctxt, vol2['id'], {'group_id': hyper_group.id})
+        volumes = self.db.volume_get_all_by_generic_group(
+            self.ctxt.elevated(), hyper_group.id)
+
+        model_update = self.driver.delete_group(self.ctxt, hyper_group,
+                                                volumes)
+        self.assertEqual(fields.GroupStatus.DELETED,
+                         model_update[0]['status'])
+        for volume in model_update[1]:
+            self.assertEqual('deleted', volume['status'])
+
+    @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type')
+    def test_storwize_hyperswap_group_update(self, is_grp_a_cg_snapshot_type):
+        """Test group update."""
+        is_grp_a_cg_snapshot_type.side_effect = [False, False, False,
+                                                 False, False]
+        with mock.patch.object(storwize_svc_common.StorwizeHelpers,
+                               'get_system_info') as get_system_info:
+            fake_system_info = {'code_level': (7, 7, 0, 0),
+                                'topology': 'hyperswap',
+                                'system_name': 'storwize-svc-sim',
+                                'system_id': '0123456789ABCDEF'}
+            get_system_info.return_value = fake_system_info
+            self.driver.do_setup(None)
+
+        group_specs = {'hyperswap_group_enabled': ' True'}
+        group_type_ref = group_types.create(self.ctxt, 'testgroup',
+                                            group_specs)
+
+        # create hyperswap group with hyper volume type.
+        volume_type_ref = self._create_hyperswap_type(
+            'hyper_type')
+        hyper_group = testutils.create_group(
+            self.ctxt, name='hypergroup',
+            group_type_id=group_type_ref['id'],
+            volume_type_ids=[volume_type_ref['id']])
+
+        model_update = self.driver.create_group(self.ctxt, hyper_group)
+        self.assertEqual(fields.GroupStatus.AVAILABLE,
+                         model_update['status'])
+
+        vol1 = self._create_hyperswap_volume(volume_type_ref)
+        vol2 = self._create_hyperswap_volume(volume_type_ref)
+        ctxt = context.get_admin_context()
+        self.db.volume_update(ctxt, vol1['id'], {'group_id': hyper_group.id})
+        self.db.volume_update(ctxt, vol2['id'], {'group_id': hyper_group.id})
+        add_volumes = [vol1, vol2]
+        del_volumes = []
+
+        # add hyperswap volume
+        (model_update, add_volumes_update,
+         remove_volumes_update) = self.driver.update_group(self.ctxt,
+                                                           hyper_group,
+                                                           add_volumes,
+                                                           del_volumes)
+        self.assertEqual(fields.GroupStatus.AVAILABLE,
+                         model_update['status'])
+        self.assertIsNone(add_volumes_update)
+        self.assertIsNone(remove_volumes_update)
+
+        # del hyperswap volume from volume group
+        add_volumes = []
+        del_volumes = [vol1, vol2]
+        (model_update, add_volumes_update,
+         remove_volumes_update) = self.driver.update_group(self.ctxt,
+                                                           hyper_group,
+                                                           add_volumes,
+                                                           del_volumes)
+        self.assertEqual(fields.GroupStatus.AVAILABLE,
+                         model_update['status'])
+        self.assertIsNone(add_volumes_update)
+        self.assertIsNone(remove_volumes_update)
+
+        # add non-hyper volume
+        non_type_ref = volume_types.create(self.ctxt, 'nonhypertype', None)
+        add_vol3 = self._create_volume(volume_type_id=non_type_ref['id'])
+        (model_update, add_volumes_update,
+         remove_volumes_update) = self.driver.update_group(self.ctxt,
+                                                           hyper_group,
+                                                           [add_vol3], [])
+        self.assertEqual(fields.GroupStatus.ERROR,
+                         model_update['status'])
+
self.assertIsNone(add_volumes_update) + self.assertIsNone(remove_volumes_update) + class CLIResponseTestCase(test.TestCase): def test_empty(self): @@ -6573,14 +7785,16 @@ class StorwizeHelpersTestCase(test.TestCase): for i in range(7): self.assertTrue(self.storwize_svc_common.replication_licensed()) + @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsmdiskgrp') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_vdisk_count_by_io_group') - def test_select_io_group(self, get_vdisk_count_by_io_group): + def test_select_io_group(self, get_vdisk_count_by_io_group, lsmdiskgrp): # given io groups opts = {} # system io groups state = {} + lsmdiskgrp.return_value = {} fake_iog_vdc1 = {0: 100, 1: 50, 2: 50, 3: 300} fake_iog_vdc2 = {0: 2, 1: 1, 2: 200} fake_iog_vdc3 = {0: 2, 2: 200} @@ -6592,31 +7806,32 @@ class StorwizeHelpersTestCase(test.TestCase): fake_iog_vdc3, fake_iog_vdc4, fake_iog_vdc5] + pool = _get_test_pool(False) opts['iogrp'] = '0,2' state['available_iogrps'] = [0, 1, 2, 3] - iog = self.storwize_svc_common.select_io_group(state, opts) + iog = self.storwize_svc_common.select_io_group(state, opts, pool) self.assertTrue(iog in state['available_iogrps']) self.assertEqual(2, iog) opts['iogrp'] = '0' state['available_iogrps'] = [0, 1, 2] - iog = self.storwize_svc_common.select_io_group(state, opts) + iog = self.storwize_svc_common.select_io_group(state, opts, pool) self.assertTrue(iog in state['available_iogrps']) self.assertEqual(0, iog) opts['iogrp'] = '1,2' state['available_iogrps'] = [0, 2] - iog = self.storwize_svc_common.select_io_group(state, opts) + iog = self.storwize_svc_common.select_io_group(state, opts, pool) self.assertTrue(iog in state['available_iogrps']) self.assertEqual(2, iog) opts['iogrp'] = ' 0, 1, 2 ' state['available_iogrps'] = [0, 1, 2, 3] - iog = self.storwize_svc_common.select_io_group(state, opts) + iog = self.storwize_svc_common.select_io_group(state, opts, pool) self.assertTrue(iog in state['available_iogrps']) # since vdisk count in all iogroups is same, it will pick the first self.assertEqual(0, iog) @@ -6624,7 +7839,7 @@ class StorwizeHelpersTestCase(test.TestCase): opts['iogrp'] = '0,1,2, 3' state['available_iogrps'] = [0, 1, 2, 3] - iog = self.storwize_svc_common.select_io_group(state, opts) + iog = self.storwize_svc_common.select_io_group(state, opts, pool) self.assertTrue(iog in state['available_iogrps']) self.assertEqual(1, iog) @@ -7650,11 +8865,22 @@ class StorwizeSVCReplicationTestCase(test.TestCase): self.driver.delete_volume(gmcv_volume) self._validate_replic_vol_deletion(gmcv_volume, True) + volume, model_update = self._create_test_volume(self.mm_type) + self.assertEqual(fields.ReplicationStatus.ENABLED, + model_update['replication_status']) + self._validate_replic_vol_creation(volume) + non_replica_vol, model_update = self._create_test_volume( self.non_replica_type) self.assertEqual(fields.ReplicationStatus.NOT_CAPABLE, model_update['replication_status']) + gmcv_volume, model_update = self._create_test_volume( + self.gmcv_with_cps600_type) + self.assertEqual(fields.ReplicationStatus.ENABLED, + model_update['replication_status']) + self._validate_replic_vol_creation(gmcv_volume, True) + volumes = [volume, non_replica_vol, gmcv_volume] # Delete volume in failover state self.driver.failover_host( diff --git a/cinder/volume/drivers/ibm/storwize_svc/storwize_const.py b/cinder/volume/drivers/ibm/storwize_svc/storwize_const.py index 8216c12ea99..e891b031a0e 100644 --- a/cinder/volume/drivers/ibm/storwize_svc/storwize_const.py +++ 
b/cinder/volume/drivers/ibm/storwize_svc/storwize_const.py @@ -47,6 +47,7 @@ REPLICA_AUX_VOL_PREFIX = 'aux_' REPLICA_CHG_VOL_PREFIX = 'chg_' RCCG_PREFIX = 'rccg-' +HYPERCG_PREFIX = 'hycg-' # remote mirror copy status REP_CONSIS_SYNC = 'consistent_synchronized' diff --git a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py index a1b10595799..95ce33fdfa4 100644 --- a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py +++ b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py @@ -125,6 +125,13 @@ storwize_svc_opts = [ default=None, help='Specifies the name of the pool in which mirrored copy ' 'is stored. Example: "pool2"'), + cfg.StrOpt('storwize_peer_pool', + default=None, + help='Specifies the name of the peer pool for hyperswap ' + 'volume, the peer pool must exist on the other site.'), + cfg.StrOpt('storwize_preferred_host_site', + default=None, + help='Specifies the preferred host site name.'), cfg.IntOpt('cycle_period_seconds', default=300, min=60, max=86400, @@ -237,9 +244,11 @@ class StorwizeSSH(object): port.append(port_name) return port - def mkhost(self, host_name, port_type, port_name): + def mkhost(self, host_name, port_type, port_name, site=None): port = self._create_port_arg(port_type, port_name) ssh_cmd = ['svctask', 'mkhost', '-force'] + port + if site: + ssh_cmd += ['-site', '"%s"' % site] ssh_cmd += ['-name', '"%s"' % host_name] return self.run_ssh_check_created(ssh_cmd) @@ -261,6 +270,10 @@ class StorwizeSSH(object): log_cmd = 'svctask chhost -chapsecret *** %s' % host self.run_ssh_assert_no_output(ssh_cmd, log_cmd) + def chhost(self, host, site): + ssh_cmd = ['svctask', 'chhost', '-site', '"%s"' % site, '"%s"' % host] + self.run_ssh_assert_no_output(ssh_cmd) + def lsiscsiauth(self): ssh_cmd = ['svcinfo', 'lsiscsiauth', '-delim', '!'] return self.run_ssh_info(ssh_cmd, with_header=True) @@ -703,6 +716,29 @@ class StorwizeSSH(object): copy_id, '-vdisk', vdisk] self.run_ssh_assert_no_output(ssh_cmd) + def mkvolume(self, name, size, units, pool, params): + ssh_cmd = ['svctask', 'mkvolume', '-name', name, '-pool', + '"%s"' % pool, '-size', size, '-unit', units] + params + return self.run_ssh_check_created(ssh_cmd) + + def rmvolume(self, volume, force=True): + ssh_cmd = ['svctask', 'rmvolume'] + if force: + ssh_cmd += ['-removehostmappings', '-removefcmaps', + '-removercrelationships'] + ssh_cmd += ['"%s"' % volume] + self.run_ssh_assert_no_output(ssh_cmd) + + def addvolumecopy(self, name, pool, params): + ssh_cmd = ['svctask', 'addvolumecopy', '-pool', + '"%s"' % pool] + params + ['"%s"' % name] + self.run_ssh_assert_no_output(ssh_cmd) + + def rmvolumecopy(self, name, pool): + ssh_cmd = ['svctask', 'rmvolumecopy', '-pool', + '"%s"' % pool, '"%s"' % name] + self.run_ssh_assert_no_output(ssh_cmd) + class StorwizeHelpers(object): @@ -770,6 +806,7 @@ class StorwizeHelpers(object): raise exception.VolumeBackendAPIException(data=msg) code_level = match_obj.group().split('.') return {'code_level': tuple([int(x) for x in code_level]), + 'topology': resp['topology'], 'system_name': resp['name'], 'system_id': resp['id']} @@ -815,7 +852,7 @@ class StorwizeHelpers(object): raise exception.VolumeBackendAPIException(data=msg) return res - def select_io_group(self, state, opts): + def select_io_group(self, state, opts, pool): selected_iog = 0 iog_list = StorwizeHelpers._get_valid_requested_io_groups(state, opts) if len(iog_list) == 0: @@ -824,6 +861,25 @@ class StorwizeHelpers(object): 'I/O groups 
are %(avail)s.') % {'iogrp': opts['iogrp'], 'avail': state['available_iogrps']}) + + site_iogrp = [] + pool_data = self.get_pool_attrs(pool) + if 'site_id' in pool_data and pool_data['site_id']: + for node in state['storage_nodes'].values(): + if pool_data['site_id'] == node['site_id']: + site_iogrp.append(node['IO_group']) + site_iogrp = list(map(int, site_iogrp)) + iog_list = list(set(site_iogrp).intersection(iog_list)) + if len(iog_list) == 0: + raise exception.InvalidInput( + reason=_('The storage system topology is hyperswap or ' + 'stretched, The site_id of pool %(pool)s is ' + '%(site_id)s, the available I/O groups on this ' + 'site is %(site_iogrp)s, but the given I/O' + ' group(s) is %(iogrp)s.') + % {'pool': pool, 'site_id': pool_data['site_id'], + 'site_iogrp': site_iogrp, 'iogrp': opts['iogrp']}) + iog_vdc = self.get_vdisk_count_by_io_group() LOG.debug("IO group current balance %s", iog_vdc) min_vdisk_count = iog_vdc[iog_list[0]] @@ -864,6 +920,8 @@ class StorwizeHelpers(object): node['ipv6'] = [] node['enabled_protocols'] = [] nodes[node['id']] = node + node['site_id'] = (node_data['site_id'] + if 'site_id' in node_data else None) except KeyError: self.handle_keyerror('lsnode', node_data) return nodes @@ -1037,7 +1095,7 @@ class StorwizeHelpers(object): LOG.debug('Leave: get_host_from_connector: host %s.', host_name) return host_name - def create_host(self, connector, iscsi=False): + def create_host(self, connector, iscsi=False, site=None): """Create a new host on the storage system. We create a host name and associate it with the given connection @@ -1091,7 +1149,8 @@ class StorwizeHelpers(object): # Create a host with one port port = ports.pop(0) - self.ssh.mkhost(host_name, port[0], port[1]) + # Host site_id is necessary for hyperswap volume. + self.ssh.mkhost(host_name, port[0], port[1], site) # Add any additional ports to the host for port in ports: @@ -1101,6 +1160,9 @@ class StorwizeHelpers(object): {'host': connector['host'], 'host_name': host_name}) return host_name + def update_host(self, host_name, site_name): + self.ssh.chhost(host_name, site=site_name) + def delete_host(self, host_name): self.ssh.rmhost(host_name) @@ -1184,6 +1246,9 @@ class StorwizeHelpers(object): 'replication': False, 'nofmtdisk': config.storwize_svc_vol_nofmtdisk, 'mirror_pool': config.storwize_svc_mirror_pool, + 'volume_topology': None, + 'peer_pool': config.storwize_peer_pool, + 'host_site': config.storwize_preferred_host_site, 'cycle_period_seconds': config.cycle_period_seconds} return opt @@ -1426,6 +1491,65 @@ class StorwizeHelpers(object): self.ssh.mkvdisk(name, size, units, mdiskgrp, opts, params) LOG.debug('Leave: _create_vdisk: volume %s.', name) + def _get_hyperswap_volume_create_params(self, opts): + # Storwize/svc use cli command mkvolume to create hyperswap volume. + # You must specify -thin with grainsize. + # You must specify either -thin or -compressed with warning. 
+ params = [] + LOG.debug('The I/O groups of a hyperswap volume will be selected by ' + 'storage.') + if opts['rsize'] != -1: + params.extend(['-buffersize', '%s%%' % str(opts['rsize']), + '-warning', + '%s%%' % six.text_type(opts['warning'])]) + if not opts['autoexpand']: + params.append('-noautoexpand') + if opts['compression']: + params.append('-compressed') + else: + params.append('-thin') + params.extend(['-grainsize', six.text_type(opts['grainsize'])]) + return params + + def create_hyperswap_volume(self, vol_name, size, units, pool, opts): + vol_name = '"%s"' % vol_name + params = self._get_hyperswap_volume_create_params(opts) + self.ssh.mkvolume(vol_name, six.text_type(size), units, pool, params) + + def convert_volume_to_hyperswap(self, vol_name, opts, state): + vol_name = '%s' % vol_name + if not self.is_system_topology_hyperswap(state): + reason = _('Convert volume to hyperswap failed, the system is ' + 'below release 7.6.0.0 or it is not hyperswap ' + 'topology.') + raise exception.VolumeDriverException(reason=reason) + else: + attr = self.get_vdisk_attributes(vol_name) + if attr is None: + msg = (_('convert_volume_to_hyperswap: Failed to get ' + 'attributes for volume %s.') % vol_name) + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + pool = attr['mdisk_grp_name'] + self.check_hyperswap_pool(pool, opts['peer_pool']) + hyper_pool = '%s' % opts['peer_pool'] + params = self._get_hyperswap_volume_create_params(opts) + self.ssh.addvolumecopy(vol_name, hyper_pool, params) + + def convert_hyperswap_volume_to_normal(self, vol_name, peer_pool): + vol_name = '%s' % vol_name + hyper_pool = '%s' % peer_pool + self.ssh.rmvolumecopy(vol_name, hyper_pool) + + def delete_hyperswap_volume(self, volume, force): + """Ensures that vdisk is not part of FC mapping and deletes it.""" + if not self.is_vdisk_defined(volume): + LOG.warning('Tried to delete non-existent volume %s.', volume) + return + self.ensure_vdisk_no_fc_mappings(volume, allow_snaps=True, + allow_fctgt = True) + self.ssh.rmvolume(volume, force=force) + def get_vdisk_attributes(self, vdisk): attrs = self.ssh.lsvdisk(vdisk) return attrs @@ -1737,6 +1861,7 @@ class StorwizeHelpers(object): for map_id in mapping_ids: attrs = self._get_flashcopy_mapping_attributes(map_id) # We should ignore GMCV flash copies + # Hyperswap flash copies are also ignored. 
if not attrs or 'yes' == attrs['rc_controlled']:
                 continue
             source = attrs['source_vdisk_name']
@@ -2094,15 +2219,16 @@ class StorwizeHelpers(object):
             self.ssh.chvdisk(vdisk, ['-' + param, value])
 
     def change_vdisk_options(self, vdisk, changes, opts, state):
+        change_value = {'warning': '', 'easytier': '', 'autoexpand': ''}
         if 'warning' in opts:
-            opts['warning'] = '%s%%' % str(opts['warning'])
+            change_value['warning'] = '%s%%' % str(opts['warning'])
         if 'easytier' in opts:
-            opts['easytier'] = 'on' if opts['easytier'] else 'off'
+            change_value['easytier'] = 'on' if opts['easytier'] else 'off'
         if 'autoexpand' in opts:
-            opts['autoexpand'] = 'on' if opts['autoexpand'] else 'off'
+            change_value['autoexpand'] = 'on' if opts['autoexpand'] else 'off'
 
         for key in changes:
-            self.ssh.chvdisk(vdisk, ['-' + key, opts[key]])
+            self.ssh.chvdisk(vdisk, ['-' + key, change_value[key]])
 
     def change_vdisk_iogrp(self, vdisk, state, iogrp):
         if state['code_level'] < (6, 4, 0, 0):
@@ -2150,6 +2276,60 @@ class StorwizeHelpers(object):
     def migratevdisk(self, vdisk, dest_pool, copy_id='0'):
         self.ssh.migratevdisk(vdisk, dest_pool, copy_id)
 
+    def is_system_topology_hyperswap(self, state):
+        """Returns True if the code level is 7.6.0.0 or higher and the
+
+        system topology is hyperswap.
+        """
+        if state['code_level'] < (7, 6, 0, 0):
+            LOG.debug('Hyperswap failure as the storage '
+                      'code_level is %(code_level)s, below '
+                      'the required 7.6.0.0.',
+                      {'code_level': state['code_level']})
+        else:
+            if state['topology'] == 'hyperswap':
+                return True
+            else:
+                LOG.debug('Hyperswap failure as the storage system '
+                          'topology is not hyperswap.')
+        return False
+
+    def check_hyperswap_pool(self, pool, peer_pool):
+        # Check the hyperswap pools.
+        if not peer_pool:
+            raise exception.InvalidInput(
+                reason=_('The peer pool is necessary for a hyperswap '
+                         'volume; please configure the peer pool.'))
+        pool_attr = self.get_pool_attrs(pool)
+        peer_pool_attr = self.get_pool_attrs(peer_pool)
+        if not peer_pool_attr:
+            raise exception.InvalidInput(
+                reason=_('The hyperswap peer pool %s '
+                         'is invalid.') % peer_pool)
+
+        if not pool_attr['site_id'] or not peer_pool_attr['site_id']:
+            raise exception.InvalidInput(
+                reason=_('The site_id of pools is necessary for hyperswap '
+                         'volume, but there is no site_id in the pool or '
+                         'peer pool.'))
+
+        if pool_attr['site_id'] == peer_pool_attr['site_id']:
+            raise exception.InvalidInput(
+                reason=_('The hyperswap volume must be configured in two '
+                         'independent sites; the pool %(pool)s is on the '
+                         'same site as peer_pool %(peer_pool)s.') %
+                {'pool': pool, 'peer_pool': peer_pool})
+
+    def is_volume_hyperswap(self, vol_name):
+        """Returns True if the volume rcrelationship is activeactive."""
+        is_hyper_volume = False
+        vol_attrs = self.get_vdisk_attributes(vol_name)
+        if vol_attrs and vol_attrs['RC_name']:
+            relationship = self.ssh.lsrcrelationship(vol_attrs['RC_name'])
+            if relationship[0]['copy_type'] == 'activeactive':
+                is_hyper_volume = True
+        return is_hyper_volume
+
 
 class CLIResponse(object):
     """Parse SVC CLI output and generate iterable."""
@@ -2628,13 +2808,38 @@ class StorwizeSVCCommonDriver(san.SanDriver,
         rep_type = self._get_volume_replicated_type(ctxt, volume)
 
         pool = utils.extract_host(volume['host'], 'pool')
-        if opts['mirror_pool'] and rep_type:
-            reason = _('Create mirror volume with replication enabled is '
-                       'not supported.')
-            raise exception.InvalidInput(reason=reason)
-        opts['iogrp'] = self._helpers.select_io_group(self._state, opts)
-        self._helpers.create_vdisk(volume['name'], str(volume['size']),
-                                   'gb', pool, opts)
+        model_update = None
+
+        if opts['volume_topology'] == 'hyperswap':
+            LOG.debug('Volume %s to be created is a hyperswap volume.',
+                      volume.name)
+            if not self._helpers.is_system_topology_hyperswap(self._state):
+                reason = _('Create hyperswap volume failed, the system is '
+                           'below release 7.6.0.0 or it is not hyperswap '
+                           'topology.')
+                raise exception.InvalidInput(reason=reason)
+            if opts['mirror_pool'] or rep_type:
+                reason = _('Create hyperswap volume with stretched cluster '
+                           'or replication enabled is not supported.')
+                raise exception.InvalidInput(reason=reason)
+            if not opts['easytier']:
+                raise exception.InvalidInput(
+                    reason=_('The default easytier of a hyperswap volume is '
+                             'on; it does not support easytier off.'))
+            self._helpers.check_hyperswap_pool(pool, opts['peer_pool'])
+            hyperpool = '%s:%s' % (pool, opts['peer_pool'])
+            self._helpers.create_hyperswap_volume(volume.name,
+                                                  volume.size, 'gb',
+                                                  hyperpool, opts)
+        else:
+            if opts['mirror_pool'] and rep_type:
+                reason = _('Create mirror volume with replication enabled is '
+                           'not supported.')
+                raise exception.InvalidInput(reason=reason)
+            opts['iogrp'] = self._helpers.select_io_group(self._state,
+                                                          opts, pool)
+            self._helpers.create_vdisk(volume['name'], str(volume['size']),
+                                       'gb', pool, opts)
 
         if opts['qos']:
             self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
@@ -2657,6 +2862,13 @@ class StorwizeSVCCommonDriver(san.SanDriver,
         LOG.debug('enter: delete_volume: volume %s', volume['name'])
         ctxt = context.get_admin_context()
 
+        hyper_volume = self._helpers.is_volume_hyperswap(volume.name)
+        if hyper_volume:
+            LOG.debug('Volume %s to be deleted is a hyperswap '
+                      'volume.', volume.name)
+            self._helpers.delete_hyperswap_volume(volume.name, False)
+            return
+
         rep_type = self._get_volume_replicated_type(ctxt, volume)
         if rep_type:
             if self._aux_backend_helpers:
@@ -2714,6 +2926,13 @@ class StorwizeSVCCommonDriver(san.SanDriver,
         pool = utils.extract_host(source_vol['host'], 'pool')
         opts = self._get_vdisk_params(source_vol['volume_type_id'])
+
+        if opts['volume_topology'] == 'hyperswap':
+            msg = _('create_snapshot: Creating a snapshot of a '
+                    'hyperswap volume is not allowed.')
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
         self._helpers.create_copy(snapshot['volume_name'], snapshot['name'],
                                   snapshot['volume_id'], self.configuration,
                                   opts, False, pool=pool)
@@ -2787,6 +3006,19 @@ class StorwizeSVCCommonDriver(san.SanDriver,
         if opts['qos']:
             self._helpers.add_vdisk_qos(tgt_volume['name'], opts['qos'])
 
+        if opts['volume_topology'] == 'hyperswap':
+            LOG.debug('The source volume %s to be cloned is a hyperswap '
+                      'volume.', src_volume.name)
+            # Ensure the vdisk is not part of an FC mapping.
+            # Otherwise converting it to a hyperswap volume will fail.
+            self._helpers.ensure_vdisk_no_fc_mappings(tgt_volume['name'],
+                                                      allow_snaps=True,
+                                                      allow_fctgt=False)
+
+            self._helpers.convert_volume_to_hyperswap(tgt_volume['name'],
+                                                      opts,
+                                                      self._state)
+
         ctxt = context.get_admin_context()
         model_update = {'replication_status':
                         fields.ReplicationStatus.NOT_CAPABLE}
@@ -2806,6 +3038,12 @@ class StorwizeSVCCommonDriver(san.SanDriver,
     def _extend_volume_op(self, volume, new_size, old_size=None):
         LOG.debug('enter: _extend_volume_op: volume %s', volume['id'])
         volume_name = self._get_target_vol(volume)
+        if self._helpers.is_volume_hyperswap(volume_name):
+            msg = _('_extend_volume_op: Extending a hyperswap volume is '
+                    'not supported.')
+            LOG.error(msg)
+            raise exception.InvalidInput(message=msg)
+
         ret = self._helpers.ensure_vdisk_no_fc_mappings(volume_name,
                                                         allow_snaps=False)
         if not ret:
@@ -3930,6 +4168,13 @@ class StorwizeSVCCommonDriver(san.SanDriver,
         LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s',
                   {'id': volume['id'], 'host': host['host']})
 
+        # A hyperswap volume does not support migration.
+        if self._helpers.is_volume_hyperswap(volume['name']):
+            msg = _('migrate_volume: Migrating a hyperswap volume is '
+                    'not supported.')
+            LOG.error(msg)
+            raise exception.InvalidInput(message=msg)
+
         false_ret = (False, None)
         dest_pool = self._helpers.can_migrate_to_host(host, self._state)
         if dest_pool is None:
@@ -3948,8 +4193,14 @@ class StorwizeSVCCommonDriver(san.SanDriver,
             self._helpers.migratevdisk(volume.name, dest_pool,
                                        copies['primary']['copy_id'])
         else:
-            self.add_vdisk_copy(volume.name, dest_pool, vol_type,
-                                auto_delete=True)
+            self._check_volume_copy_ops()
+            if self._state['code_level'] < (7, 6, 0, 0):
+                new_op = self.add_vdisk_copy(volume.name, dest_pool,
+                                             vol_type)
+                self._add_vdisk_copy_op(ctxt, volume, new_op)
+            else:
+                self.add_vdisk_copy(volume.name, dest_pool, vol_type,
+                                    auto_delete=True)
 
         LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s',
                   {'id': volume.id, 'host': host['host']})
@@ -4021,6 +4272,98 @@ class StorwizeSVCCommonDriver(san.SanDriver,
             self._helpers.change_relationship_cycleperiod(volume.name,
                                                           new_cps)
 
+    def _check_hyperswap_retype_params(self, volume, new_opts, old_opts,
+                                       change_mirror, new_rep_type,
+                                       old_rep_type, old_pool,
+                                       new_pool, old_io_grp):
+        if new_opts['mirror_pool'] or old_opts['mirror_pool']:
+            msg = (_('Unable to retype volume %s: current action needs '
+                     'volume-copy, which is not allowed for the hyperswap '
+                     'type.') % volume.name)
+            LOG.error(msg)
+            raise exception.InvalidInput(message=msg)
+        if new_rep_type or old_rep_type:
+            msg = _('Retype between a replicated volume and a hyperswap '
+                    'volume is not allowed.')
+            LOG.error(msg)
+            raise exception.InvalidInput(message=msg)
+        if (old_io_grp not in
+                StorwizeHelpers._get_valid_requested_io_groups(
+                    self._state, new_opts)):
+            msg = _('Unable to retype: it is not allowed to change '
+                    'hyperswap type and IO group at the same time.')
+            LOG.error(msg)
+            raise exception.InvalidInput(message=msg)
+        if new_opts['volume_topology'] == 'hyperswap':
+            if old_pool != new_pool:
+                msg = (_('Unable to retype volume %s: current action needs '
+                         'a volume pool change, and a hyperswap volume does '
+                         'not support pool change.') % volume.name)
+                LOG.error(msg)
+                raise exception.InvalidInput(message=msg)
+            if volume.previous_status == 
'in-use': + msg = _('Retype an in-use volume to a hyperswap ' + 'volume is not allowed.') + LOG.error(msg) + raise exception.InvalidInput(message=msg) + if not new_opts['easytier']: + raise exception.InvalidInput( + reason=_('The default easytier of hyperswap volume is ' + 'on, it does not support easytier off.')) + if (old_opts['volume_topology'] != 'hyperswap' and + self._helpers._get_vdisk_fc_mappings(volume.name)): + msg = _('Unable to retype: it is not allowed to change a ' + 'normal volume with snapshot to a hyperswap ' + 'volume.') + LOG.error(msg) + raise exception.InvalidInput(message=msg) + if (old_opts['volume_topology'] == 'hyperswap' and + old_opts['peer_pool'] != new_opts['peer_pool']): + msg = _('Unable to retype: it is not allowed to change a ' + 'hyperswap volume peer_pool.') + LOG.error(msg) + raise exception.InvalidInput(message=msg) + + def _retype_hyperswap_volume(self, volume, host, old_opts, new_opts, + old_pool, new_pool, vdisk_changes, + need_copy, new_type): + if (old_opts['volume_topology'] != 'hyperswap' and + new_opts['volume_topology'] == 'hyperswap'): + LOG.debug('retype: Convert a normal volume %s to hyperswap ' + 'volume.', volume.name) + self._helpers.convert_volume_to_hyperswap(volume.name, + new_opts, + self._state) + elif (old_opts['volume_topology'] == 'hyperswap' and + new_opts['volume_topology'] != 'hyperswap'): + LOG.debug('retype: Convert a hyperswap volume %s to normal ' + 'volume.', volume.name) + if new_pool == old_pool: + self._helpers.convert_hyperswap_volume_to_normal( + volume.name, + old_opts['peer_pool']) + elif new_pool == old_opts['peer_pool']: + self._helpers.convert_hyperswap_volume_to_normal( + volume.name, + old_pool) + else: + rel_info = self._helpers.get_relationship_info(volume.name) + aux_vdisk = rel_info['aux_vdisk_name'] + if need_copy: + self.add_vdisk_copy(aux_vdisk, old_opts['peer_pool'], new_type, + auto_delete=True) + elif vdisk_changes: + self._helpers.change_vdisk_options(aux_vdisk, + vdisk_changes, + new_opts, self._state) + if need_copy: + self.add_vdisk_copy(volume.name, old_pool, new_type, + auto_delete=True) + elif vdisk_changes: + self._helpers.change_vdisk_options(volume.name, + vdisk_changes, + new_opts, self._state) + def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. 
@@ -4066,8 +4409,9 @@ class StorwizeSVCCommonDriver(san.SanDriver, elif key in no_copy_keys: vdisk_changes.append(key) - if (utils.extract_host(volume['host'], 'pool') != - utils.extract_host(host['host'], 'pool')): + old_pool = utils.extract_host(volume['host'], 'pool') + new_pool = utils.extract_host(host['host'], 'pool') + if old_pool != new_pool: need_copy = True if old_opts['mirror_pool'] != new_opts['mirror_pool']: @@ -4078,50 +4422,69 @@ class StorwizeSVCCommonDriver(san.SanDriver, new_rep_type = self._get_specs_replicated_type(new_type) old_rep_type = self._get_volume_replicated_type(ctxt, volume) old_io_grp = self._helpers.get_volume_io_group(volume['name']) - new_io_grp = self._helpers.select_io_group(self._state, new_opts) + new_io_grp = self._helpers.select_io_group(self._state, + new_opts, new_pool) self._verify_retype_params(volume, new_opts, old_opts, need_copy, change_mirror, new_rep_type, old_rep_type) - if need_copy: - self._check_volume_copy_ops() - dest_pool = self._helpers.can_migrate_to_host(host, self._state) - if dest_pool is None: - return False - retype_iogrp_property(volume, - new_io_grp, old_io_grp) - try: - self.add_vdisk_copy(volume['name'], dest_pool, new_type, - auto_delete=True) - except exception.VolumeDriverException: - # roll back changing iogrp property - retype_iogrp_property(volume, old_io_grp, new_io_grp) - msg = (_('Unable to retype: A copy of volume %s exists. ' - 'Retyping would exceed the limit of 2 copies.'), - volume['id']) - raise exception.VolumeDriverException(message=msg) + if old_opts['volume_topology'] or new_opts['volume_topology']: + self._check_hyperswap_retype_params(volume, new_opts, old_opts, + change_mirror, new_rep_type, + old_rep_type, old_pool, + new_pool, old_io_grp) + self._retype_hyperswap_volume(volume, host, old_opts, new_opts, + old_pool, new_pool, vdisk_changes, + need_copy, new_type) else: - retype_iogrp_property(volume, new_io_grp, old_io_grp) + if need_copy: + self._check_volume_copy_ops() + dest_pool = self._helpers.can_migrate_to_host(host, + self._state) + if dest_pool is None: + return False - self._helpers.change_vdisk_options(volume['name'], vdisk_changes, - new_opts, self._state) - if change_mirror: - copies = self._helpers.get_vdisk_copies(volume.name) - if not old_opts['mirror_pool'] and new_opts['mirror_pool']: - # retype from non mirror vol to mirror vol - self.add_vdisk_copy(volume['name'], - new_opts['mirror_pool'], new_type) - elif old_opts['mirror_pool'] and not new_opts['mirror_pool']: - # retype from mirror vol to non mirror vol - secondary = copies['secondary'] - if secondary: - self._helpers.rm_vdisk_copy( - volume.name, secondary['copy_id']) - else: - # migrate the second copy to another pool. - self._helpers.migratevdisk( - volume.name, new_opts['mirror_pool'], - copies['secondary']['copy_id']) + retype_iogrp_property(volume, + new_io_grp, old_io_grp) + try: + if self._state['code_level'] < (7, 6, 0, 0): + new_op = self.add_vdisk_copy(volume.name, dest_pool, + new_type) + self._add_vdisk_copy_op(ctxt, volume, new_op) + else: + self.add_vdisk_copy(volume.name, dest_pool, new_type, + auto_delete=True) + except exception.VolumeDriverException: + # roll back changing iogrp property + retype_iogrp_property(volume, old_io_grp, new_io_grp) + msg = (_('Unable to retype: A copy of volume %s exists. 
' + 'Retyping would exceed the limit of 2 copies.'), + volume['id']) + raise exception.VolumeDriverException(message=msg) + else: + retype_iogrp_property(volume, new_io_grp, old_io_grp) + + self._helpers.change_vdisk_options(volume['name'], + vdisk_changes, + new_opts, self._state) + if change_mirror: + copies = self._helpers.get_vdisk_copies(volume.name) + if not old_opts['mirror_pool'] and new_opts['mirror_pool']: + # retype from non mirror vol to mirror vol + self.add_vdisk_copy(volume['name'], + new_opts['mirror_pool'], new_type) + elif (old_opts['mirror_pool'] and + not new_opts['mirror_pool']): + # retype from mirror vol to non mirror vol + secondary = copies['secondary'] + if secondary: + self._helpers.rm_vdisk_copy( + volume.name, secondary['copy_id']) + else: + # migrate the second copy to another pool. + self._helpers.migratevdisk( + volume.name, new_opts['mirror_pool'], + copies['secondary']['copy_id']) if new_opts['qos']: # Add the new QoS setting to the volume. If the volume has an # old QoS setting, it will be overwritten. @@ -4222,7 +4585,7 @@ class StorwizeSVCCommonDriver(san.SanDriver, vol_rep_type = None rel_info = self._helpers.get_relationship_info(vdisk['name']) copies = self._helpers.get_vdisk_copies(vdisk['name']) - if rel_info: + if rel_info and rel_info['copy_type'] != 'activeactive': vol_rep_type = ( storwize_const.GMCV if storwize_const.GMCV_MULTI == rel_info['cycling_mode'] @@ -4264,6 +4627,34 @@ class StorwizeSVCCommonDriver(san.SanDriver, opts = self._get_vdisk_params(volume['volume_type_id'], volume_metadata= volume.get('volume_metadata')) + # Manage hyperswap volume + if rel_info and rel_info['copy_type'] == 'activeactive': + if opts['volume_topology'] != 'hyperswap': + msg = _("Failed to manage existing volume due to " + "the hyperswap volume to be managed is " + "mismatched with the provided non-hyperswap type.") + raise exception.ManageExistingVolumeTypeMismatch( + reason=msg) + aux_vdisk = rel_info['aux_vdisk_name'] + aux_vol_attr = self._helpers.get_vdisk_attributes(aux_vdisk) + peer_pool = aux_vol_attr['mdisk_grp_name'] + if opts['peer_pool'] != peer_pool: + msg = (_("Failed to manage existing hyperswap volume due " + "to peer pool mismatch. The peer pool of the " + "volume to be managed is %(vol_pool)s, but the " + "peer_pool of the chosen type is %(peer_pool)s.") + % {'vol_pool': peer_pool, + 'peer_pool': opts['peer_pool']}) + raise exception.ManageExistingVolumeTypeMismatch( + reason=msg) + else: + if opts['volume_topology'] == 'hyperswap': + msg = _("Failed to manage existing volume, the volume to " + "be managed is not a hyperswap volume, " + "mismatch with the provided hyperswap type.") + raise exception.ManageExistingVolumeTypeMismatch( + reason=msg) + resp = self._helpers.lsvdiskcopy(vdisk['name']) expected_copy_num = 2 if opts['mirror_pool'] else 1 if len(resp) != expected_copy_num: @@ -4318,7 +4709,7 @@ class StorwizeSVCCommonDriver(san.SanDriver, msg = (_("Failed to manage existing volume due to " "I/O group mismatch. The I/O group of the " "volume to be managed is %(vdisk_iogrp)s. 
I/O group" - "of the chosen type is %(opt_iogrp)s.") % + " of the chosen type is %(opt_iogrp)s.") % {'vdisk_iogrp': vdisk['IO_group_name'], 'opt_iogrp': opts['iogrp']}) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) @@ -4393,9 +4784,11 @@ class StorwizeSVCCommonDriver(san.SanDriver, return self._stats @staticmethod - def _get_rccg_name(group, grp_id=None): + def _get_rccg_name(group, grp_id=None, hyper_grp=False): group_id = group.id if group else grp_id - return storwize_const.RCCG_PREFIX + group_id[0:4] + '-' + group_id[-5:] + rccg = (storwize_const.HYPERCG_PREFIX + if hyper_grp else storwize_const.RCCG_PREFIX) + return rccg + group_id[0:4] + '-' + group_id[-5:] # Add CG capability to generic volume groups def create_group(self, context, group): @@ -4405,7 +4798,6 @@ class StorwizeSVCCommonDriver(san.SanDriver, :param group: the group object. :returns: model_update """ - LOG.debug("Creating group.") model_update = {'status': fields.GroupStatus.AVAILABLE} @@ -4418,7 +4810,8 @@ class StorwizeSVCCommonDriver(san.SanDriver, support_grps = ['group_snapshot_enabled', 'consistent_group_snapshot_enabled', - 'consistent_group_replication_enabled'] + 'consistent_group_replication_enabled', + 'hyperswap_group_enabled'] supported_grp = False for grp_spec in support_grps: if utils.is_group_a_type(group, grp_spec): @@ -4442,6 +4835,14 @@ class StorwizeSVCCommonDriver(san.SanDriver, 'not supported.') model_update = {'status': fields.GroupStatus.ERROR} return model_update + opts = self._get_vdisk_params(vol_type_id) + if opts['volume_topology']: + # An unsupported configuration + LOG.error('Unable to create group: create consistent ' + 'snapshot group with a hyperswap volume type' + ' is not supported.') + model_update = {'status': fields.GroupStatus.ERROR} + return model_update # We'll rely on the generic group implementation if it is # a non-consistent snapshot group. @@ -4489,6 +4890,33 @@ class StorwizeSVCCommonDriver(san.SanDriver, "Exception: %(exception)s.", {'rccg': rccg_name, 'exception': err}) model_update = {'status': fields.GroupStatus.ERROR} + return model_update + + if utils.is_group_a_type(group, "hyperswap_group_enabled"): + if not self._helpers.is_system_topology_hyperswap(self._state): + LOG.error('Unable to create group: create group on ' + 'a system that does not support hyperswap.') + model_update = {'status': fields.GroupStatus.ERROR} + + for vol_type_id in group.volume_type_ids: + opts = self._get_vdisk_params(vol_type_id) + if not opts['volume_topology']: + # An unsupported configuration + LOG.error('Unable to create group: create consistent ' + 'hyperswap group with non-hyperswap volume' + ' type is not supported.') + model_update = {'status': fields.GroupStatus.ERROR} + return model_update + + rccg_name = self._get_rccg_name(group, hyper_grp=True) + try: + self._helpers.create_rccg( + rccg_name, self._state['system_name']) + except exception.VolumeBackendAPIException as err: + LOG.error("Failed to create rccg %(rccg)s. " + "Exception: %(exception)s.", + {'rccg': group.name, 'exception': err}) + model_update = {'status': fields.GroupStatus.ERROR} return model_update def delete_group(self, context, group, volumes): @@ -4503,10 +4931,12 @@ class StorwizeSVCCommonDriver(san.SanDriver, # we'll rely on the generic group implementation if it is # not a consistency group and not a consistency replication - # request. + # request and not a hyperswap group request. 
if (not utils.is_group_a_cg_snapshot_type(group) and not - utils.is_group_a_type(group, - "consistent_group_replication_enabled")): + utils.is_group_a_type(group, + "consistent_group_replication_enabled") + and not utils.is_group_a_type(group, + "hyperswap_group_enabled")): raise NotImplementedError() model_update = {'status': fields.GroupStatus.DELETED} @@ -4515,6 +4945,11 @@ class StorwizeSVCCommonDriver(san.SanDriver, "consistent_group_replication_enabled"): model_update, volumes_model_update = self._delete_replication_grp( group, volumes) + + if utils.is_group_a_type(group, "hyperswap_group_enabled"): + model_update, volumes_model_update = self._delete_hyperswap_grp( + group, volumes) + else: for volume in volumes: try: @@ -4547,10 +4982,13 @@ class StorwizeSVCCommonDriver(san.SanDriver, LOG.debug("Updating group.") # we'll rely on the generic group implementation if it is not a - # consistency group request and not consistency replication request. + # consistency group request and not consistency replication request + # and not a hyperswap group request. if (not utils.is_group_a_cg_snapshot_type(group) and not - utils.is_group_a_type(group, - "consistent_group_replication_enabled")): + utils.is_group_a_type(group, + "consistent_group_replication_enabled") + and not utils.is_group_a_type(group, + "hyperswap_group_enabled")): raise NotImplementedError() if utils.is_group_a_type(group, @@ -4558,6 +4996,10 @@ class StorwizeSVCCommonDriver(san.SanDriver, return self._update_replication_grp(context, group, add_volumes, remove_volumes) + if utils.is_group_a_type(group, "hyperswap_group_enabled"): + return self._update_hyperswap_group(context, group, + add_volumes, remove_volumes) + if utils.is_group_a_cg_snapshot_type(group): return None, None, None @@ -4585,6 +5027,13 @@ class StorwizeSVCCommonDriver(san.SanDriver, LOG.exception(msg) raise exception.VolumeBackendAPIException(data=msg) + if utils.is_group_a_type(group, "hyperswap_group_enabled"): + # An unsupported configuration + msg = _('Unable to create hyperswap group: create hyperswap ' + 'group from a hyperswap group is not supported.') + LOG.exception(msg) + raise exception.VolumeBackendAPIException(data=msg) + if not utils.is_group_a_cg_snapshot_type(group): # we'll rely on the generic volume groups implementation if it is # not a consistency group request. @@ -4929,3 +5378,82 @@ class StorwizeSVCCommonDriver(san.SanDriver, "from group. Exception: %(exception)s.", {'vol': volume.name, 'exception': err}) return model_update, None, None + + def _delete_hyperswap_grp(self, group, volumes): + model_update = {'status': fields.GroupStatus.DELETED} + volumes_model_update = [] + try: + rccg_name = self._get_rccg_name(group, hyper_grp=True) + self._helpers.delete_rccg(rccg_name) + except exception.VolumeBackendAPIException as err: + LOG.error("Failed to delete rccg %(rccg)s. " + "Exception: %(exception)s.", + {'rccg': group.name, 'exception': err}) + model_update = {'status': fields.GroupStatus.ERROR_DELETING} + + for volume in volumes: + try: + self._helpers.delete_hyperswap_volume(volume.name, True) + volumes_model_update.append( + {'id': volume.id, 'status': 'deleted'}) + except exception.VolumeDriverException as err: + LOG.error("Failed to delete the volume %(vol)s of CG. 
" + "Exception: %(exception)s.", + {'vol': volume.name, 'exception': err}) + volumes_model_update.append( + {'id': volume.id, + 'status': 'error_deleting'}) + return model_update, volumes_model_update + + def _update_hyperswap_group(self, context, group, + add_volumes=None, remove_volumes=None): + LOG.info("Update hyperswap group: %(group)s. ", {'group': group.id}) + model_update = {'status': fields.GroupStatus.AVAILABLE} + rccg_name = self._get_rccg_name(group, hyper_grp=True) + if not self._helpers.get_rccg(rccg_name): + LOG.error("Failed to update rccg: %(grp)s does not exist in " + "backend.", {'grp': group.id}) + model_update['status'] = fields.GroupStatus.ERROR + return model_update, None, None + + # Add remote copy relationship to rccg + for volume in add_volumes: + hyper_volume = self._helpers.is_volume_hyperswap(volume.name) + if not hyper_volume: + LOG.error("Failed to update rccg: the non hyperswap volume" + " of %(vol)s can't be added to hyperswap group.", + {'vol': volume.id}) + model_update['status'] = fields.GroupStatus.ERROR + return model_update, None, None + try: + rcrel = self._helpers.get_relationship_info(volume.name) + if not rcrel: + LOG.error("Failed to update rccg: remote copy relationship" + " of %(vol)s does not exist in backend.", + {'vol': volume.id}) + model_update['status'] = fields.GroupStatus.ERROR + else: + self._helpers.chrcrelationship(rcrel['name'], rccg_name) + except exception.VolumeBackendAPIException as err: + model_update['status'] = fields.GroupStatus.ERROR + LOG.error("Failed to add the remote copy of volume %(vol)s to " + "rccg. Exception: %(exception)s.", + {'vol': volume.name, 'exception': err}) + + # Remove remote copy relationship from rccg + for volume in remove_volumes: + try: + rcrel = self._helpers.get_relationship_info(volume.name) + if not rcrel: + LOG.error("Failed to update rccg: remote copy relationship" + " of %(vol)s does not exit in backend.", + {'vol': volume.id}) + model_update['status'] = fields.GroupStatus.ERROR + else: + self._helpers.chrcrelationship(rcrel['name']) + except exception.VolumeBackendAPIException as err: + model_update['status'] = fields.GroupStatus.ERROR + LOG.error("Failed to remove the remote copy of volume %(vol)s " + "from rccg. Exception: %(exception)s.", + {'vol': volume.name, 'exception': err}) + return model_update, None, None diff --git a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py index 2f2f6bf53b1..132cfccef41 100644 --- a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py +++ b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py @@ -95,9 +95,10 @@ class StorwizeSVCFCDriver(storwize_common.StorwizeSVCCommonDriver): 2.2.2 - Add npiv support 2.2.3 - Add replication group support 2.2.4 - Add backup snapshots support + 2.2.5 - Add hyperswap support """ - VERSION = "2.2.3" + VERSION = "2.2.5" # ThirdPartySystems wiki page CI_WIKI_NAME = "IBM_STORAGE_CI" @@ -123,10 +124,11 @@ class StorwizeSVCFCDriver(storwize_common.StorwizeSVCCommonDriver): # attach the snapshot will be failed. 
self._check_snapshot_replica_volume_status(snapshot)
 
-        vol_attrs = ['id', 'name', 'display_name']
+        vol_attrs = ['id', 'name', 'volume_type_id', 'display_name']
         Volume = collections.namedtuple('Volume', vol_attrs)
         volume = Volume(id=snapshot.id, name=snapshot.name,
+                        volume_type_id=snapshot.volume_type_id,
                         display_name='backup-snapshot')
 
         return self.initialize_connection(volume, connector)
@@ -163,12 +165,36 @@ class StorwizeSVCFCDriver(storwize_common.StorwizeSVCCommonDriver):
         else:
             volume_name, backend_helper, node_state = self._get_vol_sys_info(
                 volume)
+        opts = self._get_vdisk_params(volume.volume_type_id)
+        host_site = opts['host_site']
 
         # Check if a host object is defined for this host name
         host_name = backend_helper.get_host_from_connector(connector)
         if host_name is None:
             # Host does not exist - add a new host to Storwize/SVC
-            host_name = backend_helper.create_host(connector)
+            # The host_site is necessary for a hyperswap volume.
+            if self._helpers.is_volume_hyperswap(
+                    volume_name) and host_site is None:
+                msg = (_('There is no host_site configured for a hyperswap'
+                         ' volume %s.') % volume_name)
+                LOG.error(msg)
+                raise exception.VolumeDriverException(message=msg)
+
+            host_name = backend_helper.create_host(connector, site=host_site)
+        else:
+            host_info = self._helpers.ssh.lshost(host=host_name)
+            if 'site_name' in host_info[0]:
+                if not host_info[0]['site_name'] and host_site:
+                    self._helpers.update_host(host_name, host_site)
+                elif host_info[0]['site_name']:
+                    ref_host_site = host_info[0]['site_name']
+                    if host_site and host_site != ref_host_site:
+                        msg = (_('The existing host site is %(ref_host_site)s,'
+                                 ' but the new host site is %(host_site)s.') %
+                               {'ref_host_site': ref_host_site,
+                                'host_site': host_site})
+                        LOG.error(msg)
+                        raise exception.VolumeDriverException(message=msg)
 
         volume_attributes = backend_helper.get_vdisk_attributes(volume_name)
         if volume_attributes is None:
diff --git a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py
index 0bd9f772217..aba235d94ac 100644
--- a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py
+++ b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py
@@ -95,9 +95,10 @@ class StorwizeSVCISCSIDriver(storwize_common.StorwizeSVCCommonDriver):
     2.2.1 - Add vdisk mirror/stretch cluster support
     2.2.2 - Add replication group support
     2.2.3 - Add backup snapshots support
+    2.2.4 - Add hyperswap support
     """
 
-    VERSION = "2.2.2"
+    VERSION = "2.2.4"
 
     # ThirdPartySystems wiki page
     CI_WIKI_NAME = "IBM_STORAGE_CI"
@@ -123,10 +124,11 @@ class StorwizeSVCISCSIDriver(storwize_common.StorwizeSVCCommonDriver):
         # attach the snapshot will be failed.
self._check_snapshot_replica_volume_status(snapshot) - vol_attrs = ['id', 'name', 'display_name'] + vol_attrs = ['id', 'name', 'volume_type_id', 'display_name'] Volume = collections.namedtuple('Volume', vol_attrs) volume = Volume(id=snapshot.id, name=snapshot.name, + volume_type_id=snapshot.volume_type_id, display_name='backup-snapshot') return self.initialize_connection(volume, connector) @@ -161,13 +163,38 @@ class StorwizeSVCISCSIDriver(storwize_common.StorwizeSVCCommonDriver): else: volume_name, backend_helper, node_state = self._get_vol_sys_info( volume) + opts = self._get_vdisk_params(volume.volume_type_id) + host_site = opts['host_site'] # Check if a host object is defined for this host name host_name = backend_helper.get_host_from_connector(connector, iscsi=True) if host_name is None: # Host does not exist - add a new host to Storwize/SVC - host_name = backend_helper.create_host(connector, iscsi=True) + # The host_site is necessary for hyperswap volume + if self._helpers.is_volume_hyperswap( + volume_name) and host_site is None: + msg = (_('There is no host_site configured for a hyperswap' + ' volume %s.') % volume_name) + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + host_name = backend_helper.create_host(connector, iscsi=True, + site = host_site) + else: + host_info = self._helpers.ssh.lshost(host=host_name) + if 'site_name' in host_info[0]: + if not host_info[0]['site_name'] and host_site: + self._helpers.update_host(host_name, host_site) + elif host_info[0]['site_name']: + ref_host_site = host_info[0]['site_name'] + if host_site and host_site != ref_host_site: + msg = (_('The existing host site is %(ref_host_site)s,' + ' but the new host site is %(host_site)s.') % + {'ref_host_site': ref_host_site, + 'host_site': host_site}) + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) chap_secret = backend_helper.get_chap_secret_for_host(host_name) chap_enabled = self.configuration.storwize_svc_iscsi_chap_enabled diff --git a/doc/source/configuration/block-storage/drivers/ibm-storwize-svc-driver.rst b/doc/source/configuration/block-storage/drivers/ibm-storwize-svc-driver.rst index 0ab045d9700..2a7f2de82dd 100755 --- a/doc/source/configuration/block-storage/drivers/ibm-storwize-svc-driver.rst +++ b/doc/source/configuration/block-storage/drivers/ibm-storwize-svc-driver.rst @@ -315,6 +315,9 @@ driver: - multipath - iogrp - mirror_pool +- volume_topology +- peer_pool +- host_site These keys have the same semantics as their counterparts in the configuration file. They are set similarly; for example, ``rsize=2`` or @@ -452,6 +455,12 @@ modify volume types, you can also change these extra specs properties: - mirror_pool +- volume_topology + +- peer_pool + +- host_site + .. note:: When you change the ``rsize``, ``grainsize`` or ``compression`` @@ -515,3 +524,26 @@ default as the ``backend_id``: If the synchronization is not done manually, Storwize Block Storage service driver will perform the synchronization and do the failback after the synchronization is finished. + +Hyperswap Volumes +----------------- + +A hyperswap volume is created with a volume-type that has the extra spec +``drivers:volume_topology`` set to ``hyperswap``. +To support hyperswap volumes, IBM Storwize/SVC firmware version 7.6.0 or +later is required. + +.. code-block:: console + + $ cinder type-create hyper_type + $ cinder type-key hyper_type set drivers:volume_topology=hyperswap \ + drivers:peer_pool=Pool_site2 drivers:host_site=site1 + +.. 
note::
+
+   The ``rsize`` property is treated as ``buffersize`` for a hyperswap
+   volume.
+   The ``iogrp`` property of a hyperswap volume is selected by the
+   storage system.
+
+A group is created as a hyperswap group with a group-type that has the
+group spec ``hyperswap_group_enabled`` set to `` True``.
diff --git a/releasenotes/notes/storwize-hyperswap-support-b830182e1058cb4f.yaml b/releasenotes/notes/storwize-hyperswap-support-b830182e1058cb4f.yaml
new file mode 100644
index 00000000000..fc70635edcd
--- /dev/null
+++ b/releasenotes/notes/storwize-hyperswap-support-b830182e1058cb4f.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - Added hyperswap volume and group support in the Storwize cinder driver.
+    Storwize/svc versions prior to 7.6 do not support this feature.
+
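The documentation hunk above adds a console example for the hyperswap volume type but not for the hyperswap group it mentions. A minimal sketch of the corresponding group commands, assuming the standard cinder CLI: the names ``hyper_group_type`` and ``hyper_group`` are illustrative, ``hyper_type`` is the volume type from the example above, and the spec value simply mirrors the group spec used in the unit tests in this patch.

.. code-block:: console

   $ cinder group-type-create hyper_group_type
   $ cinder group-type-key hyper_group_type set hyperswap_group_enabled=" True"
   $ cinder group-create hyper_group_type hyper_type --name hyper_group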