diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py b/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py index 8f86f19a4e7..d5563bef60f 100644 --- a/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py +++ b/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py @@ -44,6 +44,7 @@ from cinder.volume.drivers.dell_emc.vmax import fc from cinder.volume.drivers.dell_emc.vmax import iscsi from cinder.volume.drivers.dell_emc.vmax import masking from cinder.volume.drivers.dell_emc.vmax import metadata +from cinder.volume.drivers.dell_emc.vmax import migrate from cinder.volume.drivers.dell_emc.vmax import provision from cinder.volume.drivers.dell_emc.vmax import rest from cinder.volume.drivers.dell_emc.vmax import utils @@ -984,6 +985,17 @@ class VMAXCommonData(object): 'snapvx_source': 'false', 'storageGroupId': []} + staging_sg = 'STG-myhostB-4732de9b-98a4-4b6d-ae4b-3cafb3d34220-SG' + staging_mv1 = 'STG-myhostA-4732de9b-98a4-4b6d-ae4b-3cafb3d34220-MV' + staging_mv2 = 'STG-myhostB-4732de9b-98a4-4b6d-ae4b-3cafb3d34220-MV' + staging_mvs = [staging_mv1, staging_mv2] + legacy_mv1 = 'OS-myhostA-No_SLO-e14f48b8-MV' + legacy_mv2 = 'OS-myhostB-No_SLO-e14f48b8-MV' + legacy_shared_sg = 'OS-myhostA-No_SLO-SG' + legacy_mvs = [legacy_mv1, legacy_mv2] + legacy_not_shared_mv = 'OS-myhostA-SRP_1-Diamond-NONE-MV' + legacy_not_shared_sg = 'OS-myhostA-SRP_1-Diamond-NONE-SG' + class FakeLookupService(object): def get_device_mapping_from_network(self, initiator_wwns, target_wwns): @@ -8687,3 +8699,486 @@ class VMAXVolumeMetadataDebugTest(test.TestCase): self.volume_metadata.gather_version_info(self.data.array) self.assertEqual( self.data.version_dict, self.volume_metadata.version_dict) + + +class VMAXMigrateTest(test.TestCase): + def setUp(self): + self.data = VMAXCommonData() + volume_utils.get_max_over_subscription_ratio = mock.Mock() + super(VMAXMigrateTest, self).setUp() + configuration = FakeConfiguration( + None, 'MaskingTests', 1, 1, san_ip='1.1.1.1', + san_login='smc', vmax_array=self.data.array, vmax_srp='SRP_1', + san_password='smc', san_api_port=8443, + vmax_port_groups=[self.data.port_group_name_f]) + rest.VMAXRest._establish_rest_session = mock.Mock( + return_value=FakeRequestsSession()) + driver = iscsi.VMAXISCSIDriver(configuration=configuration) + self.driver = driver + self.common = self.driver.common + self.migrate = self.common.migrate + + def test_get_masking_view_component_dict_shared_format_1(self): + """Test for get_masking_view_component_dict, legacy case 1.""" + component_dict = self.migrate.get_masking_view_component_dict( + 'OS-myhost-No_SLO-8970da0c-MV', 'SRP_1') + self.assertEqual('OS', component_dict['prefix']) + self.assertEqual('myhost', component_dict['host']) + self.assertEqual('No_SLO', component_dict['no_slo']) + self.assertEqual('-8970da0c', component_dict['uuid']) + self.assertEqual('MV', component_dict['postfix']) + + def test_get_masking_view_component_dict_shared_format_2(self): + """Test for get_masking_view_component_dict, legacy case 2.""" + component_dict = self.migrate.get_masking_view_component_dict( + 'OS-myhost-No_SLO-F-8970da0c-MV', 'SRP_1') + self.assertEqual('OS', component_dict['prefix']) + self.assertEqual('myhost', component_dict['host']) + self.assertEqual('-F', component_dict['protocol']) + self.assertEqual('No_SLO', component_dict['no_slo']) + self.assertEqual('-8970da0c', component_dict['uuid']) + self.assertEqual('MV', component_dict['postfix']) + + def test_get_masking_view_component_dict_shared_format_3(self): + 
"""Test for get_masking_view_component_dict, legacy case 3.""" + component_dict = self.migrate.get_masking_view_component_dict( + 'OS-myhost-SRP_1-Silver-NONE-74346a64-MV', 'SRP_1') + self.assertEqual('OS', component_dict['prefix']) + self.assertEqual('myhost', component_dict['host']) + self.assertEqual('SRP_1', component_dict['srp']) + self.assertEqual('Silver', component_dict['slo']) + self.assertEqual('NONE', component_dict['workload']) + self.assertEqual('-74346a64', component_dict['uuid']) + self.assertEqual('MV', component_dict['postfix']) + + def test_get_masking_view_component_dict_shared_format_4(self): + """Test for get_masking_view_component_dict, legacy case 4.""" + component_dict = self.migrate.get_masking_view_component_dict( + 'OS-myhost-SRP_1-Bronze-DSS-I-1b454e9f-MV', 'SRP_1') + self.assertEqual('OS', component_dict['prefix']) + self.assertEqual('myhost', component_dict['host']) + self.assertEqual('SRP_1', component_dict['srp']) + self.assertEqual('Bronze', component_dict['slo']) + self.assertEqual('DSS', component_dict['workload']) + self.assertEqual('-I', component_dict['protocol']) + self.assertEqual('-1b454e9f', component_dict['uuid']) + self.assertEqual('MV', component_dict['postfix']) + + def test_get_masking_view_component_dict_non_shared_format_5(self): + """Test for get_masking_view_component_dict, legacy case 5.""" + component_dict = self.migrate.get_masking_view_component_dict( + 'OS-myhost-No_SLO-MV', 'SRP_1') + self.assertEqual('OS', component_dict['prefix']) + self.assertEqual('myhost', component_dict['host']) + self.assertEqual('No_SLO', component_dict['no_slo']) + self.assertEqual('MV', component_dict['postfix']) + + def test_get_masking_view_component_dict_non_shared_format_6(self): + """Test for get_masking_view_component_dict, legacy case 6.""" + component_dict = self.migrate.get_masking_view_component_dict( + 'OS-myhost-No_SLO-F-MV', 'SRP_1') + self.assertEqual('OS', component_dict['prefix']) + self.assertEqual('myhost', component_dict['host']) + self.assertEqual('No_SLO', component_dict['no_slo']) + self.assertEqual('-F', component_dict['protocol']) + self.assertEqual('MV', component_dict['postfix']) + + def test_get_masking_view_component_dict_non_shared_format_7(self): + """Test for get_masking_view_component_dict, legacy case 7.""" + component_dict = self.migrate.get_masking_view_component_dict( + 'OS-myhost-SRP_1-Diamond-OLTP-MV', 'SRP_1') + self.assertEqual('OS', component_dict['prefix']) + self.assertEqual('myhost', component_dict['host']) + self.assertEqual('SRP_1', component_dict['srp']) + self.assertEqual('Diamond', component_dict['slo']) + self.assertEqual('OLTP', component_dict['workload']) + self.assertEqual('MV', component_dict['postfix']) + + def test_get_masking_view_component_dict_non_shared_format_8(self): + """Test for get_masking_view_component_dict, legacy case 8.""" + component_dict = self.migrate.get_masking_view_component_dict( + 'OS-myhost-SRP_1-Gold-NONE-F-MV', 'SRP_1') + self.assertEqual('OS', component_dict['prefix']) + self.assertEqual('myhost', component_dict['host']) + self.assertEqual('SRP_1', component_dict['srp']) + self.assertEqual('Gold', component_dict['slo']) + self.assertEqual('NONE', component_dict['workload']) + self.assertEqual('-F', component_dict['protocol']) + self.assertEqual('MV', component_dict['postfix']) + + def test_get_masking_view_component_dict_host_with_dashes_no_slo( + self): + """Test for get_masking_view_component_dict, dashes in host.""" + component_dict = 
self.migrate.get_masking_view_component_dict( + 'OS-host-with-dashes-No_SLO-I-MV', 'SRP_1') + self.assertEqual('OS', component_dict['prefix']) + self.assertEqual('host-with-dashes', component_dict['host']) + self.assertEqual('No_SLO', component_dict['no_slo']) + self.assertEqual('-I', component_dict['protocol']) + self.assertEqual('MV', component_dict['postfix']) + + def test_get_masking_view_component_dict_host_with_dashes_slo(self): + """Test for get_masking_view_component_dict, dashes and slo.""" + component_dict = self.migrate.get_masking_view_component_dict( + 'OS-host-with-dashes-SRP_1-Diamond-NONE-I-MV', 'SRP_1') + self.assertEqual('OS', component_dict['prefix']) + self.assertEqual('host-with-dashes', component_dict['host']) + self.assertEqual('SRP_1', component_dict['srp']) + self.assertEqual('Diamond', component_dict['slo']) + self.assertEqual('NONE', component_dict['workload']) + self.assertEqual('-I', component_dict['protocol']) + self.assertEqual('MV', component_dict['postfix']) + + def test_get_masking_view_component_dict_replication_enabled(self): + """Test for get_masking_view_component_dict, replication enabled.""" + component_dict = self.migrate.get_masking_view_component_dict( + 'OS-myhost-SRP_1-Diamond-OLTP-I-RE-MV', 'SRP_1') + self.assertEqual('OS', component_dict['prefix']) + self.assertEqual('myhost', component_dict['host']) + self.assertEqual('-I', component_dict['protocol']) + self.assertEqual('Diamond', component_dict['slo']) + self.assertEqual('OLTP', component_dict['workload']) + self.assertEqual('-RE', component_dict['RE']) + + def test_get_masking_view_component_dict_compression_disabled(self): + """Test for get_masking_view_component_dict, compression disabled.""" + component_dict = self.migrate.get_masking_view_component_dict( + 'OS-myhost-SRP_1-Bronze-DSS_REP-I-CD-MV', 'SRP_1') + self.assertEqual('OS', component_dict['prefix']) + self.assertEqual('myhost', component_dict['host']) + self.assertEqual('-I', component_dict['protocol']) + self.assertEqual('Bronze', component_dict['slo']) + self.assertEqual('DSS_REP', component_dict['workload']) + self.assertEqual('-CD', component_dict['CD']) + + def test_get_masking_view_component_dict_CD_RE(self): + """Test for get_masking_view_component_dict, CD and RE.""" + component_dict = self.migrate.get_masking_view_component_dict( + 'OS-myhost-SRP_1-Platinum-OLTP_REP-I-CD-RE-MV', 'SRP_1') + self.assertEqual('OS', component_dict['prefix']) + self.assertEqual('myhost', component_dict['host']) + self.assertEqual('-I', component_dict['protocol']) + self.assertEqual('Platinum', component_dict['slo']) + self.assertEqual('OLTP_REP', component_dict['workload']) + self.assertEqual('-CD', component_dict['CD']) + self.assertEqual('-RE', component_dict['RE']) + + @mock.patch.object(migrate.VMAXMigrate, + '_perform_migration', + return_value=True) + @mock.patch.object(migrate.VMAXMigrate, + '_get_mvs_and_sgs_from_volume', + return_value=(VMAXCommonData.legacy_mvs, + [VMAXCommonData.legacy_shared_sg])) + @mock.patch.object(migrate.VMAXMigrate, + 'get_volume_host_list', + return_value=['myhostB']) + def test_do_migrate_if_candidate( + self, mock_os_host, mock_mvs, mock_migrate): + self.assertTrue(self.migrate.do_migrate_if_candidate( + self.data.array, self.data.srp, self.data.device_id, + self.data.test_volume, self.data.connector)) + + @mock.patch.object(migrate.VMAXMigrate, + '_get_mvs_and_sgs_from_volume', + return_value=([VMAXCommonData.legacy_not_shared_mv], + [VMAXCommonData.legacy_not_shared_sg])) + def 
test_do_migrate_if_candidate_not_shared( + self, mock_mvs): + self.assertFalse(self.migrate.do_migrate_if_candidate( + self.data.array, self.data.srp, self.data.device_id, + self.data.test_volume, self.data.connector)) + + @mock.patch.object(migrate.VMAXMigrate, + '_get_mvs_and_sgs_from_volume', + return_value=(VMAXCommonData.legacy_mvs, + [VMAXCommonData.legacy_shared_sg, + 'non_fast_sg'])) + def test_do_migrate_if_candidate_in_multiple_sgs( + self, mock_mvs): + self.assertFalse(self.migrate.do_migrate_if_candidate( + self.data.array, self.data.srp, self.data.device_id, + self.data.test_volume, self.data.connector)) + + @mock.patch.object(migrate.VMAXMigrate, + '_perform_migration', + return_value=True) + @mock.patch.object(migrate.VMAXMigrate, + '_get_mvs_and_sgs_from_volume', + return_value=(VMAXCommonData.legacy_mvs, + [VMAXCommonData.legacy_shared_sg])) + @mock.patch.object(migrate.VMAXMigrate, + 'get_volume_host_list', + return_value=['myhostA', 'myhostB']) + def test_do_migrate_if_candidate_multiple_os_hosts( + self, mock_os_host, mock_mvs, mock_migrate): + self.assertFalse(self.migrate.do_migrate_if_candidate( + self.data.array, self.data.srp, self.data.device_id, + self.data.test_volume, self.data.connector)) + + @mock.patch.object(migrate.VMAXMigrate, + '_delete_staging_masking_views') + @mock.patch.object(migrate.VMAXMigrate, + '_get_mvs_and_sgs_from_volume', + side_effect=[(VMAXCommonData.staging_mvs, + [VMAXCommonData.staging_sg]), + ([VMAXCommonData.staging_mv2], + [VMAXCommonData.staging_sg])]) + @mock.patch.object(migrate.VMAXMigrate, + '_create_stg_masking_views', + return_value=VMAXCommonData.staging_mvs) + @mock.patch.object(migrate.VMAXMigrate, + '_create_stg_storage_group_with_vol', + return_value=VMAXCommonData.staging_sg) + def test_perform_migration(self, mock_sg, mock_mvs, mock_new, mock_del): + """Test a successful migration to the staging structure.""" + source_sg_name = 'OS-myhost-SRP_1-Diamond-OLTP-F-SG' + mv_details_list = list() + mv_details_list.append(self.migrate.get_masking_view_component_dict( + 'OS-myhostA-SRP_1-Diamond-OLTP-F-1b454e9f-MV', 'SRP_1')) + mv_details_list.append(self.migrate.get_masking_view_component_dict( + 'OS-myhostB-SRP_1-Diamond-OLTP-F-8970da0c-MV', 'SRP_1')) + self.assertTrue(self.migrate._perform_migration( + self.data.array, self.data.device_id, mv_details_list, + source_sg_name, 'myhostB')) + + @mock.patch.object(migrate.VMAXMigrate, + '_create_stg_storage_group_with_vol', + return_value=None) + def test_perform_migration_storage_group_fail(self, mock_sg): + """Test migration when staging storage group creation fails.""" + source_sg_name = 'OS-myhost-SRP_1-Diamond-OLTP-F-SG' + mv_details_list = list() + mv_details_list.append(self.migrate.get_masking_view_component_dict( + 'OS-myhostA-SRP_1-Diamond-OLTP-F-1b454e9f-MV', 'SRP_1')) + mv_details_list.append(self.migrate.get_masking_view_component_dict( + 'OS-myhostB-SRP_1-Diamond-OLTP-F-8970da0c-MV', 'SRP_1')) + self.assertRaises( + exception.VolumeBackendAPIException, + self.migrate._perform_migration, self.data.array, + self.data.device_id, mv_details_list, + source_sg_name, 'myhostB') + with self.assertRaisesRegex( + exception.VolumeBackendAPIException, + 'MIGRATE - Unable to create staging storage group.'): + self.migrate._perform_migration( + self.data.array, self.data.device_id, mv_details_list, + source_sg_name, 'myhostB') + + @mock.patch.object(migrate.VMAXMigrate, + '_create_stg_masking_views', + return_value=[]) + @mock.patch.object(migrate.VMAXMigrate, + '_create_stg_storage_group_with_vol', + return_value=VMAXCommonData.staging_sg) 
+ def test_perform_migration_masking_views_fail(self, mock_sg, mock_mvs): + """Test migration when staging masking view creation fails.""" + source_sg_name = 'OS-myhost-SRP_1-Diamond-OLTP-F-SG' + mv_details_list = list() + mv_details_list.append(self.migrate.get_masking_view_component_dict( + 'OS-myhostA-SRP_1-Diamond-OLTP-F-1b454e9f-MV', 'SRP_1')) + mv_details_list.append(self.migrate.get_masking_view_component_dict( + 'OS-myhostB-SRP_1-Diamond-OLTP-F-8970da0c-MV', 'SRP_1')) + with self.assertRaisesRegex( + exception.VolumeBackendAPIException, + 'MIGRATE - Unable to create staging masking views.'): + self.migrate._perform_migration( + self.data.array, self.data.device_id, mv_details_list, + source_sg_name, 'myhostB') + + @mock.patch.object(migrate.VMAXMigrate, + '_get_mvs_and_sgs_from_volume', + return_value=(VMAXCommonData.staging_mvs, + [VMAXCommonData.staging_sg, + VMAXCommonData.staging_sg])) + @mock.patch.object(migrate.VMAXMigrate, + '_create_stg_masking_views', + return_value=VMAXCommonData.staging_mvs) + @mock.patch.object(migrate.VMAXMigrate, + '_create_stg_storage_group_with_vol', + return_value=VMAXCommonData.staging_sg) + def test_perform_migration_sg_list_len_fail( + self, mock_sg, mock_mvs, mock_new): + """Test migration when the resulting storage group count is wrong.""" + source_sg_name = 'OS-myhost-SRP_1-Diamond-OLTP-F-SG' + mv_details_list = list() + mv_details_list.append(self.migrate.get_masking_view_component_dict( + 'OS-myhostA-SRP_1-Diamond-OLTP-F-1b454e9f-MV', 'SRP_1')) + mv_details_list.append(self.migrate.get_masking_view_component_dict( + 'OS-myhostB-SRP_1-Diamond-OLTP-F-8970da0c-MV', 'SRP_1')) + + exception_message = ( + r"MIGRATE - The current storage group list has 2 " + r"members. The list is " + r"\[\'STG-myhostB-4732de9b-98a4-4b6d-ae4b-3cafb3d34220-SG\', " + r"\'STG-myhostB-4732de9b-98a4-4b6d-ae4b-3cafb3d34220-SG\'\]. " + r"Will not proceed with cleanup. Please contact customer " + r"representative.") + + with self.assertRaisesRegex( + exception.VolumeBackendAPIException, + exception_message): + self.migrate._perform_migration( + self.data.array, self.data.device_id, mv_details_list, + source_sg_name, 'myhostB') + + @mock.patch.object(migrate.VMAXMigrate, + '_get_mvs_and_sgs_from_volume', + return_value=(VMAXCommonData.staging_mvs, + ['not_staging_sg'])) + @mock.patch.object(migrate.VMAXMigrate, + '_create_stg_masking_views', + return_value=VMAXCommonData.staging_mvs) + @mock.patch.object(migrate.VMAXMigrate, + '_create_stg_storage_group_with_vol', + return_value=VMAXCommonData.staging_sg) + def test_perform_migration_stg_sg_mismatch_fail( + self, mock_sg, mock_mvs, mock_new): + """Test migration when the resulting storage group does not match.""" + source_sg_name = 'OS-myhost-SRP_1-Diamond-OLTP-F-SG' + mv_details_list = list() + mv_details_list.append(self.migrate.get_masking_view_component_dict( + 'OS-myhostA-SRP_1-Diamond-OLTP-F-1b454e9f-MV', 'SRP_1')) + mv_details_list.append(self.migrate.get_masking_view_component_dict( + 'OS-myhostB-SRP_1-Diamond-OLTP-F-8970da0c-MV', 'SRP_1')) + with self.assertRaisesRegex( + exception.VolumeBackendAPIException, + 'MIGRATE - The current storage group not_staging_sg does not ' + 'match STG-myhostB-4732de9b-98a4-4b6d-ae4b-3cafb3d34220-SG. ' + 'Will not proceed with cleanup. 
Please contact customer ' + 'representative.'): + self.migrate._perform_migration( + self.data.array, self.data.device_id, mv_details_list, + source_sg_name, 'myhostB') + + @mock.patch.object(rest.VMAXRest, 'delete_masking_view') + def test_delete_staging_masking_views(self, mock_del): + self.assertTrue(self.migrate._delete_staging_masking_views( + self.data.array, self.data.staging_mvs, 'myhostB')) + mock_del.assert_called_once() + + @mock.patch.object(rest.VMAXRest, 'delete_masking_view') + def test_delete_staging_masking_views_no_host_match(self, mock_del): + self.assertFalse(self.migrate._delete_staging_masking_views( + self.data.array, self.data.staging_mvs, 'myhostC')) + mock_del.assert_not_called() + + @mock.patch.object(rest.VMAXRest, 'create_masking_view') + @mock.patch.object(rest.VMAXRest, 'get_masking_view', + return_value=VMAXCommonData.maskingview[0]) + def test_create_stg_masking_views(self, mock_get, mock_create): + mv_detail_list = list() + for masking_view in self.data.legacy_mvs: + masking_view_dict = self.migrate.get_masking_view_component_dict( + masking_view, 'SRP_1') + if masking_view_dict: + mv_detail_list.append(masking_view_dict) + self.assertIsNotNone(self.migrate._create_stg_masking_views( + self.data.array, mv_detail_list, self.data.staging_sg, + self.data.extra_specs)) + self.assertEqual(2, mock_create.call_count) + + @mock.patch.object(rest.VMAXRest, 'create_masking_view') + @mock.patch.object(rest.VMAXRest, 'get_masking_view', + side_effect=[VMAXCommonData.maskingview[0], None]) + def test_create_stg_masking_views_mv_not_created( + self, mock_get, mock_create): + mv_detail_list = list() + for masking_view in self.data.legacy_mvs: + masking_view_dict = self.migrate.get_masking_view_component_dict( + masking_view, 'SRP_1') + if masking_view_dict: + mv_detail_list.append(masking_view_dict) + self.assertIsNone(self.migrate._create_stg_masking_views( + self.data.array, mv_detail_list, self.data.staging_sg, + self.data.extra_specs)) + + @mock.patch.object(provision.VMAXProvision, 'create_volume_from_sg') + @mock.patch.object(provision.VMAXProvision, 'create_storage_group', + return_value=VMAXCommonData.staging_mvs[0]) + def test_create_stg_storage_group_with_vol(self, mock_mv, mock_create): + self.migrate._create_stg_storage_group_with_vol( + self.data.array, 'myhostB', self.data.extra_specs) + mock_create.assert_called_once() + + @mock.patch.object(provision.VMAXProvision, 'create_volume_from_sg') + @mock.patch.object(provision.VMAXProvision, 'create_storage_group', + return_value=None) + def test_create_stg_storage_group_with_vol_None( + self, mock_mv, mock_create): + self.assertIsNone(self.migrate._create_stg_storage_group_with_vol( + self.data.array, 'myhostB', self.data.extra_specs)) + + @mock.patch.object(rest.VMAXRest, + 'get_masking_views_from_storage_group', + return_value=VMAXCommonData.legacy_mvs) + @mock.patch.object(rest.VMAXRest, 'get_storage_groups_from_volume', + return_value=[VMAXCommonData.legacy_shared_sg]) + def test_get_mvs_and_sgs_from_volume(self, mock_sgs, mock_mvs): + mv_list, sg_list = self.migrate._get_mvs_and_sgs_from_volume( + self.data.array, self.data.device_id) + mock_mvs.assert_called_once() + self.assertEqual([self.data.legacy_shared_sg], sg_list) + self.assertEqual(self.data.legacy_mvs, mv_list) + + @mock.patch.object(rest.VMAXRest, + 'get_masking_views_from_storage_group') + @mock.patch.object(rest.VMAXRest, 'get_storage_groups_from_volume', + return_value=list()) + def test_get_mvs_and_sgs_from_volume_empty_sg_list( + self, 
mock_sgs, mock_mvs): + mv_list, sg_list = self.migrate._get_mvs_and_sgs_from_volume( + self.data.array, self.data.device_id) + mock_mvs.assert_not_called() + self.assertEqual(0, len(sg_list)) + self.assertEqual(0, len(mv_list)) + + def test_get_volume_host_list(self): + volume1 = deepcopy(self.data.test_volume) + volume1.volume_attachment.objects = [self.data.test_volume_attachment] + os_host_list = self.migrate.get_volume_host_list( + volume1, self.data.connector) + self.assertEqual('HostX', os_host_list[0]) + + def test_get_volume_host_list_no_attachments(self): + _volume_attachment = deepcopy(self.data.test_volume_attachment) + _volume_attachment.update({'connector': None}) + volume1 = deepcopy(self.data.test_volume) + volume1.volume_attachment.objects = [_volume_attachment] + os_host_list = self.migrate.get_volume_host_list( + volume1, self.data.connector) + self.assertEqual(0, len(os_host_list)) + + @mock.patch.object(rest.VMAXRest, + 'delete_masking_view') + @mock.patch.object(rest.VMAXRest, + 'get_masking_views_from_storage_group', + return_value=[VMAXCommonData.staging_mv1]) + @mock.patch.object(rest.VMAXRest, + 'get_volumes_in_storage_group', + return_value=[VMAXCommonData.volume_id]) + def test_cleanup_staging_objects(self, mock_vols, mock_mvs, mock_del_mv): + self.migrate.cleanup_staging_objects( + self.data.array, [self.data.staging_sg], self.data.extra_specs) + mock_del_mv.assert_called_once_with( + self.data.array, self.data.staging_mv1) + + @mock.patch.object(rest.VMAXRest, + 'delete_masking_view') + def test_cleanup_staging_objects_not_staging(self, mock_del_mv): + self.migrate.cleanup_staging_objects( + self.data.array, [self.data.storagegroup_name_f], + self.data.extra_specs) + mock_del_mv.assert_not_called() + + @mock.patch.object(rest.VMAXRest, + 'get_masking_views_from_storage_group') + @mock.patch.object(rest.VMAXRest, + 'get_volumes_in_storage_group', + return_value=[VMAXCommonData.device_id, + VMAXCommonData.device_id2]) + def test_cleanup_staging_objects_multiple_vols(self, mock_vols, mock_mvs): + self.migrate.cleanup_staging_objects( + self.data.array, [self.data.storagegroup_name_f], + self.data.extra_specs) + mock_mvs.assert_not_called() diff --git a/cinder/volume/drivers/dell_emc/vmax/common.py b/cinder/volume/drivers/dell_emc/vmax/common.py index f6fe96d0f64..082f361d844 100644 --- a/cinder/volume/drivers/dell_emc/vmax/common.py +++ b/cinder/volume/drivers/dell_emc/vmax/common.py @@ -32,6 +32,7 @@ from cinder.objects import fields from cinder.volume import configuration from cinder.volume.drivers.dell_emc.vmax import masking from cinder.volume.drivers.dell_emc.vmax import metadata as volume_metadata +from cinder.volume.drivers.dell_emc.vmax import migrate from cinder.volume.drivers.dell_emc.vmax import provision from cinder.volume.drivers.dell_emc.vmax import rest from cinder.volume.drivers.dell_emc.vmax import utils @@ -127,6 +128,8 @@ class VMAXCommon(object): self.version = version self.volume_metadata = volume_metadata.VMAXVolumeMetadata( self.rest, version, LOG.isEnabledFor(logging.DEBUG)) + self.migrate = migrate.VMAXMigrate(prtcl, self.rest) + # replication self.replication_enabled = False self.extend_replicated_vol = False @@ -521,12 +524,17 @@ class VMAXCommon(object): volume_name = volume.name LOG.debug("Detaching volume %s.", volume_name) reset = False if is_multiattach else True + if is_multiattach: + storage_group_names = self.rest.get_storage_groups_from_volume( + array, device_id) self.masking.remove_and_reset_members( array, volume, 
device_id, volume_name, extra_specs, reset, connector, async_grp=async_grp) if is_multiattach: self.masking.return_volume_to_fast_managed_group( array, device_id, extra_specs) + self.migrate.cleanup_staging_objects( + array, storage_group_names, extra_specs) def _unmap_lun(self, volume, connector): """Unmaps a volume from the host. @@ -650,7 +658,8 @@ if self.utils.is_volume_failed_over(volume): extra_specs = rep_extra_specs device_info_dict, is_multiattach = ( - self.find_host_lun_id(volume, connector['host'], extra_specs)) + self.find_host_lun_id(volume, connector.get('host'), extra_specs, + connector=connector)) masking_view_dict = self._populate_masking_dict( volume, connector, extra_specs) masking_view_dict[utils.IS_MULTIATTACH] = is_multiattach @@ -1116,20 +1125,29 @@ return founddevice_id def find_host_lun_id(self, volume, host, extra_specs, - rep_extra_specs=None): + rep_extra_specs=None, connector=None): """Given the volume dict find the host lun id for a volume. :param volume: the volume dict :param host: host from connector (can be None on a force-detach) :param extra_specs: the extra specs :param rep_extra_specs: rep extra specs, passed in if metro device + :param connector: the connector object, which can be None :returns: dict -- the data dict """ maskedvols = {} is_multiattach = False volume_name = volume.name device_id = self._find_device_on_array(volume, extra_specs) - if rep_extra_specs is not None: + if connector: + if self.migrate.do_migrate_if_candidate( + extra_specs[utils.ARRAY], extra_specs[utils.SRP], + device_id, volume, connector): + LOG.debug("MIGRATE - Successfully migrated device " + "%(dev)s from legacy shared storage groups " + "(pre-Pike release).", + {'dev': device_id}) + if rep_extra_specs: device_id = self.get_remote_target_device( extra_specs[utils.ARRAY], volume, device_id)[0] extra_specs = rep_extra_specs diff --git a/cinder/volume/drivers/dell_emc/vmax/fc.py b/cinder/volume/drivers/dell_emc/vmax/fc.py index f29eb16a472..31e00614b35 100644 --- a/cinder/volume/drivers/dell_emc/vmax/fc.py +++ b/cinder/volume/drivers/dell_emc/vmax/fc.py @@ -103,9 +103,10 @@ class VMAXFCDriver(san.SanDriver, driver.FibreChannelDriver): (bugs #1783855 #1783867) - Fix for HyperMax OS Upgrade Bug (bug #1790141) 3.2.3 - Legacy volume not found fix (#1867163) + 3.2.4 - Fix to enable legacy volumes to live migrate (#1867163) """ - VERSION = "3.2.3" + VERSION = "3.2.4" # ThirdPartySystems wiki CI_WIKI_NAME = "EMC_VMAX_CI" diff --git a/cinder/volume/drivers/dell_emc/vmax/iscsi.py b/cinder/volume/drivers/dell_emc/vmax/iscsi.py index f76c5ffc473..786359f6a0e 100644 --- a/cinder/volume/drivers/dell_emc/vmax/iscsi.py +++ b/cinder/volume/drivers/dell_emc/vmax/iscsi.py @@ -108,9 +108,10 @@ class VMAXISCSIDriver(san.SanISCSIDriver): (bugs #1783855 #1783867) - Fix for HyperMax OS Upgrade Bug (bug #1790141) 3.2.3 - Legacy volume not found fix (#1867163) + 3.2.4 - Fix to enable legacy volumes to live migrate (#1867163) """ - VERSION = "3.2.3" + VERSION = "3.2.4" # ThirdPartySystems wiki CI_WIKI_NAME = "EMC_VMAX_CI" diff --git a/cinder/volume/drivers/dell_emc/vmax/masking.py b/cinder/volume/drivers/dell_emc/vmax/masking.py index c4ed61c1a6a..4dad053364a 100644 --- a/cinder/volume/drivers/dell_emc/vmax/masking.py +++ b/cinder/volume/drivers/dell_emc/vmax/masking.py @@ -1022,6 +1022,7 @@ class VMAXMasking(object): :param reset: flag to indicate if reset is required -- bool :param async_grp: the async rep group """ + move = False short_host_name = 
None storagegroup_names = (self.rest.get_storage_groups_from_volume( @@ -1612,6 +1613,14 @@ sg_list = self.rest.get_storage_group_list( serial_number, params={ 'child': 'true', 'volumeId': device_id}) + # Handle the legacy case: if the volume is only in a staging + # ('STG-') storage group, return the masking view dict as is. + if not sg_list.get('storageGroupId'): + storage_group_list = self.rest.get_storage_groups_from_volume( + serial_number, device_id) + if storage_group_list and len(storage_group_list) == 1: + if 'STG-' in storage_group_list[0]: + return mv_dict + split_pool = extra_specs['pool_name'].split('+') src_slo = split_pool[0] src_wl = split_pool[1] if len(split_pool) == 4 else 'NONE' diff --git a/cinder/volume/drivers/dell_emc/vmax/migrate.py b/cinder/volume/drivers/dell_emc/vmax/migrate.py new file mode 100644 index 00000000000..668fe05f8dd --- /dev/null +++ b/cinder/volume/drivers/dell_emc/vmax/migrate.py @@ -0,0 +1,423 @@ +# Copyright (c) 2020 Dell Inc. or its subsidiaries. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from oslo_log import log as logging + +from cinder import exception +from cinder.i18n import _ +from cinder.volume.drivers.dell_emc.vmax import masking +from cinder.volume.drivers.dell_emc.vmax import provision +from cinder.volume.drivers.dell_emc.vmax import utils + +LOG = logging.getLogger(__name__) + + +class VMAXMigrate(object): + """Migration class for the REST-based VMAX/PowerMax volume drivers. + + This class moves legacy volumes, created when SMI-S was supported, + into the staging structure used by the Unisphere REST API drivers. + It supports VMAX 3, VMAX All Flash and PowerMax arrays. + + """ + def __init__(self, prtcl, rest): + self.rest = rest + self.utils = utils.VMAXUtils() + self.masking = masking.VMAXMasking(prtcl, self.rest) + self.provision = provision.VMAXProvision(self.rest) + + def do_migrate_if_candidate( + self, array, srp, device_id, volume, connector): + """Check if the volume is a migration candidate and migrate it. + + If the volume is in the legacy (SMI-S) masking view structure, + move it to a staging storage group within a staging masking view. + + :param array: array serial number + :param srp: the SRP + :param device_id: the volume device id + :param volume: the volume object + :param connector: the connector object + :returns: boolean + """ + mv_detail_list = list() + + masking_view_list, storage_group_list = ( + self._get_mvs_and_sgs_from_volume( + array, device_id)) + + for masking_view in masking_view_list: + masking_view_dict = self.get_masking_view_component_dict( + masking_view, srp) + if masking_view_dict: + mv_detail_list.append(masking_view_dict) + + if not mv_detail_list: + return False + + if len(storage_group_list) != 1: + LOG.warning("MIGRATE - The volume %(dev_id)s is not in one " + "storage group as is expected for migration. " + "The volume is in storage groups %(sg_list)s. "
+ "Migration will not proceed.", + {'dev_id': device_id, + 'sg_list': storage_group_list}) + return False + else: + source_storage_group_name = storage_group_list[0] + + # Get the host that OpenStack has volume exposed to (it should only + # be one host). + os_host_list = self.get_volume_host_list(volume, connector) + if len(os_host_list) != 1: + LOG.warning("MIGRATE - OpenStack has recorded that " + "%(dev_id)s is attached to hosts %(os_hosts)s " + "and not 1 host as is expected. " + "Migration will not proceed.", + {'dev_id': device_id, + 'os_hosts': os_host_list}) + return False + else: + os_host_name = os_host_list[0] + LOG.info("MIGRATE - Volume %(dev_id)s is a candidate for " + "migration. The OpenStack host is %(os_host_name)s." + "The volume is in storage group %(sg_name)s.", + {'dev_id': device_id, + 'os_host_name': os_host_name, + 'sg_name': source_storage_group_name}) + return self._perform_migration( + array, device_id, mv_detail_list, source_storage_group_name, + os_host_name) + + def _perform_migration( + self, array, device_id, mv_detail_list, source_storage_group_name, + os_host_name): + """Perform steps so we can get the volume in a correct state. + + :param array: the storage array + :param device_id: the device_id + :param mv_detail_list: the masking view list + :param source_storage_group_name: the source storage group + :param os_host_name: the host the volume is exposed to + :returns: boolean + """ + extra_specs = {utils.INTERVAL: 3, utils.RETRIES: 200} + stg_sg_name = self._create_stg_storage_group_with_vol( + array, os_host_name, extra_specs) + if not stg_sg_name: + # Throw an exception here + exception_message = _("MIGRATE - Unable to create staging " + "storage group.") + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + message=exception_message) + LOG.info("MIGRATE - Staging storage group %(stg_sg_name)s has " + "been successfully created.", {'stg_sg_name': stg_sg_name}) + + new_stg_mvs = self._create_stg_masking_views( + array, mv_detail_list, stg_sg_name, extra_specs) + LOG.info("MIGRATE - Staging masking views %(new_stg_mvs)s have " + "been successfully created.", {'new_stg_mvs': new_stg_mvs}) + + if not new_stg_mvs: + exception_message = _("MIGRATE - Unable to create staging " + "masking views.") + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + message=exception_message) + + # Move volume from old storage group to new staging storage group + self.move_volume_from_legacy_to_staging( + array, device_id, source_storage_group_name, + stg_sg_name, extra_specs) + + LOG.info("MIGRATE - Device id %(device_id)s has been successfully " + "moved from %(src_sg)s to %(tgt_sg)s.", + {'device_id': device_id, + 'src_sg': source_storage_group_name, + 'tgt_sg': stg_sg_name}) + + new_masking_view_list, new_storage_group_list = ( + self._get_mvs_and_sgs_from_volume( + array, device_id)) + + if len(new_storage_group_list) != 1: + exception_message = (_( + "MIGRATE - The current storage group list has %(list_len)d " + "members. The list is %(sg_list)s. Will not proceed with " + "cleanup. 
Please contact customer representative.") % { + 'list_len': len(new_storage_group_list), + 'sg_list': new_storage_group_list}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + message=exception_message) + else: + current_storage_group_name = new_storage_group_list[0] + if current_storage_group_name.lower() != stg_sg_name.lower(): + exception_message = (_( + "MIGRATE - The current storage group %(sg_1)s " + "does not match %(sg_2)s. Will not proceed with " + "cleanup. Please contact customer representative.") % { + 'sg_1': current_storage_group_name, + 'sg_2': stg_sg_name}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + message=exception_message) + + if not self._delete_staging_masking_views( + array, new_masking_view_list, os_host_name): + exception_message = _("MIGRATE - Unable to delete staging masking " + "views. Please contact customer " + "representative.") + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + message=exception_message) + + final_masking_view_list, final_storage_group_list = ( + self._get_mvs_and_sgs_from_volume( + array, device_id)) + if len(final_masking_view_list) != 1: + exception_message = (_( + "MIGRATE - The final masking view list has %(list_len)d " + "entries and not 1 entry as is expected. The list is " + "%(mv_list)s. Please contact customer representative.") % { + 'list_len': len(final_masking_view_list), + 'mv_list': final_masking_view_list}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + message=exception_message) + + return True + + def move_volume_from_legacy_to_staging( + self, array, device_id, source_storage_group_name, + stg_sg_name, extra_specs): + """Move the volume from the legacy SG to the staging SG. + + :param array: array serial number + :param device_id: the device id of the volume + :param source_storage_group_name: the source storage group + :param stg_sg_name: the target staging storage group + :param extra_specs: the extra specs + """ + num_vol_in_sg = self.rest.get_num_vols_in_sg( + array, source_storage_group_name) + if num_vol_in_sg == 1: + # Can't move the last volume and leave the masking view empty, + # so create a holder volume + temp_vol_size = '1' + hold_vol_name = 'hold-' + str(uuid.uuid1()) + self.provision.create_volume_from_sg( + array, hold_vol_name, source_storage_group_name, + temp_vol_size, extra_specs) + LOG.info("MIGRATE - Volume %(vol)s has been created because " + "there was only one volume remaining in storage group " + "%(src_sg)s and we are attempting to move it to staging " + "storage group %(tgt_sg)s.", + {'vol': hold_vol_name, + 'src_sg': source_storage_group_name, + 'tgt_sg': stg_sg_name}) + + self.rest.move_volume_between_storage_groups( + array, device_id, source_storage_group_name, + stg_sg_name, extra_specs) + + def _delete_staging_masking_views( + self, array, masking_view_list, os_host_name): + """Delete the staging masking views. + + Delete the staging masking views except the masking view + exposed to the OpenStack compute node. + + :param array: array serial number + :param masking_view_list: masking view name list + :param os_host_name: the host the volume is exposed to in OpenStack + :returns: boolean + """ + delete_mv_list = list() + safe_to_delete = False + for masking_view_name in masking_view_list: + if os_host_name in masking_view_name: + safe_to_delete = True + else: + delete_mv_list.append(masking_view_name) + if safe_to_delete: + for delete_mv in delete_mv_list: + self.rest.delete_masking_view(array, 
delete_mv) + LOG.info("MIGRATE - Masking view %(delete_mv)s has been " + "successfully deleted.", + {'delete_mv': delete_mv}) + return safe_to_delete + + def _create_stg_masking_views( + self, array, mv_detail_list, stg_sg_name, extra_specs): + """Create staging masking views. + + :param array: array serial number + :param mv_detail_list: masking view detail list + :param stg_sg_name: staging storage group name + :param extra_specs: the extra specs + :returns: masking view list + """ + new_masking_view_list = list() + for mv_detail in mv_detail_list: + host_name = mv_detail.get('host') + masking_view_name = mv_detail.get('mv_name') + masking_view_components = self.rest.get_masking_view( + array, masking_view_name) + # Create a staging masking view + random_uuid = uuid.uuid1() + staging_mv_name = 'STG-' + host_name + '-' + str( + random_uuid) + '-MV' + if masking_view_components: + self.rest.create_masking_view( + array, staging_mv_name, stg_sg_name, + masking_view_components.get('portGroupId'), + masking_view_components.get('hostId'), extra_specs) + masking_view_dict = self.rest.get_masking_view( + array, staging_mv_name) + if masking_view_dict: + new_masking_view_list.append(staging_mv_name) + else: + LOG.warning("Failed to create staging masking view " + "%(mv_name)s. Migration cannot proceed.", + {'mv_name': staging_mv_name}) + return None + return new_masking_view_list + + def _create_stg_storage_group_with_vol(self, array, os_host_name, + extra_specs): + """Create a staging storage group and add a volume. + + :param array: array serial number + :param os_host_name: the OpenStack host name + :param extra_specs: the extra specs + :returns: storage group name + """ + random_uuid = uuid.uuid1() + # Create a staging SG + stg_sg_name = 'STG-' + os_host_name + '-' + ( + str(random_uuid) + '-SG') + temp_vol_name = 'tempvol-' + str(random_uuid) + temp_vol_size = '1' + + _stg_storage_group = self.provision.create_storage_group( + array, stg_sg_name, + None, None, None, extra_specs) + if _stg_storage_group: + self.provision.create_volume_from_sg( + array, temp_vol_name, stg_sg_name, + temp_vol_size, extra_specs) + return stg_sg_name + else: + return None + + def _get_mvs_and_sgs_from_volume(self, array, device_id): + """Given a device Id get its storage groups and masking views. + + :param array: array serial number + :param device_id: the volume device id + :returns: masking view list, storage group list + """ + final_masking_view_list = [] + storage_group_list = self.rest.get_storage_groups_from_volume( + array, device_id) + for sg in storage_group_list: + masking_view_list = self.rest.get_masking_views_from_storage_group( + array, sg) + final_masking_view_list.extend(masking_view_list) + return final_masking_view_list, storage_group_list + + def get_masking_view_component_dict( + self, masking_view_name, srp): + """Get the components from a masking view name. 
+ + :param masking_view_name: the masking view name -- str + :param srp: the srp -- str + :returns: object components -- dict + """ + regex_str_share = ( + r'^(?P<prefix>OS)-(?P<host>.+?)((?P<srp>' + srp + r')-' + r'(?P<slo>.+?)-(?P<workload>.+?)|(?P<no_slo>No_SLO))' + r'((?P<protocol>-I|-F)|)' + r'(?P<CD>-CD|)(?P<RE>-RE|)' + r'(?P<uuid>-[0-9A-Fa-f]{8}|)' + r'-(?P<postfix>MV)$') + + object_dict = self.utils.get_object_components_and_correct_host( + regex_str_share, masking_view_name) + + if object_dict: + object_dict['mv_name'] = masking_view_name + return object_dict + + def get_volume_host_list(self, volume, connector): + """Get the list of hosts the volume is attached to. + + :param volume: the volume object + :param connector: the connector object + :returns: os_host_list + """ + os_host_list = list() + if connector is not None: + attachment_list = volume.volume_attachment + LOG.debug("Volume attachment list: %(atl)s. " + "Attachment type: %(at)s", + {'atl': attachment_list, 'at': type(attachment_list)}) + try: + att_list = attachment_list.objects + except AttributeError: + att_list = attachment_list + if att_list is not None: + host_list = [att.connector['host'] for att in att_list if + att is not None and att.connector is not None] + for host_name in host_list: + os_host_list.append(self.utils.get_host_short_name(host_name)) + return os_host_list + + def cleanup_staging_objects( + self, array, storage_group_names, extra_specs): + """Delete the staging masking views and storage groups. + + :param array: the array serial number + :param storage_group_names: a list of storage group names + :param extra_specs: the extra specs + """ + def _do_cleanup(sg_name, device_id): + masking_view_list = ( + self.rest.get_masking_views_from_storage_group( + array, sg_name)) + for masking_view in masking_view_list: + if 'STG-' in masking_view: + self.rest.delete_masking_view(array, masking_view) + self.rest.remove_vol_from_sg( + array, sg_name, device_id, + extra_specs) + self.rest.delete_volume(array, device_id) + self.rest.delete_storage_group(array, sg_name) + + for storage_group_name in storage_group_names: + if 'STG-' in storage_group_name: + volume_list = self.rest.get_volumes_in_storage_group( + array, storage_group_name) + if len(volume_list) == 1: + try: + _do_cleanup(storage_group_name, volume_list[0]) + except Exception: + LOG.warning("MIGRATE - An attempt was made to " + "cleanup after a legacy live migration, " + "but it failed. You may choose to " + "cleanup manually.") diff --git a/cinder/volume/drivers/dell_emc/vmax/utils.py b/cinder/volume/drivers/dell_emc/vmax/utils.py index 575b9fb78c1..cfcf6118835 100644 --- a/cinder/volume/drivers/dell_emc/vmax/utils.py +++ b/cinder/volume/drivers/dell_emc/vmax/utils.py @@ -873,3 +873,41 @@ return False return True + + @staticmethod + def get_volume_attached_hostname(device_info): + """Parse a hostname from a storage group ID. + + :param device_info: the device info dict + :returns: str -- the attached hostname + """ + try: + sg_id = device_info.get("storageGroupId")[0] + return sg_id.split('-')[1] + except (IndexError, TypeError): + # TypeError covers a missing storageGroupId (None) + return None + + def get_object_components_and_correct_host(self, regex_str, input_str): + """Get components from input string, correcting the host name. 
+ + :param regex_str: the regex -- str + :param input_str: the input string -- str + :returns: object components -- dict + """ + object_dict = self.get_object_components(regex_str, input_str) + if object_dict and 'host' in object_dict: + # The lazy 'host' regex group can swallow the trailing + # dash separator; strip it + if object_dict['host'].endswith('-'): + object_dict['host'] = object_dict['host'][:-1] + return object_dict + + @staticmethod + def get_object_components(regex_str, input_str): + """Get components from input string. + + :param regex_str: the regex -- str + :param input_str: the input string -- str + :returns: dict + """ + full_str = re.compile(regex_str) + match = full_str.match(input_str) + return match.groupdict() if match else None diff --git a/releasenotes/notes/powermax-auto-migration-5cc57773c23fef02.yaml b/releasenotes/notes/powermax-auto-migration-5cc57773c23fef02.yaml new file mode 100644 index 00000000000..5f5716d1a98 --- /dev/null +++ b/releasenotes/notes/powermax-auto-migration-5cc57773c23fef02.yaml @@ -0,0 +1,13 @@ +--- +features: + - | + The PowerMax driver now moves a legacy shared volume from the masking + view structure used in Ocata and prior releases (when SMI-S was + supported) to staging masking views used in Pike and later releases + (Unisphere for PowerMax REST). In Ocata, the live migration process + shared the storage group containing the volume among the different + compute nodes. In Pike, the masking view structure changed to + facilitate a cleaner live migration process, where only the intended + volume is migrated without exposing other volumes in the storage + group. The staging storage group and masking views enable a seamless + live migration operation on upgraded releases.
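
For reviewers, a minimal standalone sketch of how the named-group regex added in migrate.get_masking_view_component_dict decomposes a legacy masking view name. This is not driver code: the 'SRP_1' literal stands in for the srp argument, and parse_mv_name is a hypothetical helper. It also shows why utils.get_object_components_and_correct_host strips a trailing dash: the lazy host group consumes the separator before the SRP/No_SLO token.

import re

# Mirrors the pattern in migrate.py, with 'SRP_1' hard-coded for the demo.
MV_PATTERN = (
    r'^(?P<prefix>OS)-(?P<host>.+?)((?P<srp>SRP_1)-'
    r'(?P<slo>.+?)-(?P<workload>.+?)|(?P<no_slo>No_SLO))'
    r'((?P<protocol>-I|-F)|)'
    r'(?P<CD>-CD|)(?P<RE>-RE|)'
    r'(?P<uuid>-[0-9A-Fa-f]{8}|)'
    r'-(?P<postfix>MV)$')

def parse_mv_name(mv_name):
    match = re.match(MV_PATTERN, mv_name)
    if not match:
        return None
    components = match.groupdict()
    # The lazy host group swallows the trailing dash; strip it, as
    # get_object_components_and_correct_host does in the driver.
    if components['host'].endswith('-'):
        components['host'] = components['host'][:-1]
    return components

print(parse_mv_name('OS-host-with-dashes-SRP_1-Diamond-NONE-I-MV'))
# host='host-with-dashes', slo='Diamond', workload='NONE', protocol='-I'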
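
Also illustrative: the staging naming convention the patch relies on throughout. Objects created during migration carry the 'STG-' prefix, which is the predicate cleanup_staging_objects and the masking.py fallback use to avoid touching regular 'OS-' groups. The helper below is an assumption mirroring _create_stg_storage_group_with_vol; in the actual driver each staging masking view gets its own uuid1 in _create_stg_masking_views rather than sharing the storage group's.

import uuid

def staging_names(os_host_name):
    # One uuid1 per migration attempt; the driver shares this uuid
    # between the staging storage group and its temporary volume.
    random_uuid = str(uuid.uuid1())
    stg_sg_name = 'STG-' + os_host_name + '-' + random_uuid + '-SG'
    stg_mv_name = 'STG-' + os_host_name + '-' + random_uuid + '-MV'
    return stg_sg_name, stg_mv_name

sg_name, mv_name = staging_names('myhostB')
# Cleanup keys off the prefix, never the full name:
assert sg_name.startswith('STG-') and mv_name.startswith('STG-')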