From 83399aceb0025b12baa0bcd82f04706cbda8d18c Mon Sep 17 00:00:00 2001 From: Atsushi Kawai Date: Thu, 8 Aug 2024 16:44:49 +0900 Subject: [PATCH] Hitachi: Prevent to delete a LDEV assigned to multi objects This patch prevents to delete a LDEV that is unexpectedly assigned to two or more objects(volumes or snapshots). In the unexpected situation, if ``delete`` command for one of objects is run again, the data which is used by other objects is lost. In order to prevent the data-loss, when creating an object, the driver creates a LDEV and stores a value obtained by omitting the hyphen from the object ID(*1) to ``LDEV nickname``. When deleting an object, the driver compares the own object ID and the object ID in ``LDEV nickname``, then, the object and the LDEV is deleted only if both object IDs are same. On the other hand, if both object IDs are not same, only the object is deleted and the LDEV is kept, to prevent data-loss. If format of ``LDEV nickname`` is not object ID(*2), both the object and the LDEV is deleted without comparison, because it avoids disk full risk, due to not deleting any LDEVs. This patch implements only the object ID storing while creating a snapshot and comparing IDs while deleting, because the feature to store the object ID while creating a volume has already been implemented. (*1) Max length of ``LDEV nickname`` is 32 digits characters on Hitachi storage. (*2) 32 digits hexadecimal Closes-Bug: #2072317 Change-Id: I7c6bd9a75dd1d7165d4f8614abb3d59fa642212d --- .../hitachi/test_hitachi_hbsd_mirror_fc.py | 144 +++++++++++++----- .../hitachi/test_hitachi_hbsd_rest_fc.py | 79 +++++++--- .../hitachi/test_hitachi_hbsd_rest_iscsi.py | 51 ++++--- .../drivers/hpe/xp/test_hpe_xp_rest_fc.py | 55 ++++--- .../drivers/hpe/xp/test_hpe_xp_rest_iscsi.py | 43 +++--- .../nec/v/test_internal_nec_rest_fc.py | 55 ++++--- .../nec/v/test_internal_nec_rest_iscsi.py | 53 ++++--- cinder/volume/drivers/hitachi/hbsd_common.py | 109 +++++++++++-- cinder/volume/drivers/hitachi/hbsd_fc.py | 4 +- cinder/volume/drivers/hitachi/hbsd_iscsi.py | 4 +- .../drivers/hitachi/hbsd_replication.py | 137 +++++++++++++---- cinder/volume/drivers/hitachi/hbsd_rest.py | 32 +++- cinder/volume/drivers/hitachi/hbsd_utils.py | 10 +- .../drivers/hitachi-vsp-driver.rst | 8 + ...hi-prevent-data-loss-9ec3569d7d5b1e7d.yaml | 6 + 15 files changed, 584 insertions(+), 206 deletions(-) create mode 100644 releasenotes/notes/hitachi-prevent-data-loss-9ec3569d7d5b1e7d.yaml diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py index 37aff891755..49a40d5c024 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py @@ -271,6 +271,29 @@ GET_LDEV_RESULT = { "poolId": 30, "dataReductionStatus": "DISABLED", "dataReductionMode": "disabled", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_SPLIT = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "DISABLED", + "dataReductionMode": "disabled", + "label": "00000000000000000000000000000004", +} + +GET_LDEV_RESULT_LABEL = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "DISABLED", + "dataReductionMode": "disabled", + "label": "00000000000000000000000000000001", } 
GET_LDEV_RESULT_MAPPED = { @@ -308,6 +331,7 @@ GET_LDEV_RESULT_PAIR = { "blockCapacity": 2097152, "attributes": ["CVS", "HDP", "HTI"], "status": "NML", + "label": "10000000000000000000000000000000", } GET_LDEV_RESULT_REP = { @@ -316,6 +340,16 @@ GET_LDEV_RESULT_REP = { "attributes": ["CVS", "HDP", "GAD"], "status": "NML", "numOfPorts": 1, + "label": "00000000000000000000000000000004", +} + +GET_LDEV_RESULT_REP_LABEL = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP", "GAD"], + "status": "NML", + "numOfPorts": 1, + "label": "00000000000000000000000000000001", } GET_POOL_RESULT = { @@ -891,19 +925,42 @@ class HBSDMIRRORFCDriverTest(test.TestCase): self.ldev_count = self.ldev_count + 1 return FakeResponse(200, GET_LDEV_RESULT_REP) else: - return FakeResponse(200, GET_LDEV_RESULT) + return FakeResponse(200, GET_LDEV_RESULT_SPLIT) else: if method in ('POST', 'PUT', 'DELETE'): return FakeResponse(202, REMOTE_COMPLETED_SUCCEEDED_RESULT) elif method == 'GET': if '/ldevs/' in url: - return FakeResponse(200, GET_LDEV_RESULT) + return FakeResponse(200, GET_LDEV_RESULT_SPLIT) return FakeResponse( 500, ERROR_RESULT, headers={'Content-Type': 'json'}) request.side_effect = _request_side_effect self.driver.delete_volume(TEST_VOLUME[4]) self.assertEqual(17, request.call_count) + @mock.patch.object(requests.Session, "request") + def test_delete_volume_primary_is_invalid_ldev(self, request): + request.return_value = FakeResponse(200, GET_LDEV_RESULT_LABEL) + self.driver.delete_volume(TEST_VOLUME[0]) + self.assertEqual(1, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_delete_volume_primary_secondary_is_invalid_ldev(self, request): + request.return_value = FakeResponse(200, GET_LDEV_RESULT_REP_LABEL) + self.driver.delete_volume(TEST_VOLUME[4]) + self.assertEqual(2, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_delete_volume_secondary_is_invalid_ldev(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_REP_LABEL), + FakeResponse(200, GET_LDEV_RESULT_REP), + FakeResponse(200, GET_LDEV_RESULT_REP), + FakeResponse(200, GET_LDEV_RESULT_REP), + FakeResponse(200, GET_LDEV_RESULT_REP), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.delete_volume(TEST_VOLUME[4]) + self.assertEqual(6, request.call_count) + @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), @@ -981,7 +1038,8 @@ class HBSDMIRRORFCDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common.rep_primary._stats = {} self.driver.common.rep_primary._stats['pools'] = [ {'location_info': {'pool_id': 30}}] @@ -991,7 +1049,7 @@ class HBSDMIRRORFCDriverTest(test.TestCase): ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) actual = {'provider_location': '1'} self.assertEqual(actual, ret) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): @@ -1288,41 +1346,25 @@ class HBSDMIRRORFCDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.side_effect = 
[FakeResponse(200, GET_LDEV_RESULT), - FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") - self.assertEqual(4, request.call_count) - args, kwargs = request.call_args_list[3] - self.assertEqual(kwargs['json']['label'], - TEST_VOLUME[0]['id'].replace("-", "")) + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") + self.assertEqual(2, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) @mock.patch.object(requests.Session, "request") def test_update_migrated_volume_replication(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_REP), - FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_LDEV_RESULT_REP), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[4], - "available") - self.assertEqual(6, request.call_count) - original_volume_nickname = TEST_VOLUME[0]['id'].replace("-", "") - for i in (4, 5): - args, kwargs = request.call_args_list[i] - self.assertEqual(kwargs['json']['label'], original_volume_nickname) + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[4], "available") + self.assertEqual(3, request.call_count) + actual = ({'_name_id': TEST_VOLUME[4]['id'], + 'provider_location': TEST_VOLUME[4]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -1472,6 +1514,39 @@ class HBSDMIRRORFCDriverTest(test.TestCase): 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) + @mock.patch.object(requests.Session, "request") + @mock.patch.object(volume_types, 'get_volume_type') + @mock.patch.object(volume_types, 'get_volume_type_extra_specs') + def test_create_group_from_src_Exception( + self, get_volume_type_extra_specs, get_volume_type, request): + extra_specs = {"test1": "aaa"} + get_volume_type_extra_specs.return_value = extra_specs + get_volume_type.return_value = {} + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.common.rep_primary._stats = {} + self.driver.common.rep_primary._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + self.driver.common.rep_secondary._stats = {} + self.driver.common.rep_secondary._stats['pools'] = [ + {'location_info': {'pool_id': 40}}] + self.assertRaises(exception.VolumeDriverException, + self.driver.create_group_from_src, + self.ctxt, TEST_GROUP[1], + [TEST_VOLUME[1], TEST_VOLUME[1]], + source_group=TEST_GROUP[0], + source_vols=[TEST_VOLUME[0], TEST_VOLUME[3]] + ) + self.assertEqual(10, request.call_count) + @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 
'get_volume_type') @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -1522,7 +1597,8 @@ class HBSDMIRRORFCDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common.rep_primary._stats = {} self.driver.common.rep_primary._stats['pools'] = [ {'location_info': {'pool_id': 30}}] @@ -1532,7 +1608,7 @@ class HBSDMIRRORFCDriverTest(test.TestCase): ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py index f2596ca20d2..0b42e7780f9 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py @@ -219,6 +219,29 @@ GET_LDEV_RESULT = { "poolId": 30, "dataReductionStatus": "DISABLED", "dataReductionMode": "disabled", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_LABEL = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "DISABLED", + "dataReductionMode": "disabled", + "label": "00000000000000000000000000000001", +} + +GET_LDEV_RESULT_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "DISABLED", + "dataReductionMode": "disabled", + "label": "10000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { @@ -241,6 +264,15 @@ GET_LDEV_RESULT_PAIR = { "blockCapacity": 2097152, "attributes": ["CVS", "HDP", "HTI"], "status": "NML", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_PAIR_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP", "HTI"], + "status": "NML", + "label": "10000000000000000000000000000000", } GET_LDEV_RESULT_PAIR_TEST = { @@ -877,6 +909,12 @@ class HBSDRESTFCDriverTest(test.TestCase): TEST_VOLUME[0]) self.assertEqual(2, request.call_count) + @mock.patch.object(requests.Session, "request") + def test_delete_volume_is_invalid_ldev(self, request): + request.return_value = FakeResponse(200, GET_LDEV_RESULT_LABEL) + self.driver.delete_volume(TEST_VOLUME[0]) + self.assertEqual(1, request.call_count) + @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), @@ -957,7 +995,8 @@ class HBSDRESTFCDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ @@ -965,7 +1004,7 @@ class HBSDRESTFCDriverTest(test.TestCase): ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) 
self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -975,7 +1014,8 @@ class HBSDRESTFCDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {'hbsd:capacity_saving': 'disable'} self.driver.common._stats = {} @@ -984,11 +1024,11 @@ class HBSDRESTFCDriverTest(test.TestCase): ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), @@ -1008,7 +1048,7 @@ class HBSDRESTFCDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_delete_snapshot_no_pair(self, request): """Normal case: Delete a snapshot without pair.""" - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -1303,17 +1343,12 @@ class HBSDRESTFCDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") - self.assertEqual(2, request.call_count) - args, kwargs = request.call_args_list[1] - self.assertEqual(kwargs['json']['label'], - TEST_VOLUME[0]['id'].replace("-", "")) + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") + self.assertEqual(1, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -1517,7 +1552,8 @@ class HBSDRESTFCDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] @@ -1525,7 +1561,7 @@ class HBSDRESTFCDriverTest(test.TestCase): self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1544,6 +1580,7 
@@ class HBSDRESTFCDriverTest(test.TestCase): is_group_a_cg_snapshot_type.return_value = True get_volume_type_extra_specs.return_value = {} request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), @@ -1555,7 +1592,7 @@ class HBSDRESTFCDriverTest(test.TestCase): self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(5, request.call_count) + self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1566,7 +1603,7 @@ class HBSDRESTFCDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_delete_group_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py index 796ff187564..84f10132f65 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py @@ -194,6 +194,18 @@ GET_LDEV_RESULT = { "poolId": 30, "dataReductionStatus": "DISABLED", "dataReductionMode": "disabled", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "DISABLED", + "dataReductionMode": "disabled", + "label": "10000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { @@ -216,6 +228,7 @@ GET_LDEV_RESULT_PAIR = { "blockCapacity": 2097152, "attributes": ["CVS", "HDP", "HTI"], "status": "NML", + "label": "10000000000000000000000000000000", } GET_POOLS_RESULT = { @@ -631,24 +644,31 @@ class HBSDRESTISCSIDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_snapshot(TEST_SNAPSHOT[0]) self.assertEqual(4, request.call_count) + @mock.patch.object(requests.Session, "request") + def test_delete_snapshot_is_invalid_ldev(self, request): + request.return_value = FakeResponse(200, GET_LDEV_RESULT) + self.driver.delete_snapshot(TEST_SNAPSHOT[0]) + self.assertEqual(1, request.call_count) + 
@mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_create_cloned_volume( @@ -863,17 +883,12 @@ class HBSDRESTISCSIDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") - self.assertEqual(2, request.call_count) - args, kwargs = request.call_args_list[1] - self.assertEqual(kwargs['json']['label'], - TEST_VOLUME[0]['id'].replace("-", "")) + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") + self.assertEqual(1, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -1037,7 +1052,8 @@ class HBSDRESTISCSIDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] @@ -1045,7 +1061,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase): self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1064,6 +1080,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase): is_group_a_cg_snapshot_type.return_value = True get_volume_type_extra_specs.return_value = {} request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), @@ -1075,7 +1092,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase): self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(5, request.call_count) + self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], diff --git a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py index f0ec496becc..4e2d4f83e9b 100644 --- a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py @@ -187,6 +187,7 @@ GET_LDEV_RESULT = { "attributes": ["CVS", "THP"], "status": "NML", "poolId": 30, + "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { @@ -204,11 +205,29 @@ GET_LDEV_RESULT_MAPPED = { ], } +GET_LDEV_RESULT_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "THP"], + "status": "NML", + "poolId": 30, + "label": "10000000000000000000000000000000", +} + GET_LDEV_RESULT_PAIR = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "THP", "FS"], "status": "NML", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_PAIR_SNAP 
= { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "THP", "FS"], + "status": "NML", + "label": "10000000000000000000000000000000", } GET_POOL_RESULT = { @@ -701,17 +720,18 @@ class HPEXPRESTFCDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), @@ -727,7 +747,7 @@ class HPEXPRESTFCDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_delete_snapshot_no_pair(self, request): """Normal case: Delete a snapshot without pair.""" - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -948,17 +968,12 @@ class HPEXPRESTFCDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") - self.assertEqual(2, request.call_count) - args, kwargs = request.call_args_list[1] - self.assertEqual(kwargs['json']['label'], - TEST_VOLUME[0]['id'].replace("-", "")) + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") + self.assertEqual(1, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -1095,14 +1110,15 @@ class HPEXPRESTFCDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1118,6 +1134,7 @@ class HPEXPRESTFCDriverTest(test.TestCase): self, is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = True request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), 
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), @@ -1128,7 +1145,7 @@ class HPEXPRESTFCDriverTest(test.TestCase): ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1139,7 +1156,7 @@ class HPEXPRESTFCDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_delete_group_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), diff --git a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py index cffdab86760..7173edc42f2 100644 --- a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py +++ b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py @@ -190,6 +190,7 @@ GET_LDEV_RESULT = { "attributes": ["CVS", "THP"], "status": "NML", "poolId": 30, + "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { @@ -207,11 +208,21 @@ GET_LDEV_RESULT_MAPPED = { ], } +GET_LDEV_RESULT_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "THP"], + "status": "NML", + "poolId": 30, + "label": "10000000000000000000000000000000", +} + GET_LDEV_RESULT_PAIR = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "THP", "FS"], "status": "NML", + "label": "10000000000000000000000000000000", } GET_POOLS_RESULT = { @@ -548,17 +559,18 @@ class HPEXPRESTISCSIDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -761,17 +773,12 @@ class HPEXPRESTISCSIDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") - self.assertEqual(2, request.call_count) - args, kwargs = request.call_args_list[1] - self.assertEqual(kwargs['json']['label'], - TEST_VOLUME[0]['id'].replace("-", "")) + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") + self.assertEqual(1, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 
'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -902,14 +909,15 @@ class HPEXPRESTISCSIDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], @@ -925,6 +933,7 @@ class HPEXPRESTISCSIDriverTest(test.TestCase): self, is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = True request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), @@ -935,7 +944,7 @@ class HPEXPRESTISCSIDriverTest(test.TestCase): ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], diff --git a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py index cb43ce20909..0cc49dd31e1 100644 --- a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py @@ -187,6 +187,7 @@ GET_LDEV_RESULT = { "attributes": ["CVS", "DP"], "status": "NML", "poolId": 30, + "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { @@ -204,11 +205,29 @@ GET_LDEV_RESULT_MAPPED = { ], } +GET_LDEV_RESULT_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "DP"], + "status": "NML", + "poolId": 30, + "label": "10000000000000000000000000000000", +} + GET_LDEV_RESULT_PAIR = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "DP", "SS"], "status": "NML", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_PAIR_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "DP", "SS"], + "status": "NML", + "label": "10000000000000000000000000000000", } GET_SNAPSHOTS_RESULT = { @@ -691,17 +710,18 @@ class VStorageRESTFCDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): - request.side_effect = 
[FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), @@ -717,7 +737,7 @@ class VStorageRESTFCDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_delete_snapshot_no_pair(self, request): """Normal case: Delete a snapshot without pair.""" - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -938,17 +958,12 @@ class VStorageRESTFCDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") - self.assertEqual(2, request.call_count) - args, kwargs = request.call_args_list[1] - self.assertEqual(kwargs['json']['label'], - TEST_VOLUME[0]['id'].replace("-", "")) + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") + self.assertEqual(1, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -1085,14 +1100,15 @@ class VStorageRESTFCDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1108,6 +1124,7 @@ class VStorageRESTFCDriverTest(test.TestCase): self, is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = True request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), @@ -1118,7 +1135,7 @@ class VStorageRESTFCDriverTest(test.TestCase): ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1129,7 +1146,7 @@ class VStorageRESTFCDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_delete_group_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), diff --git a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py 
b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py index 61a038b108e..b1ade2db115 100644 --- a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py +++ b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py @@ -191,6 +191,7 @@ GET_LDEV_RESULT = { "attributes": ["CVS", "DP"], "status": "NML", "poolId": 30, + "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { @@ -208,11 +209,29 @@ GET_LDEV_RESULT_MAPPED = { ], } +GET_LDEV_RESULT_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "DP"], + "status": "NML", + "poolId": 30, + "label": "10000000000000000000000000000000", +} + GET_LDEV_RESULT_PAIR = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "DP", "SS"], "status": "NML", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_PAIR_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "DP", "SS"], + "status": "NML", + "label": "10000000000000000000000000000000", } GET_POOLS_RESULT = { @@ -584,17 +603,18 @@ class VStorageRESTISCSIDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -797,17 +817,12 @@ class VStorageRESTISCSIDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") - self.assertEqual(2, request.call_count) - args, kwargs = request.call_args_list[1] - self.assertEqual(kwargs['json']['label'], - TEST_VOLUME[0]['id'].replace("-", "")) + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") + self.assertEqual(1, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -938,14 +953,15 @@ class VStorageRESTISCSIDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], 
[TEST_SNAPSHOT[0]] ) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], @@ -961,6 +977,7 @@ class VStorageRESTISCSIDriverTest(test.TestCase): self, is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = True request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), @@ -971,7 +988,7 @@ class VStorageRESTISCSIDriverTest(test.TestCase): ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], @@ -982,7 +999,7 @@ class VStorageRESTISCSIDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_delete_group_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), diff --git a/cinder/volume/drivers/hitachi/hbsd_common.py b/cinder/volume/drivers/hitachi/hbsd_common.py index ed972d6f234..77ff8e32b58 100644 --- a/cinder/volume/drivers/hitachi/hbsd_common.py +++ b/cinder/volume/drivers/hitachi/hbsd_common.py @@ -14,6 +14,7 @@ # """Common module for Hitachi HBSD Driver.""" +from collections import defaultdict import json import re @@ -47,6 +48,8 @@ _GROUP_NAME_VAR_LEN = {GROUP_NAME_VAR_WWN: _GROUP_NAME_VAR_WWN_LEN, STR_VOLUME = 'volume' STR_SNAPSHOT = 'snapshot' +_UUID_PATTERN = re.compile(r'^[\da-f]{32}$') + _INHERITED_VOLUME_OPTS = [ 'volume_backend_name', 'volume_driver', @@ -346,13 +349,19 @@ class HBSDCommon(): """Disconnect all volume pairs to which the specified S-VOL belongs.""" raise NotImplementedError() - def get_pair_info(self, ldev): + def get_pair_info(self, ldev, ldev_info=None): """Return volume pair info(LDEV number, pair status and pair type).""" raise NotImplementedError() - def delete_pair(self, ldev): - """Disconnect all volume pairs to which the specified LDEV belongs.""" - pair_info = self.get_pair_info(ldev) + def delete_pair(self, ldev, ldev_info=None): + """Disconnect all volume pairs to which the specified LDEV belongs. + + :param int ldev: The ID of the LDEV whose TI pair needs be deleted + :param dict ldev_info: LDEV info + :return: None + :raises VolumeDriverException: if the LDEV is a P-VOL of a TI pair + """ + pair_info = self.get_pair_info(ldev, ldev_info) if not pair_info: return if pair_info['pvol'] == ldev: @@ -383,12 +392,57 @@ class HBSDCommon(): """Delete the specified LDEV from the storage.""" raise NotImplementedError() - def delete_ldev(self, ldev): - """Delete the specified LDEV.""" - self.delete_pair(ldev) + def delete_ldev(self, ldev, ldev_info=None): + """Delete the specified LDEV. + + :param int ldev: The ID of the LDEV to be deleted + :param dict ldev_info: LDEV info + :return: None + """ + self.delete_pair(ldev, ldev_info) self.unmap_ldev_from_storage(ldev) self.delete_ldev_from_storage(ldev) + def is_invalid_ldev(self, ldev, obj, ldev_info_): + """Check if the specified LDEV corresponds to the specified object. 
+ + If the LDEV label and the object's id or name_id do not match, the LDEV + was deleted and another LDEV with the same ID was created for another + volume or snapshot. In this case, we say that the LDEV is invalid. + If the LDEV label is not set or its format is unexpected, we cannot + judge if the LDEV corresponds to the object. This can happen if the + LDEV was created in older versions of this product or if the user + overwrote the label. In this case, we just say that the LDEV is not + invalid, although we are not completely sure about it. + The reason for using name_id rather than id for volumes in comparison + is that id of the volume that corresponds to the LDEV changes by + host-assisted migration while that is not the case with name_id and + that the LDEV label is created from id of the volume when the LDEV is + created and is never changed after that. + Because Snapshot objects do not have name_id, we use id instead of + name_id if the object is a Snapshot. We assume that the object is a + Snapshot object if hasattr(obj, 'name_id') returns False. + This method returns False if the LDEV does not exist on the storage. + The absence of the LDEV on the storage is detected elsewhere. + :param int ldev: The ID of the LDEV to be checked + :param obj: The object to be checked + :type obj: Volume or Snapshot + :param dict ldev_info_: LDEV info. This is an output area. Data is + written by this method, but the area must be secured by the caller. + :return: True if the LDEV does not correspond to the object, False + otherwise + :rtype: bool + """ + ldev_info = self.get_ldev_info(None, ldev) + # To avoid calling the same REST API multiple times, we pass the LDEV + # info to the caller. + ldev_info_.update(ldev_info) + return ('label' in ldev_info + and _UUID_PATTERN.match(ldev_info['label']) + and ldev_info['label'] != ( + obj.name_id if hasattr(obj, 'name_id') else + obj.id).replace('-', '')) + def delete_volume(self, volume): """Delete the specified volume.""" ldev = self.get_ldev(volume) @@ -397,8 +451,18 @@ class HBSDCommon(): MSG.INVALID_LDEV_FOR_DELETION, method='delete_volume', id=volume['id']) return + # Check if the LDEV corresponds to the volume. + # To avoid KeyError when accessing a missing attribute, set the default + # value to None. + ldev_info = defaultdict(lambda: None) + if self.is_invalid_ldev(ldev, volume, ldev_info): + # If the LDEV is assigned to another object, skip deleting it. + self.output_log(MSG.SKIP_DELETING_LDEV, obj='volume', + obj_id=volume.id, ldev=ldev, + ldev_label=ldev_info['label']) + return try: - self.delete_ldev(ldev) + self.delete_ldev(ldev, ldev_info) except exception.VolumeDriverException as ex: if utils.BUSY_MESSAGE in ex.msg: raise exception.VolumeIsBusy(volume_name=volume['name']) @@ -422,6 +486,7 @@ class HBSDCommon(): new_ldev = self.copy_on_storage( ldev, size, extra_specs, pool_id, snap_pool_id, ldev_range, is_snapshot=True) + self.modify_ldev_name(new_ldev, snapshot.id.replace("-", "")) return { 'provider_location': str(new_ldev), } @@ -434,8 +499,18 @@ class HBSDCommon(): MSG.INVALID_LDEV_FOR_DELETION, method='delete_snapshot', id=snapshot['id']) return + # Check if the LDEV corresponds to the snapshot. + # To avoid KeyError when accessing a missing attribute, set the default + # value to None. + ldev_info = defaultdict(lambda: None) + if self.is_invalid_ldev(ldev, snapshot, ldev_info): + # If the LDEV is assigned to another object, skip deleting it. 
+ self.output_log(MSG.SKIP_DELETING_LDEV, obj='snapshot', + obj_id=snapshot.id, ldev=ldev, + ldev_label=ldev_info['label']) + return try: - self.delete_ldev(ldev) + self.delete_ldev(ldev, ldev_info) except exception.VolumeDriverException as ex: if utils.BUSY_MESSAGE in ex.msg: raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) @@ -1102,12 +1177,10 @@ class HBSDCommon(): """Migrate the specified volume.""" return False - def update_migrated_volume(self, volume, new_volume): - """Update LDEV settings after generic volume migration.""" - ldev = self.get_ldev(new_volume) - # We do not need to check if ldev is not None because it is guaranteed - # that ldev is not None because migration has been successful so far. - self.modify_ldev_name(ldev, volume['id'].replace("-", "")) + def update_migrated_volume(self, new_volume): + """Return model update for migrated volume.""" + return {'_name_id': new_volume.name_id, + 'provider_location': new_volume.provider_location} def retype(self, ctxt, volume, new_type, diff, host): """Retype the specified volume.""" @@ -1164,7 +1237,11 @@ class HBSDCommon(): provider_location = obj.get('provider_location') if not provider_location: return None - if provider_location.isdigit(): + if provider_location.isdigit() and not getattr(self, 'is_secondary', + False): + # This format implies that the value is the ID of an LDEV in the + # primary storage. Therefore, the secondary instance should not + # retrieve this value. return int(provider_location) if provider_location.startswith('{'): loc = json.loads(provider_location) diff --git a/cinder/volume/drivers/hitachi/hbsd_fc.py b/cinder/volume/drivers/hitachi/hbsd_fc.py index 25a20479878..9afb63ed74c 100644 --- a/cinder/volume/drivers/hitachi/hbsd_fc.py +++ b/cinder/volume/drivers/hitachi/hbsd_fc.py @@ -191,9 +191,7 @@ class HBSDFCDriver(driver.FibreChannelDriver): self, ctxt, volume, new_volume, original_volume_status): """Do any remaining jobs after migration.""" self.common.discard_zero_page(new_volume) - self.common.update_migrated_volume(volume, new_volume) - super(HBSDFCDriver, self).update_migrated_volume( - ctxt, volume, new_volume, original_volume_status) + return self.common.update_migrated_volume(new_volume) @volume_utils.trace def copy_image_to_volume(self, context, volume, image_service, image_id, diff --git a/cinder/volume/drivers/hitachi/hbsd_iscsi.py b/cinder/volume/drivers/hitachi/hbsd_iscsi.py index 21fcc0d1e57..8bcfe6b38cd 100644 --- a/cinder/volume/drivers/hitachi/hbsd_iscsi.py +++ b/cinder/volume/drivers/hitachi/hbsd_iscsi.py @@ -187,9 +187,7 @@ class HBSDISCSIDriver(driver.ISCSIDriver): self, ctxt, volume, new_volume, original_volume_status): """Do any remaining jobs after migration.""" self.common.discard_zero_page(new_volume) - self.common.update_migrated_volume(volume, new_volume) - super(HBSDISCSIDriver, self).update_migrated_volume( - ctxt, volume, new_volume, original_volume_status) + return self.common.update_migrated_volume(new_volume) @volume_utils.trace def copy_image_to_volume(self, context, volume, image_service, image_id, diff --git a/cinder/volume/drivers/hitachi/hbsd_replication.py b/cinder/volume/drivers/hitachi/hbsd_replication.py index 09395f375c4..3cef57aefce 100644 --- a/cinder/volume/drivers/hitachi/hbsd_replication.py +++ b/cinder/volume/drivers/hitachi/hbsd_replication.py @@ -14,6 +14,7 @@ # """replication module for Hitachi HBSD Driver.""" +from collections import defaultdict import json from eventlet import greenthread @@ -562,16 +563,32 @@ class 
HBSDREPLICATION(rest.HBSDREST): } return self.rep_primary.create_volume(volume) - def _has_rep_pair(self, ldev): - ldev_info = self.rep_primary.get_ldev_info( - ['status', 'attributes'], ldev) + def _has_rep_pair(self, ldev, ldev_info=None): + """Return if the specified LDEV has a replication pair. + + :param int ldev: The LDEV ID + :param dict ldev_info: LDEV info + :return: True if the LDEV status is normal and the LDEV has a + replication pair, False otherwise + :rtype: bool + """ + if ldev_info is None: + ldev_info = self.rep_primary.get_ldev_info( + ['status', 'attributes'], ldev) return (ldev_info['status'] == rest.NORMAL_STS and self.driver_info['mirror_attr'] in ldev_info['attributes']) - def _get_rep_pair_info(self, pldev): - """Return replication pair info.""" + def _get_rep_pair_info(self, pldev, ldev_info=None): + """Return replication pair info. + + :param int pldev: The ID of the LDEV(P-VOL in case of a pair) + :param dict ldev_info: LDEV info + :return: replication pair info. An empty dict if the LDEV does not + have a pair. + :rtype: dict + """ pair_info = {} - if not self._has_rep_pair(pldev): + if not self._has_rep_pair(pldev, ldev_info): return pair_info self._require_rep_secondary() copy_group_name = self._create_rep_copy_group_name(pldev) @@ -609,6 +626,65 @@ class HBSDREPLICATION(rest.HBSDREST): self.rep_primary.client.delete_remote_copypair( self.rep_secondary.client, copy_group_name, pvol, svol) + def _delete_volume_pre_check(self, volume): + """Pre-check for delete_volume(). + + :param Volume volume: The volume to be checked + :return: svol: The ID of the S-VOL + :rtype: int + :return: pvol_is_invalid: True if P-VOL is invalid, False otherwise + :rtype: bool + :return: svol_is_invalid: True if S-VOL is invalid, False otherwise + :rtype: bool + :return: pair_exists: True if the pair exists, False otherwise + :rtype: bool + """ + # Check if the LDEV in the primary storage corresponds to the volume + pvol_is_invalid = True + # To avoid KeyError when accessing a missing attribute, set the default + # value to None. + pvol_info = defaultdict(lambda: None) + pvol = self.rep_primary.get_ldev(volume) + if pvol is not None: + if self.rep_primary.is_invalid_ldev(pvol, volume, pvol_info): + # If the LDEV is assigned to another object, skip deleting it. + self.rep_primary.output_log( + MSG.SKIP_DELETING_LDEV, obj='volume', obj_id=volume.id, + ldev=pvol, ldev_label=pvol_info['label']) + else: + pvol_is_invalid = False + # Check if the pair exists on the storage. + pair_exists = False + svol_is_invalid = True + svol = None + if not pvol_is_invalid: + pair_info = self._get_rep_pair_info(pvol, pvol_info) + if pair_info: + pair_exists = True + # Because this pair is a valid P-VOL's pair, we need to delete + # it and its LDEVs. The LDEV ID of the S-VOL to be deleted is + # uniquely determined from the pair info. Therefore, there is + # no need to get it from provider_location or to validate the + # S-VOL by comparing the volume ID with the S-VOL's label. + svol = pair_info['svol_info'][0]['ldev'] + svol_is_invalid = False + # Check if the LDEV in the secondary storage corresponds to the volume + if svol_is_invalid: + svol = self.rep_secondary.get_ldev(volume) + if svol is not None: + # To avoid KeyError when accessing a missing attribute, set the + # default value to None. + svol_info = defaultdict(lambda: None) + if self.rep_secondary.is_invalid_ldev(svol, volume, svol_info): + # If the LDEV is assigned to another object, skip deleting + # it. 
+ self.rep_secondary.output_log( + MSG.SKIP_DELETING_LDEV, obj='volume', obj_id=volume.id, + ldev=svol, ldev_label=svol_info['label']) + else: + svol_is_invalid = False + return svol, pvol_is_invalid, svol_is_invalid, pair_exists + def delete_volume(self, volume): """Delete the specified volume.""" self._require_rep_primary() @@ -618,22 +694,34 @@ class HBSDREPLICATION(rest.HBSDREST): MSG.INVALID_LDEV_FOR_DELETION, method='delete_volume', id=volume.id) return - pair_info = self._get_rep_pair_info(ldev) - if pair_info: - self._delete_rep_pair( - pair_info['pvol'], pair_info['svol_info'][0]['ldev']) + # Run pre-check. + svol, pvol_is_invalid, svol_is_invalid, pair_exists = ( + self._delete_volume_pre_check(volume)) + # Delete the pair if it exists. + if pair_exists: + self._delete_rep_pair(ldev, svol) + # Delete LDEVs if they are valid. + thread = None + if not svol_is_invalid: thread = greenthread.spawn( self.rep_secondary.delete_volume, volume) - try: + try: + if not pvol_is_invalid: self.rep_primary.delete_volume(volume) - finally: + finally: + if thread is not None: thread.wait() - else: - self.rep_primary.delete_volume(volume) - def delete_ldev(self, ldev): + def delete_ldev(self, ldev, ldev_info=None): + """Delete the specified LDEV[s]. + + :param int ldev: The ID of the LDEV(P-VOL in case of a pair) to be + deleted + :param dict ldev_info: LDEV(P-VOL in case of a pair) info + :return: None + """ self._require_rep_primary() - pair_info = self._get_rep_pair_info(ldev) + pair_info = self._get_rep_pair_info(ldev, ldev_info) if pair_info: self._delete_rep_pair(ldev, pair_info['svol_info'][0]['ldev']) th = greenthread.spawn(self.rep_secondary.delete_ldev, @@ -940,23 +1028,6 @@ class HBSDREPLICATION(rest.HBSDREST): else: return self.rep_primary.migrate_volume(volume, host) - def update_migrated_volume(self, volume, new_volume): - """Update LDEV settings after generic volume migration.""" - self._require_rep_primary() - ldev = self.rep_primary.get_ldev(new_volume) - # We do not need to check if ldev is not None because it is guaranteed - # that ldev is not None because migration has been successful so far. - if self._has_rep_pair(ldev): - self._require_rep_secondary() - thread = greenthread.spawn( - self.rep_secondary.update_migrated_volume, volume, new_volume) - try: - self.rep_primary.update_migrated_volume(volume, new_volume) - finally: - thread.wait() - else: - self.rep_primary.update_migrated_volume(volume, new_volume) - def _resync_rep_pair(self, pvol, svol): copy_group_name = self._create_rep_copy_group_name(pvol) rep_type = self.driver_info['mirror_attr'] diff --git a/cinder/volume/drivers/hitachi/hbsd_rest.py b/cinder/volume/drivers/hitachi/hbsd_rest.py index ac7eb2c8606..28cdbc4e2c5 100644 --- a/cinder/volume/drivers/hitachi/hbsd_rest.py +++ b/cinder/volume/drivers/hitachi/hbsd_rest.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020, 2023, Hitachi, Ltd. +# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -522,9 +522,22 @@ class HBSDREST(common.HBSDCommon): self._create_clone_pair(pvol, svol, snap_pool_id) def get_ldev_info(self, keys, ldev, **kwargs): - """Return a dictionary of LDEV-related items.""" + """Return a dictionary of LDEV-related items. + + :param keys: LDEV Attributes to be obtained. Specify None to obtain all + LDEV attributes. 
@@ -909,10 +922,17 @@ class HBSDREST(common.HBSDCommon):
 
         return pvol, [{'ldev': svol, 'is_psus': is_psus, 'status': status}]
 
-    def get_pair_info(self, ldev):
-        """Return info of the volume pair."""
+    def get_pair_info(self, ldev, ldev_info=None):
+        """Return info of the volume pair.
+
+        :param int ldev: The LDEV ID
+        :param dict ldev_info: LDEV info
+        :return: Thin Image (TI) pair info if the LDEV has TI pairs, None
+            otherwise
+        :rtype: dict or NoneType
+        """
         pair_info = {}
-        ldev_info = self.get_ldev_info(['status', 'attributes'], ldev)
+        if ldev_info is None:
+            ldev_info = self.get_ldev_info(['status', 'attributes'], ldev)
         if (ldev_info['status'] != NORMAL_STS or
                 self.driver_info['pair_attr'] not in ldev_info['attributes']):
             return None
@@ -1278,6 +1298,8 @@ class HBSDREST(common.HBSDCommon):
                 extra_specs = self.get_volume_extra_specs(snapshot.volume)
                 pair['svol'] = self.create_ldev(size, extra_specs, pool_id,
                                                 ldev_range)
+                self.modify_ldev_name(pair['svol'],
+                                      snapshot.id.replace("-", ""))
             except Exception as exc:
                 pair['msg'] = utils.get_exception_msg(exc)
                 raise loopingcall.LoopingCallDone(pair)
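The nickname written by `modify_ldev_name()` above always fits the storage's 32-character limit: a canonical UUID string contains 32 hexadecimal digits plus 4 hyphens, so stripping the hyphens yields exactly 32 characters. A quick check:

```python
import uuid

snapshot_id = str(uuid.uuid4())           # canonical form, 36 characters
nickname = snapshot_id.replace('-', '')   # hyphens removed

assert len(snapshot_id) == 36
assert len(nickname) == 32                # fits the 32-character nickname
assert set(nickname) <= set('0123456789abcdef')
```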
diff --git a/cinder/volume/drivers/hitachi/hbsd_utils.py b/cinder/volume/drivers/hitachi/hbsd_utils.py
index b5f2ee90af6..7bd33bbf1f0 100644
--- a/cinder/volume/drivers/hitachi/hbsd_utils.py
+++ b/cinder/volume/drivers/hitachi/hbsd_utils.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2020, 2023, Hitachi, Ltd.
+# Copyright (C) 2020, 2024, Hitachi, Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -253,6 +253,14 @@ class HBSDMsg(enum.Enum):
                'to be active by the Fibre Channel Zone Manager.',
         'suffix': WARNING_SUFFIX,
     }
+    SKIP_DELETING_LDEV = {
+        'msg_id': 348,
+        'loglevel': base_logging.WARNING,
+        'msg': 'Skip deleting the LDEV and its LUNs and pairs because the '
+               'LDEV is used by another object. (%(obj)s: %(obj_id)s, LDEV: '
+               '%(ldev)s, LDEV label: %(ldev_label)s)',
+        'suffix': WARNING_SUFFIX,
+    }
     STORAGE_COMMAND_FAILED = {
         'msg_id': 600,
         'loglevel': base_logging.ERROR,
diff --git a/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst b/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst
index db4d059d87e..3c8825dca4a 100644
--- a/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst
@@ -143,6 +143,12 @@ If you use iSCSI:
 1. ``Ports``
    Assign an IP address and a TCP port number to the port.
 
+.. note::
+
+   * Do not change the LDEV nickname for LDEVs created by the Hitachi
+     block storage driver. The nickname is referenced when deleting a
+     volume or a snapshot to avoid the risk of data loss. See details
+     in `bug #2072317`_.
+
 Set up Hitachi storage volume driver and volume operations
 ----------------------------------------------------------
 
@@ -508,3 +514,5 @@ attach operations for each volume type.
    https://docs.hitachivantara.com/r/en-us/svos/9.8.7/mk-97hm85026/
    about-adaptive-data-reduction/capacity-saving/
    capacity-saving-function-data-deduplication-and-compression
+.. _bug #2072317:
+   https://bugs.launchpad.net/cinder/+bug/2072317
diff --git a/releasenotes/notes/hitachi-prevent-data-loss-9ec3569d7d5b1e7d.yaml b/releasenotes/notes/hitachi-prevent-data-loss-9ec3569d7d5b1e7d.yaml
new file mode 100644
index 00000000000..7bc3dc419b8
--- /dev/null
+++ b/releasenotes/notes/hitachi-prevent-data-loss-9ec3569d7d5b1e7d.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    Hitachi driver `bug #2072317
+    <https://bugs.launchpad.net/cinder/+bug/2072317>`_: Fix potential
+    data loss due to a network issue during a volume deletion.
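For reference, the spawn/wait pattern used by `delete_volume()` in the hbsd_replication.py hunk deletes the S-VOL on the secondary array concurrently with the P-VOL and always joins the green thread, even when the primary deletion raises. A minimal sketch with stand-in functions in place of the driver's bound methods:

```python
import eventlet
from eventlet import greenthread


def delete_secondary_volume():
    # Stand-in for rep_secondary.delete_volume(volume).
    eventlet.sleep(0.1)
    print('S-VOL deleted on the secondary array')


def delete_primary_volume():
    # Stand-in for rep_primary.delete_volume(volume).
    print('P-VOL deleted on the primary array')


thread = greenthread.spawn(delete_secondary_volume)
try:
    delete_primary_volume()
finally:
    # Join the green thread even if the primary deletion raised.
    thread.wait()
```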