From ac1ee9bb8eba4d03d4594ee91a540a65cd9b4ad9 Mon Sep 17 00:00:00 2001 From: anthony gamboa Date: Mon, 2 Jun 2025 11:20:46 -0700 Subject: [PATCH] Hitachi: Add support for Hitachi VSP One B20 Change-Id: Ica909518b9a0e4e58ad757bc9373ffc5e52e5a14 Signed-off-by: Sa Pham (cherry picked from commit bc5591a1f9ceba64eaf5e5200b0bed60f79914ab) (cherry picked from commit 606dc17cd35e4ba0d14edbc796cfdb31d2ffc14f) --- .../hitachi/test_hitachi_hbsd_mirror_fc.py | 32 +- .../hitachi/test_hitachi_hbsd_rest_fc.py | 649 +++++++++++++++++- .../hitachi/test_hitachi_hbsd_rest_iscsi.py | 30 +- .../drivers/hpe/xp/test_hpe_xp_rest_fc.py | 21 +- .../drivers/hpe/xp/test_hpe_xp_rest_iscsi.py | 21 +- .../nec/v/test_internal_nec_rest_fc.py | 21 +- .../nec/v/test_internal_nec_rest_iscsi.py | 21 +- .../volume/drivers/nec/v/test_nec_rest_fc.py | 3 + .../drivers/nec/v/test_nec_rest_iscsi.py | 3 + cinder/volume/drivers/hitachi/hbsd_common.py | 115 ++++ cinder/volume/drivers/hitachi/hbsd_rest.py | 176 ++++- .../volume/drivers/hitachi/hbsd_rest_api.py | 23 + cinder/volume/drivers/hitachi/hbsd_utils.py | 20 +- cinder/volume/drivers/hpe/xp/hpe_xp_rest.py | 9 + cinder/volume/drivers/hpe/xp/hpe_xp_utils.py | 3 + cinder/volume/drivers/nec/v/nec_v_rest.py | 7 + .../drivers/hitachi-vsp-driver.rst | 74 +- .../notes/B20-support-8c2baf5f781efffd.yaml | 7 + 18 files changed, 1180 insertions(+), 55 deletions(-) create mode 100644 releasenotes/notes/B20-support-8c2baf5f781efffd.yaml diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py index 43df2e611d5..b3a66e3c647 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022, 2024, Hitachi, Ltd. +# Copyright (C) 2020, 2024, Hitachi, Ltd. 
+# Copyright (C) 2025, Hitachi Vantara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -606,6 +607,7 @@ class HBSDMIRRORFCDriverTest(test.TestCase): self.configuration.hitachi_copy_speed = 3 self.configuration.hitachi_copy_check_interval = 3 self.configuration.hitachi_async_copy_check_interval = 10 + self.configuration.hitachi_manage_drs_volumes = False self.configuration.san_login = CONFIG_MAP['user_id'] self.configuration.san_password = CONFIG_MAP['user_pass'] @@ -1018,11 +1020,13 @@ class HBSDMIRRORFCDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.extend_volume(TEST_VOLUME[0], 256) - self.assertEqual(4, request.call_count) + self.assertEqual(6, request.call_count) @mock.patch.object(requests.Session, "request") def test_extend_volume_replication(self, request): @@ -1064,7 +1068,7 @@ class HBSDMIRRORFCDriverTest(test.TestCase): 500, ERROR_RESULT, headers={'Content-Type': 'json'}) request.side_effect = _request_side_effect self.driver.extend_volume(TEST_VOLUME[4], 256) - self.assertEqual(23, request.call_count) + self.assertEqual(27, request.call_count) @mock.patch.object(driver.FibreChannelDriver, "get_goodness_function") @mock.patch.object(driver.FibreChannelDriver, "get_filter_function") @@ -1155,6 +1159,8 @@ class HBSDMIRRORFCDriverTest(test.TestCase): get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, 
COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -1167,7 +1173,7 @@ class HBSDMIRRORFCDriverTest(test.TestCase): ret = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1]) actual = {'provider_location': '1'} self.assertEqual(actual, ret) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -1220,7 +1226,7 @@ class HBSDMIRRORFCDriverTest(test.TestCase): {'pldev': 1, 'sldev': 2, 'remote-copy': hbsd_utils.MIRROR_ATTR})} self.assertEqual(actual, ret) - self.assertEqual(23, request.call_count) + self.assertEqual(25, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -1233,6 +1239,8 @@ class HBSDMIRRORFCDriverTest(test.TestCase): get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -1246,7 +1254,7 @@ class HBSDMIRRORFCDriverTest(test.TestCase): TEST_VOLUME[0], TEST_SNAPSHOT[0]) actual = {'provider_location': '1'} self.assertEqual(actual, ret) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) @mock.patch.object(fczm_utils, "add_fc_zone") @mock.patch.object(requests.Session, "request") @@ -1555,6 +1563,8 @@ class HBSDMIRRORFCDriverTest(test.TestCase): get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, 
COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -1568,7 +1578,7 @@ class HBSDMIRRORFCDriverTest(test.TestCase): self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) actual = ( None, [{'id': TEST_VOLUME[1]['id'], @@ -1607,7 +1617,7 @@ class HBSDMIRRORFCDriverTest(test.TestCase): source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0], TEST_VOLUME[3]] ) - self.assertEqual(10, request.call_count) + self.assertEqual(11, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -1620,6 +1630,8 @@ class HBSDMIRRORFCDriverTest(test.TestCase): get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -1633,7 +1645,7 @@ class HBSDMIRRORFCDriverTest(test.TestCase): self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]], group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) actual = ( None, [{'id': TEST_VOLUME[0]['id'], @@ -1765,4 +1777,4 @@ class HBSDMIRRORFCDriverTest(test.TestCase): TEST_VOLUME[5]) self.assertEqual(2, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) - self.assertEqual(14, request.call_count) + self.assertEqual(16, request.call_count) diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py index 4c78d6918d1..b17f5d2b32d 100644 --- 
a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py @@ -1,4 +1,5 @@ # Copyright (C) 2020, 2024, Hitachi, Ltd. +# Copyright (C) 2025, Hitachi Vantara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -17,6 +18,7 @@ import functools from unittest import mock +import ddt from oslo_config import cfg from oslo_utils import units import requests @@ -213,6 +215,17 @@ COMPLETED_FAILED_RESULT_LU_DEFINED = { }, } +COMPLETED_FAILED_RESULT = { + "status": "Completed", + "state": "Failed", + "error": { + "errorCode": { + "SSB1": "1111", + "SSB2": "2222", + }, + }, +} + GET_LDEV_RESULT = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, @@ -295,6 +308,100 @@ GET_LDEV_RESULT_PAIR_STATUS_TEST = { "dataReductionMode": "disabled" } +GET_LDEV_RESULT_DRS = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP", "DRS"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "ENABLED", + "dataReductionMode": "compression_deduplication", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_DRS_WITH_PARENT = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP", "DRS"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "ENABLED", + "dataReductionMode": "compression_deduplication", + "label": "00000000000000000000000000000000", + "parentLdevId": 10, +} + +GET_LDEV_RESULT_DRS_MANAGED_PARENT = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP", "DRS"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "ENABLED", + "dataReductionMode": "compression_deduplication", + "label": "HBSD-VCP", +} + +GET_LDEV_RESULT_VCP_MANAGED_PARENT = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP", "DRS", 
"VCP"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "ENABLED", + "dataReductionMode": "compression_deduplication", + "label": "HBSD-VCP", + "parentLdevId": 10, +} + +GET_LDEV_RESULT_VCP_MANAGED_PARENT_LARGE = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 137438953472, + "attributes": ["CVS", "HDP", "DRS", "VCP"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "ENABLED", + "dataReductionMode": "compression_deduplication", + "label": "HBSD-VCP", + "parentLdevId": 10, +} + +GET_LDEV_RESULT_VCP_LARGE = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 137438953472, + "attributes": ["CVS", "HDP", "DRS", "VCP"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "ENABLED", + "dataReductionMode": "compression_deduplication", + "label": "00000000000000000000000000000000", + "parentLdevId": 10, +} + +GET_LDEV_RESULT_VC = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP", "DRS", "VC"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "ENABLED", + "dataReductionMode": "compression_deduplication", + "label": "00000000000000000000000000000000", + "parentLdevId": 10, +} + +GET_LDEV_RESULT_VCP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP", "DRS", "VCP"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "ENABLED", + "dataReductionMode": "compression_deduplication", + "label": "00000000000000000000000000000000", + "parentLdevId": 10, +} + GET_POOL_RESULT = { "availableVolumeCapacity": 480144, "totalPoolCapacity": 507780, @@ -309,6 +416,7 @@ GET_SNAPSHOTS_RESULT = { "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, + "snapshotId": "0,1", }, ], } @@ -321,6 +429,7 @@ GET_SNAPSHOTS_RESULT_PAIR = { "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, + "snapshotId": "0,1", }, ], } @@ -333,6 +442,7 @@ GET_SNAPSHOTS_RESULT_BUSY = { "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, + "snapshotId": "0,1", }, ], } @@ -501,6 +611,7 @@ 
class FakeResponse(): return self.data +@ddt.ddt class HBSDRESTFCDriverTest(test.TestCase): """Unit test class for HBSD REST interface fibre channel module.""" @@ -555,6 +666,7 @@ class HBSDRESTFCDriverTest(test.TestCase): self.configuration.hitachi_copy_speed = 3 self.configuration.hitachi_copy_check_interval = 3 self.configuration.hitachi_async_copy_check_interval = 10 + self.configuration.hitachi_manage_drs_volumes = False self.configuration.hitachi_port_scheduler = False self.configuration.hitachi_group_name_format = None @@ -863,6 +975,116 @@ class HBSDRESTFCDriverTest(test.TestCase): self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(2, request.call_count) + @mock.patch.object(requests.Session, "request") + @mock.patch.object(volume_types, 'get_volume_type_extra_specs') + @mock.patch.object(volume_types, 'get_volume_type_qos_specs') + def test_create_volume_drs( + self, get_volume_type_qos_specs, get_volume_type_extra_specs, + request): + self.override_config('hitachi_manage_drs_volumes', False, + group=conf.SHARED_CONF_GROUP) + extra_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + 'hbsd:drs': ' True', + } + get_volume_type_extra_specs.return_value = extra_specs + get_volume_type_qos_specs.return_value = {'qos_specs': None} + request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + ret = self.driver.create_volume(TEST_VOLUME[3]) + args, kwargs = request.call_args_list[0] + body = kwargs['json'] + self.assertEqual(body.get('dataReductionMode'), + 'compression_deduplication') + self.assertEqual(body.get('isDataReductionSharedVolumeEnabled'), + True) + self.assertEqual('1', ret['provider_location']) + get_volume_type_extra_specs.assert_called_once_with(TEST_VOLUME[3].id) + get_volume_type_qos_specs.assert_called_once_with( + TEST_VOLUME[3].volume_type.id) + self.assertEqual(2, request.call_count) 
+ + @ddt.data(' False', False, 'False', 'Sheep', None) + @mock.patch.object(requests.Session, "request") + @mock.patch.object(volume_types, 'get_volume_type_extra_specs') + @mock.patch.object(volume_types, 'get_volume_type_qos_specs') + def test_create_volume_drs_explicit_false_or_invalid( + self, false_drs_setting, get_volume_type_qos_specs, + get_volume_type_extra_specs, request): + extra_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + 'hbsd:drs': false_drs_setting, + } + get_volume_type_extra_specs.return_value = extra_specs + get_volume_type_qos_specs.return_value = {'qos_specs': None} + request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + self.assertRaises(exception.VolumeDriverException, + self.driver.create_volume, + TEST_VOLUME[3]) + get_volume_type_extra_specs.assert_called_once_with(TEST_VOLUME[3].id) + get_volume_type_qos_specs.assert_called_once_with( + TEST_VOLUME[3].volume_type.id) + + @mock.patch.object(requests.Session, "request") + @mock.patch.object(volume_types, 'get_volume_type_extra_specs') + @mock.patch.object(volume_types, 'get_volume_type_qos_specs') + def test_create_volume_drs_managed( + self, get_volume_type_qos_specs, get_volume_type_extra_specs, + request): + self.driver.common.conf.hitachi_manage_drs_volumes = True + # Inexplicably, the below does not work. 
+ # self.override_config('hitachi_manage_drs_volumes', True, + # group=conf.SHARED_CONF_GROUP) + extra_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + 'hbsd:drs': ' True', + } + get_volume_type_extra_specs.return_value = extra_specs + get_volume_type_qos_specs.return_value = {'qos_specs': None} + request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT_DRS), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT_DRS), + FakeResponse(200, GET_LDEV_RESULT_DRS), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), + FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + ret = self.driver.create_volume(TEST_VOLUME[3]) + args, kwargs = request.call_args_list[0] + body = kwargs['json'] + self.assertEqual(body.get('dataReductionMode'), + 'compression_deduplication') + self.assertEqual(body.get('isDataReductionSharedVolumeEnabled'), + True) + args, kwargs = request.call_args_list[1] + body = kwargs['json'] + self.assertEqual(body.get('label'), 'HBSD-VCP') + args, kwargs = request.call_args_list[3] + body = kwargs['json'] + self.assertEqual(body.get('dataReductionMode'), + 'compression_deduplication') + self.assertEqual(body.get('isDataReductionSharedVolumeEnabled'), + True) + args, kwargs = request.call_args_list[10] + body = kwargs['json'] + self.assertEqual(body.get('label'), '00000000000000000000000000000003') + self.assertEqual('1', ret['provider_location']) + self.assertEqual(2, get_volume_type_extra_specs.call_count) + get_volume_type_qos_specs.assert_called_once_with( + TEST_VOLUME[3].volume_type.id) + self.assertEqual(11, request.call_count) + @reduce_retrying_time 
@mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -973,13 +1195,59 @@ class HBSDRESTFCDriverTest(test.TestCase): self.driver.delete_volume(TEST_VOLUME[0]) self.assertEqual(1, request.call_count) + @mock.patch.object(requests.Session, "request") + def test_delete_volume_drs(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_DRS), + FakeResponse(200, GET_LDEV_RESULT_DRS), + FakeResponse(200, GET_LDEV_RESULT_DRS), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.delete_volume(TEST_VOLUME[0]) + self.assertEqual(4, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_delete_volume_drs_managed_last_vclone(self, request): + request.side_effect = [ + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT_DRS_MANAGED_PARENT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.delete_volume(TEST_VOLUME[0]) + self.assertEqual(6, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_delete_volume_drs_unmanaged_last_vclone_with_parent(self, + request): + request.side_effect = [ + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT_DRS)] + self.driver.delete_volume(TEST_VOLUME[0]) + self.assertEqual(5, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_delete_volume_drs_managed_parent_has_more_vclones(self, request): + request.side_effect = [ + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), 
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT_VCP_MANAGED_PARENT)] + self.driver.delete_volume(TEST_VOLUME[0]) + self.assertEqual(5, request.call_count) + @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.extend_volume(TEST_VOLUME[0], 256) - self.assertEqual(3, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(driver.FibreChannelDriver, "get_goodness_function") @mock.patch.object(driver.FibreChannelDriver, "get_filter_function") @@ -1046,6 +1314,134 @@ class HBSDRESTFCDriverTest(test.TestCase): self.assertEqual(1, get_filter_function.call_count) self.assertEqual(1, get_goodness_function.call_count) + @mock.patch.object(requests.Session, "request") + @mock.patch.object(volume_types, 'get_volume_type_extra_specs') + @mock.patch.object(volume_types, 'get_volume_type_qos_specs') + def test_extend_volume_drs(self, get_volume_type_qos_specs, + get_volume_type_extra_specs, request): + extra_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + 'hbsd:drs': ' True', + } + get_volume_type_extra_specs.return_value = extra_specs + get_volume_type_qos_specs.return_value = {'qos_specs': None} + request.side_effect = [ + FakeResponse(200, GET_LDEV_RESULT_DRS), + FakeResponse(200, GET_LDEV_RESULT_DRS), + FakeResponse(200, GET_LDEV_RESULT_DRS), + FakeResponse(200, GET_LDEV_RESULT_DRS), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.extend_volume(TEST_VOLUME[0], 256) + self.assertEqual(5, request.call_count) + body = request.call_args_list[4][1]['json'] + self.assertIn('enhancedExpansion', body['parameters']) + self.assertEqual(body['parameters']['enhancedExpansion'], True) + + @mock.patch.object(requests.Session, 
"request") + @mock.patch.object(volume_types, 'get_volume_type_extra_specs') + @mock.patch.object(volume_types, 'get_volume_type_qos_specs') + def test_extend_volume_drs_mngd_parent(self, get_volume_type_qos_specs, + get_volume_type_extra_specs, + request): + extra_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + 'hbsd:drs': ' True', + } + get_volume_type_extra_specs.return_value = extra_specs + get_volume_type_qos_specs.return_value = {'qos_specs': None} + request.side_effect = [ + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(200, GET_LDEV_RESULT_VCP_MANAGED_PARENT), + FakeResponse(200, GET_LDEV_RESULT_VCP_MANAGED_PARENT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.extend_volume(TEST_VOLUME[0], 256) + self.assertEqual(8, request.call_count) + body = request.call_args_list[5][1]['json'] + self.assertIn('enhancedExpansion', body['parameters']) + self.assertEqual(body['parameters']['enhancedExpansion'], True) + body = request.call_args_list[7][1]['json'] + self.assertIn('enhancedExpansion', body['parameters']) + self.assertEqual(body['parameters']['enhancedExpansion'], True) + + @mock.patch.object(requests.Session, "request") + @mock.patch.object(volume_types, 'get_volume_type_extra_specs') + @mock.patch.object(volume_types, 'get_volume_type_qos_specs') + def test_extend_volume_drs_lg_mngd_parent(self, get_volume_type_qos_specs, + get_volume_type_extra_specs, + request): + extra_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + 'hbsd:drs': ' True', + } + get_volume_type_extra_specs.return_value = extra_specs + get_volume_type_qos_specs.return_value = {'qos_specs': None} + request.side_effect = [ + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(200, 
GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(200, GET_LDEV_RESULT_VCP_MANAGED_PARENT_LARGE), + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.extend_volume(TEST_VOLUME[0], 256) + self.assertEqual(6, request.call_count) + body = request.call_args_list[5][1]['json'] + self.assertIn('enhancedExpansion', body['parameters']) + + @mock.patch.object(requests.Session, "request") + @mock.patch.object(volume_types, 'get_volume_type_extra_specs') + @mock.patch.object(volume_types, 'get_volume_type_qos_specs') + def test_extend_volume_drs_lg_unmngd_parent(self, + get_volume_type_qos_specs, + get_volume_type_extra_specs, + request): + extra_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + 'hbsd:drs': ' True', + } + get_volume_type_extra_specs.return_value = extra_specs + get_volume_type_qos_specs.return_value = {'qos_specs': None} + request.side_effect = [ + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(200, GET_LDEV_RESULT_VCP_LARGE), + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.extend_volume(TEST_VOLUME[0], 256) + self.assertEqual(6, request.call_count) + body = request.call_args_list[5][1]['json'] + self.assertIn('enhancedExpansion', body['parameters']) + + @mock.patch.object(requests.Session, "request") + @mock.patch.object(volume_types, 'get_volume_type_extra_specs') + @mock.patch.object(volume_types, 'get_volume_type_qos_specs') + def test_extend_volume_drs_unmngd_parent(self, get_volume_type_qos_specs, + get_volume_type_extra_specs, + request): + extra_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + 'hbsd:drs': ' True', + } + get_volume_type_extra_specs.return_value = extra_specs + 
get_volume_type_qos_specs.return_value = {'qos_specs': None} + request.side_effect = [ + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT), + FakeResponse(200, GET_LDEV_RESULT_VCP), + FakeResponse(200, GET_LDEV_RESULT_DRS_WITH_PARENT)] + self.assertRaises(exception.VolumeDriverException, + self.driver.extend_volume, + TEST_VOLUME[0], + 256) + self.assertEqual(6, request.call_count) + body = request.call_args_list[5][1]['json'] + self.assertIn('enhancedExpansion', body['parameters']) + @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @@ -1128,6 +1524,8 @@ class HBSDRESTFCDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} @@ -1139,7 +1537,7 @@ class HBSDRESTFCDriverTest(test.TestCase): self.assertEqual('1', vol['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -1150,6 +1548,8 @@ class HBSDRESTFCDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, 
COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} @@ -1162,7 +1562,35 @@ class HBSDRESTFCDriverTest(test.TestCase): self.assertEqual('1', vol['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) + + @mock.patch.object(requests.Session, "request") + @mock.patch.object(volume_types, 'get_volume_type_extra_specs') + @mock.patch.object(volume_types, 'get_volume_type_qos_specs') + def test_create_vcloned_volume(self, get_volume_type_qos_specs, + get_volume_type_extra_specs, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_DRS), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT_DRS), + FakeResponse(200, GET_LDEV_RESULT_DRS), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), + FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + extra_specs = {"hbsd:drs": " True", + "hbsd:capacity_saving": "deduplication_compression"} + get_volume_type_extra_specs.return_value = extra_specs + get_volume_type_qos_specs.return_value = {'qos_specs': None} + self.driver.common._stats = {} + self.driver.common._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1]) + self.assertEqual('1', vol['provider_location']) + self.assertEqual(1, get_volume_type_extra_specs.call_count) + self.assertEqual(1, get_volume_type_qos_specs.call_count) + self.assertEqual(9, request.call_count) + self.assertIn('virtual-clone', request.call_args_list[7][0][1]) @mock.patch.object(fczm_utils, "add_fc_zone") @mock.patch.object(requests.Session, "request") @@ -1392,6 +1820,64 @@ class HBSDRESTFCDriverTest(test.TestCase): TEST_VOLUME[0], self.test_existing_ref_name) 
self.assertEqual(2, request.call_count) + @mock.patch.object(requests.Session, "request") + @mock.patch.object(volume_types, 'get_volume_type_extra_specs') + @mock.patch.object(volume_types, 'get_volume_type_qos_specs') + def test_manage_existing_drs(self, get_volume_type_qos_specs, + get_volume_type_extra_specs, request): + extra_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + 'hbsd:drs': ' True', + } + get_volume_type_extra_specs.return_value = extra_specs + get_volume_type_qos_specs.return_value = {'qos_specs': None} + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_DRS), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEVS_RESULT)] + ret = self.driver.manage_existing( + TEST_VOLUME[0], self.test_existing_ref) + self.assertEqual('1', ret['provider_location']) + self.assertEqual(1, get_volume_type_qos_specs.call_count) + self.assertEqual(3, request.call_count) + + @mock.patch.object(requests.Session, "request") + @mock.patch.object(volume_types, 'get_volume_type_extra_specs') + @mock.patch.object(volume_types, 'get_volume_type_qos_specs') + def test_manage_existing_vc(self, get_volume_type_qos_specs, + get_volume_type_extra_specs, request): + extra_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + 'hbsd:drs': ' True', + } + get_volume_type_extra_specs.return_value = extra_specs + get_volume_type_qos_specs.return_value = {'qos_specs': None} + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_VC), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEVS_RESULT)] + ret = self.driver.manage_existing( + TEST_VOLUME[0], self.test_existing_ref) + self.assertEqual('1', ret['provider_location']) + self.assertEqual(1, get_volume_type_qos_specs.call_count) + self.assertEqual(3, request.call_count) + + @mock.patch.object(requests.Session, "request") + @mock.patch.object(volume_types, 'get_volume_type_extra_specs') + @mock.patch.object(volume_types, 'get_volume_type_qos_specs') 
+ def test_manage_existing_vcp(self, get_volume_type_qos_specs, + get_volume_type_extra_specs, request): + extra_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + 'hbsd:drs': ' True', + } + get_volume_type_extra_specs.return_value = extra_specs + get_volume_type_qos_specs.return_value = {'qos_specs': None} + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_VCP)] + self.assertRaises(exception.ManageExistingInvalidReference, + self.driver.manage_existing, + TEST_VOLUME[1], + self.test_existing_ref) + self.assertEqual(1, request.call_count) + @mock.patch.object(requests.Session, "request") def test_unmanage(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), @@ -1485,6 +1971,151 @@ class HBSDRESTFCDriverTest(test.TestCase): self.assertEqual(4, request.call_count) self.assertTrue(ret) + @mock.patch.object(requests.Session, "request") + @mock.patch.object(volume_types, 'get_volume_type_qos_specs') + def test_retype_drs_removed(self, get_volume_type_qos_specs, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_DRS)] + get_volume_type_qos_specs.return_value = {'qos_specs': None} + host = { + 'capabilities': { + 'location_info': { + 'pool_id': 30, + }, + }, + } + extra_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + } + new_type = fake_volume.fake_volume_type_obj( + CTXT, id='00000000-0000-0000-0000-{0:012d}'.format(0), + extra_specs=extra_specs) + old_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + 'hbsd:drs': ' True', + } + new_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + } + old_type_ref = volume_types.create(self.ctxt, 'old', old_specs) + new_type_ref = volume_types.create(self.ctxt, 'new', new_specs) + diff = volume_types.volume_types_diff(self.ctxt, old_type_ref['id'], + new_type_ref['id'])[0] + + self.assertRaises(exception.VolumeDriverException, + self.driver.retype, self.ctxt, TEST_VOLUME[0], + new_type, diff, host) + self.assertEqual(1, 
request.call_count) + + @mock.patch.object(requests.Session, "request") + @mock.patch.object(volume_types, 'get_volume_type_qos_specs') + def test_retype_drs_with_csv_removed(self, get_volume_type_qos_specs, + request): + request.side_effect = [ + FakeResponse(200, GET_LDEV_RESULT_DRS), + ] + get_volume_type_qos_specs.return_value = {'qos_specs': None} + host = { + 'capabilities': { + 'location_info': { + 'pool_id': 30, + }, + }, + } + extra_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + } + new_type = fake_volume.fake_volume_type_obj( + CTXT, id='00000000-0000-0000-0000-{0:012d}'.format(0), + extra_specs=extra_specs) + old_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + 'hbsd:drs': ' True', + } + new_specs = { + 'hbsd:drs': ' True', + } + old_type_ref = volume_types.create(self.ctxt, 'old', old_specs) + new_type_ref = volume_types.create(self.ctxt, 'new', new_specs) + diff = volume_types.volume_types_diff(self.ctxt, old_type_ref['id'], + new_type_ref['id'])[0] + self.assertRaises(exception.VolumeDriverException, + self.driver.retype, self.ctxt, TEST_VOLUME[0], + new_type, diff, host) + self.assertEqual(1, request.call_count) + + @mock.patch.object(requests.Session, "request") + @mock.patch.object(volume_types, 'get_volume_type_qos_specs') + def test_retype_drs_with_csv_disabled(self, get_volume_type_qos_specs, + request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_DRS), + FakeResponse(200, GET_LDEV_RESULT_DRS)] + get_volume_type_qos_specs.return_value = {'qos_specs': None} + host = { + 'capabilities': { + 'location_info': { + 'pool_id': 30, + }, + }, + } + extra_specs = { + 'hbsd:capacity_saving': 'disable', + 'hbsd:drs': ' True', + } + new_type = fake_volume.fake_volume_type_obj( + CTXT, id='00000000-0000-0000-0000-{0:012d}'.format(0), + extra_specs=extra_specs) + old_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + 'hbsd:drs': ' True', + } + new_specs = { + 'hbsd:capacity_saving': 
'disable', + 'hbsd:drs': ' True', + } + old_type_ref = volume_types.create(self.ctxt, 'old', old_specs) + new_type_ref = volume_types.create(self.ctxt, 'new', new_specs) + diff = volume_types.volume_types_diff(self.ctxt, old_type_ref['id'], + new_type_ref['id'])[0] + ret = self.driver.retype(self.ctxt, TEST_VOLUME[0], + new_type, diff, host) + self.assertEqual(2, request.call_count) + self.assertEqual(ret, False) + + @mock.patch.object(requests.Session, "request") + @mock.patch.object(volume_types, 'get_volume_type_qos_specs') + def test_retype_drs_added(self, get_volume_type_qos_specs, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT)] + get_volume_type_qos_specs.return_value = {'qos_specs': None} + host = { + 'capabilities': { + 'location_info': { + 'pool_id': 30, + }, + }, + } + extra_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + 'hbsd:drs': ' True', + } + new_type = fake_volume.fake_volume_type_obj( + CTXT, id='00000000-0000-0000-0000-{0:012d}'.format(0), + extra_specs=extra_specs) + old_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + } + new_specs = { + 'hbsd:capacity_saving': 'deduplication_compression', + 'hbsd:drs': ' True', + } + old_type_ref = volume_types.create(self.ctxt, 'old', old_specs) + new_type_ref = volume_types.create(self.ctxt, 'new', new_specs) + diff = volume_types.volume_types_diff(self.ctxt, old_type_ref['id'], + new_type_ref['id'])[0] + self.assertRaises(exception.VolumeDriverException, + self.driver.retype, self.ctxt, TEST_VOLUME[0], + new_type, diff, host) + self.assertEqual(1, request.call_count) + @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_retype_qos(self, get_volume_type_qos_specs, request): @@ -1562,7 +2193,7 @@ class HBSDRESTFCDriverTest(test.TestCase): ret = self.driver.retype( self.ctxt, TEST_VOLUME[0], new_type, diff, host) self.assertEqual(1, get_volume_type_qos_specs.call_count) - 
self.assertEqual(16, request.call_count) + self.assertEqual(17, request.call_count) actual = (True, {'provider_location': '1'}) self.assertTupleEqual(actual, ret) @@ -1615,7 +2246,7 @@ class HBSDRESTFCDriverTest(test.TestCase): ret = self.driver.migrate_volume(self.ctxt, TEST_VOLUME[0], host) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) - self.assertEqual(15, request.call_count) + self.assertEqual(16, request.call_count) actual = (True, {'provider_location': '1'}) self.assertTupleEqual(actual, ret) @@ -1668,6 +2299,8 @@ class HBSDRESTFCDriverTest(test.TestCase): get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -1680,7 +2313,7 @@ class HBSDRESTFCDriverTest(test.TestCase): ) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) actual = ( None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) @@ -1695,6 +2328,8 @@ class HBSDRESTFCDriverTest(test.TestCase): get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -1707,7 +2342,7 @@ class HBSDRESTFCDriverTest(test.TestCase): ) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, 
get_volume_type_qos_specs.call_count) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) actual = ( None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py index 9f573a76881..db28bc058fb 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py @@ -1,4 +1,5 @@ # Copyright (C) 2020, 2024, Hitachi, Ltd. +# Copyright (C) 2025, Hitachi Vantara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -374,6 +375,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase): self.configuration.hitachi_copy_speed = 3 self.configuration.hitachi_copy_check_interval = 3 self.configuration.hitachi_async_copy_check_interval = 10 + self.configuration.hitachi_manage_drs_volumes = False self.configuration.hitachi_port_scheduler = False self.configuration.hitachi_group_name_format = None @@ -599,10 +601,12 @@ class HBSDRESTISCSIDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.extend_volume(TEST_VOLUME[0], 256) - self.assertEqual(3, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(driver.ISCSIDriver, "get_goodness_function") @mock.patch.object(driver.ISCSIDriver, "get_filter_function") @@ -716,6 +720,8 @@ class HBSDRESTISCSIDriverTest(test.TestCase): get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, 
GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -728,7 +734,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase): self.assertEqual('1', vol['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -739,6 +745,8 @@ class HBSDRESTISCSIDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} @@ -751,7 +759,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase): self.assertEqual('1', vol['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -763,6 +771,8 @@ class HBSDRESTISCSIDriverTest(test.TestCase): FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] input_qos_specs = { @@ -779,7 +789,7 @@ class 
HBSDRESTISCSIDriverTest(test.TestCase): self.assertEqual('1', vol['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) - self.assertEqual(6, request.call_count) + self.assertEqual(8, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -1069,6 +1079,8 @@ class HBSDRESTISCSIDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} @@ -1080,7 +1092,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase): ) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) actual = ( None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) @@ -1096,6 +1108,8 @@ class HBSDRESTISCSIDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} @@ -1107,7 +1121,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase): ) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) actual = ( None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) 
@@ -1126,6 +1140,8 @@ class HBSDRESTISCSIDriverTest(test.TestCase): get_volume_type_extra_specs.return_value = {} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), @@ -1139,7 +1155,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase): ) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) - self.assertEqual(6, request.call_count) + self.assertEqual(8, request.call_count) actual = ( None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) diff --git a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py index 8091c0a20f8..bef1960dd39 100644 --- a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py @@ -460,6 +460,7 @@ class HPEXPRESTFCDriverTest(test.TestCase): self.configuration.hpexp_copy_speed = 3 self.configuration.hpexp_copy_check_interval = 3 self.configuration.hpexp_async_copy_check_interval = 10 + self.configuration.hpexp_manage_drs_volumes = False self.configuration.san_login = CONFIG_MAP['user_id'] self.configuration.san_password = CONFIG_MAP['user_pass'] @@ -709,10 +710,12 @@ class HPEXPRESTFCDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.extend_volume(TEST_VOLUME[0], 256) - self.assertEqual(3, request.call_count) + self.assertEqual(5, 
request.call_count) @mock.patch.object(driver.FibreChannelDriver, "get_goodness_function") @mock.patch.object(driver.FibreChannelDriver, "get_filter_function") @@ -785,6 +788,8 @@ class HPEXPRESTFCDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} @@ -794,7 +799,7 @@ class HPEXPRESTFCDriverTest(test.TestCase): {'location_info': {'pool_id': 30}}] vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1]) self.assertEqual('1', vol['provider_location']) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -805,6 +810,8 @@ class HPEXPRESTFCDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} @@ -815,7 +822,7 @@ class HPEXPRESTFCDriverTest(test.TestCase): vol = self.driver.create_volume_from_snapshot( TEST_VOLUME[0], TEST_SNAPSHOT[0]) self.assertEqual('1', vol['provider_location']) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) @mock.patch.object(fczm_utils, "add_fc_zone") @mock.patch.object(requests.Session, "request") @@ -1098,6 +1105,8 @@ class HPEXPRESTFCDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, 
COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} @@ -1109,7 +1118,7 @@ class HPEXPRESTFCDriverTest(test.TestCase): self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) actual = ( None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) @@ -1123,6 +1132,8 @@ class HPEXPRESTFCDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} @@ -1134,7 +1145,7 @@ class HPEXPRESTFCDriverTest(test.TestCase): self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]], group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) actual = ( None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) diff --git a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py index 513d0c153a2..789b0179be3 100644 --- a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py +++ b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py @@ -356,6 +356,7 @@ class HPEXPRESTISCSIDriverTest(test.TestCase): self.configuration.hpexp_copy_speed = 3 self.configuration.hpexp_copy_check_interval = 3 self.configuration.hpexp_async_copy_check_interval = 10 + self.configuration.hpexp_manage_drs_volumes = False 
self.configuration.san_login = CONFIG_MAP['user_id'] self.configuration.san_password = CONFIG_MAP['user_pass'] @@ -517,10 +518,12 @@ class HPEXPRESTISCSIDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.extend_volume(TEST_VOLUME[0], 256) - self.assertEqual(3, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(driver.ISCSIDriver, "get_goodness_function") @mock.patch.object(driver.ISCSIDriver, "get_filter_function") @@ -602,6 +605,8 @@ class HPEXPRESTISCSIDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} @@ -611,7 +616,7 @@ class HPEXPRESTISCSIDriverTest(test.TestCase): {'location_info': {'pool_id': 30}}] vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1]) self.assertEqual('1', vol['provider_location']) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -622,6 +627,8 @@ class HPEXPRESTISCSIDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] 
get_volume_type_extra_specs.return_value = {} @@ -632,7 +639,7 @@ class HPEXPRESTISCSIDriverTest(test.TestCase): vol = self.driver.create_volume_from_snapshot( TEST_VOLUME[0], TEST_SNAPSHOT[0]) self.assertEqual('1', vol['provider_location']) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -888,6 +895,8 @@ class HPEXPRESTISCSIDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} @@ -899,7 +908,7 @@ class HPEXPRESTISCSIDriverTest(test.TestCase): self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) actual = ( None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) @@ -913,6 +922,8 @@ class HPEXPRESTISCSIDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} @@ -924,7 +935,7 @@ class HPEXPRESTISCSIDriverTest(test.TestCase): self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]], group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) actual = ( None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}]) 
self.assertTupleEqual(actual, ret) diff --git a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py index 38c195c4b98..1baafe413f1 100644 --- a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py @@ -454,6 +454,7 @@ class VStorageRESTFCDriverTest(test.TestCase): self.configuration.nec_v_copy_speed = 3 self.configuration.nec_v_copy_check_interval = 3 self.configuration.nec_v_async_copy_check_interval = 10 + self.configuration.nec_v_manage_drs_volumes = False self.configuration.san_login = CONFIG_MAP['user_id'] self.configuration.san_password = CONFIG_MAP['user_pass'] @@ -699,10 +700,12 @@ class VStorageRESTFCDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.extend_volume(TEST_VOLUME[0], 256) - self.assertEqual(3, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(driver.FibreChannelDriver, "get_goodness_function") @mock.patch.object(driver.FibreChannelDriver, "get_filter_function") @@ -775,6 +778,8 @@ class VStorageRESTFCDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} @@ -784,7 +789,7 @@ class VStorageRESTFCDriverTest(test.TestCase): {'location_info': {'pool_id': 30}}] vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1]) 
self.assertEqual('1', vol['provider_location']) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -795,6 +800,8 @@ class VStorageRESTFCDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} @@ -805,7 +812,7 @@ class VStorageRESTFCDriverTest(test.TestCase): vol = self.driver.create_volume_from_snapshot( TEST_VOLUME[0], TEST_SNAPSHOT[0]) self.assertEqual('1', vol['provider_location']) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) @mock.patch.object(fczm_utils, "add_fc_zone") @mock.patch.object(requests.Session, "request") @@ -1093,6 +1100,8 @@ class VStorageRESTFCDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} @@ -1102,7 +1111,7 @@ class VStorageRESTFCDriverTest(test.TestCase): self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) actual = ( None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) @@ -1116,6 +1125,8 @@ class VStorageRESTFCDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, 
COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} @@ -1127,7 +1138,7 @@ class VStorageRESTFCDriverTest(test.TestCase): self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]], group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) actual = ( None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) diff --git a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py index b36a977a286..c526b3577bd 100644 --- a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py +++ b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py @@ -365,6 +365,7 @@ class VStorageRESTISCSIDriverTest(test.TestCase): self.configuration.nec_v_copy_speed = 3 self.configuration.nec_v_copy_check_interval = 3 self.configuration.nec_v_async_copy_check_interval = 10 + self.configuration.nec_v_manage_drs_volumes = False self.configuration.san_login = CONFIG_MAP['user_id'] self.configuration.san_password = CONFIG_MAP['user_pass'] @@ -561,10 +562,12 @@ class VStorageRESTISCSIDriverTest(test.TestCase): @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.extend_volume(TEST_VOLUME[0], 256) - self.assertEqual(3, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(driver.ISCSIDriver, "get_goodness_function") @mock.patch.object(driver.ISCSIDriver, 
"get_filter_function") @@ -646,6 +649,8 @@ class VStorageRESTISCSIDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} @@ -655,7 +660,7 @@ class VStorageRESTISCSIDriverTest(test.TestCase): {'location_info': {'pool_id': 30}}] vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1]) self.assertEqual('1', vol['provider_location']) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -666,6 +671,8 @@ class VStorageRESTISCSIDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} @@ -676,7 +683,7 @@ class VStorageRESTISCSIDriverTest(test.TestCase): vol = self.driver.create_volume_from_snapshot( TEST_VOLUME[0], TEST_SNAPSHOT[0]) self.assertEqual('1', vol['provider_location']) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -932,6 +939,8 @@ class VStorageRESTISCSIDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, 
GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} @@ -943,7 +952,7 @@ class VStorageRESTISCSIDriverTest(test.TestCase): self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) actual = ( None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) @@ -957,6 +966,8 @@ class VStorageRESTISCSIDriverTest(test.TestCase): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} @@ -968,7 +979,7 @@ class VStorageRESTISCSIDriverTest(test.TestCase): self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]], group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(7, request.call_count) actual = ( None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) diff --git a/cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_fc.py b/cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_fc.py index 2598638f422..4f753e315ae 100644 --- a/cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_fc.py @@ -181,6 +181,7 @@ class VStorageRESTFCDriverTest(test.TestCase): self.configuration.nec_v_copy_speed = 3 self.configuration.nec_v_copy_check_interval = 3 self.configuration.nec_v_async_copy_check_interval = 10 + self.configuration.nec_v_manage_drs_volumes = False self.configuration.san_login = CONFIG_MAP['user_id'] self.configuration.san_password = 
CONFIG_MAP['user_pass'] @@ -300,6 +301,8 @@ class VStorageRESTFCDriverTest(test.TestCase): drv.configuration.nec_v_copy_check_interval) self.assertEqual(drv.configuration.hitachi_async_copy_check_interval, drv.configuration.nec_v_async_copy_check_interval) + self.assertEqual(drv.configuration.hitachi_manage_drs_volumes, + drv.configuration.nec_v_manage_drs_volumes) self.assertEqual(drv.configuration.hitachi_rest_disable_io_wait, drv.configuration.nec_v_rest_disable_io_wait) self.assertEqual(drv.configuration.hitachi_rest_tcp_keepalive, diff --git a/cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_iscsi.py b/cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_iscsi.py index b51225f3b3d..c2c19d623ad 100644 --- a/cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_iscsi.py +++ b/cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_iscsi.py @@ -203,6 +203,7 @@ class VStorageRESTISCSIDriverTest(test.TestCase): self.configuration.nec_v_copy_speed = 3 self.configuration.nec_v_copy_check_interval = 3 self.configuration.nec_v_async_copy_check_interval = 10 + self.configuration.nec_v_manage_drs_volumes = False self.configuration.san_login = CONFIG_MAP['user_id'] self.configuration.san_password = CONFIG_MAP['user_pass'] @@ -322,6 +323,8 @@ class VStorageRESTISCSIDriverTest(test.TestCase): drv.configuration.nec_v_copy_check_interval) self.assertEqual(drv.configuration.hitachi_async_copy_check_interval, drv.configuration.nec_v_async_copy_check_interval) + self.assertEqual(drv.configuration.hitachi_manage_drs_volumes, + drv.configuration.nec_v_manage_drs_volumes) self.assertEqual(drv.configuration.hitachi_rest_disable_io_wait, drv.configuration.nec_v_rest_disable_io_wait) self.assertEqual(drv.configuration.hitachi_rest_tcp_keepalive, diff --git a/cinder/volume/drivers/hitachi/hbsd_common.py b/cinder/volume/drivers/hitachi/hbsd_common.py index 1def1176934..a7e38803d29 100644 --- a/cinder/volume/drivers/hitachi/hbsd_common.py +++ 
b/cinder/volume/drivers/hitachi/hbsd_common.py @@ -1,4 +1,5 @@ # Copyright (C) 2020, 2024, Hitachi, Ltd. +# Copyright (C) 2025, Hitachi Vantara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -48,8 +49,15 @@ _GROUP_NAME_VAR_LEN = {GROUP_NAME_VAR_WWN: _GROUP_NAME_VAR_WWN_LEN, STR_VOLUME = 'volume' STR_SNAPSHOT = 'snapshot' +STR_MANAGED_VCP_LDEV_NAME = 'HBSD-VCP' + _UUID_PATTERN = re.compile(r'^[\da-f]{32}$') +DRS_MODE = { + ' True': True, + ' False': False, +} + _INHERITED_VOLUME_OPTS = [ 'volume_backend_name', 'volume_driver', @@ -124,6 +132,11 @@ COMMON_VOLUME_OPTS = [ min=1, max=600, help='Interval in seconds to check asynchronous copying status during ' 'a copy pair deletion or data restoration.'), + cfg.BoolOpt( + 'hitachi_manage_drs_volumes', + default=False, + help='If true, the driver will create a driver managed vClone parent ' + 'for each non-cloned DRS volume it creates.'), ] COMMON_PORT_OPTS = [ @@ -263,7 +276,14 @@ class HBSDCommon(): def create_volume(self, volume): """Create a volume and return its properties.""" + extra_specs = self.get_volume_extra_specs(volume) + + # If we're a managed DRS volume, we need to call + # create_managed_drs_volume. 
+ if self.is_managed_drs_volume(extra_specs): + return self.create_managed_drs_volume(volume) + pool_id = self.get_pool_id_of_volume(volume) ldev_range = self.storage_info['ldev_range'] qos_specs = utils.get_qos_specs_from_volume(volume) @@ -278,6 +298,80 @@ class HBSDCommon(): 'provider_location': str(ldev), } + def is_managed_drs_volume(self, extra_specs): + + is_managed_drs = False + if (self.conf.hitachi_manage_drs_volumes and + self.driver_info.get('driver_dir_name')): + + extra_specs_drs = (self.driver_info['driver_dir_name'] + + ':drs') + drs = extra_specs.get(extra_specs_drs) + + is_managed_drs = DRS_MODE.get(drs, False) + + return is_managed_drs + + def get_drs_parent_extra_specs(self, extra_specs): + """Build subset of extra specs for a DRS vClone parent.""" + + extra_specs_parent = {} + + extra_specs_drs = (self.driver_info['driver_dir_name'] + + ':drs') + drs = extra_specs.get(extra_specs_drs) + extra_specs_csv = (self.driver_info['driver_dir_name'] + + ':capacity_saving') + capacity_saving = extra_specs.get(extra_specs_csv) + + extra_specs_parent[extra_specs_drs] = drs + extra_specs_parent[extra_specs_csv] = capacity_saving + + LOG.debug("Managed parent extra specs: %s", extra_specs_parent) + + return extra_specs_parent + + def create_managed_drs_volume(self, volume): + """Create a managed DRS volume and return its properties.""" + + LOG.debug("Creating managed DRS volume.") + + extra_specs = self.get_volume_extra_specs(volume) + pool_id = self.get_pool_id_of_volume(volume) + ldev_range = self.storage_info['ldev_range'] + qos_specs = utils.get_qos_specs_from_volume(volume) + size = volume['size'] + + # Create our parent volume using only the DRS-related + # specs. 
+ try: + + parent = self.create_ldev(size, + self.get_drs_parent_extra_specs( + extra_specs), + pool_id, ldev_range) + except Exception: + with excutils.save_and_reraise_exception(): + self.output_log(MSG.CREATE_LDEV_FAILED) + self.modify_ldev_name(parent, STR_MANAGED_VCP_LDEV_NAME) + + # Create a clone using our parent volume and the + # given extra specs. + try: + ldev = self.copy_on_storage(parent, size, extra_specs, + pool_id, + pool_id, ldev_range, + qos_specs=qos_specs) + except Exception: + self.delete_ldev(parent) + with excutils.save_and_reraise_exception(): + self.output_log(MSG.CREATE_LDEV_FAILED) + self.modify_ldev_name(ldev, volume['id'].replace("-", "")) + + return { + 'provider_location': str(ldev), + } + def get_ldev_info(self, keys, ldev, **kwargs): """Return a dictionary of LDEV-related items.""" raise NotImplementedError() @@ -616,6 +710,27 @@ class HBSDCommon(): volume_id=volume['id']) self.raise_error(msg) self.delete_pair(ldev) + + # Extend a Managed parent if we have one and it's necessary. 
+ ldev_info = self.get_ldev_info(['parentLdevId'], ldev) + if ldev_info['parentLdevId']: + parent_ldev = int(ldev_info['parentLdevId']) + parent_ldev_info = self.get_ldev_info( + ['blockCapacity', 'label'], parent_ldev) + + if (parent_ldev_info['label'] and + parent_ldev_info['label'] == STR_MANAGED_VCP_LDEV_NAME and + (parent_ldev_info['blockCapacity'] / + utils.GIGABYTE_PER_BLOCK_SIZE < new_size)): + + LOG.debug("Resizing Managed parent volume %d.", + parent_ldev) + self.extend_ldev(parent_ldev, + int(parent_ldev_info['blockCapacity'] / + utils.GIGABYTE_PER_BLOCK_SIZE), + new_size) + + # Finally, extend our LDEV self.extend_ldev(ldev, volume['size'], new_size) def get_ldev_by_name(self, name): diff --git a/cinder/volume/drivers/hitachi/hbsd_rest.py b/cinder/volume/drivers/hitachi/hbsd_rest.py index eb1356a215e..fd8a3bb77c1 100644 --- a/cinder/volume/drivers/hitachi/hbsd_rest.py +++ b/cinder/volume/drivers/hitachi/hbsd_rest.py @@ -1,4 +1,5 @@ # Copyright (C) 2020, 2024, Hitachi, Ltd. +# Copyright (C) 2025, Hitachi Vantara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -112,6 +113,7 @@ _CAPACITY_SAVING_DR_MODE = { '': 'disabled', None: 'disabled', } +_DRS_MODE = common.DRS_MODE REST_VOLUME_OPTS = [ cfg.BoolOpt( @@ -253,7 +255,8 @@ def _check_ldev_manageability(self, ldev_info, ldev, existing_ref): if (not ldev_info['emulationType'].startswith('OPEN-V') or len(attributes) < 2 or not attributes.issubset( - set(['CVS', self.driver_info['hdp_vol_attr'], + set(['CVS', utils.DRS_VOL_ATTR, utils.VC_VOL_ATTR, + self.driver_info['hdp_vol_attr'], self.driver_info['hdt_vol_attr']]))): msg = self.output_log(MSG.INVALID_LDEV_ATTR_FOR_MANAGE, ldev=ldev, ldevtype=self.driver_info['nvol_ldev_type']) @@ -341,6 +344,16 @@ class HBSDREST(common.HBSDCommon): self.raise_error(msg) body['dataReductionMode'] = dr_mode + def _set_drs_mode(self, body, drs): + drs_mode = _DRS_MODE.get(drs, False) + if not drs_mode: + msg = self.output_log( + MSG.INVALID_EXTRA_SPEC_KEY, + key=self.driver_info['driver_dir_name'] + ':drs', + value=drs) + self.raise_error(msg) + body['isDataReductionSharedVolumeEnabled'] = drs_mode + def _create_ldev_on_storage(self, size, extra_specs, pool_id, ldev_range): """Create an LDEV on the storage system.""" body = { @@ -349,11 +362,17 @@ class HBSDREST(common.HBSDCommon): 'isParallelExecutionEnabled': True, } capacity_saving = None + has_drs = False if self.driver_info.get('driver_dir_name'): capacity_saving = extra_specs.get( self.driver_info['driver_dir_name'] + ':capacity_saving') + drs_spec_name = self.driver_info['driver_dir_name'] + ':drs' + has_drs = drs_spec_name in extra_specs + drs = extra_specs.get(drs_spec_name) if capacity_saving: self._set_dr_mode(body, capacity_saving) + if has_drs: + self._set_drs_mode(body, drs) if self.storage_info['ldev_range']: min_ldev, max_ldev = self.storage_info['ldev_range'][:2] body['startLdevId'] = min_ldev @@ -389,7 +408,8 @@ class HBSDREST(common.HBSDCommon): """Delete the specified LDEV from the storage.""" result = self.get_ldev_info(['emulationType', 
'dataReductionMode', - 'dataReductionStatus'], ldev) + 'dataReductionStatus', + 'parentLdevId'], ldev) if result['dataReductionStatus'] == 'FAILED': msg = self.output_log( MSG.CONSISTENCY_NOT_GUARANTEE, ldev=ldev) @@ -406,6 +426,22 @@ class HBSDREST(common.HBSDCommon): ldev, body, timeout_message=(MSG.LDEV_DELETION_WAIT_TIMEOUT, {'ldev': ldev})) + # If we have a managed parent that is no longer a parent, + # delete it. + if result['parentLdevId']: + parent_ldev = int(result['parentLdevId']) + parent_info = self.get_ldev_info(['attributes', 'label'], + parent_ldev) + if ((not parent_info['attributes'] or + utils.VCP_VOL_ATTR not in parent_info['attributes']) and + (parent_info['label'] and + parent_info['label'] == common.STR_MANAGED_VCP_LDEV_NAME)): + LOG.debug("Deleting managed VCP LDEV %d.", parent_ldev) + self.client.delete_ldev( + parent_ldev, body, + timeout_message=(MSG.LDEV_DELETION_WAIT_TIMEOUT, + {'ldev': parent_ldev})) + def _get_snap_pool_id(self, pvol): return ( self.storage_info['snap_pool_id'] @@ -484,7 +520,7 @@ class HBSDREST(common.HBSDCommon): self.output_log( MSG.DELETE_PAIR_FAILED, pvol=pvol, svol=svol) - def _create_clone_pair(self, pvol, svol, snap_pool_id): + def _create_regular_clone_pair(self, pvol, svol, snap_pool_id): """Create a clone copy pair on the storage.""" snapshot_name = '%(prefix)s%(svol)s' % { 'prefix': self.driver_info['driver_prefix'] + '-clone', @@ -526,6 +562,87 @@ class HBSDREST(common.HBSDCommon): self.output_log( MSG.DELETE_PAIR_FAILED, pvol=pvol, svol=svol) + def _can_config_vclone(self, pvol, svol, snap_pool_id): + """Determine if we can configure vClone (=DRS + matching pool)""" + chk_list = ['dataReductionStatus', 'poolId', 'attributes'] + pinfo = self.get_ldev_info(chk_list, pvol) + sinfo = self.get_ldev_info(chk_list, svol) + LOG.debug("vclone-chk.Pinfo=%s, Sinfo=%s", repr(pinfo), repr(sinfo)) + if (not pinfo['attributes'] or + utils.DRS_VOL_ATTR not in pinfo['attributes'] or + not sinfo['attributes'] or + 
utils.DRS_VOL_ATTR not in sinfo['attributes']): + return False + if (pinfo['poolId'] != snap_pool_id or + sinfo['poolId'] != snap_pool_id): + return False + return True + + def _create_vclone_pair(self, pvol, svol, snap_pool_id): + """Create a copy pair, then convert to vClone.""" + snapshot_name = '%(prefix)s%(svol)s' % { + 'prefix': self.driver_info['driver_prefix'] + '-vclone', + 'svol': svol % _SNAP_HASH_SIZE, + } + ss_result = None + try: + body = {"snapshotGroupName": snapshot_name, + "snapshotPoolId": self._get_snap_pool_id(pvol), + "pvolLdevId": pvol, + "svolLdevId": svol, + "isConsistencyGroup": False, + "isDataReductionForceCopy": True, + "canCascade": True} + self.client.add_snapshot(body) + except exception.VolumeDriverException as ex: + if (utils.safe_get_err_code(ex.kwargs.get('errobj')) == + rest_api.INVALID_SNAPSHOT_POOL and + not self.conf.hitachi_snap_pool): + msg = self.output_log( + MSG.INVALID_PARAMETER, + param=self.driver_info['param_prefix'] + '_snap_pool') + self.raise_error(msg) + else: + raise + try: + self._wait_copy_pair_status(svol, set([PAIR])) + LOG.debug("_wait_copy_pair_status[PAIR] done.svol=%d", svol) + except Exception: + with excutils.save_and_reraise_exception(): + try: + self._delete_pair_from_storage(pvol, svol) + except exception.VolumeDriverException: + self.output_log( + MSG.DELETE_PAIR_FAILED, pvol=pvol, svol=svol) + try: + ss_result = self.client.get_snapshot_by_svol(svol) + LOG.debug("snapshot result=%s,svol=%d", repr(ss_result), svol) + except Exception: + with excutils.save_and_reraise_exception(): + msg = self.output_log( + MSG.GET_SNAPSHOT_FROM_SVOL_FAILURE, svol=repr(svol)) + LOG.error(msg) + self._delete_pair_from_storage(pvol, svol) + self.raise_error(msg) + try: + ss_id = ss_result['data'][0]['snapshotId'] + self.client.snapshot_pair_to_vclone(ss_id) + LOG.debug("ss2vclone svol=%d", svol) + except Exception: + with excutils.save_and_reraise_exception(): + msg = self.output_log( + MSG.VCLONE_PAIR_FAILED, 
pvol=repr(pvol), svol=repr(svol)) + LOG.error(msg) + self._delete_pair_from_storage(pvol, svol) + self.raise_error(msg) + + def _create_clone_pair(self, pvol, svol, snap_pool_id): + """Check on the new pair configuration to see if it is TIA(=vClone).""" + if self._can_config_vclone(pvol, svol, snap_pool_id): + self._create_vclone_pair(pvol, svol, snap_pool_id) + else: + self._create_regular_clone_pair(pvol, svol, snap_pool_id) + def create_pair_on_storage( self, pvol, svol, snap_pool_id, is_snapshot=False): """Create a copy pair on the storage.""" @@ -847,7 +964,8 @@ class HBSDREST(common.HBSDCommon): targets['list'], mapped_targets['list']) unmap_targets['list'].sort( reverse=True, - key=lambda port: (port.get('portId'), port.get('hostGroupNumber'))) + key=lambda port: (port.get('portId'), + port.get('hostGroupNumber'))) self.unmap_ldev(unmap_targets, ldev) if self.conf.hitachi_group_delete: @@ -861,10 +979,23 @@ class HBSDREST(common.HBSDCommon): for port in ldev_info['ports']: targets['list'].append(port) + def is_ldev_drs(self, ldev): + """Determine if the given LDEV is a DRS volume.""" + ldev_info = self.get_ldev_info(['attributes'], ldev) + + if (ldev_info['attributes'] and + utils.DRS_VOL_ATTR in ldev_info['attributes']): + return True + + return False + def extend_ldev(self, ldev, old_size, new_size): """Extend the specified LDEV to the specified new size.""" body = {"parameters": {"additionalByteFormatCapacity": '%sG' % (new_size - old_size)}} + if self.is_ldev_drs(ldev): + body['parameters']['enhancedExpansion'] = True + self.client.extend_ldev(ldev, body) def get_pool_info(self, pool_id, result=None): @@ -1528,7 +1659,8 @@ class HBSDREST(common.HBSDCommon): return True, None - def _is_modifiable_dr_value(self, dr_mode, dr_status, new_dr_mode, volume): + def _is_modifiable_dr_value(self, dr_mode, dr_status, + new_dr_mode, is_drs, volume): if (dr_status == 'REHYDRATING' and new_dr_mode == 'compression_deduplication'): 
self.output_log(MSG.VOLUME_IS_BEING_REHYDRATED, @@ -1541,6 +1673,8 @@ class HBSDREST(common.HBSDCommon): volume_type=volume['volume_type']['name']) return False elif new_dr_mode == 'disabled': + if is_drs: + return False return dr_status in _DISABLE_ABLE_DR_STATUS.get(dr_mode, ()) elif new_dr_mode == 'compression_deduplication': return dr_status in _DEDUPCOMP_ABLE_DR_STATUS.get(dr_mode, ()) @@ -1573,6 +1707,8 @@ class HBSDREST(common.HBSDCommon): extra_specs_capacity_saving = None new_capacity_saving = None + extra_specs_drs = False + new_drs = None allowed_extra_specs = [] if self.driver_info.get('driver_dir_name'): extra_specs_capacity_saving = ( @@ -1580,6 +1716,12 @@ class HBSDREST(common.HBSDCommon): new_capacity_saving = ( new_type['extra_specs'].get(extra_specs_capacity_saving)) allowed_extra_specs.append(extra_specs_capacity_saving) + + extra_specs_drs = ( + self.driver_info['driver_dir_name'] + ':drs') + new_drs = ( + new_type['extra_specs'].get(extra_specs_drs)) + new_dr_mode = _CAPACITY_SAVING_DR_MODE.get(new_capacity_saving) if not new_dr_mode: msg = self.output_log( @@ -1587,6 +1729,7 @@ class HBSDREST(common.HBSDCommon): key=extra_specs_capacity_saving, value=new_capacity_saving) self.raise_error(msg) + ldev = self.get_ldev(volume) if ldev is None: msg = self.output_log( @@ -1594,7 +1737,19 @@ class HBSDREST(common.HBSDCommon): id=volume['id']) self.raise_error(msg) ldev_info = self.get_ldev_info( - ['dataReductionMode', 'dataReductionStatus', 'poolId'], ldev) + ['dataReductionMode', 'dataReductionStatus', + 'poolId', 'attributes'], ldev) + + # The DRS mode is not allowed to change. 
+ is_current_drs = ldev_info['attributes'] and ( + utils.DRS_VOL_ATTR in ldev_info['attributes']) + if _DRS_MODE.get(new_drs, False) is not is_current_drs: + msg = self.output_log( + MSG.FAILED_CHANGE_VOLUME_TYPE, + key=extra_specs_drs, + value=new_drs) + self.raise_error(msg) + old_pool_id = ldev_info['poolId'] new_pool_id = host['capabilities']['location_info'].get('pool_id') if (not _check_specs_diff(diff, allowed_extra_specs) @@ -1610,7 +1765,10 @@ class HBSDREST(common.HBSDCommon): ['dataReductionMode', 'dataReductionStatus'], ldev) if not self._is_modifiable_dr_value( ldev_info['dataReductionMode'], - ldev_info['dataReductionStatus'], new_dr_mode, volume): + ldev_info['dataReductionStatus'], + new_dr_mode, + _DRS_MODE.get(new_drs, False), + volume): return False self._modify_capacity_saving(ldev, new_dr_mode) @@ -1628,7 +1786,9 @@ class HBSDREST(common.HBSDCommon): self._wait_copy_pair_status(svol, set([SMPL, PSUE])) status = self._get_copy_pair_status(svol) if status == PSUE: - msg = self.output_log(MSG.VOLUME_COPY_FAILED, pvol=pvol, svol=svol) + msg = self.output_log(MSG.VOLUME_COPY_FAILED, + pvol=pvol, + svol=svol) self.raise_error(msg) def create_target_name(self, connector): diff --git a/cinder/volume/drivers/hitachi/hbsd_rest_api.py b/cinder/volume/drivers/hitachi/hbsd_rest_api.py index e7081666d02..d6ba03c34c5 100644 --- a/cinder/volume/drivers/hitachi/hbsd_rest_api.py +++ b/cinder/volume/drivers/hitachi/hbsd_rest_api.py @@ -1,4 +1,5 @@ # Copyright (C) 2020, 2024, Hitachi, Ltd. +# Copyright (C) 2025, Hitachi Vantara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -835,6 +836,15 @@ class RestApiClient(): } self._delete_object(url, **kwargs) + def get_snapshot_by_svol(self, svolLdevId): + """Get a snapshot information by using svolLdevId.""" + url = '%(url)s/snapshots?svolLdevId=%(ldevId)d' % { + 'url': self.object_url, + 'ldevId': svolLdevId, + } + + return self._get_object(url) + def get_snapshots(self, params=None): """Get a list of snapshot information.""" url = '%(url)s/snapshots' % { @@ -886,6 +896,19 @@ class RestApiClient(): } self._invoke(url) + def snapshot_pair_to_vclone(self, snapshotId): + """convert snapshot(TIA) to vClone.""" + url = '%(url)s/snapshots/%(ssid)s/actions/virtual-clone/invoke' % { + 'url': self.object_url, + 'ssid': snapshotId, + } + body = { + "parameters": { + "operationType": "create" + } + } + self._add_object(url, body=body) + def discard_zero_page(self, ldev_id): """Return the ldev's no-data pages to the storage pool.""" url = '%(url)s/ldevs/%(id)s/actions/%(action)s/invoke' % { diff --git a/cinder/volume/drivers/hitachi/hbsd_utils.py b/cinder/volume/drivers/hitachi/hbsd_utils.py index b16092d87e3..6e283b96341 100644 --- a/cinder/volume/drivers/hitachi/hbsd_utils.py +++ b/cinder/volume/drivers/hitachi/hbsd_utils.py @@ -1,4 +1,5 @@ # Copyright (C) 2020, 2024, Hitachi, Ltd. +# Copyright (C) 2025, Hitachi Vantara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -26,8 +27,8 @@ from cinder import exception from cinder import utils as cinder_utils from cinder.volume import volume_types -VERSION = '2.4.0' -CI_WIKI_NAME = 'Hitachi_VSP_CI' +VERSION = '2.5.0' +CI_WIKI_NAME = 'Hitachi_CI' PARAM_PREFIX = 'hitachi' VENDOR_NAME = 'Hitachi' DRIVER_DIR_NAME = 'hbsd' @@ -36,6 +37,9 @@ DRIVER_FILE_PREFIX = 'hbsd' TARGET_PREFIX = 'HBSD-' HDP_VOL_ATTR = 'HDP' HDT_VOL_ATTR = 'HDT' +DRS_VOL_ATTR = 'DRS' +VCP_VOL_ATTR = 'VCP' +VC_VOL_ATTR = 'VC' NVOL_LDEV_TYPE = 'DP-VOL' TARGET_IQN_SUFFIX = '.hbsd-target' PAIR_ATTR = 'HTI' @@ -662,6 +666,18 @@ class HBSDMsg(enum.Enum): 'Cinder object. (%(obj)s: %(obj_id)s)', 'suffix': ERROR_SUFFIX, } + GET_SNAPSHOT_FROM_SVOL_FAILURE = { + 'msg_id': 772, + 'loglevel': base_logging.ERROR, + 'msg': 'Failed to get snapshot from s-vol %(svol)s. ', + 'suffix': ERROR_SUFFIX, + } + VCLONE_PAIR_FAILED = { + 'msg_id': 773, + 'loglevel': base_logging.ERROR, + 'msg': 'Failed to ss2vclone. p-vol=%(pvol)s,s-vol=%(svol)s', + 'suffix': ERROR_SUFFIX, + } def __init__(self, error_info): """Initialize Enum attributes.""" diff --git a/cinder/volume/drivers/hpe/xp/hpe_xp_rest.py b/cinder/volume/drivers/hpe/xp/hpe_xp_rest.py index 6f42435d02b..b43a1a00319 100644 --- a/cinder/volume/drivers/hpe/xp/hpe_xp_rest.py +++ b/cinder/volume/drivers/hpe/xp/hpe_xp_rest.py @@ -82,6 +82,11 @@ COMMON_VOLUME_OPTS = [ default=10, min=1, max=600, help='Interval in seconds to check copy asynchronously'), + cfg.BoolOpt( + 'hpexp_manage_drs_volumes', + default=False, + help='If true, the driver will create a driver managed vClone parent ' + 'for each non-cloned DRS volume it creates.'), ] REST_VOLUME_OPTS = [ @@ -226,6 +231,8 @@ class HPEXPRESTFC(hbsd_rest_fc.HBSDRESTFC): self.conf.hpexp_copy_check_interval) self.conf.hitachi_async_copy_check_interval = ( self.conf.hpexp_async_copy_check_interval) + self.conf.hitachi_manage_drs_volumes = ( + self.conf.hpexp_manage_drs_volumes) # REST_VOLUME_OPTS 
self.conf.hitachi_rest_disable_io_wait = ( @@ -297,6 +304,8 @@ class HPEXPRESTISCSI(hbsd_rest_iscsi.HBSDRESTISCSI): self.conf.hpexp_copy_check_interval) self.conf.hitachi_async_copy_check_interval = ( self.conf.hpexp_async_copy_check_interval) + self.conf.hitachi_manage_drs_volumes = ( + self.conf.hpexp_manage_drs_volumes) # REST_VOLUME_OPTS self.conf.hitachi_rest_disable_io_wait = ( diff --git a/cinder/volume/drivers/hpe/xp/hpe_xp_utils.py b/cinder/volume/drivers/hpe/xp/hpe_xp_utils.py index 2a8477307f2..bdd227b9369 100644 --- a/cinder/volume/drivers/hpe/xp/hpe_xp_utils.py +++ b/cinder/volume/drivers/hpe/xp/hpe_xp_utils.py @@ -23,6 +23,9 @@ DRIVER_FILE_PREFIX = 'hpe_xp' TARGET_PREFIX = 'HPEXP-' HDP_VOL_ATTR = 'THP' HDT_VOL_ATTR = 'ST' +DRS_VOL_ATTR = 'DRS' +VCP_VOL_ATTR = 'VCP' +VC_VOL_ATTR = 'VC' NVOL_LDEV_TYPE = 'THP V-VOL' TARGET_IQN_SUFFIX = '.hpexp-target' PAIR_ATTR = 'FS' diff --git a/cinder/volume/drivers/nec/v/nec_v_rest.py b/cinder/volume/drivers/nec/v/nec_v_rest.py index 27560dd3aad..cf1ad52eb87 100644 --- a/cinder/volume/drivers/nec/v/nec_v_rest.py +++ b/cinder/volume/drivers/nec/v/nec_v_rest.py @@ -84,6 +84,11 @@ COMMON_VOLUME_OPTS = [ min=1, max=600, help='Interval in seconds to check asynchronous copying status during ' 'a copy pair deletion or data restoration.'), + cfg.BoolOpt( + 'nec_v_manage_drs_volumes', + default=False, + help='If true, the driver will create a driver managed vClone parent ' + 'for each non-cloned DRS volume it creates.'), ] REST_VOLUME_OPTS = [ @@ -212,6 +217,8 @@ def update_conf(conf): conf.nec_v_copy_check_interval) conf.hitachi_async_copy_check_interval = ( conf.nec_v_async_copy_check_interval) + conf.hitachi_manage_drs_volumes = ( + conf.nec_v_manage_drs_volumes) # REST_VOLUME_OPTS conf.hitachi_rest_disable_io_wait = ( diff --git a/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst b/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst index d3c916c6174..696f6ef10ab 100644 --- 
a/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst +++ b/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst @@ -59,6 +59,10 @@ Supported storages: | VSP G1000, | | | VSP G1500 | | +-----------------+------------------------+ +| VSP One B24, | A3-04-20 or later | +| B26, | | +| B28 | | ++-----------------+------------------------+ Required storage licenses: @@ -68,9 +72,11 @@ Required storage licenses: - Hitachi Dynamic Provisioning * Hitachi Local Replication (Hitachi Thin Image) +* Deduplication and compression (VSP One Block) + Optional storage licenses: -* Deduplication and compression +* Deduplication and compression (non-VSP One Block) * Global-Active Device @@ -99,6 +105,7 @@ Hitachi block storage driver also supports the following additional features: * Global-Active Device * Maximum number of copy pairs and consistency groups * Data deduplication and compression +* DRS volumes * Port scheduler * Port assignment using extra spec * Configuring Quality of Service (QoS) settings @@ -623,6 +630,71 @@ The cinder delete command finishes when the storage system starts the LDEV deletion process. The LDEV cannot be reused until the LDEV deletion process is completed on the storage system. +DRS volumes +---------------------------------- + +Use DRS volumes to improve storage utilization using data +reduction and data sharing. + +DRS volumes are required for VSP One Block series storage +when performing Clone operations. + +DRS volumes may not have the DRS or deduplication and +compression configuration modified or removed through +retyping. + +For details, +see `Capacity saving function: data deduplication and compression`_ +in the `Provisioning Guide`_. + +**Enabling DRS** + +To use the DRS functionality on the storage models, your storage +administrator must first enable the deduplication and compression for the DP +pool. 
+ +For details about how to enable this setting, see the description of pool +management in the +`Hitachi Command Suite Configuration Manager REST API Reference Guide`_ or the +`Hitachi Ops Center API Configuration Manager REST API Reference Guide`_. + +.. note:: + + * Do not set a subscription limit (virtualVolumeCapacityRate) for the DP + pool. + +Creating a volume with DRS enabled +<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + +To create a volume with the DRS setting enabled, +enable deduplication and compression and DRS for the relevant volume type. + +**Procedure** + +1. To enable the deduplication and compression setting, specify the value +``deduplication_compression`` for ``hbsd:capacity_saving`` in the extra specs +for the volume type. + +2. To enable the DRS setting, specify the value `` True`` for ``hbsd:drs`` +in the extra specs for the volume type. + +3. When creating a volume of the volume type created in the previous steps, +you can create a volume with the deduplication and compression function and +DRS function enabled. + +Deleting a volume with deduplication and compression enabled +<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + +The cinder delete command finishes when the storage system starts the LDEV +deletion process. The LDEV cannot be reused until the LDEV deletion process is +completed on the storage system. + +.. note:: + + * When deleting a volume that has been cloned using Thin Image Advanced and + vClone (DRS volumes + same pool), the vClone parent volume cannot be deleted + until all children have been deleted. + Port scheduler -------------- diff --git a/releasenotes/notes/B20-support-8c2baf5f781efffd.yaml b/releasenotes/notes/B20-support-8c2baf5f781efffd.yaml new file mode 100644 index 00000000000..461b5143971 --- /dev/null +++ b/releasenotes/notes/B20-support-8c2baf5f781efffd.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Hitachi driver: Enable support for VSP One B20. 
VSP One B20 supports ADR + functionality that offers up to 4:1 data saving, and Thin Image Advanced that supports + superior ROW functionality. In addition, the B20 supports vClone technology that allows + for instantaneous cloning and shared data between clones.