diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py index d7acc748c99..67c8835289b 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py @@ -3012,6 +3012,48 @@ GET_CLUSTER_NAME_RESPONSE_REST = { "uuid": "fake-cluster-uuid" } +# ASA r2 specific cluster info response +GET_CLUSTER_INFO_RESPONSE_REST = { + "name": "jayaanancluster-1", + "_links": { + "self": { + "href": "/api/cluster" + } + } +} + +# ASA r2 specific cluster capacity response +GET_CLUSTER_CAPACITY_RESPONSE_REST = { + "efficiency_without_snapshots": { + "ratio": 1, + "logical_used": 692224 + }, + "block_storage": { + "size": 234712203264, + "available": 117230436352, + "physical_used": 117481766912, + "physical_used_percent": 50, + "total_metadata_used": 117481066496, + "log_and_recovery_metadata": 117356101632, + "delayed_frees": 87412736, + "full_threshold_percent": 98, + "nearly_full_threshold_percent": 95 + }, + "metric": { + "timestamp": "2025-08-05T08:56:45Z", + "status": "inconsistent_old_data", + "duration": "PT15S", + "available_size": 117230436352, + "used_size": 117481766912, + "total_size": 234712203264 + }, + "_links": { + "self": { + "href": "/api/storage/cluster?fields=**" + } + } +} + GET_VSERVER_PEERS_RECORDS_REST = [ { "_links": { @@ -3158,3 +3200,54 @@ GET_INTERFACES_NVME_REST = { ], 'num_records': 1 } + +GET_AGGREGATE_STORAGE_TYPES_RESPONSE_REST = { + "records": [ + { + "uuid": "3e5e2865-af43-4d82-a808-8a7222cf0369", + "name": "dataFA_2_p0_i1", + "block_storage": { + "storage_type": "ssd", + "primary": { + "disk_class": "solid_state", + "raid_size": 29, + "disk_type": "ssd" + } + } + } + ], + "num_records": 1 +} + +GET_AGGREGATE_STORAGE_TYPES_MULTIPLE_RESPONSE_REST = { + "records": [ + { + "uuid": "3e5e2865-af43-4d82-a808-8a7222cf0369", + "name": "dataFA_2_p0_i1", + "block_storage": { + "storage_type": "ssd", + "primary": { + "disk_class": "solid_state", + "disk_type": "ssd" + } + } + }, + { + "uuid": "4f6f3976-bg54-5e93-b919-9b8333dg1480", + "name": "dataFA_2_p0_i2", + "block_storage": { + "storage_type": "ssd", + "primary": { + "disk_class": "solid_state", + "disk_type": "ssd" + } + } + } + ], + "num_records": 2 +} + +GET_AGGREGATE_STORAGE_TYPES_EMPTY_RESPONSE_REST = { + "records": [], + "num_records": 0 +} diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest.py index aeb7f16be11..f435ac99732 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest.py @@ -45,7 +45,8 @@ CONNECTION_INFO = {'hostname': 'hostname', 'private_key_file': 'fake_private_key.pem', 'certificate_file': 'fake_cert.pem', 'ca_certificate_file': 'fake_ca_cert.crt', - 'certificate_host_validation': 'False' + 'certificate_host_validation': 'False', + 'is_disaggregated': 'False', } diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest_asar2.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest_asar2.py new file mode 100644 index 00000000000..055f4d9ba2a --- /dev/null +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest_asar2.py @@ -0,0 +1,582 @@ +# Copyright (c) 2025 NetApp, Inc. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +from unittest import mock +import uuid + +import ddt + +from cinder.tests.unit import test +from cinder.tests.unit.volume.drivers.netapp.dataontap.client import ( + fakes as fake_client) +from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake +from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api +from cinder.volume.drivers.netapp.dataontap.client import client_cmode +from cinder.volume.drivers.netapp.dataontap.client import client_cmode_rest +from cinder.volume.drivers.netapp.dataontap.client.client_cmode_rest_asar2\ + import RestClientASAr2 +from cinder.volume.drivers.netapp import utils as netapp_utils + + +CONNECTION_INFO = {'hostname': 'hostname', + 'transport_type': 'https', + 'port': 443, + 'username': 'admin', + 'password': 'passw0rd', + 'vserver': 'fake_vserver', + 'ssl_cert_path': 'fake_ca', + 'api_trace_pattern': 'fake_regex', + 'private_key_file': 'fake_private_key.pem', + 'certificate_file': 'fake_cert.pem', + 'ca_certificate_file': 'fake_ca_cert.crt', + 'certificate_host_validation': 'False', + 'is_disaggregated': 'True', # ASA r2 is disaggregated + } + + +@ddt.ddt +class NetAppRestCmodeASAr2ClientTestCase(test.TestCase): + + def setUp(self): + super(NetAppRestCmodeASAr2ClientTestCase, self).setUp() + + # Setup Client mocks + self.mock_object(client_cmode.Client, '_init_ssh_client') + # store the original reference so we can call it later in + # test__get_cluster_nodes_info + self.original_get_cluster_nodes_info = ( + client_cmode.Client._get_cluster_nodes_info) + self.mock_object(client_cmode.Client, '_get_cluster_nodes_info', + return_value=fake.HYBRID_SYSTEM_NODES_INFO) + self.mock_object(client_cmode.Client, 'get_ontap_version', + return_value=(9, 16, 1)) + self.mock_object(client_cmode.Client, + 'get_ontapi_version', + return_value=(0, 0)) + + # Setup RestClient mocks + self.mock_object(client_cmode_rest.RestClient, '_init_ssh_client') + + self.original_get_cluster_nodes_info = ( + client_cmode_rest.RestClient._get_cluster_nodes_info) + + if not hasattr(client_cmode_rest.RestClient, + '_get_cluster_nodes_info'): + setattr(client_cmode_rest.RestClient, + '_get_cluster_nodes_info', + None) + self.original_get_cluster_nodes_info = ( + client_cmode_rest.RestClient._get_cluster_nodes_info) + + self.mock_object(client_cmode_rest.RestClient, + '_get_cluster_nodes_info', + return_value=fake.HYBRID_SYSTEM_NODES_INFO) + self.mock_object(client_cmode_rest.RestClient, 'get_ontap_version', + return_value=(9, 16, 1)) + + # Setup ASA r2 specific mocks + self.mock_object(RestClientASAr2, '_init_ssh_client') + self.mock_object(RestClientASAr2, '_get_cluster_nodes_info', + return_value=fake.HYBRID_SYSTEM_NODES_INFO) + self.mock_object(RestClientASAr2, 'get_ontap_version', + return_value=(9, 16, 1)) + + with mock.patch.object(RestClientASAr2, + 'get_ontap_version', + return_value=(9, 16, 1)): + self.client = RestClientASAr2(**CONNECTION_INFO) + + self.client.ssh_client = mock.MagicMock() 
+ self.client.connection = mock.MagicMock() + self.connection = self.client.connection + + self.vserver = CONNECTION_INFO['vserver'] + self.fake_volume = str(uuid.uuid4()) + self.fake_lun = str(uuid.uuid4()) + + def _mock_api_error(self, code='fake'): + return mock.Mock(side_effect=netapp_api.NaApiError(code=code)) + + def test_initialization(self): + """Test ASA r2 client initialization.""" + self.assertIsInstance(self.client, RestClientASAr2) + self.assertIsInstance(self.client, + client_cmode_rest.RestClient) + + def test_init_asar2_features(self): + """Test ASA r2 specific features initialization.""" + # Test that _init_asar2_features is called during initialization + with mock.patch.object(RestClientASAr2, + '_init_asar2_features') as mock_init: + with mock.patch.object(RestClientASAr2, + 'get_ontap_version', + return_value=(9, 16, 1)): + RestClientASAr2(**CONNECTION_INFO) + + mock_init.assert_called_once() + + @ddt.data(True, False) + def test_get_ontapi_version(self, cached): + """Test that ASA r2 returns (0, 0) for ONTAPI version.""" + result = self.client.get_ontapi_version(cached=cached) + expected = (0, 0) + self.assertEqual(expected, result) + + def test_getattr_missing_method(self): + """Test __getattr__ behavior for missing methods.""" + result = getattr(self.client, 'nonexistent_method', None) + self.assertIsNone(result) + + def test_send_request_inherits_from_parent(self): + """Test that send_request inherits behavior from parent class.""" + expected = 'fake_response' + mock_get_records = self.mock_object( + self.client, 'get_records', + mock.Mock(return_value=expected)) + + res = self.client.send_request( + fake_client.FAKE_ACTION_ENDPOINT, 'get', + body=fake_client.FAKE_BODY, + query=fake_client.FAKE_HTTP_QUERY, enable_tunneling=False) + + self.assertEqual(expected, res) + mock_get_records.assert_called_once_with( + fake_client.FAKE_ACTION_ENDPOINT, + fake_client.FAKE_HTTP_QUERY, False, 10000) + + def test_send_request_post_inherits_from_parent(self): + """Test that send_request POST inherits behavior from parent class.""" + expected = (201, 'fake_response') + mock_invoke = self.mock_object( + self.client.connection, 'invoke_successfully', + mock.Mock(return_value=expected)) + + res = self.client.send_request( + fake_client.FAKE_ACTION_ENDPOINT, 'post', + body=fake_client.FAKE_BODY, + query=fake_client.FAKE_HTTP_QUERY, enable_tunneling=False) + + self.assertEqual(expected[1], res) + mock_invoke.assert_called_once_with( + fake_client.FAKE_ACTION_ENDPOINT, 'post', + body=fake_client.FAKE_BODY, + query=fake_client.FAKE_HTTP_QUERY, enable_tunneling=False) + + @ddt.data( + {'enable_tunneling': True}, + {'enable_tunneling': False} + ) + @ddt.unpack + def test_get_records_inherits_from_parent(self, enable_tunneling): + """Test that get_records inherits behavior from parent class.""" + api_responses = [ + (200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_PAGE), + (200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_PAGE), + (200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_LAST_PAGE), + ] + + self.mock_object( + self.client.connection, 'invoke_successfully', + side_effect=copy.deepcopy(api_responses)) + + query = { + 'fields': 'name' + } + + result = self.client.get_records( + '/storage/volumes/', query=query, + enable_tunneling=enable_tunneling, + max_page_length=10) + + num_records = result['num_records'] + self.assertEqual(28, num_records) + self.assertEqual(28, len(result['records'])) + + expected_records = [] + expected_records.extend(api_responses[0][1]['records']) + 
expected_records.extend(api_responses[1][1]['records']) + expected_records.extend(api_responses[2][1]['records']) + + self.assertEqual(expected_records, result['records']) + + def test_send_ems_log_message_inherits_from_parent(self): + """Test send_ems_log_message inherits behavior""" + message_dict = { + 'computer-name': '25-dev-vm', + 'event-source': 'Cinder driver NetApp_iSCSI_ASAr2_direct', + 'app-version': 'dummy app version', + 'category': 'provisioning', + 'log-level': '5', + 'auto-support': 'false', + 'event-id': '1', + 'event-description': + '{"pools": {"vserver": "vserver_name",' + + '"aggregates": [], "flexvols": ["flexvol_01"]}}' + } + + body = { + 'computer_name': message_dict['computer-name'], + 'event_source': message_dict['event-source'], + 'app_version': message_dict['app-version'], + 'category': message_dict['category'], + 'severity': 'notice', + 'autosupport_required': message_dict['auto-support'] == 'true', + 'event_id': message_dict['event-id'], + 'event_description': message_dict['event-description'], + } + + self.mock_object(self.client, '_get_ems_log_destination_vserver', + return_value='vserver_name') + self.mock_object(self.client, 'send_request') + + self.client.send_ems_log_message(message_dict) + + self.client.send_request.assert_called_once_with( + '/support/ems/application-logs', 'post', body=body) + + def test_inheritance_all_parent_methods_available(self): + """Test that ASA r2 client has access to all parent methods.""" + # Test that common parent methods are available + parent_methods = [ + 'send_request', 'get_records', 'send_ems_log_message' + ] + + for method_name in parent_methods: + self.assertTrue(hasattr(self.client, method_name), + f"Method {method_name} should be available") + self.assertTrue(callable(getattr(self.client, method_name)), + f"Method {method_name} should be callable") + + def test_asar2_specific_ontapi_not_supported(self): + """Test that ASA r2 specifically doesn't support ONTAPI.""" + # This is a key differentiator for ASA r2 + result = self.client.get_ontapi_version() + self.assertEqual((0, 0), result) + + # No change for cached version + result_cached = self.client.get_ontapi_version(cached=True) + self.assertEqual((0, 0), result_cached) + + def test_disaggregated_platform_connection_info(self): + """Test ASA r2 client works with disaggregated platform settings.""" + # Verify the connection info includes disaggregated flag + self.assertEqual('True', CONNECTION_INFO['is_disaggregated']) + + # Test that client can be initialized with disaggregated settings + disaggregated_info = CONNECTION_INFO.copy() + disaggregated_info['is_disaggregated'] = 'True' + + with mock.patch.object(RestClientASAr2, 'get_ontap_version', + return_value=(9, 18, 1)): + client = RestClientASAr2(**disaggregated_info) + self.assertIsInstance(client, RestClientASAr2) + + def test_get_cluster_info_success(self): + """Test successful cluster info retrieval.""" + expected_response = fake_client.GET_CLUSTER_INFO_RESPONSE_REST + + self.mock_object(self.client, 'send_request', + return_value=expected_response) + + result = self.client.get_cluster_info() + + expected_query = {'fields': 'name,disaggregated'} + self.client.send_request.assert_called_once_with( + '/cluster', 'get', query=expected_query, enable_tunneling=False) + self.assertEqual(expected_response, result) + + def test_get_cluster_info_exception(self): + """Test exception handling during cluster info retrieval.""" + self.mock_object(self.client, 'send_request', + side_effect=Exception("API error")) + + 
result = self.client.get_cluster_info() + + expected_query = {'fields': 'name,disaggregated'} + self.client.send_request.assert_called_once_with( + '/cluster', 'get', query=expected_query, enable_tunneling=False) + self.assertIsNone(result) + + def test_get_cluster_info_empty_response(self): + """Test cluster info retrieval with empty response.""" + self.mock_object(self.client, 'send_request', + return_value={}) + + result = self.client.get_cluster_info() + + expected_query = {'fields': 'name,disaggregated'} + self.client.send_request.assert_called_once_with( + '/cluster', 'get', query=expected_query, enable_tunneling=False) + self.assertEqual({}, result) + + def test_get_cluster_info_netapp_api_error(self): + """Test NetApp API error handling during cluster info retrieval.""" + self.mock_object(self.client, 'send_request', + side_effect=netapp_api.NaApiError("NetApp API error")) + + result = self.client.get_cluster_info() + + expected_query = {'fields': 'name,disaggregated'} + self.client.send_request.assert_called_once_with( + '/cluster', 'get', query=expected_query, enable_tunneling=False) + self.assertIsNone(result) + + def test_get_cluster_capacity_success(self): + """Test successful cluster capacity retrieval.""" + expected_response = fake_client.GET_CLUSTER_CAPACITY_RESPONSE_REST + + self.mock_object(self.client, 'send_request', + return_value=expected_response) + + result = self.client.get_cluster_capacity() + + expected_query =\ + {'fields': 'block_storage.size,block_storage.available'} + self.client.send_request.assert_called_once_with( + '/storage/cluster', 'get', + query=expected_query, enable_tunneling=False) + + expected_capacity = { + 'size-total': float(expected_response['block_storage']['size']), + 'size-available': + float(expected_response['block_storage']['available']) + } + self.assertEqual(expected_capacity, result) + + def test_get_cluster_capacity_no_response(self): + """Test cluster capacity retrieval with no response.""" + self.mock_object(self.client, 'send_request', + return_value=None) + + result = self.client.get_cluster_capacity() + + expected_query =\ + {'fields': 'block_storage.size,block_storage.available'} + self.client.send_request.assert_called_once_with( + '/storage/cluster', 'get', + query=expected_query, enable_tunneling=False) + self.assertEqual({}, result) + + def test_get_cluster_capacity_missing_block_storage(self): + """Test cluster capacity retrieval with missing block_storage.""" + response = {'some_other_field': 'value'} + + self.mock_object(self.client, 'send_request', + return_value=response) + + result = self.client.get_cluster_capacity() + + expected_query =\ + {'fields': 'block_storage.size,block_storage.available'} + self.client.send_request.assert_called_once_with( + '/storage/cluster', 'get', + query=expected_query, enable_tunneling=False) + + expected_capacity = { + 'size-total': 0.0, + 'size-available': 0.0 + } + self.assertEqual(expected_capacity, result) + + def test_get_cluster_capacity_partial_block_storage(self): + """Test cluster capacity retrieval with partial block_storage.""" + response = { + 'block_storage': { + 'size': 1000000000, + # missing 'available' field + } + } + + self.mock_object(self.client, 'send_request', + return_value=response) + + result = self.client.get_cluster_capacity() + + expected_query =\ + {'fields': 'block_storage.size,block_storage.available'} + self.client.send_request.assert_called_once_with( + '/storage/cluster', 'get', + query=expected_query, enable_tunneling=False) + + expected_capacity = { 
+ 'size-total': 1000000000.0, + 'size-available': 0.0 + } + self.assertEqual(expected_capacity, result) + + def test_get_cluster_capacity_exception(self): + """Test exception handling during cluster capacity retrieval.""" + self.mock_object(self.client, 'send_request', + side_effect=Exception("API error")) + + self.assertRaises(netapp_utils.NetAppDriverException, + self.client.get_cluster_capacity) + + expected_query =\ + {'fields': 'block_storage.size,block_storage.available'} + self.client.send_request.assert_called_once_with( + '/storage/cluster', 'get', + query=expected_query, enable_tunneling=False) + + def test_get_cluster_capacity_netapp_api_error(self): + """Test NetApp API error handling during cluster capacity retrieval.""" + self.mock_object(self.client, 'send_request', + side_effect=netapp_api.NaApiError("NetApp API error")) + + self.assertRaises(netapp_utils.NetAppDriverException, + self.client.get_cluster_capacity) + + expected_query =\ + {'fields': 'block_storage.size,block_storage.available'} + self.client.send_request.assert_called_once_with( + '/storage/cluster', 'get', query=expected_query, + enable_tunneling=False) + + def test_get_aggregate_disk_types_success(self): + """Test successful aggregate disk types retrieval.""" + expected_response =\ + fake_client.GET_AGGREGATE_STORAGE_TYPES_RESPONSE_REST + + self.mock_object(self.client, 'send_request', + return_value=expected_response) + + result = self.client.get_aggregate_disk_types() + + expected_query = {'fields': 'name,block_storage.storage_type'} + self.client.send_request.assert_called_once_with( + '/storage/aggregates', 'get', query=expected_query, + enable_tunneling=False) + # Should return array of storage types + self.assertEqual(['ssd'], result) + + def test_get_aggregate_disk_types_multiple_records(self): + """Test aggregate disk types retrieval with multiple records.""" + expected_response =\ + fake_client.GET_AGGREGATE_STORAGE_TYPES_MULTIPLE_RESPONSE_REST + + self.mock_object(self.client, 'send_request', + return_value=expected_response) + + result = self.client.get_aggregate_disk_types() + + expected_query = {'fields': 'name,block_storage.storage_type'} + self.client.send_request.assert_called_once_with( + '/storage/aggregates', 'get', query=expected_query, + enable_tunneling=False) + # Should return array with all storage types including duplicates + self.assertEqual(['ssd', 'ssd'], result) + + def test_get_aggregate_disk_types_empty_records(self): + """Test aggregate disk types retrieval with empty records.""" + expected_response =\ + fake_client.GET_AGGREGATE_STORAGE_TYPES_EMPTY_RESPONSE_REST + + self.mock_object(self.client, 'send_request', + return_value=expected_response) + + result = self.client.get_aggregate_disk_types() + + expected_query = {'fields': 'name,block_storage.storage_type'} + self.client.send_request.assert_called_once_with( + '/storage/aggregates', 'get', query=expected_query, + enable_tunneling=False) + self.assertIsNone(result) + + def test_get_aggregate_disk_types_missing_block_storage(self): + """Test aggregate disk types retrieval with missing block_storage.""" + response = { + "records": [ + { + "uuid": "3e5e2865-af43-4d82-a808-8a7222cf0369", + "name": "dataFA_2_p0_i1", + # missing block_storage field + } + ], + "num_records": 1 + } + + self.mock_object(self.client, 'send_request', + return_value=response) + + result = self.client.get_aggregate_disk_types() + + expected_query = {'fields': 'name,block_storage.storage_type'} + self.client.send_request.assert_called_once_with( + 
'/storage/aggregates', 'get', query=expected_query, + enable_tunneling=False) + + self.assertEqual([], result) + + def test_get_aggregate_disk_types_missing_storage_type(self): + """Test aggregate disk types retrieval with missing storage_type.""" + response = { + "records": [ + { + "uuid": "3e5e2865-af43-4d82-a808-8a7222cf0369", + "name": "dataFA_2_p0_i1", + "block_storage": { + "primary": { + "disk_class": "solid_state", + "disk_type": "ssd" + } + # missing storage_type field + } + } + ], + "num_records": 1 + } + + self.mock_object(self.client, 'send_request', + return_value=response) + + result = self.client.get_aggregate_disk_types() + + expected_query = {'fields': 'name,block_storage.storage_type'} + self.client.send_request.assert_called_once_with( + '/storage/aggregates', 'get', query=expected_query, + enable_tunneling=False) + + self.assertEqual([], result) + + def test_get_aggregate_disk_types_netapp_api_error(self): + """Test NetApp API error handling.""" + self.mock_object(self.client, 'send_request', + side_effect=netapp_api.NaApiError("NetApp API error")) + + self.assertRaises(netapp_utils.NetAppDriverException, + self.client.get_aggregate_disk_types) + + expected_query = {'fields': 'name,block_storage.storage_type'} + self.client.send_request.assert_called_once_with( + '/storage/aggregates', 'get', query=expected_query, + enable_tunneling=False) + + def test_get_performance_counter_info_not_supported(self): + """Performance counter info raises NetAppDriverException.""" + self.assertRaises(netapp_utils.NetAppDriverException, + self.client.get_performance_counter_info, + 'system', 'cpu_busy') + + def test_get_performance_instance_uuids_not_supported(self): + """Performance instance UUIDs raises NetAppDriverException.""" + self.assertRaises(netapp_utils.NetAppDriverException, + self.client.get_performance_instance_uuids, + 'system', 'node1') + + def test_get_performance_counters_not_supported(self): + """Performance counters raises NetAppDriverException.""" + self.assertRaises(netapp_utils.NetAppDriverException, + self.client.get_performance_counters, + 'system', ['uuid1'], ['cpu_busy']) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py index a05774a0980..6b0abfbd199 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py @@ -440,6 +440,19 @@ FAKE_CMODE_POOL_MAP = { }, } +FAKE_CLUSTER_INFO = { + 'name': 'jayaanancluster-1', + '_links': { + 'self': { + 'href': '/api/cluster' + } + } +} + +FAKE_CLUSTER_POOL_MAP = { + 'jayaanancluster-1': {'pool_name': 'jayaanancluster-1'} +} + FILE_LIST = ['file1', 'file2', 'file3'] FAKE_LUN = netapp_api.NaElement.create_node_with_children( diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py index 28abffd7d10..0d41ff7c682 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py @@ -1855,6 +1855,7 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase): def test_add_looping_tasks(self): mock_add_task = self.mock_object(self.library.loopingcalls, 'add_task') + self.library.configuration.netapp_disaggregated_platform = False mock_call_snap_cleanup = self.mock_object( self.library, '_delete_snapshots_marked_for_deletion') mock_call_ems_logging = self.mock_object( @@ -1897,3 +1898,38 @@ class 
NetAppBlockStorageLibraryTestCase(test.TestCase): self.library.lun_table = {fake_lun.name: fake_lun} self.library._delete_lun_from_table('another-fake-lun') self.assertEqual({fake_lun.name: fake_lun}, self.library.lun_table) + + def test_add_looping_tasks_traditional_platform(self): + """Test _add_looping_tasks with AFF platform""" + mock_add_task = self.mock_object(self.library.loopingcalls, 'add_task') + self.library.configuration.netapp_disaggregated_platform = False + mock_call_snap_cleanup = self.mock_object( + self.library, '_delete_snapshots_marked_for_deletion') + mock_call_ems_logging = self.mock_object( + self.library, '_handle_ems_logging') + + self.library._add_looping_tasks() + + # Traditional platform should include snapshot cleanup task + mock_add_task.assert_has_calls([ + mock.call(mock_call_snap_cleanup, loopingcalls.ONE_MINUTE, + loopingcalls.ONE_MINUTE), + mock.call(mock_call_ems_logging, loopingcalls.ONE_HOUR)]) + + def test_add_looping_tasks_disaggregated_platform(self): + """Test _add_looping_tasks with disaggregated platform""" + mock_add_task = self.mock_object(self.library.loopingcalls, 'add_task') + self.library.configuration.netapp_disaggregated_platform = True + mock_call_snap_cleanup = self.mock_object( + self.library, '_delete_snapshots_marked_for_deletion') + mock_call_ems_logging = self.mock_object( + self.library, '_handle_ems_logging') + + self.library._add_looping_tasks() + + # Disaggregated platform should NOT include snapshot cleanup task + mock_add_task.assert_has_calls([ + mock.call(mock_call_ems_logging, loopingcalls.ONE_HOUR)]) + + # Verify snapshot cleanup is not called + mock_call_snap_cleanup.assert_not_called() diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py index aebeff23db6..8f89745490a 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py @@ -397,7 +397,6 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): mock.Mock(return_value=None) ) self.library._clone_lun('fakeLUN', 'newFakeLUN', is_snapshot=True) - self.library.zapi_client.clone_lun.assert_called_once_with( 'fakeLUN', 'fakeLUN', 'newFakeLUN', 'true', block_count=0, dest_block=0, src_block=0, qos_policy_group_name=None, @@ -645,8 +644,22 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): self.assertEqual({}, result) mock_list_flexvols.assert_called_once_with() - def test_update_ssc(self): + def test_update_ssc_disaggregated_platform(self): + """Test _update_ssc with disaggregated platform (ASA r2).""" + self.library.configuration.netapp_disaggregated_platform = True + mock_get_cluster_pool_map = self.mock_object( + self.library, '_get_cluster_to_pool_map', + return_value=fake.FAKE_CLUSTER_INFO) + result = self.library._update_ssc() + + self.assertIsNone(result) + mock_get_cluster_pool_map.assert_called_once_with() + self.library.ssc_library.update_ssc_asa.assert_called_once_with( + fake.FAKE_CLUSTER_INFO) + + def test_update_ssc(self): + """Test _update_ssc with traditional platform (flexvol).""" mock_get_pool_map = self.mock_object( self.library, '_get_flexvol_to_pool_map', return_value=fake.FAKE_CMODE_VOLUMES) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nvme_library.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nvme_library.py index 83f3b1e2cb4..94dcff2af47 100644 --- 
a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nvme_library.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nvme_library.py @@ -145,6 +145,44 @@ class NetAppNVMeStorageLibraryTestCase(test.TestCase): na_utils.NetAppDriverException, self.library.check_for_setup_error) + def test_check_for_setup_error_disaggregated(self): + self.library.configuration.netapp_disaggregated_platform = True + self.mock_object(self.library, '_get_cluster_to_pool_map', + return_value=fake.POOL_NAME) + self.mock_object(self.library, '_add_looping_tasks') + self.library.namespace_ostype = 'linux' + self.library.host_type = 'linux' + self.mock_object(self.library.client, 'get_namespace_list', + return_value='fake_namespace_list') + self.mock_object(self.library, '_extract_and_populate_namespaces') + self.mock_object(self.library.loopingcalls, 'start_tasks') + + self.library.check_for_setup_error() + + self.library._add_looping_tasks.assert_called_once_with() + self.library.client.get_namespace_list.assert_called_once_with() + self.library._extract_and_populate_namespaces.assert_called_once_with( + 'fake_namespace_list') + self.library.loopingcalls.start_tasks.assert_called_once_with() + + @ddt.data( + {'pool_map': None, 'namespace': 'linux', 'host': 'linux'}, + {'pool_map': 'fake_map', 'namespace': 'fake', 'host': 'linux'}, + {'pool_map': 'fake_map', 'namespace': 'linux', 'host': 'fake'}) + @ddt.unpack + def test_check_for_setup_error_error_disaggregated( + self, pool_map, namespace, host): + self.library.configuration.netapp_disaggregated_platform = True + self.mock_object(self.library, '_get_cluster_to_pool_map', + return_value=pool_map) + self.library.namespace_ostype = namespace + self.library.host_type = host + self.mock_object(self.library, '_add_looping_tasks') + + self.assertRaises( + na_utils.NetAppDriverException, + self.library.check_for_setup_error) + def test_create_volume(self): volume_size_in_bytes = int(fake.SIZE) * units.Gi self.mock_object(volume_utils, 'extract_host', @@ -275,6 +313,19 @@ class NetAppNVMeStorageLibraryTestCase(test.TestCase): self.library.ssc_library.update_ssc.assert_called_once_with( 'fake_pool_map') + def test__update_ssc_disaggregated_platform(self): + self.library.configuration.netapp_disaggregated_platform = True + mock_get_cluster_pool_map = self.mock_object( + self.library, '_get_cluster_to_pool_map', + return_value=fake.FAKE_CLUSTER_POOL_MAP) + self.library.ssc_library.update_ssc_asa = mock.Mock() + + self.library._update_ssc() + + mock_get_cluster_pool_map.assert_called_once_with() + self.library.ssc_library.update_ssc_asa.assert_called_once_with( + fake.FAKE_CLUSTER_POOL_MAP) + def test__find_mapped_namespace_subsystem(self): self.mock_object(self.library.client, 'get_subsystem_by_host', return_value=[{'name': fake.SUBSYSTEM}]) @@ -1126,3 +1177,36 @@ class NetAppNVMeStorageLibraryTestCase(test.TestCase): self.assertIsNone(snapshots_model_update) mock_delete_namespace.assert_called_once_with(fake.VG_SNAPSHOT['name']) + + def test_netapp_disaggregated_platform_config_true(self): + """Test behavior when netapp_disaggregated_platform is True.""" + self.library.configuration.netapp_disaggregated_platform = True + + # Mock the cluster pool map method + mock_cluster_pool_map = self.mock_object( + self.library, '_get_cluster_to_pool_map', + return_value=fake.FAKE_CLUSTER_POOL_MAP) + + # Test _update_ssc uses cluster pool mapping + self.library.ssc_library.update_ssc_asa = mock.Mock() + self.library._update_ssc() + + 
mock_cluster_pool_map.assert_called_once_with() + self.library.ssc_library.update_ssc_asa.assert_called_once_with( + fake.FAKE_CLUSTER_POOL_MAP) + + def test_netapp_disaggregated_platform_config_false(self): + """Test behavior when netapp_disaggregated_platform is False.""" + self.library.configuration.netapp_disaggregated_platform = False + + mock_flexvol_pool_map = self.mock_object( + self.library, '_get_flexvol_to_pool_map', + return_value=fake.POOL_NAME) + + # Test _update_ssc uses flexvol pool mapping + self.library.ssc_library.update_ssc = mock.Mock() + self.library._update_ssc() + + mock_flexvol_pool_map.assert_called_once_with() + self.library.ssc_library.update_ssc.assert_called_once_with( + fake.POOL_NAME) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_utils.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_utils.py index eed57da2a83..a99e383a5d6 100644 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_utils.py +++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_utils.py @@ -216,3 +216,49 @@ class NetAppDataOntapUtilsTestCase(test.TestCase): 'event-description': '', } self.assertEqual(expected, result) + + def test_get_cluster_to_pool_map_success(self): + """Test successful cluster-to-pool mapping.""" + mock_client = mock.Mock() + self.mock_object( + mock_client, 'get_cluster_info', + return_value={ + 'name': 'cluster1', + 'disaggregated': True + }) + expected_pool_map = { + 'cluster1': {'pool_name': 'cluster1'} + } + + result = utils.get_cluster_to_pool_map(mock_client) + + self.assertEqual(expected_pool_map, result) + mock_client.get_cluster_info.assert_called_once() + + def test_get_cluster_to_pool_map_disaggregated_false_raises_exception(self): + """Test that disaggregated=False raises InvalidConfigurationValue.""" + mock_client = mock.Mock() + self.mock_object( + mock_client, 'get_cluster_info', + return_value={ + 'name': 'cluster1', + 'disaggregated': False + }) + + self.assertRaises( + exception.InvalidConfigurationValue, + utils.get_cluster_to_pool_map, + mock_client) + + def test_get_cluster_to_pool_map_disaggregated_missing_exception(self): + mock_client = mock.Mock() + self.mock_object( + mock_client, 'get_cluster_info', + return_value={ + 'name': 'cluster1', + }) + + self.assertRaises( + exception.InvalidConfigurationValue, + utils.get_cluster_to_pool_map, + mock_client) diff --git a/cinder/volume/drivers/netapp/dataontap/block_base.py b/cinder/volume/drivers/netapp/dataontap/block_base.py index a0f6c67910d..1784663a94c 100644 --- a/cinder/volume/drivers/netapp/dataontap/block_base.py +++ b/cinder/volume/drivers/netapp/dataontap/block_base.py @@ -189,10 +189,13 @@ class NetAppBlockStorageLibrary( """ # Add the task that deletes snapshots marked for deletion. - self.loopingcalls.add_task( - self._delete_snapshots_marked_for_deletion, - loopingcalls.ONE_MINUTE, - loopingcalls.ONE_MINUTE) + # Add snapshot cleanup task for ASA r2 once snapshot feature is + # implemented in the driver.
+ if not self.configuration.netapp_disaggregated_platform: + self.loopingcalls.add_task( + self._delete_snapshots_marked_for_deletion, + loopingcalls.ONE_MINUTE, + loopingcalls.ONE_MINUTE) # Add the task that logs EMS messages self.loopingcalls.add_task( diff --git a/cinder/volume/drivers/netapp/dataontap/block_cmode.py b/cinder/volume/drivers/netapp/dataontap/block_cmode.py index b10c3f59491..7d5dd688422 100644 --- a/cinder/volume/drivers/netapp/dataontap/block_cmode.py +++ b/cinder/volume/drivers/netapp/dataontap/block_cmode.py @@ -134,11 +134,18 @@ class NetAppBlockStorageCmodeLibrary( def check_for_setup_error(self): """Check that the driver is working and can communicate.""" - if not self._get_flexvol_to_pool_map(): + if (not self._get_flexvol_to_pool_map() + and not self.configuration.netapp_disaggregated_platform): msg = _('No pools are available for provisioning volumes. ' 'Ensure that the configuration option ' 'netapp_pool_name_search_pattern is set correctly.') raise na_utils.NetAppDriverException(msg) + elif (self.configuration.netapp_disaggregated_platform + and not self._get_cluster_to_pool_map()): + msg = _('No pools are available for provisioning volumes. ' + 'Ensure ASA r2 configuration option is set correctly.') + raise na_utils.NetAppDriverException(msg) + self._add_looping_tasks() super(NetAppBlockStorageCmodeLibrary, self).check_for_setup_error() @@ -361,7 +368,10 @@ class NetAppBlockStorageCmodeLibrary( # Utilization and performance metrics require cluster-scoped # credentials - if self.using_cluster_credentials: + # Performance metrics are skipped for disaggregated for now. + # TODO(jayaanan): Add support for performance metrics for ASA r2 + if (self.using_cluster_credentials + and not self.configuration.netapp_disaggregated_platform): # Get up-to-date node utilization metrics just once self.perf_library.update_performance_cache(ssc) @@ -390,8 +400,11 @@ class NetAppBlockStorageCmodeLibrary( self.max_over_subscription_ratio) # Add up-to-date capacity info - capacity = self.zapi_client.get_flexvol_capacity( - flexvol_name=ssc_vol_name) + if self.configuration.netapp_disaggregated_platform: + capacity = self.zapi_client.get_cluster_capacity() + else: + capacity = self.zapi_client.get_flexvol_capacity( + flexvol_name=ssc_vol_name) size_total_gb = capacity['size-total'] / units.Gi pool['total_capacity_gb'] = na_utils.round_down(size_total_gb) @@ -412,9 +425,12 @@ class NetAppBlockStorageCmodeLibrary( pool['provisioned_capacity_gb'] = na_utils.round_down( float(provisioned_cap) / units.Gi) - if self.using_cluster_credentials: - dedupe_used = self.zapi_client.get_flexvol_dedupe_used_percent( - ssc_vol_name) + if (self.using_cluster_credentials and + not self.configuration.netapp_disaggregated_platform): + dedupe_used = ( + self.zapi_client + .get_flexvol_dedupe_used_percent(ssc_vol_name) + ) else: dedupe_used = 0.0 pool['netapp_dedupe_used_percent'] = na_utils.round_down( @@ -442,8 +458,10 @@ class NetAppBlockStorageCmodeLibrary( def _update_ssc(self): """Refresh the storage service catalog with the latest set of pools.""" - - self.ssc_library.update_ssc(self._get_flexvol_to_pool_map()) + if self.configuration.netapp_disaggregated_platform: + self.ssc_library.update_ssc_asa(self._get_cluster_to_pool_map()) + else: + self.ssc_library.update_ssc(self._get_flexvol_to_pool_map()) def _get_flexvol_to_pool_map(self): """Get the flexvols that match the pool name search pattern. 
@@ -474,6 +492,9 @@ class NetAppBlockStorageCmodeLibrary( return pools + def _get_cluster_to_pool_map(self): + return dot_utils.get_cluster_to_pool_map(self.zapi_client) + def delete_volume(self, volume): """Driver entry point for destroying existing volumes.""" super(NetAppBlockStorageCmodeLibrary, self).delete_volume(volume) diff --git a/cinder/volume/drivers/netapp/dataontap/client/client_cmode_rest.py b/cinder/volume/drivers/netapp/dataontap/client/client_cmode_rest.py index 6db3b176a93..7b19e43b4e5 100644 --- a/cinder/volume/drivers/netapp/dataontap/client/client_cmode_rest.py +++ b/cinder/volume/drivers/netapp/dataontap/client/client_cmode_rest.py @@ -75,6 +75,7 @@ class RestClient(object, metaclass=volume_utils.TraceWrapperMetaclass): certificate_file = kwargs['certificate_file'] ca_certificate_file = kwargs['ca_certificate_file'] certificate_host_validation = kwargs['certificate_host_validation'] + is_disaggregated = kwargs.get('is_disaggregated', False) if private_key_file and certificate_file and ca_certificate_file: self.connection = netapp_api.RestNaServer( host=host, @@ -119,7 +120,8 @@ class RestClient(object, metaclass=volume_utils.TraceWrapperMetaclass): # NOTE(nahimsouza): ZAPI Client is needed to implement the fallback # when a REST method is not supported. - self.zapi_client = client_cmode.Client(**kwargs) + if not is_disaggregated: + self.zapi_client = client_cmode.Client(**kwargs) self._init_features() diff --git a/cinder/volume/drivers/netapp/dataontap/client/client_cmode_rest_asar2.py b/cinder/volume/drivers/netapp/dataontap/client/client_cmode_rest_asar2.py new file mode 100644 index 00000000000..f6c4c78dcb4 --- /dev/null +++ b/cinder/volume/drivers/netapp/dataontap/client/client_cmode_rest_asar2.py @@ -0,0 +1,187 @@ +# Copyright (c) 2025 NetApp, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +NetApp ASA r2 REST client for Data ONTAP. + +This module provides the ASA r2 specific REST client that inherits from +the base REST client and overrides methods to implement ASA r2 specific +workflows when needed. +""" + +from oslo_log import log as logging + +from cinder.i18n import _ +from cinder.volume.drivers.netapp.dataontap.client import client_cmode_rest +from cinder.volume.drivers.netapp import utils as netapp_utils +from cinder.volume import volume_utils + +LOG = logging.getLogger(__name__) + + +class RestClientASAr2(client_cmode_rest.RestClient, + metaclass=volume_utils.TraceWrapperMetaclass): + """NetApp ASA r2 REST client for Data ONTAP. + + This client inherits from the base REST client and provides ASA r2 + specific functionality for disaggregated platform workflows. + + By default, all methods from the parent RestClient are called. + Override methods only when ASA r2 specific functionality is required. + The __getattr__ method automatically routes any missing methods to the + parent class, eliminating the need to explicitly define every method. + """ + + def __init__(self, **kwargs): + """Initialize the ASA r2 REST client. 
+ + :param kwargs: Same parameters as the parent RestClient + """ + LOG.info("Initializing NetApp ASA r2 REST client") + super(RestClientASAr2, self).__init__(**kwargs) + self._init_asar2_features() + + def _init_asar2_features(self): + """Initialize ASA r2 specific features. + + This method can be used to set up ASA r2 specific features + and capabilities that are different from the standard ONTAP. + """ + LOG.debug("Initializing ASA r2 specific features") + + # Remove features not supported in ASA r2 by setting them to False + self.features.add_feature('SYSTEM_CONSTITUENT_METRICS', + supported=False) + self.features.add_feature('SYSTEM_METRICS', supported=False) + + # Add ASA r2 specific features here + # For example, you might want to enable specific features + # that are only available in ASA r2 environments + + # Example of adding ASA r2 specific features: + # self.features.add_feature('ASA_R2_SPECIFIC_FEATURE', supported=True) + # self.features.add_feature('ASA_R2_ENHANCED_CLONING', supported=True) + LOG.debug("ASA r2 specific features initialized successfully") + + def __getattr__(self, name): + """Log missing method call and return None.""" + LOG.error("Method '%s' not found in ASA r2 client", name) + return None + + def get_performance_counter_info(self, object_name, counter_name): + """ASA r2 doesn't support performance counter APIs as of now. + + TODO: Performance counter support will be added in upcoming releases. + """ + msg = _('Performance counter APIs are not supported on ASA r2.') + raise netapp_utils.NetAppDriverException(msg) + + def get_performance_instance_uuids(self, object_name, node_name): + """ASA r2 doesn't support performance counter APIs.""" + msg = _('Performance counter APIs are not supported on ASA r2.') + raise netapp_utils.NetAppDriverException(msg) + + def get_performance_counters(self, object_name, instance_uuids, + counter_names): + """ASA r2 doesn't support performance counter APIs.""" + msg = _('Performance counter APIs are not supported on ASA r2.') + raise netapp_utils.NetAppDriverException(msg) + + # ASA r2 does not support ONTAPI, so we raise NotImplementedError + def get_ontapi_version(self, cached=True): + """ASA r2 doesn't support ONTAPI.""" + return (0, 0) + + def get_cluster_info(self): + """Get cluster information for ASA r2.""" + query_args = { + 'fields': 'name,disaggregated', + } + + try: + response = self.send_request('/cluster', + 'get', query=query_args, + enable_tunneling=False) + return response + except Exception as e: + LOG.exception('Failed to get cluster information: %s', e) + return None + + def get_cluster_capacity(self): + """Get cluster capacity information for ASA r2.""" + query = { + 'fields': 'block_storage.size,block_storage.available' + } + + try: + response = self.send_request('/storage/cluster', + 'get', query=query, + enable_tunneling=False) + if not response: + LOG.error('No response received from cluster capacity API') + return {} + + block_storage = response.get('block_storage', {}) + + size_total = block_storage.get('size', 0) + size_available = block_storage.get('available', 0) + + capacity = { + 'size-total': float(size_total), + 'size-available': float(size_available) + } + + LOG.debug('Cluster total size %s:', capacity['size-total']) + LOG.debug('Cluster available size %s:', capacity['size-available']) + + return capacity + + except Exception as e: + LOG.exception('Failed to get cluster capacity: %s', e) + msg = _('Failed to get cluster capacity: %s') + raise netapp_utils.NetAppDriverException(msg % e) + + def 
get_aggregate_disk_types(self): + """Get storage_types as array from all aggregates.""" + query = { + 'fields': 'name,block_storage.storage_type' + } + + try: + response = self.send_request('/storage/aggregates', + 'get', query=query, + enable_tunneling=False) + if not response or 'records' not in response: + LOG.error('No records received from aggregate API') + return None + + # Collect storage types from all aggregates + storage_types = [] + if response['records']: + for record in response['records']: + storage_type = ( + record.get('block_storage', {}).get('storage_type')) + if storage_type: + storage_types.append(storage_type) + + LOG.debug('Aggregate storage types: %s', storage_types) + return storage_types + + LOG.warning('No aggregate records found') + return None + + except Exception as e: + LOG.exception('Failed to get aggregate storage types: %s', e) + msg = _('Failed to get aggregate storage types: %s') + raise netapp_utils.NetAppDriverException(msg % e) diff --git a/cinder/volume/drivers/netapp/dataontap/nvme_library.py b/cinder/volume/drivers/netapp/dataontap/nvme_library.py index 49f05ad76b0..aa1af35f4ea 100644 --- a/cinder/volume/drivers/netapp/dataontap/nvme_library.py +++ b/cinder/volume/drivers/netapp/dataontap/nvme_library.py @@ -150,7 +150,10 @@ class NetAppNVMeStorageLibrary( def _update_ssc(self): """Refresh the storage service catalog with the latest set of pools.""" - self.ssc_library.update_ssc(self._get_flexvol_to_pool_map()) + if self.configuration.netapp_disaggregated_platform: + self.ssc_library.update_ssc_asa(self._get_cluster_to_pool_map()) + else: + self.ssc_library.update_ssc(self._get_flexvol_to_pool_map()) def _get_flexvol_to_pool_map(self): """Get the flexvols that match the pool name search pattern. @@ -181,16 +184,25 @@ class NetAppNVMeStorageLibrary( return pools + def _get_cluster_to_pool_map(self): + return dot_utils.get_cluster_to_pool_map(self.client) + def check_for_setup_error(self): """Check that the driver is working and can communicate. Discovers the namespaces on the NetApp server. """ - if not self._get_flexvol_to_pool_map(): + if (not self.configuration.netapp_disaggregated_platform + and not self._get_flexvol_to_pool_map()): msg = _('No pools are available for provisioning volumes. ' 'Ensure that the configuration option ' 'netapp_pool_name_search_pattern is set correctly.') raise na_utils.NetAppDriverException(msg) + elif self.configuration.netapp_disaggregated_platform: + if not self._get_cluster_to_pool_map(): + msg = _('No pools are available for provisioning volumes. 
' + 'Ensure ASA r2 configuration option is set correctly.') + raise na_utils.NetAppDriverException(msg) self._add_looping_tasks() if self.namespace_ostype not in self.ALLOWED_NAMESPACE_OS_TYPES: @@ -505,7 +518,8 @@ class NetAppNVMeStorageLibrary( # Utilization and performance metrics require cluster-scoped # credentials - if self.using_cluster_credentials: + if (self.using_cluster_credentials + and not self.configuration.netapp_disaggregated_platform): # Get up-to-date node utilization metrics just once self.perf_library.update_performance_cache(ssc) @@ -534,8 +548,11 @@ class NetAppNVMeStorageLibrary( self.max_over_subscription_ratio) # Add up-to-date capacity info - capacity = self.client.get_flexvol_capacity( - flexvol_name=ssc_vol_name) + if self.configuration.netapp_disaggregated_platform: + capacity = self.client.get_cluster_capacity() + else: + capacity = self.client.get_flexvol_capacity( + flexvol_name=ssc_vol_name) size_total_gb = capacity['size-total'] / units.Gi pool['total_capacity_gb'] = na_utils.round_down(size_total_gb) @@ -557,7 +574,8 @@ class NetAppNVMeStorageLibrary( pool['provisioned_capacity_gb'] = na_utils.round_down( float(provisioned_cap) / units.Gi) - if self.using_cluster_credentials: + if (self.using_cluster_credentials and + not self.configuration.netapp_disaggregated_platform): dedupe_used = self.client.get_flexvol_dedupe_used_percent( ssc_vol_name) else: diff --git a/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py b/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py index 00384327729..aa2b1359c4a 100644 --- a/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py +++ b/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py @@ -119,6 +119,50 @@ class CapabilitiesLibrary(object): self.ssc = ssc + def update_ssc_asa(self, cluster_map): + """Periodically runs to update Storage Service Catalog data. + + The self.ssc attribute is updated with the following format. 
+ {<cluster_name>: {'pool_name': <cluster_name>}} + """ + ssc = {} + + for cluster_name, cluster_info in cluster_map.items(): + + ssc_cluster = {} + + # Add metadata passed from the driver, including pool name + ssc_cluster.update(cluster_info) + # Add ASA r2 default cluster attributes + ssc_cluster.update({ + 'netapp_thin_provisioned': True, + 'thick_provisioning_support': False, + 'thin_provisioning_support': True, + 'netapp_aggregate': None, + 'netapp_is_flexgroup': False, + 'netapp_dedup': True, + 'netapp_compression': True, + 'netapp_mirrored': False, + 'netapp_flexvol_encryption': False, + }) + + # ASA r2 is disaggregated; aggregate info is not available + disk_types = self.zapi_client.get_aggregate_disk_types() + aggr_disk_info = { + 'netapp_raid_type': None, + 'netapp_hybrid_aggregate': None, + 'netapp_disk_type': disk_types, + 'netapp_node_name': None + } + ssc_cluster.update(aggr_disk_info) + + # ASA r2 needs min QoS support for all nodes + ssc_cluster['netapp_qos_min_support'] = 'true' + + ssc[cluster_name] = ssc_cluster + LOG.debug("Storage Service Catalog: %s", ssc) + self.ssc = ssc + def _update_for_failover(self, zapi_client, flexvol_map): self.zapi_client = zapi_client diff --git a/cinder/volume/drivers/netapp/dataontap/utils/utils.py b/cinder/volume/drivers/netapp/dataontap/utils/utils.py index 01ab72bc7fc..1204cfbe782 100644 --- a/cinder/volume/drivers/netapp/dataontap/utils/utils.py +++ b/cinder/volume/drivers/netapp/dataontap/utils/utils.py @@ -20,6 +20,7 @@ import json import socket from oslo_config import cfg +from oslo_log import log as logging from cinder import exception from cinder.i18n import _ @@ -27,10 +28,13 @@ from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp.dataontap.client import client_cmode_rest +from cinder.volume.drivers.netapp.dataontap.client \ + import client_cmode_rest_asar2 from cinder.volume.drivers.netapp import options as na_opts from cinder.volume import volume_utils CONF = cfg.CONF +LOG = logging.getLogger(__name__) def get_backend_configuration(backend_name): @@ -67,6 +71,16 @@ def get_client_for_backend(backend_name, vserver_name=None, force_rest=False): """Get a cDOT API client for a specific backend.""" config = get_backend_configuration(backend_name) + + # Determine if the disaggregated (ASA r2) platform should be used, + # based on the netapp_disaggregated_platform config option + is_disaggregated = config.netapp_disaggregated_platform + + # ZAPI clients are not supported on the ASA r2 platform, + # so the REST client is always used for ASA r2.
+ if is_disaggregated: + force_rest = True + if config.netapp_use_legacy_client and not force_rest: client = client_cmode.Client( transport_type=config.netapp_transport_type, @@ -83,22 +97,42 @@ def get_client_for_backend(backend_name, vserver_name=None, force_rest=False): trace=volume_utils.TRACE_API, api_trace_pattern=config.netapp_api_trace_pattern) else: - client = client_cmode_rest.RestClient( - transport_type=config.netapp_transport_type, - ssl_cert_path=config.netapp_ssl_cert_path, - username=config.netapp_login, - password=config.netapp_password, - hostname=config.netapp_server_hostname, - private_key_file=config.netapp_private_key_file, - certificate_file=config.netapp_certificate_file, - ca_certificate_file=config.netapp_ca_certificate_file, - certificate_host_validation= - config.netapp_certificate_host_validation, - port=config.netapp_server_port, - vserver=vserver_name or config.netapp_vserver, - trace=volume_utils.TRACE_API, - api_trace_pattern=config.netapp_api_trace_pattern, - async_rest_timeout=config.netapp_async_rest_timeout) + # Check if ASA r2 disaggregated platform is enabled + if is_disaggregated: + client = client_cmode_rest_asar2.RestClientASAr2( + transport_type=config.netapp_transport_type, + ssl_cert_path=config.netapp_ssl_cert_path, + username=config.netapp_login, + password=config.netapp_password, + hostname=config.netapp_server_hostname, + private_key_file=config.netapp_private_key_file, + certificate_file=config.netapp_certificate_file, + ca_certificate_file=config.netapp_ca_certificate_file, + certificate_host_validation= + config.netapp_certificate_host_validation, + port=config.netapp_server_port, + vserver=vserver_name or config.netapp_vserver, + trace=volume_utils.TRACE_API, + api_trace_pattern=config.netapp_api_trace_pattern, + async_rest_timeout=config.netapp_async_rest_timeout, + is_disaggregated=is_disaggregated) + else: + client = client_cmode_rest.RestClient( + transport_type=config.netapp_transport_type, + ssl_cert_path=config.netapp_ssl_cert_path, + username=config.netapp_login, + password=config.netapp_password, + hostname=config.netapp_server_hostname, + private_key_file=config.netapp_private_key_file, + certificate_file=config.netapp_certificate_file, + ca_certificate_file=config.netapp_ca_certificate_file, + certificate_host_validation= + config.netapp_certificate_host_validation, + port=config.netapp_server_port, + vserver=vserver_name or config.netapp_vserver, + trace=volume_utils.TRACE_API, + api_trace_pattern=config.netapp_api_trace_pattern, + async_rest_timeout=config.netapp_async_rest_timeout) return client @@ -139,3 +173,39 @@ def build_ems_log_message_1(driver_name, app_version, vserver, ems_log['event-id'] = '1' ems_log['event-description'] = json.dumps(message) return ems_log + + +def get_cluster_to_pool_map(client): + """Get the cluster name for ASA r2 systems. + + For ASA r2 systems, instead of using flexvols, we use the cluster name + as the pool. The map is of the format suitable for seeding the storage + service catalog: {<cluster_name>: {'pool_name': <cluster_name>}} + + :param client: NetApp client instance to retrieve cluster information + :returns: Dictionary mapping cluster names to pool information + :raises: InvalidConfigurationValue if cluster is not disaggregated + """ + pools = {} + + cluster_info = client.get_cluster_info() or {} + + # Check if cluster info is missing or cluster is not disaggregated (ASA r2) + if not cluster_info.get('disaggregated', False): + LOG.error("Cluster is not a disaggregated (ASA r2) platform. 
") + raise exception.InvalidConfigurationValue( + option='disaggregated', + value=cluster_info.get('disaggregated', None) + ) + + cluster_name = cluster_info['name'] + LOG.debug("Found ASA r2 cluster: %s", cluster_name) + pools[cluster_name] = {'pool_name': cluster_name} + + msg_args = { + 'cluster': cluster_name, + } + msg = "ASA r2 cluster '%(cluster)s' added as pool" + LOG.debug(msg, msg_args) + + return pools diff --git a/cinder/volume/drivers/netapp/options.py b/cinder/volume/drivers/netapp/options.py index ca3959a85fa..d9489185635 100644 --- a/cinder/volume/drivers/netapp/options.py +++ b/cinder/volume/drivers/netapp/options.py @@ -193,7 +193,14 @@ netapp_cluster_opts = [ cfg.StrOpt('netapp_vserver', help=('This option specifies the virtual storage server ' '(Vserver) name on the storage cluster on which ' - 'provisioning of block storage volumes should occur.')), ] + 'provisioning of block storage volumes should occur.')), + cfg.BoolOpt('netapp_disaggregated_platform', + default=False, + help=('This option specifies whether to enable ASA r2 ' + 'workflows for NetApp disaggregated platform. ' + 'When set to True, the driver will use ASA r2 ' + 'specific client and workflows for interacting ' + 'with NetApp ONTAP.')), ] netapp_img_cache_opts = [ cfg.IntOpt('netapp_nfs_image_cache_cleanup_interval', diff --git a/releasenotes/notes/netapp-asar2-disaggregated-platform-support-a1b2c3d4e5f6g7h8.yaml b/releasenotes/notes/netapp-asar2-disaggregated-platform-support-a1b2c3d4e5f6g7h8.yaml new file mode 100644 index 00000000000..3dc88303d88 --- /dev/null +++ b/releasenotes/notes/netapp-asar2-disaggregated-platform-support-a1b2c3d4e5f6g7h8.yaml @@ -0,0 +1,46 @@ +--- +features: + - | + Added support for NetApp ASA r2 (All-Flash SAN Array r2) disaggregated + platform in the NetApp unified driver. This introduces a new configuration + option ``netapp_disaggregated_platform`` that enables ASA r2 specific + workflows and optimizations. + + The implementation includes: + + * New boolean configuration option ``netapp_disaggregated_platform`` + (default: False) to enable ASA r2 workflows + * New ``RestClientASAr2`` class that inherits from the standard REST client + * Override capability for ASA r2 specific functionality when needed + * Full backward compatibility with existing NetApp ONTAP configurations + + To enable ASA r2 support, set the following in your cinder configuration: + + .. code-block:: ini + + [backend_netapp_asar2] + volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver + netapp_storage_family = ontap_cluster + netapp_storage_protocol = iscsi + netapp_use_legacy_client = False + netapp_disaggregated_platform = True + # ... other NetApp configuration options + + When ``netapp_disaggregated_platform`` is set to ``True``, the driver will: + + * Apply ASA r2 specific optimizations and workflows + * Maintain full compatibility with existing volume operations + * Automatically fall back to standard ONTAP behavior when ASA r2 specific + methods are not available + + The ASA r2 client inherits all functionality from the standard REST client + by default, with the ability to override individual methods for ASA r2 + specific behavior. This design ensures that: + + * No existing functionality is lost + * New ASA r2 features will be added incrementally + * ASAr2 does not support ZAPIs. Hence all the APIs are accessed using REST. 
+ + This feature enables users to take advantage of NetApp's disaggregated + architecture and ASA r2-specific performance optimizations while + maintaining a familiar operational experience.