Browse Source

NetApp ONTAP: Add support for QoS minimums specs

Currently, the ONTAP Cinder driver only supports the max (ceiling)
throughput QoS specs.

This patch adds support for min (floor) throughput QoS policy specs
``minIOPS`` and ``minIOPSperGiB``, which can be set individually or
along with the max throughput specs.

Added a new driver-specific capability called `netapp_qos_min_support`.
It is used to filter the pools that have support for the QoS minimum
(floor) specs during the scheduling phase.

The feature is supported by ONTAP AFF with version equal to or greater
than 9.2 for iSCSI/FCP and 9.3 for NFS, ONTAP Select Premium with
SSD, and ONTAP C190 with version equal to or greater than 9.6.

Implements: blueprint netapp-ontap-min-throughput-qos
Implements: blueprint netapp-ontap-min-throughput-qos-capability

Co-Authored-By: Felipe Rodrigues <felipen@netapp.com>

Change-Id: Ic6579d459670fec4e5295e51c12fd807d980bb81
changes/49/770649/7
Lucio Seki 11 months ago
committed by Felipe Rodrigues
parent
commit
fb358e45fe
  1. 21
      cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py
  2. 191
      cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py
  3. 106
      cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py
  4. 32
      cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py
  5. 20
      cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py
  6. 13
      cinder/tests/unit/volume/drivers/netapp/dataontap/utils/fakes.py
  7. 37
      cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py
  8. 4
      cinder/tests/unit/volume/drivers/netapp/fakes.py
  9. 90
      cinder/tests/unit/volume/drivers/netapp/test_utils.py
  10. 5
      cinder/volume/drivers/netapp/dataontap/block_cmode.py
  11. 129
      cinder/volume/drivers/netapp/dataontap/client/client_cmode.py
  12. 5
      cinder/volume/drivers/netapp/dataontap/nfs_cmode.py
  13. 25
      cinder/volume/drivers/netapp/dataontap/utils/capabilities.py
  14. 127
      cinder/volume/drivers/netapp/utils.py
  15. 13
      releasenotes/notes/bp-netapp-ontap-min-throughput-qos-cd3812df5c7da8fd.yaml

21
cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py

@ -618,17 +618,25 @@ AGGR_GET_ITER_SSC_RESPONSE = etree.XML("""
<raid-type>%(raid)s</raid-type>
<is-hybrid>true</is-hybrid>
</aggr-raid-attributes>
<aggr-ownership-attributes>
<home-name>%(node)s</home-name>
</aggr-ownership-attributes>
<aggregate-name>%(aggr)s</aggregate-name>
</aggr-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'aggr': VOLUME_AGGREGATE_NAME, 'raid': AGGREGATE_RAID_TYPE})
""" % {
'aggr': VOLUME_AGGREGATE_NAME,
'raid': AGGREGATE_RAID_TYPE,
'node': NODE_NAME,
})
AGGR_INFO_SSC = {
'name': VOLUME_AGGREGATE_NAME,
'raid-type': AGGREGATE_RAID_TYPE,
'is-hybrid': True,
'node-name': NODE_NAME,
}
AGGR_SIZE_TOTAL = 107374182400
@ -1309,14 +1317,3 @@ VSERVER_DATA_LIST_RESPONSE = etree.XML("""
<num-records>1</num-records>
</results>
""" % {'vserver': VSERVER_NAME})
SYSTEM_NODE_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<node-details-info>
<node>%s</node>
</node-details-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % NODE_NAME)

191
cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py

@ -53,6 +53,12 @@ class NetAppCmodeClientTestCase(test.TestCase):
super(NetAppCmodeClientTestCase, self).setUp()
self.mock_object(client_cmode.Client, '_init_ssh_client')
# store the original reference so we can call it later in
# test__get_cluster_nodes_info
self.original_get_cluster_nodes_info = (
client_cmode.Client._get_cluster_nodes_info)
self.mock_object(client_cmode.Client, '_get_cluster_nodes_info',
return_value=fake.HYBRID_SYSTEM_NODES_INFO)
self.mock_object(client_cmode.Client, 'get_ontap_version',
return_value='9.6')
with mock.patch.object(client_cmode.Client,
@ -216,6 +222,25 @@ class NetAppCmodeClientTestCase(test.TestCase):
self.client.send_iter_request,
'storage-disk-get-iter')
@ddt.data((fake.AFF_SYSTEM_NODE_GET_ITER_RESPONSE,
fake.AFF_SYSTEM_NODES_INFO),
(fake.FAS_SYSTEM_NODE_GET_ITER_RESPONSE,
fake.FAS_SYSTEM_NODES_INFO),
(fake_client.NO_RECORDS_RESPONSE, []),
(fake.HYBRID_SYSTEM_NODE_GET_ITER_RESPONSE,
fake.HYBRID_SYSTEM_NODES_INFO))
@ddt.unpack
def test__get_cluster_nodes_info(self, response, expected):
client_cmode.Client._get_cluster_nodes_info = (
self.original_get_cluster_nodes_info)
nodes_response = netapp_api.NaElement(response)
self.mock_object(client_cmode.Client, 'send_iter_request',
return_value=nodes_response)
result = self.client._get_cluster_nodes_info()
self.assertEqual(expected, result)
def test_list_vservers(self):
api_response = netapp_api.NaElement(
@ -829,44 +854,114 @@ class NetAppCmodeClientTestCase(test.TestCase):
def test_provision_qos_policy_group_no_qos_policy_group_info(self):
self.client.provision_qos_policy_group(qos_policy_group_info=None)
self.client.provision_qos_policy_group(qos_policy_group_info=None,
qos_min_support=True)
self.assertEqual(0, self.connection.qos_policy_group_create.call_count)
def test_provision_qos_policy_group_legacy_qos_policy_group_info(self):
self.client.provision_qos_policy_group(
qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_LEGACY)
qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_LEGACY,
qos_min_support=True)
self.assertEqual(0, self.connection.qos_policy_group_create.call_count)
def test_provision_qos_policy_group_with_qos_spec_create_with_min(self):
self.mock_object(self.client,
'qos_policy_group_exists',
return_value=False)
mock_qos_policy_group_create = self.mock_object(
self.client, 'qos_policy_group_create')
mock_qos_policy_group_modify = self.mock_object(
self.client, 'qos_policy_group_modify')
self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO,
True)
mock_qos_policy_group_create.assert_has_calls([
mock.call({
'policy_name': fake.QOS_POLICY_GROUP_NAME,
'min_throughput': fake.MIN_IOPS,
'max_throughput': fake.MAX_IOPS,
})])
mock_qos_policy_group_modify.assert_not_called()
def test_provision_qos_policy_group_with_qos_spec_create_unsupported(self):
mock_qos_policy_group_create = self.mock_object(
self.client, 'qos_policy_group_create')
mock_qos_policy_group_modify = self.mock_object(
self.client, 'qos_policy_group_modify')
self.assertRaises(
netapp_utils.NetAppDriverException,
self.client.provision_qos_policy_group,
fake.QOS_POLICY_GROUP_INFO, False)
mock_qos_policy_group_create.assert_not_called()
mock_qos_policy_group_modify.assert_not_called()
def test_provision_qos_policy_group_with_qos_spec_create(self):
self.mock_object(self.client,
'qos_policy_group_exists',
return_value=False)
self.mock_object(self.client, 'qos_policy_group_create')
self.mock_object(self.client, 'qos_policy_group_modify')
mock_qos_policy_group_create = self.mock_object(
self.client, 'qos_policy_group_create')
mock_qos_policy_group_modify = self.mock_object(
self.client, 'qos_policy_group_modify')
self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO_MAX,
True)
self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO)
mock_qos_policy_group_create.assert_has_calls([
mock.call({
'policy_name': fake.QOS_POLICY_GROUP_NAME,
'max_throughput': fake.MAX_THROUGHPUT,
})])
mock_qos_policy_group_modify.assert_not_called()
self.client.qos_policy_group_create.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT)])
self.assertFalse(self.client.qos_policy_group_modify.called)
def test_provision_qos_policy_group_with_qos_spec_modify_with_min(self):
self.mock_object(self.client,
'qos_policy_group_exists',
return_value=True)
mock_qos_policy_group_create = self.mock_object(
self.client, 'qos_policy_group_create')
mock_qos_policy_group_modify = self.mock_object(
self.client, 'qos_policy_group_modify')
self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO,
True)
mock_qos_policy_group_create.assert_not_called()
mock_qos_policy_group_modify.assert_has_calls([
mock.call({
'policy_name': fake.QOS_POLICY_GROUP_NAME,
'min_throughput': fake.MIN_IOPS,
'max_throughput': fake.MAX_IOPS,
})])
def test_provision_qos_policy_group_with_qos_spec_modify(self):
self.mock_object(self.client,
'qos_policy_group_exists',
return_value=True)
self.mock_object(self.client, 'qos_policy_group_create')
self.mock_object(self.client, 'qos_policy_group_modify')
mock_qos_policy_group_create = self.mock_object(
self.client, 'qos_policy_group_create')
mock_qos_policy_group_modify = self.mock_object(
self.client, 'qos_policy_group_modify')
self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO)
self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO_MAX,
True)
self.assertFalse(self.client.qos_policy_group_create.called)
self.client.qos_policy_group_modify.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT)])
mock_qos_policy_group_create.assert_not_called()
mock_qos_policy_group_modify.assert_has_calls([
mock.call({
'policy_name': fake.QOS_POLICY_GROUP_NAME,
'max_throughput': fake.MAX_THROUGHPUT,
})])
def test_qos_policy_group_exists(self):
@ -906,12 +1001,16 @@ class NetAppCmodeClientTestCase(test.TestCase):
api_args = {
'policy-group': fake.QOS_POLICY_GROUP_NAME,
'min-throughput': '0',
'max-throughput': fake.MAX_THROUGHPUT,
'vserver': self.vserver,
}
self.client.qos_policy_group_create(
fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT)
self.client.qos_policy_group_create({
'policy_name': fake.QOS_POLICY_GROUP_NAME,
'min_throughput': '0',
'max_throughput': fake.MAX_THROUGHPUT,
})
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-create', api_args, False)])
@ -920,11 +1019,15 @@ class NetAppCmodeClientTestCase(test.TestCase):
api_args = {
'policy-group': fake.QOS_POLICY_GROUP_NAME,
'min-throughput': '0',
'max-throughput': fake.MAX_THROUGHPUT,
}
self.client.qos_policy_group_modify(
fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT)
self.client.qos_policy_group_modify({
'policy_name': fake.QOS_POLICY_GROUP_NAME,
'min_throughput': '0',
'max_throughput': fake.MAX_THROUGHPUT,
})
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-modify', api_args, False)])
@ -988,7 +1091,7 @@ class NetAppCmodeClientTestCase(test.TestCase):
new_name = 'deleted_cinder_%s' % fake.QOS_POLICY_GROUP_NAME
self.client.mark_qos_policy_group_for_deletion(
qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO)
qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_MAX)
mock_rename.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_NAME, new_name)])
@ -1005,7 +1108,7 @@ class NetAppCmodeClientTestCase(test.TestCase):
new_name = 'deleted_cinder_%s' % fake.QOS_POLICY_GROUP_NAME
self.client.mark_qos_policy_group_for_deletion(
qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO)
qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_MAX)
mock_rename.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_NAME, new_name)])
@ -2187,17 +2290,20 @@ class NetAppCmodeClientTestCase(test.TestCase):
'raid-type': None,
'is-hybrid': None,
},
'aggr-ownership-attributes': {
'home-name': None,
},
},
}
self.client._get_aggregates.assert_has_calls([
mock.call(
aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME],
desired_attributes=desired_attributes)])
expected = {
'name': fake_client.VOLUME_AGGREGATE_NAME,
'raid-type': 'raid_dp',
'is-hybrid': True,
'node-name': fake_client.NODE_NAME,
}
self.assertEqual(expected, result)
@ -2233,29 +2339,6 @@ class NetAppCmodeClientTestCase(test.TestCase):
self.assertEqual({}, result)
def test_list_cluster_nodes(self):
api_response = netapp_api.NaElement(
fake_client.SYSTEM_NODE_GET_ITER_RESPONSE)
self.mock_object(self.client.connection,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.list_cluster_nodes()
self.assertListEqual([fake_client.NODE_NAME], result)
def test_list_cluster_nodes_not_found(self):
api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
self.mock_object(self.client.connection,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.list_cluster_nodes()
self.assertListEqual([], result)
@ddt.data({'types': {'FCAL'}, 'expected': ['FCAL']},
{'types': {'SATA', 'SSD'}, 'expected': ['SATA', 'SSD']},)
@ddt.unpack
@ -3591,3 +3674,23 @@ class NetAppCmodeClientTestCase(test.TestCase):
self.client.connection.send_request.assert_called_once_with(
'snapshot-get-iter', api_args)
self.assertListEqual(expected, result)
@ddt.data(True, False)
def test_is_qos_min_supported(self, supported):
self.client.features.add_feature('test', supported=supported)
mock_name = self.mock_object(netapp_utils,
'qos_min_feature_name',
return_value='test')
result = self.client.is_qos_min_supported(True, 'node')
mock_name.assert_called_once_with(True, 'node')
self.assertEqual(result, supported)
def test_is_qos_min_supported_invalid_node(self):
mock_name = self.mock_object(netapp_utils,
'qos_min_feature_name',
return_value='invalid_feature')
result = self.client.is_qos_min_supported(True, 'node')
mock_name.assert_called_once_with(True, 'node')
self.assertFalse(result)

106
cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py

@ -260,6 +260,8 @@ IGROUP1 = {'initiator-group-os-type': 'linux',
QOS_SPECS = {}
EXTRA_SPECS = {}
MAX_THROUGHPUT = '21734278B/s'
MIN_IOPS = '256IOPS'
MAX_IOPS = '512IOPS'
QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name'
QOS_POLICY_GROUP_INFO_LEGACY = {
@ -268,11 +270,18 @@ QOS_POLICY_GROUP_INFO_LEGACY = {
}
QOS_POLICY_GROUP_SPEC = {
'min_throughput': MIN_IOPS,
'max_throughput': MAX_IOPS,
'policy_name': QOS_POLICY_GROUP_NAME,
}
QOS_POLICY_GROUP_SPEC_MAX = {
'max_throughput': MAX_THROUGHPUT,
'policy_name': QOS_POLICY_GROUP_NAME,
}
QOS_POLICY_GROUP_INFO = {'legacy': None, 'spec': QOS_POLICY_GROUP_SPEC}
QOS_POLICY_GROUP_INFO_MAX = {'legacy': None, 'spec': QOS_POLICY_GROUP_SPEC_MAX}
CLONE_SOURCE_NAME = 'fake_clone_source_name'
CLONE_SOURCE_ID = 'fake_clone_source_id'
@ -421,6 +430,103 @@ CG_VOLUME_SNAPSHOT = {
'volume_id': CG_VOLUME_ID,
}
AFF_SYSTEM_NODE_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<node-details-info>
<node-model>AFFA400</node-model>
<node>aff-node1</node>
<is-all-flash-optimized>true</is-all-flash-optimized>
<is-all-flash-select-optimized>false</is-all-flash-select-optimized>
</node-details-info>
<node-details-info>
<node-model>AFFA400</node-model>
<node>aff-node2</node>
<is-all-flash-optimized>true</is-all-flash-optimized>
<is-all-flash-select-optimized>false</is-all-flash-select-optimized>
</node-details-info>
</attributes-list>
<num-records>2</num-records>
</results>
""")
FAS_SYSTEM_NODE_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<node-details-info>
<node-model>FAS2554</node-model>
<node>fas-node1</node>
<is-all-flash-optimized>false</is-all-flash-optimized>
<is-all-flash-select-optimized>false</is-all-flash-select-optimized>
</node-details-info>
<node-details-info>
<node-model>FAS2554</node-model>
<node>fas-node2</node>
<is-all-flash-optimized>false</is-all-flash-optimized>
<is-all-flash-select-optimized>false</is-all-flash-select-optimized>
</node-details-info>
</attributes-list>
<num-records>2</num-records>
</results>
""")
HYBRID_SYSTEM_NODE_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<node-details-info>
<node>select-node</node>
<is-all-flash-optimized>false</is-all-flash-optimized>
<is-all-flash-select-optimized>true</is-all-flash-select-optimized>
<node-model>FDvM300</node-model>
</node-details-info>
<node-details-info>
<node>c190-node</node>
<is-all-flash-optimized>true</is-all-flash-optimized>
<is-all-flash-select-optimized>false</is-all-flash-select-optimized>
<node-model>AFF-C190</node-model>
</node-details-info>
</attributes-list>
<num-records>2</num-records>
</results>
""")
AFF_NODE = {
'model': 'AFFA400',
'is_all_flash': True,
'is_all_flash_select': False,
}
AFF_NODE_1 = AFF_NODE.copy()
AFF_NODE_1['name'] = 'aff-node1'
AFF_NODE_2 = AFF_NODE.copy()
AFF_NODE_2['name'] = 'aff-node2'
FAS_NODE = {
'model': 'FAS2554',
'is_all_flash': False,
'is_all_flash_select': False,
}
FAS_NODE_1 = FAS_NODE.copy()
FAS_NODE_1['name'] = 'fas-node1'
FAS_NODE_2 = FAS_NODE.copy()
FAS_NODE_2['name'] = 'fas-node2'
SELECT_NODE = {
'model': 'FDvM300',
'is_all_flash': False,
'is_all_flash_select': True,
'name': 'select-node',
}
C190_NODE = {
'model': 'AFF-C190',
'is_all_flash': True,
'is_all_flash_select': False,
'name': 'c190-node',
}
AFF_SYSTEM_NODES_INFO = [AFF_NODE_1, AFF_NODE_2]
FAS_SYSTEM_NODES_INFO = [FAS_NODE_1, FAS_NODE_2]
HYBRID_SYSTEM_NODES_INFO = [SELECT_NODE, C190_NODE]
SYSTEM_GET_VERSION_RESPONSE = etree.XML("""
<results status="passed">
<build-timestamp>1395426307</build-timestamp>

32
cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py

@ -31,6 +31,7 @@ from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap import block_cmode
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap.utils import capabilities
from cinder.volume.drivers.netapp.dataontap.utils import data_motion
@ -82,6 +83,11 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
config.netapp_api_trace_pattern = 'fake_regex'
return config
@ddt.data(fake.AFF_SYSTEM_NODES_INFO,
fake.FAS_SYSTEM_NODES_INFO,
fake.HYBRID_SYSTEM_NODES_INFO)
@mock.patch.object(client_base.Client, 'get_ontap_version',
return_value='9.6')
@mock.patch.object(perf_cmode, 'PerformanceCmodeLibrary', mock.Mock())
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.MagicMock(return_value=(1, 20)))
@ -91,11 +97,14 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
'check_api_permissions')
@mock.patch.object(na_utils, 'check_flags')
@mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup')
@mock.patch.object(client_base.Client, 'get_ontap_version',
mock.MagicMock(return_value='9.6'))
def test_do_setup(self, super_do_setup, mock_check_flags,
mock_check_api_permissions, mock_cluster_user_supported):
def test_do_setup(self, cluster_nodes_info,
super_do_setup, mock_check_flags,
mock_check_api_permissions, mock_cluster_user_supported,
mock_get_ontap_version):
self.mock_object(client_base.Client, '_init_ssh_client')
mock_get_cluster_nodes_info = self.mock_object(
client_cmode.Client, '_get_cluster_nodes_info',
return_value=cluster_nodes_info)
self.mock_object(
dot_utils, 'get_backend_configuration',
return_value=self.get_config_cmode())
@ -107,6 +116,8 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.assertEqual(1, mock_check_flags.call_count)
mock_check_api_permissions.assert_called_once_with()
mock_cluster_user_supported.assert_called_once_with()
mock_get_ontap_version.assert_called_once_with(cached=False)
mock_get_cluster_nodes_info.assert_called_once_with()
def test_check_for_setup_error(self):
super_check_for_setup_error = self.mock_object(
@ -559,13 +570,21 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
return_value=fake.QOS_POLICY_GROUP_INFO)
self.mock_object(self.zapi_client, 'provision_qos_policy_group')
mock_is_qos_min_supported = self.mock_object(self.library.ssc_library,
'is_qos_min_supported',
return_value=True)
mock_extract_host = self.mock_object(volume_utils, 'extract_host',
return_value=fake.POOL_NAME)
result = self.library._setup_qos_for_volume(fake.VOLUME,
fake.EXTRA_SPECS)
self.assertEqual(fake.QOS_POLICY_GROUP_INFO, result)
self.zapi_client.provision_qos_policy_group.\
assert_called_once_with(fake.QOS_POLICY_GROUP_INFO)
assert_called_once_with(fake.QOS_POLICY_GROUP_INFO, True)
mock_is_qos_min_supported.assert_called_once_with(fake.POOL_NAME)
mock_extract_host.assert_called_once_with(fake.VOLUME['host'],
level='pool')
def test_setup_qos_for_volume_exception_path(self):
self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
@ -653,6 +672,9 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(na_utils, 'log_extra_spec_warnings')
self.library._check_volume_type_for_lun = mock.Mock()
self.library._setup_qos_for_volume = mock.Mock()
self.mock_object(na_utils, 'get_qos_policy_group_name_from_info',
return_value=None)
self.library._add_lun_to_table = mock.Mock()
self.zapi_client.move_lun = mock.Mock()

20
cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py

@ -627,15 +627,23 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug')
mock_cleanup = self.mock_object(self.driver,
'_cleanup_volume_on_failure')
mock_is_qos_min_supported = self.mock_object(self.driver.ssc_library,
'is_qos_min_supported',
return_value=True)
mock_extract_host = self.mock_object(volume_utils, 'extract_host',
return_value=fake.POOL_NAME)
self.driver._do_qos_for_volume(fake.NFS_VOLUME, fake.EXTRA_SPECS)
mock_get_info.assert_has_calls([
mock.call(fake.NFS_VOLUME, fake.EXTRA_SPECS)])
mock_provision_qos.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_INFO)])
mock.call(fake.QOS_POLICY_GROUP_INFO, True)])
mock_set_policy.assert_has_calls([
mock.call(fake.NFS_VOLUME, fake.QOS_POLICY_GROUP_INFO, False)])
mock_is_qos_min_supported.assert_called_once_with(fake.POOL_NAME)
mock_extract_host.assert_called_once_with(fake.NFS_VOLUME['host'],
level='pool')
self.assertEqual(0, mock_error_log.call_count)
self.assertEqual(0, mock_debug_log.call_count)
self.assertEqual(0, mock_cleanup.call_count)
@ -652,6 +660,11 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug')
mock_cleanup = self.mock_object(self.driver,
'_cleanup_volume_on_failure')
mock_is_qos_min_supported = self.mock_object(self.driver.ssc_library,
'is_qos_min_supported',
return_value=True)
mock_extract_host = self.mock_object(volume_utils, 'extract_host',
return_value=fake.POOL_NAME)
self.assertRaises(netapp_api.NaApiError,
self.driver._do_qos_for_volume,
@ -661,9 +674,12 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mock_get_info.assert_has_calls([
mock.call(fake.NFS_VOLUME, fake.EXTRA_SPECS)])
mock_provision_qos.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_INFO)])
mock.call(fake.QOS_POLICY_GROUP_INFO, True)])
mock_set_policy.assert_has_calls([
mock.call(fake.NFS_VOLUME, fake.QOS_POLICY_GROUP_INFO, False)])
mock_is_qos_min_supported.assert_called_once_with(fake.POOL_NAME)
mock_extract_host.assert_called_once_with(fake.NFS_VOLUME['host'],
level='pool')
self.assertEqual(1, mock_error_log.call_count)
self.assertEqual(1, mock_debug_log.call_count)
mock_cleanup.assert_has_calls([

13
cinder/tests/unit/volume/drivers/netapp/dataontap/utils/fakes.py

@ -42,6 +42,7 @@ SSC = {
'netapp_disk_type': ['SSD'],
'netapp_hybrid_aggregate': 'false',
'netapp_flexvol_encryption': 'true',
'netapp_qos_min_support': 'true',
'pool_name': 'volume1',
},
'volume2': {
@ -56,6 +57,7 @@ SSC = {
'netapp_disk_type': ['FCAL', 'SSD'],
'netapp_hybrid_aggregate': 'true',
'netapp_flexvol_encryption': 'false',
'netapp_qos_min_support': 'false',
'pool_name': 'volume2',
},
}
@ -95,6 +97,15 @@ SSC_ENCRYPTION_INFO = {
},
}
SSC_QOS_MIN_INFO = {
'volume1': {
'netapp_qos_min_support': 'true',
},
'volume2': {
'netapp_qos_min_support': 'false',
},
}
SSC_MIRROR_INFO = {
'volume1': {
'netapp_mirrored': 'false',
@ -109,11 +120,13 @@ SSC_AGGREGATE_INFO = {
'netapp_disk_type': ['SSD'],
'netapp_raid_type': 'raid_dp',
'netapp_hybrid_aggregate': 'false',
'netapp_node_name': 'node1',
},
'volume2': {
'netapp_disk_type': ['FCAL', 'SSD'],
'netapp_raid_type': 'raid_dp',
'netapp_hybrid_aggregate': 'true',
'netapp_node_name': 'node2',
},
}

37
cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py

@ -117,6 +117,18 @@ class CapabilitiesLibraryTestCase(test.TestCase):
six.assertCountEqual(self, list(fake.SSC_AGGREGATES), result)
def test_is_qos_min_supported(self):
ssc_pool = fake.SSC.get(fake.SSC_VOLUMES[0])
is_qos_min = ssc_pool['netapp_qos_min_support'] == 'true'
result = self.ssc_library.is_qos_min_supported(ssc_pool['pool_name'])
self.assertEqual(is_qos_min, result)
def test_is_qos_min_supported_not_found(self):
result = self.ssc_library.is_qos_min_supported('invalid_pool')
self.assertFalse(result)
def test_update_ssc(self):
mock_get_ssc_flexvol_info = self.mock_object(
@ -139,6 +151,11 @@ class CapabilitiesLibraryTestCase(test.TestCase):
self.ssc_library, '_get_ssc_encryption_info',
side_effect=[fake.SSC_ENCRYPTION_INFO['volume1'],
fake.SSC_ENCRYPTION_INFO['volume2']])
mock_get_ssc_qos_min_info = self.mock_object(
self.ssc_library, '_get_ssc_qos_min_info',
side_effect=[fake.SSC_QOS_MIN_INFO['volume1'],
fake.SSC_QOS_MIN_INFO['volume2']])
ordered_ssc = collections.OrderedDict()
ordered_ssc['volume1'] = fake.SSC_VOLUME_MAP['volume1']
ordered_ssc['volume2'] = fake.SSC_VOLUME_MAP['volume2']
@ -157,6 +174,8 @@ class CapabilitiesLibraryTestCase(test.TestCase):
mock.call('aggr1'), mock.call('aggr2')])
mock_get_ssc_encryption_info.assert_has_calls([
mock.call('volume1'), mock.call('volume2')])
mock_get_ssc_qos_min_info.assert_has_calls([
mock.call('node1'), mock.call('node2')])
def test__update_for_failover(self):
self.mock_object(self.ssc_library, 'update_ssc')
@ -346,6 +365,7 @@ class CapabilitiesLibraryTestCase(test.TestCase):
'netapp_disk_type': None,
'netapp_raid_type': None,
'netapp_hybrid_aggregate': None,
'netapp_node_name': None,
}
self.zapi_client.get_aggregate.assert_not_called()
self.zapi_client.get_aggregate_disk_types.assert_not_called()
@ -354,6 +374,7 @@ class CapabilitiesLibraryTestCase(test.TestCase):
'netapp_disk_type': fake_client.AGGREGATE_DISK_TYPES,
'netapp_raid_type': fake_client.AGGREGATE_RAID_TYPE,
'netapp_hybrid_aggregate': 'true',
'netapp_node_name': fake_client.NODE_NAME,
}
self.zapi_client.get_aggregate.assert_called_once_with(
fake_client.VOLUME_AGGREGATE_NAME)
@ -377,6 +398,7 @@ class CapabilitiesLibraryTestCase(test.TestCase):
'netapp_disk_type': None,
'netapp_raid_type': None,
'netapp_hybrid_aggregate': None,
'netapp_node_name': None,
}
self.assertEqual(expected, result)
@ -506,3 +528,18 @@ class CapabilitiesLibraryTestCase(test.TestCase):
self.assertFalse(self.ssc_library.cluster_user_supported())
else:
self.assertTrue(self.ssc_library.cluster_user_supported())
def test_get_ssc_qos_min_info(self):
self.mock_object(
self.ssc_library.zapi_client, 'is_qos_min_supported',
return_value=True)
result = self.ssc_library._get_ssc_qos_min_info('node')
expected = {
'netapp_qos_min_support': 'true',
}
self.assertEqual(expected, result)
self.zapi_client.is_qos_min_supported.assert_called_once_with(False,
'node')

4
cinder/tests/unit/volume/drivers/netapp/fakes.py

@ -94,7 +94,7 @@ QOS_SPECS = {}
EXTRA_SPECS = {}
MAX_THROUGHPUT = '21734278B/s'
MAX_THROUGHPUT_BPS = '21734278B/s'
QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name'
LEGACY_EXTRA_SPECS = {'netapp:qos_policy_group': QOS_POLICY_GROUP_NAME}
@ -103,7 +103,7 @@ LEGACY_QOS = {
}
QOS_POLICY_GROUP_SPEC = {
'max_throughput': MAX_THROUGHPUT,
'max_throughput': MAX_THROUGHPUT_BPS,
'policy_name': 'openstack-%s' % VOLUME_ID,
}

90
cinder/tests/unit/volume/drivers/netapp/test_utils.py

@ -245,25 +245,46 @@ class NetAppDriverUtilsTestCase(test.TestCase):
na_utils.validate_qos_spec(qos_spec)
def test_validate_qos_spec_keys_weirdly_cased(self):
qos_spec = {'mAxIopS': 33000}
qos_spec = {'mAxIopS': 33000, 'mInIopS': 0}
# Just return without raising an exception.
na_utils.validate_qos_spec(qos_spec)
def test_validate_qos_spec_bad_key(self):
def test_validate_qos_spec_bad_key_max_flops(self):
qos_spec = {'maxFlops': 33000}
self.assertRaises(exception.Invalid,
na_utils.validate_qos_spec,
qos_spec)
def test_validate_qos_spec_bad_key_combination(self):
def test_validate_qos_spec_bad_key_min_bps(self):
qos_spec = {'minBps': 33000}
self.assertRaises(exception.Invalid,
na_utils.validate_qos_spec,
qos_spec)
def test_validate_qos_spec_bad_key_min_bps_per_gib(self):
qos_spec = {'minBPSperGiB': 33000}
self.assertRaises(exception.Invalid,
na_utils.validate_qos_spec,
qos_spec)
def test_validate_qos_spec_bad_key_combination_max_iops_max_bps(self):
qos_spec = {'maxIOPS': 33000, 'maxBPS': 10000000}
self.assertRaises(exception.Invalid,
na_utils.validate_qos_spec,
qos_spec)
def test_validate_qos_spec_bad_key_combination_miniops_miniopspergib(self):
qos_spec = {'minIOPS': 33000, 'minIOPSperGiB': 10000000}
self.assertRaises(exception.Invalid,
na_utils.validate_qos_spec,
qos_spec)
def test_map_qos_spec_none(self):
qos_spec = None
@ -271,6 +292,30 @@ class NetAppDriverUtilsTestCase(test.TestCase):
self.assertIsNone(result)
def test_map_qos_spec_bad_key_combination_miniops_maxbpspergib(self):
qos_spec = {'minIOPS': 33000, 'maxBPSperGiB': 10000000}
self.assertRaises(exception.Invalid,
na_utils.map_qos_spec,
qos_spec,
fake.VOLUME)
def test_map_qos_spec_bad_key_combination_min_iops_max_bps(self):
qos_spec = {'minIOPS': 33000, 'maxBPS': 10000000}
self.assertRaises(exception.Invalid,
na_utils.map_qos_spec,
qos_spec,
fake.VOLUME)
def test_map_qos_spec_miniops_greater_than_maxiops(self):
qos_spec = {'minIOPS': 33001, 'maxIOPS': 33000}
self.assertRaises(exception.Invalid,
na_utils.map_qos_spec,
qos_spec,
fake.VOLUME)
def test_map_qos_spec_maxiops(self):
qos_spec = {'maxIOPs': 33000}
mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name')
@ -297,6 +342,20 @@ class NetAppDriverUtilsTestCase(test.TestCase):
self.assertEqual(expected, result)
def test_map_qos_spec_miniopspergib_maxiopspergib(self):
qos_spec = {'minIOPSperGiB': 1000, 'maxIOPSperGiB': 1000}
mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name')
mock_get_name.return_value = 'fake_qos_policy'
expected = {
'policy_name': 'fake_qos_policy',
'min_throughput': '42000iops',
'max_throughput': '42000iops',
}
result = na_utils.map_qos_spec(qos_spec, fake.VOLUME)
self.assertEqual(expected, result)
def test_map_qos_spec_maxbps(self):
qos_spec = {'maxBPS': 1000000}
mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name')
@ -329,7 +388,20 @@ class NetAppDriverUtilsTestCase(test.TestCase):
mock_get_name.return_value = 'fake_qos_policy'
expected = {
'policy_name': 'fake_qos_policy',
'max_throughput': None,
}
result = na_utils.map_qos_spec(qos_spec, fake.VOLUME)
self.assertEqual(expected, result)
def test_map_qos_spec_miniops_maxiops(self):
qos_spec = {'minIOPs': 25000, 'maxIOPs': 33000}
mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name')
mock_get_name.return_value = 'fake_qos_policy'
expected = {
'policy_name': 'fake_qos_policy',
'min_throughput': '25000iops',
'max_throughput': '33000iops',
}
result = na_utils.map_qos_spec(qos_spec, fake.VOLUME)
@ -586,6 +658,16 @@ class NetAppDriverUtilsTestCase(test.TestCase):
na_utils.get_export_host_junction_path,
share)
@ddt.data(True, False)
def test_qos_min_feature_name(self, is_nfs):
name = 'node'
feature_name = na_utils.qos_min_feature_name(is_nfs, name)
if is_nfs:
self.assertEqual('QOS_MIN_NFS_' + name, feature_name)
else:
self.assertEqual('QOS_MIN_BLOCK_' + name, feature_name)
class OpenStackInfoTestCase(test.TestCase):

5
cinder/volume/drivers/netapp/dataontap/block_cmode.py

@ -402,7 +402,10 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
msg = _('Invalid QoS specification detected while getting QoS '
'policy for volume %s') % volume['id']
raise exception.VolumeBackendAPIException(data=msg)
self.zapi_client.provision_qos_policy_group(qos_policy_group_info)
pool = volume_utils.extract_host(volume['host'], level='pool')
qos_min_support = self.ssc_library.is_qos_min_supported(pool)
self.zapi_client.provision_qos_policy_group(qos_policy_group_info,
qos_min_support)
return qos_policy_group_info
def _get_volume_model_update(self, volume):

129
cinder/volume/drivers/netapp/dataontap/client/client_cmode.py

@ -34,6 +34,8 @@ from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
DEFAULT_MAX_PAGE_LENGTH = 50
ONTAP_SELECT_MODEL = 'FDvM300'
ONTAP_C190 = 'C190'
@six.add_metaclass(volume_utils.TraceWrapperMetaclass)
@ -62,6 +64,26 @@ class Client(client_base.Client):
ontapi_1_30 = ontapi_version >= (1, 30)
ontapi_1_100 = ontapi_version >= (1, 100)
ontapi_1_1xx = (1, 100) <= ontapi_version < (1, 200)
ontapi_1_60 = ontapi_version >= (1, 160)
nodes_info = self._get_cluster_nodes_info()
for node in nodes_info:
qos_min_block = False
qos_min_nfs = False
if node['model'] == ONTAP_SELECT_MODEL:
qos_min_block = node['is_all_flash_select'] and ontapi_1_60
qos_min_nfs = qos_min_block
elif ONTAP_C190 in node['model']:
qos_min_block = node['is_all_flash'] and ontapi_1_60
qos_min_nfs = qos_min_block
else:
qos_min_block = node['is_all_flash'] and ontapi_1_20
qos_min_nfs = node['is_all_flash'] and ontapi_1_30
qos_name = na_utils.qos_min_feature_name(True, node['name'])
self.features.add_feature(qos_name, supported=qos_min_nfs)
qos_name = na_utils.qos_min_feature_name(False, node['name'])
self.features.add_feature(qos_name, supported=qos_min_block)
self.features.add_feature('SNAPMIRROR_V2', supported=ontapi_1_20)
self.features.add_feature('USER_CAPABILITY_LIST',
@ -147,6 +169,45 @@ class Client(client_base.Client):
result.get_child_by_name('next-tag').set_content('')
return result
def _get_cluster_nodes_info(self):
    """Return a list of dicts describing each node in the cluster.

    Each dict holds the node 'name', 'model' and the
    'is_all_flash'/'is_all_flash_select' optimization flags, which are
    later used to decide QoS minimum (floor) support per node.
    Returns an empty list when the query cannot be made (e.g. the
    credentials are vserver scoped rather than cluster scoped).
    """
    api_args = {
        'desired-attributes': {
            'node-details-info': {
                'node': None,
                'node-model': None,
                'is-all-flash-select-optimized': None,
                'is-all-flash-optimized': None,
            }
        }
    }
    nodes = []
    try:
        # Tunneling is disabled because node details live at cluster
        # scope, not inside the vserver context.
        result = self.send_iter_request('system-node-get-iter', api_args,
                                        enable_tunneling=False)
        # 'attributes-list' is absent when there are no records; fall
        # back to an empty element so the loop simply yields nothing.
        system_node_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        for system_node in system_node_list.get_children():
            node = {
                'model': system_node.get_child_content('node-model'),
                'name': system_node.get_child_content('node'),
                # ZAPI booleans come back as the strings 'true'/'false'.
                'is_all_flash': system_node.get_child_content(
                    'is-all-flash-optimized') == 'true',
                'is_all_flash_select': system_node.get_child_content(
                    'is-all-flash-select-optimized') == 'true',
            }
            nodes.append(node)
    except netapp_api.NaApiError as e:
        # Best-effort: a missing API just means we lack cluster scope;
        # any other failure is logged and an empty list is returned.
        if e.code == netapp_api.EAPINOTFOUND:
            LOG.debug('Cluster nodes can only be collected with '
                      'cluster scoped credentials.')
        else:
            LOG.exception('Failed to get the cluster nodes.')
    return nodes
def list_vservers(self, vserver_type='data'):
"""Get the names of vservers present, optionally filtered by type."""
query = {
@ -538,7 +599,8 @@ class Client(client_base.Client):
}
return self.connection.send_request('file-assign-qos', api_args, False)
def provision_qos_policy_group(self, qos_policy_group_info,
                               qos_min_support):
    """Create or update a QOS policy group on the backend if appropriate.

    :param qos_policy_group_info: dict with either a 'legacy' entry
        (externally provisioned policy group, nothing to do here) or a
        'spec' entry describing the policy group to create/modify.
    :param qos_min_support: whether the target pool supports QoS
        minimum (floor) throughput specs.
    :raises na_utils.NetAppDriverException: if the spec requests a
        min_throughput but the backend does not support QoS minimums.
    """
    if qos_policy_group_info is None:
        return

    # Legacy QOS uses an externally provisioned QOS policy group,
    # so we don't need to create one on the backend.
    legacy = qos_policy_group_info.get('legacy')
    if legacy:
        return

    spec = qos_policy_group_info.get('spec')
    if spec:
        # Fail fast before touching the backend when floors are not
        # supported by this pool.
        if spec.get('min_throughput') and not qos_min_support:
            msg = _('QoS min_throughput is not supported by this back '
                    'end.')
            raise na_utils.NetAppDriverException(msg)
        if not self.qos_policy_group_exists(spec['policy_name']):
            self.qos_policy_group_create(spec)
        else:
            self.qos_policy_group_modify(spec)
def qos_policy_group_exists(self, qos_policy_group_name):
"""Checks if a QOS policy group exists."""
@ -577,22 +641,24 @@ class Client(client_base.Client):
False)
return self._has_records(result)
def qos_policy_group_create(self, qos_policy_group_name, max_throughput):
def _qos_spec_to_api_args(self, spec, **kwargs):
"""Convert a QoS spec to ZAPI args."""
formatted_spec = {k.replace('_', '-'): v for k, v in spec.items() if v}
formatted_spec['policy-group'] = formatted_spec.pop('policy-name')
formatted_spec = {**formatted_spec, **kwargs}
return formatted_spec
def qos_policy_group_create(self, spec):
    """Creates a QOS policy group.

    :param spec: QoS policy group spec containing at least
        'policy_name' and, optionally, min/max throughput limits.
    """
    # The vserver must be passed on create so the policy group is
    # owned by the correct SVM.
    api_args = self._qos_spec_to_api_args(
        spec, vserver=self.vserver)
    return self.connection.send_request(
        'qos-policy-group-create', api_args, False)
def qos_policy_group_modify(self, spec):
    """Modifies a QOS policy group.

    :param spec: QoS policy group spec containing at least
        'policy_name' and, optionally, min/max throughput limits.
    """
    # Unlike create, modify does not take a vserver argument.
    api_args = self._qos_spec_to_api_args(spec)
    return self.connection.send_request(
        'qos-policy-group-modify', api_args, False)
@ -835,22 +901,6 @@ class Client(client_base.Client):
return True
def list_cluster_nodes(self):
    """Get all available cluster nodes.

    :returns: list of node name strings.
    """
    api_args = {
        'desired-attributes': {
            'node-details-info': {
                'node': None,
            },
        },
    }
    result = self.send_iter_request('system-node-get-iter', api_args)
    # 'attributes-list' is absent when there are no records; fall back
    # to an empty element so the comprehension yields [].
    nodes_info_list = result.get_child_by_name(
        'attributes-list') or netapp_api.NaElement('none')
    return [node_info.get_child_content('node') for node_info
            in nodes_info_list.get_children()]
def get_operational_lif_addresses(self):
"""Gets the IP addresses of operational LIFs on the vserver."""
@ -1233,6 +1283,11 @@ class Client(client_base.Client):
return True
def is_qos_min_supported(self, is_nfs, node_name):
    """Check if the node supports QoS minimum (floor) throughput.

    :param is_nfs: True for the NFS protocol family, False for block.
    :param node_name: name of the cluster node owning the pool.
    :returns: bool, whether the per-node QoS minimum feature was
        registered as supported.
    """
    qos_min_name = na_utils.qos_min_feature_name(is_nfs, node_name)
    # bool() is the idiomatic form of the original __bool__() call and
    # handles the getattr default (False) identically.
    return bool(getattr(self.features, qos_min_name, False))
def create_flexvol(self, flexvol_name, aggregate_name, size_gb,
space_guarantee_type=None, snapshot_policy=None,
language=None, dedupe_enabled=False,
@ -1415,6 +1470,9 @@ class Client(client_base.Client):
'raid-type': None,
'is-hybrid': None,
},
'aggr-ownership-attributes': {
'home-name': None,
},
},
}
@ -1432,12 +1490,15 @@ class Client(client_base.Client):
aggr_attributes = aggrs[0]
aggr_raid_attrs = aggr_attributes.get_child_by_name(
'aggr-raid-attributes') or netapp_api.NaElement('none')
aggr_ownership_attrs = aggrs[0].get_child_by_name(
'aggr-ownership-attributes') or netapp_api.NaElement('none')
aggregate = {
'name': aggr_attributes.get_child_content('aggregate-name'),
'raid-type': aggr_raid_attrs.get_child_content('raid-type'),
'is-hybrid': strutils.bool_from_string(
aggr_raid_attrs.get_child_content('is-hybrid')),
'node-name': aggr_ownership_attrs.get_child_content('home-name'),
}
return aggregate

5
cinder/volume/drivers/netapp/dataontap/nfs_cmode.py

@ -165,7 +165,10 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
try:
qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
volume, extra_specs)
self.zapi_client.provision_qos_policy_group(qos_policy_group_info)
pool = volume_utils.extract_host(volume['host'], level='pool')
qos_min_support = self.ssc_library.is_qos_min_supported(pool)
self.zapi_client.provision_qos_policy_group(qos_policy_group_info,
qos_min_support)
self._set_qos_policy_group_on_volume(volume, qos_policy_group_info,
qos_policy_group_is_adaptive)
except Exception:

25
cinder/volume/drivers/netapp/dataontap/utils/capabilities.py

@ -117,6 +117,15 @@ class CapabilitiesLibrary(object):
aggregates.add(flexvol_info['netapp_aggregate'])
return list(aggregates)
def is_qos_min_supported(self, pool_name):
    """Check if the given pool (flexvol) supports QoS minimum specs.

    Searches the cached SSC for the entry whose 'pool_name' matches
    and returns its 'netapp_qos_min_support' flag; False when no
    matching entry is found.
    """
    # Only the values are needed here; avoid unpacking unused keys.
    for flexvol_info in self.ssc.values():
        if ('netapp_qos_min_support' in flexvol_info and
                'pool_name' in flexvol_info and
                flexvol_info['pool_name'] == pool_name):
            return flexvol_info['netapp_qos_min_support'] == 'true'
    return False
def update_ssc(self, flexvol_map):
"""Periodically runs to update Storage Service Catalog data.
@ -143,7 +152,11 @@ class CapabilitiesLibrary(object):
# Get aggregate info
aggregate_name = ssc_volume.get('netapp_aggregate')
ssc_volume.update(self._get_ssc_aggregate_info(aggregate_name))
aggr_info = self._get_ssc_aggregate_info(aggregate_name)
node_name = aggr_info.pop('netapp_node_name')
ssc_volume.update(aggr_info)
ssc_volume.update(self._get_ssc_qos_min_info(node_name))
ssc[flexvol_name] = ssc_volume
@ -212,6 +225,13 @@ class CapabilitiesLibrary(object):
return {'netapp_flexvol_encryption': six.text_type(encrypted).lower()}
def _get_ssc_qos_min_info(self, node_name):
    """Gather QoS minimum (floor) support and recast as SSC-style stats."""
    is_nfs = self.protocol == 'nfs'
    supported = self.zapi_client.is_qos_min_supported(is_nfs, node_name)
    return {'netapp_qos_min_support': six.text_type(supported).lower()}
def _get_ssc_mirror_info(self, flexvol_name):
"""Gather SnapMirror info and recast into SSC-style volume stats."""
@ -227,8 +247,10 @@ class CapabilitiesLibrary(object):
raid_type = None
hybrid = None
disk_types = None
node_name = None
else:
aggregate = self.zapi_client.get_aggregate(aggregate_name)
node_name = aggregate.get('node-name')
raid_type = aggregate.get('raid-type')
hybrid = (six.text_type(aggregate.get('is-hybrid')).lower()
if 'is-hybrid' in aggregate else None)
@ -239,6 +261,7 @@ class CapabilitiesLibrary(object):
'netapp_raid_type': raid_type,
'netapp_hybrid_aggregate': hybrid,
'netapp_disk_type': disk_types,
'netapp_node_name': node_name,
}
def get_matching_flexvols_for_extra_specs(self, extra_specs):

127
cinder/volume/drivers/netapp/utils.py

@ -30,7 +30,6 @@ import re
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import netutils
import six
from cinder import context
from cinder import exception
@ -51,7 +50,16 @@ DEPRECATED_SSC_SPECS = {'netapp_unmirrored': 'netapp_mirrored',
'netapp_nodedup': 'netapp_dedup',
'netapp_nocompression': 'netapp_compression',
'netapp_thick_provisioned': 'netapp_thin_provisioned'}
# Recognized QoS spec keywords, split into minimum (floor) and maximum
# (ceiling) families; at most one key from each family may be set.
MIN_QOS_KEYS = frozenset([
    'minIOPS',
    'minIOPSperGiB',
])
MAX_QOS_KEYS = frozenset([
    'maxIOPS',
    'maxIOPSperGiB',
    'maxBPS',
    'maxBPSperGiB',
])
# QoS consumer values that apply on the storage back end.
BACKEND_QOS_CONSUMERS = frozenset(['back-end', 'both'])
# Secret length cannot be less than 96 bits. http://tools.ietf.org/html/rfc3723
@ -87,7 +95,7 @@ def check_flags(required_flags, configuration):
def to_bool(val):
"""Converts true, yes, y, 1 to True, False otherwise."""
if val:
strg = six.text_type(val).lower()
strg = str(val).lower()
if (strg == 'true' or strg == 'y'
or strg == 'yes' or strg == 'enabled'
or strg == '1'):
@ -152,7 +160,7 @@ def trace_filter_func_api(all_args):
def round_down(value, precision='0.00'):
    """Round *value* toward zero at the given decimal precision.

    :param value: number (or numeric string) to round.
    :param precision: decimal template, e.g. '0.00' for two places.
    :returns: the truncated value as a float.
    """
    # str() round-trips the float exactly, avoiding binary-float
    # artifacts that Decimal(float) would expose.
    return float(decimal.Decimal(str(value)).quantize(
        decimal.Decimal(precision), rounding=decimal.ROUND_DOWN))
@ -176,7 +184,7 @@ def get_iscsi_connection_properties(lun_id, volume, iqns,
for a in addresses]
lun_id = int(lun_id)
if isinstance(iqns, six.string_types):
if isinstance(iqns, str):
iqns = [iqns] * len(addresses)
target_portals = ['%s:%s' % (a, p) for a, p in zip(addresses, ports)]
def validate_qos_spec(qos_spec):
    """Check validity of a Cinder qos spec for our backend.

    Every key must be a recognized QoS keyword, and at most one
    minimum and one maximum throughput limit may be present.

    :param qos_spec: dict of QoS keyword -> value, or None.
    :raises exception.Invalid: on unrecognized keywords or when more
        than one min or more than one max limit is set.
    """
    if qos_spec is None:
        return

    normalized_min_keys = [key.lower() for key in MIN_QOS_KEYS]
    normalized_max_keys = [key.lower() for key in MAX_QOS_KEYS]

    unrecognized_keys = [
        k for k in qos_spec.keys()
        if k.lower() not in normalized_max_keys + normalized_min_keys]
    if unrecognized_keys:
        msg = _('Unrecognized QOS keywords: "%s"') % unrecognized_keys
        raise exception.Invalid(msg)

    # The min/max families are each exclusive: only one floor and one
    # ceiling limit may be set in a single spec.
    min_dict = {k: v for k, v in qos_spec.items()
                if k.lower() in normalized_min_keys}
    if len(min_dict) > 1:
        msg = _('Only one minimum limit can be set in a QoS spec.')
        raise exception.Invalid(msg)

    max_dict = {k: v for k, v in qos_spec.items()
                if k.lower() in normalized_max_keys}
    if len(max_dict) > 1:
        msg = _('Only one maximum limit can be set in a QoS spec.')
        raise exception.Invalid(msg)
@ -231,28 +250,67 @@ def get_volume_type_from_volume(volume):
return volume_types.get_volume_type(ctxt, type_id)
def map_qos_spec(qos_spec, volume):
"""Map Cinder QOS spec to limit/throughput-value as used in client API."""
if qos_spec is None:
return None
def _get_min_throughput_from_qos_spec(qos_spec, volume_size):
"""Returns the minimum QoS throughput.
The QoS min specs are exclusive of one another and it accepts values in
IOPS only.
"""
if 'miniops' in qos_spec:
min_throughput = '%siops' % qos_spec['miniops']
elif 'miniopspergib' in qos_spec:
min_throughput = '%siops' % str(
</