NetApp: Report hybrid aggregates in share stats

NetApp cDOT controllers can mix SSDs and spinning disks
in the same aggregate, where the SSDs are used as a
cache.  This commit reports the hybrid-aggregate attribute,
as well as the aggregate name, to the scheduler for each
pool for use in extra-specs matching.

Implements: blueprint netapp-report-cdot-hybrid-aggrs-manila
Change-Id: Iaa0bcd79789449f977b48f1de2adf997c936db61
This commit is contained in:
Clinton Knight 2016-05-11 12:28:26 -07:00
parent 420ef92662
commit 4c4ee90854
7 changed files with 470 additions and 147 deletions

View File

@ -66,6 +66,8 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
self.features.add_feature('IPSPACES', supported=ontapi_1_30)
self.features.add_feature('SUBNETS', supported=ontapi_1_30)
self.features.add_feature('CLUSTER_PEER_POLICY', supported=ontapi_1_30)
self.features.add_feature('ADVANCED_DISK_PARTITIONING',
supported=ontapi_1_30)
def _invoke_vserver_api(self, na_element, vserver):
server = copy.copy(self.connection)
@ -2367,76 +2369,116 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
LOG.warning(_LW('Failed to invoke EMS. %s') % e)
@na_utils.trace
def get_aggregate(self, aggregate_name):
    """Get aggregate attributes needed for the storage service catalog.

    :param aggregate_name: name of the aggregate to query.  A falsy
        value short-circuits to an empty dict.
    :returns: dict with 'name', 'raid-type' and 'is-hybrid' keys, or
        {} if the aggregate was not found or the API call failed.
    """
    if not aggregate_name:
        return {}

    desired_attributes = {
        'aggr-attributes': {
            'aggregate-name': None,
            'aggr-raid-attributes': {
                'raid-type': None,
                'is-hybrid': None,
            },
        },
    }

    try:
        aggrs = self._get_aggregates(aggregate_names=[aggregate_name],
                                     desired_attributes=desired_attributes)
    except netapp_api.NaApiError:
        # SSC info is best-effort; log and return nothing rather than
        # failing the whole stats update.
        msg = _('Failed to get info for aggregate %s.')
        LOG.exception(msg % aggregate_name)
        return {}

    if len(aggrs) < 1:
        return {}

    aggr_attributes = aggrs[0]
    aggr_raid_attrs = aggr_attributes.get_child_by_name(
        'aggr-raid-attributes') or netapp_api.NaElement('none')

    aggregate = {
        'name': aggr_attributes.get_child_content('aggregate-name'),
        'raid-type': aggr_raid_attrs.get_child_content('raid-type'),
        'is-hybrid': strutils.bool_from_string(
            aggr_raid_attrs.get_child_content('is-hybrid')),
    }

    return aggregate
@na_utils.trace
def get_aggregate_disk_types(self, aggregate_name):
    """Get the disk type(s) of an aggregate.

    Hybrid aggregates may contain more than one disk type.  When the
    controller supports Advanced Disk Partitioning, shared (partitioned)
    disks are queried separately and merged into the result.

    :returns: list of effective disk types, or None if none were found.
    """
    disk_types = set()
    disk_types.update(self._get_aggregate_disk_types(aggregate_name))
    if self.features.ADVANCED_DISK_PARTITIONING:
        disk_types.update(self._get_aggregate_disk_types(aggregate_name,
                                                         shared=True))

    return list(disk_types) if disk_types else None
@na_utils.trace
def _get_aggregate_disk_types(self, aggregate_name, shared=False):
    """Get the disk type(s) of an aggregate.

    :param aggregate_name: aggregate whose disks are queried.
    :param shared: when True, query shared (partitioned) disks via the
        disk-shared-info query path instead of disk-aggregate-info.
    :returns: set of effective disk types; empty on lookup failure.
    """
    disk_types = set()

    if shared:
        disk_raid_info = {
            'disk-shared-info': {
                'aggregate-list': {
                    'shared-aggregate-info': {
                        'aggregate-name': aggregate_name,
                    },
                },
            },
        }
    else:
        disk_raid_info = {
            'disk-aggregate-info': {
                'aggregate-name': aggregate_name,
            },
        }

    api_args = {
        'query': {
            'storage-disk-info': {
                'disk-raid-info': disk_raid_info,
            },
        },
        'desired-attributes': {
            'storage-disk-info': {
                'disk-raid-info': {
                    'effective-disk-type': None,
                },
            },
        },
    }

    try:
        result = self.send_iter_request('storage-disk-get-iter', api_args)
    except netapp_api.NaApiError:
        # Best-effort SSC collection: log and return what we have.
        msg = _('Failed to get disk info for aggregate %s.')
        LOG.exception(msg % aggregate_name)
        return disk_types

    attributes_list = result.get_child_by_name(
        'attributes-list') or netapp_api.NaElement('none')

    for storage_disk_info in attributes_list.get_children():
        disk_raid_info = storage_disk_info.get_child_by_name(
            'disk-raid-info') or netapp_api.NaElement('none')
        disk_type = disk_raid_info.get_child_content(
            'effective-disk-type')
        if disk_type:
            disk_types.add(disk_type)

    return disk_types
@na_utils.trace
def check_for_cluster_credentials(self):

View File

@ -278,6 +278,7 @@ class NetAppCmodeFileStorageLibrary(object):
'dedupe': [True, False],
'compression': [True, False],
'thin_provisioning': [True, False],
'netapp_aggregate': aggr_name,
}
# Add storage service catalog data.
@ -1107,13 +1108,18 @@ class NetAppCmodeFileStorageLibrary(object):
if not self._have_cluster_creds:
return
raid_types = self._client.get_aggregate_raid_types(aggregate_names)
for aggregate_name, raid_type in raid_types.items():
ssc_stats[aggregate_name]['netapp_raid_type'] = raid_type
for aggregate_name in aggregate_names:
disk_types = self._client.get_aggregate_disk_types(aggregate_names)
for aggregate_name, disk_type in disk_types.items():
ssc_stats[aggregate_name]['netapp_disk_type'] = disk_type
aggregate = self._client.get_aggregate(aggregate_name)
hybrid = (six.text_type(aggregate.get('is-hybrid')).lower()
if 'is-hybrid' in aggregate else None)
disk_types = self._client.get_aggregate_disk_types(aggregate_name)
ssc_stats[aggregate_name].update({
'netapp_raid_type': aggregate.get('raid-type'),
'netapp_hybrid_aggregate': hybrid,
'netapp_disk_type': disk_types,
})
def _find_active_replica(self, replica_list):
# NOTE(ameade): Find current active replica. There can only be one

View File

@ -43,6 +43,7 @@ SHARE_AGGREGATE_NAME = 'fake_aggr1'
SHARE_AGGREGATE_NAMES = ('fake_aggr1', 'fake_aggr2')
SHARE_AGGREGATE_RAID_TYPES = ('raid4', 'raid_dp')
SHARE_AGGREGATE_DISK_TYPE = 'FCAL'
SHARE_AGGREGATE_DISK_TYPES = ['SATA', 'SSD']
SHARE_NAME = 'fake_share'
SHARE_SIZE = '1000000000'
SHARE_NAME_2 = 'fake_share_2'
@ -1145,6 +1146,127 @@ AGGR_GET_ITER_RESPONSE = etree.XML("""
'aggr2': SHARE_AGGREGATE_NAMES[1],
})
# Canned 'aggr-get-iter' reply used by the storage-service-catalog tests;
# note is-hybrid=false and raid-type=raid_dp under aggr-raid-attributes.
AGGR_GET_ITER_SSC_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<aggr-attributes>
<aggr-64bit-upgrade-attributes>
<aggr-status-attributes>
<is-64-bit-upgrade-in-progress>false</is-64-bit-upgrade-in-progress>
</aggr-status-attributes>
</aggr-64bit-upgrade-attributes>
<aggr-fs-attributes>
<block-type>64_bit</block-type>
<fsid>1758646411</fsid>
<type>aggr</type>
</aggr-fs-attributes>
<aggr-inode-attributes>
<files-private-used>512</files-private-used>
<files-total>30384</files-total>
<files-used>96</files-used>
<inodefile-private-capacity>30384</inodefile-private-capacity>
<inodefile-public-capacity>30384</inodefile-public-capacity>
<maxfiles-available>30384</maxfiles-available>
<maxfiles-possible>243191</maxfiles-possible>
<maxfiles-used>96</maxfiles-used>
<percent-inode-used-capacity>0</percent-inode-used-capacity>
</aggr-inode-attributes>
<aggr-ownership-attributes>
<home-id>4082368507</home-id>
<home-name>cluster3-01</home-name>
<owner-id>4082368507</owner-id>
<owner-name>cluster3-01</owner-name>
</aggr-ownership-attributes>
<aggr-performance-attributes>
<free-space-realloc>off</free-space-realloc>
<max-write-alloc-blocks>0</max-write-alloc-blocks>
</aggr-performance-attributes>
<aggr-raid-attributes>
<checksum-status>active</checksum-status>
<checksum-style>block</checksum-style>
<disk-count>3</disk-count>
<ha-policy>cfo</ha-policy>
<has-local-root>true</has-local-root>
<has-partner-root>false</has-partner-root>
<is-checksum-enabled>true</is-checksum-enabled>
<is-hybrid>false</is-hybrid>
<is-hybrid-enabled>false</is-hybrid-enabled>
<is-inconsistent>false</is-inconsistent>
<mirror-status>unmirrored</mirror-status>
<mount-state>online</mount-state>
<plex-count>1</plex-count>
<plexes>
<plex-attributes>
<is-online>true</is-online>
<is-resyncing>false</is-resyncing>
<plex-name>/%(aggr1)s/plex0</plex-name>
<plex-status>normal,active</plex-status>
<raidgroups>
<raidgroup-attributes>
<checksum-style>block</checksum-style>
<is-cache-tier>false</is-cache-tier>
<is-recomputing-parity>false</is-recomputing-parity>
<is-reconstructing>false</is-reconstructing>
<raidgroup-name>/%(aggr1)s/plex0/rg0</raidgroup-name>
<recomputing-parity-percentage>0</recomputing-parity-percentage>
<reconstruction-percentage>0</reconstruction-percentage>
</raidgroup-attributes>
</raidgroups>
<resyncing-percentage>0</resyncing-percentage>
</plex-attributes>
</plexes>
<raid-lost-write-state>on</raid-lost-write-state>
<raid-size>16</raid-size>
<raid-status>raid_dp, normal</raid-status>
<raid-type>raid_dp</raid-type>
<state>online</state>
</aggr-raid-attributes>
<aggr-snaplock-attributes>
<is-snaplock>false</is-snaplock>
</aggr-snaplock-attributes>
<aggr-snapshot-attributes>
<files-total>0</files-total>
<files-used>0</files-used>
<is-snapshot-auto-create-enabled>true</is-snapshot-auto-create-enabled>
<is-snapshot-auto-delete-enabled>true</is-snapshot-auto-delete-enabled>
<maxfiles-available>0</maxfiles-available>
<maxfiles-possible>0</maxfiles-possible>
<maxfiles-used>0</maxfiles-used>
<percent-inode-used-capacity>0</percent-inode-used-capacity>
<percent-used-capacity>0</percent-used-capacity>
<size-available>0</size-available>
<size-total>0</size-total>
<size-used>0</size-used>
<snapshot-reserve-percent>0</snapshot-reserve-percent>
</aggr-snapshot-attributes>
<aggr-space-attributes>
<aggregate-metadata>245760</aggregate-metadata>
<hybrid-cache-size-total>0</hybrid-cache-size-total>
<percent-used-capacity>95</percent-used-capacity>
<size-available>45670400</size-available>
<size-total>943718400</size-total>
<size-used>898048000</size-used>
<total-reserved-space>0</total-reserved-space>
<used-including-snapshot-reserve>898048000</used-including-snapshot-reserve>
<volume-footprints>897802240</volume-footprints>
</aggr-space-attributes>
<aggr-volume-count-attributes>
<flexvol-count>1</flexvol-count>
<flexvol-count-collective>0</flexvol-count-collective>
<flexvol-count-striped>0</flexvol-count-striped>
</aggr-volume-count-attributes>
<aggregate-name>%(aggr1)s</aggregate-name>
<aggregate-uuid>15863632-ea49-49a8-9c88-2bd2d57c6d7a</aggregate-uuid>
<nodes>
<node-name>cluster3-01</node-name>
</nodes>
<striping-type>unknown</striping-type>
</aggr-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'aggr1': SHARE_AGGREGATE_NAMES[0]})
VOLUME_GET_NAME_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
@ -1476,13 +1598,34 @@ STORAGE_DISK_GET_ITER_RESPONSE = etree.XML("""
<storage-disk-info>
<disk-name>cluster3-01:v5.19</disk-name>
<disk-raid-info>
<effective-disk-type>%s</effective-disk-type>
<effective-disk-type>%(type0)s</effective-disk-type>
</disk-raid-info>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.20</disk-name>
<disk-raid-info>
<effective-disk-type>%(type0)s</effective-disk-type>
</disk-raid-info>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.20</disk-name>
<disk-raid-info>
<effective-disk-type>%(type1)s</effective-disk-type>
</disk-raid-info>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.20</disk-name>
<disk-raid-info>
<effective-disk-type>%(type1)s</effective-disk-type>
</disk-raid-info>
</storage-disk-info>
</attributes-list>
<num-records>1</num-records>
<num-records>4</num-records>
</results>
""" % SHARE_AGGREGATE_DISK_TYPE)
""" % {
'type0': SHARE_AGGREGATE_DISK_TYPES[0],
'type1': SHARE_AGGREGATE_DISK_TYPES[1],
})
STORAGE_DISK_GET_ITER_RESPONSE_PAGE_1 = etree.XML("""
<results status="passed">

View File

@ -189,7 +189,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
max_page_length=10)
num_records = result.get_child_content('num-records')
self.assertEqual('1', num_records)
self.assertEqual('4', num_records)
args = copy.deepcopy(storage_disk_get_iter_args)
args['max-records'] = 10
@ -4184,90 +4184,185 @@ class NetAppClientCmodeTestCase(test.TestCase):
mock.call('ems-autosupport-log', fake.EMS_MESSAGE)])
self.assertEqual(1, client_cmode.LOG.warning.call_count)
def test_get_aggregate_none_specified(self):
    """An empty aggregate name short-circuits to an empty dict."""
    result = self.client.get_aggregate('')

    self.assertEqual({}, result)
def test_get_aggregate(self):
    """Happy path: aggregate attributes are parsed from the API reply."""
    api_response = netapp_api.NaElement(
        fake.AGGR_GET_ITER_SSC_RESPONSE).get_child_by_name(
        'attributes-list').get_children()
    self.mock_object(self.client,
                     '_get_aggregates',
                     mock.Mock(return_value=api_response))

    result = self.client.get_aggregate(fake.SHARE_AGGREGATE_NAME)

    desired_attributes = {
        'aggr-attributes': {
            'aggregate-name': None,
            'aggr-raid-attributes': {
                'raid-type': None,
                'is-hybrid': None,
            },
        },
    }
    self.client._get_aggregates.assert_has_calls([
        mock.call(
            aggregate_names=[fake.SHARE_AGGREGATE_NAME],
            desired_attributes=desired_attributes)])

    expected = {
        'name': fake.SHARE_AGGREGATE_NAME,
        'raid-type': 'raid_dp',
        'is-hybrid': False,
    }
    self.assertDictEqual(expected, result)
def test_get_aggregate_not_found(self):
    """A no-records API reply yields an empty dict."""
    api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
    self.mock_object(self.client,
                     'send_request',
                     mock.Mock(return_value=api_response))

    result = self.client.get_aggregate(fake.SHARE_AGGREGATE_NAME)

    self.assertEqual({}, result)
def test_get_aggregate_api_error(self):
    """An API error is swallowed and an empty dict is returned."""
    self.mock_object(self.client,
                     'send_request',
                     mock.Mock(side_effect=self._mock_api_error()))

    result = self.client.get_aggregate(fake.SHARE_AGGREGATE_NAME)

    self.assertEqual({}, result)
@ddt.data({'types': {'FCAL'}, 'expected': ['FCAL']},
          {'types': {'SATA', 'SSD'}, 'expected': ['SATA', 'SSD']},)
@ddt.unpack
def test_get_aggregate_disk_types(self, types, expected):
    """Disk type sets from the helper are returned as a list."""
    mock_get_aggregate_disk_types = self.mock_object(
        self.client, '_get_aggregate_disk_types',
        mock.Mock(return_value=types))

    result = self.client.get_aggregate_disk_types(
        fake.SHARE_AGGREGATE_NAME)

    self.assertItemsEqual(expected, result)
    mock_get_aggregate_disk_types.assert_called_once_with(
        fake.SHARE_AGGREGATE_NAME)
def test_get_aggregate_disk_types_not_found(self):
    """An empty disk-type set maps to a None result."""
    mock_get_aggregate_disk_types = self.mock_object(
        self.client, '_get_aggregate_disk_types',
        mock.Mock(return_value=set()))

    result = self.client.get_aggregate_disk_types(
        fake.SHARE_AGGREGATE_NAME)

    self.assertIsNone(result)
    mock_get_aggregate_disk_types.assert_called_once_with(
        fake.SHARE_AGGREGATE_NAME)
def test_get_aggregate_disk_types_shared(self):
    """With ADP enabled, shared disks are queried and results merged."""
    self.client.features.add_feature('ADVANCED_DISK_PARTITIONING')
    mock_get_aggregate_disk_types = self.mock_object(
        self.client, '_get_aggregate_disk_types',
        mock.Mock(side_effect=[set(['SSD']), set(['SATA'])]))

    result = self.client.get_aggregate_disk_types(
        fake.SHARE_AGGREGATE_NAME)

    self.assertIsInstance(result, list)
    self.assertItemsEqual(['SATA', 'SSD'], result)
    mock_get_aggregate_disk_types.assert_has_calls([
        mock.call(fake.SHARE_AGGREGATE_NAME),
        mock.call(fake.SHARE_AGGREGATE_NAME, shared=True),
    ])
@ddt.data({
    'shared': False,
    'query_disk_raid_info': {
        'disk-aggregate-info': {
            'aggregate-name': fake.SHARE_AGGREGATE_NAME,
        },
    },
}, {
    'shared': True,
    'query_disk_raid_info': {
        'disk-shared-info': {
            'aggregate-list': {
                'shared-aggregate-info': {
                    'aggregate-name':
                    fake.SHARE_AGGREGATE_NAME,
                },
            },
        },
    },
})
@ddt.unpack
def test__get_aggregate_disk_types_ddt(self, shared, query_disk_raid_info):
    """Both shared and non-shared queries yield the full disk-type set."""
    api_response = netapp_api.NaElement(
        fake.STORAGE_DISK_GET_ITER_RESPONSE)
    self.mock_object(self.client,
                     'send_iter_request',
                     mock.Mock(return_value=api_response))

    result = self.client._get_aggregate_disk_types(
        fake.SHARE_AGGREGATE_NAME, shared=shared)

    storage_disk_get_iter_args = {
        'query': {
            'storage-disk-info': {
                'disk-raid-info': query_disk_raid_info,
            },
        },
        'desired-attributes': {
            'storage-disk-info': {
                'disk-raid-info': {
                    'effective-disk-type': None,
                },
            },
        },
    }
    self.client.send_iter_request.assert_called_once_with(
        'storage-disk-get-iter', storage_disk_get_iter_args)

    expected = set(fake.SHARE_AGGREGATE_DISK_TYPES)
    self.assertEqual(expected, result)
def test__get_aggregate_disk_types_not_found(self):
    """A no-records reply yields an empty set."""
    api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
    self.mock_object(self.client,
                     'send_iter_request',
                     mock.Mock(return_value=api_response))

    result = self.client._get_aggregate_disk_types(
        fake.SHARE_AGGREGATE_NAME)

    self.assertEqual(set(), result)
def test__get_aggregate_disk_types_api_error(self):
    """An API error is swallowed and an empty set is returned."""
    self.mock_object(self.client,
                     'send_iter_request',
                     mock.Mock(side_effect=self._mock_api_error()))

    result = self.client._get_aggregate_disk_types(
        fake.SHARE_AGGREGATE_NAME)

    self.assertEqual(set([]), result)
def test_check_for_cluster_credentials(self):

View File

@ -378,27 +378,28 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.assertListEqual(fake.POOLS, result)
def test_get_pools(self):
    """Pools include SSC data when cluster credentials are available."""
    self.mock_object(
        self.library, '_get_aggregate_space',
        mock.Mock(return_value=fake.AGGREGATE_CAPACITIES))
    self.library._have_cluster_creds = True
    self.library._ssc_stats = fake.SSC_INFO

    result = self.library._get_pools()

    self.assertListEqual(fake.POOLS, result)

def test_get_pools_vserver_creds(self):
    """Without cluster credentials, pools omit cluster-only SSC data."""
    self.mock_object(
        self.library, '_get_aggregate_space',
        mock.Mock(return_value=fake.AGGREGATE_CAPACITIES_VSERVER_CREDS))
    self.library._have_cluster_creds = False

    result = self.library._get_pools()

    self.assertListEqual(fake.POOLS_VSERVER_CREDS, result)
def test_handle_ems_logging(self):
@ -2048,35 +2049,58 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
def test_update_ssc_aggr_info(self):
    """Per-aggregate SSC stats are filled from the client calls."""
    self.library._have_cluster_creds = True
    mock_get_aggregate = self.mock_object(
        self.client, 'get_aggregate',
        mock.Mock(side_effect=fake.SSC_AGGREGATES))
    mock_get_aggregate_disk_types = self.mock_object(
        self.client, 'get_aggregate_disk_types',
        mock.Mock(side_effect=fake.SSC_DISK_TYPES))
    ssc_stats = {
        fake.AGGREGATES[0]: {},
        fake.AGGREGATES[1]: {},
    }

    self.library._update_ssc_aggr_info(fake.AGGREGATES, ssc_stats)

    self.assertDictEqual(fake.SSC_INFO, ssc_stats)
    mock_get_aggregate.assert_has_calls([
        mock.call(fake.AGGREGATES[0]),
        mock.call(fake.AGGREGATES[1]),
    ])
    mock_get_aggregate_disk_types.assert_has_calls([
        mock.call(fake.AGGREGATES[0]),
        mock.call(fake.AGGREGATES[1]),
    ])
def test_update_ssc_aggr_info_not_found(self):
    """Missing aggregate info yields None-valued SSC entries."""
    self.library._have_cluster_creds = True
    self.mock_object(self.client,
                     'get_aggregate',
                     mock.Mock(return_value={}))
    self.mock_object(self.client,
                     'get_aggregate_disk_types',
                     mock.Mock(return_value=None))
    ssc_stats = {
        fake.AGGREGATES[0]: {},
        fake.AGGREGATES[1]: {},
    }

    self.library._update_ssc_aggr_info(fake.AGGREGATES, ssc_stats)

    expected = {
        fake.AGGREGATES[0]: {
            'netapp_raid_type': None,
            'netapp_disk_type': None,
            'netapp_hybrid_aggregate': None,
        },
        fake.AGGREGATES[1]: {
            'netapp_raid_type': None,
            'netapp_disk_type': None,
            'netapp_hybrid_aggregate': None,
        }
    }
    self.assertDictEqual(expected, ssc_stats)
def test_update_ssc_aggr_info_no_cluster_creds(self):

View File

@ -493,16 +493,19 @@ AGGREGATE_CAPACITIES_VSERVER_CREDS = {
# Expected per-aggregate SSC stats after _update_ssc_aggr_info runs;
# hybrid flag is a lowercase string for extra-specs matching.
SSC_INFO = {
    AGGREGATES[0]: {
        'netapp_raid_type': 'raid4',
        'netapp_disk_type': 'FCAL',
        'netapp_hybrid_aggregate': 'false',
    },
    AGGREGATES[1]: {
        'netapp_raid_type': 'raid_dp',
        'netapp_disk_type': ['SATA', 'SSD'],
        'netapp_hybrid_aggregate': 'true',
    }
}
POOLS = [
{'pool_name': AGGREGATES[0],
'netapp_aggregate': AGGREGATES[0],
'total_capacity_gb': 3.3,
'free_capacity_gb': 1.1,
'allocated_capacity_gb': 2.2,
@ -512,9 +515,11 @@ POOLS = [
'compression': [True, False],
'thin_provisioning': [True, False],
'netapp_raid_type': 'raid4',
'netapp_disk_type': 'FCAL'
'netapp_disk_type': 'FCAL',
'netapp_hybrid_aggregate': 'false',
},
{'pool_name': AGGREGATES[1],
'netapp_aggregate': AGGREGATES[1],
'total_capacity_gb': 6.0,
'free_capacity_gb': 2.0,
'allocated_capacity_gb': 4.0,
@ -524,12 +529,14 @@ POOLS = [
'compression': [True, False],
'thin_provisioning': [True, False],
'netapp_raid_type': 'raid_dp',
'netapp_disk_type': 'SSD'
'netapp_disk_type': ['SATA', 'SSD'],
'netapp_hybrid_aggregate': 'true',
},
]
POOLS_VSERVER_CREDS = [
{'pool_name': AGGREGATES[0],
'netapp_aggregate': AGGREGATES[0],
'total_capacity_gb': 'unknown',
'free_capacity_gb': 1.1,
'allocated_capacity_gb': 0.0,
@ -538,10 +545,9 @@ POOLS_VSERVER_CREDS = [
'dedupe': [True, False],
'compression': [True, False],
'thin_provisioning': [True, False],
'netapp_raid_type': 'raid4',
'netapp_disk_type': 'FCAL'
},
{'pool_name': AGGREGATES[1],
'netapp_aggregate': AGGREGATES[1],
'total_capacity_gb': 'unknown',
'free_capacity_gb': 2.0,
'allocated_capacity_gb': 0.0,
@ -550,20 +556,23 @@ POOLS_VSERVER_CREDS = [
'dedupe': [True, False],
'compression': [True, False],
'thin_provisioning': [True, False],
'netapp_raid_type': 'raid_dp',
'netapp_disk_type': 'SSD'
},
]
# Canned client.get_aggregate() results, one per fake aggregate.
SSC_AGGREGATES = [
    {
        'name': AGGREGATES[0],
        'raid-type': 'raid4',
        'is-hybrid': False,
    },
    {
        'name': AGGREGATES[1],
        'raid-type': 'raid_dp',
        'is-hybrid': True,
    },
]

# Canned client.get_aggregate_disk_types() results, one per aggregate.
SSC_DISK_TYPES = ['FCAL', ['SATA', 'SSD']]
def get_config_cmode():

View File

@ -0,0 +1,4 @@
---
features:
- Add support for hybrid aggregates to the NetApp cDOT drivers.