Merge "NetApp ONTAP: Implement FlexGroup pool"

This commit is contained in:
Zuul 2021-03-17 17:15:48 +00:00 committed by Gerrit Code Review
commit 3f44ef8035
17 changed files with 1101 additions and 191 deletions

View File

@ -665,23 +665,87 @@ AGGR_GET_ITER_CAPACITY_RESPONSE = etree.XML("""
VOLUME_SIZE_TOTAL = 19922944
VOLUME_SIZE_AVAILABLE = 19791872
VOLUME_GET_ITER_CAPACITY_RESPONSE = etree.XML("""
<results status="passed">
<num-records>1</num-records>
<attributes-list>
<volume-attributes>
<volume-space-attributes>
<size-available>%(available_size)s</size-available>
<size-total>%(total_size)s</size-total>
</volume-space-attributes>
</volume-attributes>
</attributes-list>
</results>
VOLUME_GET_ITER_CAPACITY_ATTR_STR = """
<volume-attributes>
<volume-id-attributes>
<style-extended>flexgroup</style-extended>
</volume-id-attributes>
<volume-space-attributes>
<size-available>%(available_size)s</size-available>
<size-total>%(total_size)s</size-total>
</volume-space-attributes>
</volume-attributes>
""" % {
'available_size': VOLUME_SIZE_AVAILABLE,
'total_size': VOLUME_SIZE_TOTAL,
}
VOLUME_GET_ITER_CAPACITY_ATTR = etree.XML(VOLUME_GET_ITER_CAPACITY_ATTR_STR)
VOLUME_GET_ITER_CAPACITY_RESPONSE = etree.XML("""
<results status="passed">
<num-records>1</num-records>
<attributes-list> %(volume)s </attributes-list>
</results>
""" % {
'volume': VOLUME_GET_ITER_CAPACITY_ATTR_STR,
})
VOLUME_GET_ITER_STYLE_RESPONSE = etree.XML("""
<results status="passed">
<num-records>3</num-records>
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<style-extended>flexgroup</style-extended>
</volume-id-attributes>
</volume-attributes>
<volume-attributes>
<volume-id-attributes>
<style-extended>flexgroup-constituent</style-extended>
</volume-id-attributes>
</volume-attributes>
<volume-attributes>
<volume-id-attributes>
<style-extended>flexgroup-constituent</style-extended>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
</results>
""")
VOLUME_FLEXGROUP_STYLE = etree.XML("""
<volume-attributes>
<volume-id-attributes>
<style-extended>flexgroup</style-extended>
</volume-id-attributes>
</volume-attributes>
""")
VOLUME_GET_ITER_SAME_STYLE_RESPONSE = etree.XML("""
<results status="passed">
<num-records>3</num-records>
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<style-extended>flexvol</style-extended>
</volume-id-attributes>
</volume-attributes>
<volume-attributes>
<volume-id-attributes>
<style-extended>flexvol</style-extended>
</volume-id-attributes>
</volume-attributes>
<volume-attributes>
<volume-id-attributes>
<style-extended>flexvol</style-extended>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
</results>
""")
VOLUME_GET_ITER_LIST_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
@ -706,44 +770,102 @@ VOLUME_GET_ITER_LIST_RESPONSE = etree.XML("""
'vserver': VOLUME_VSERVER_NAME,
})
VOLUME_GET_ITER_SSC_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<containing-aggregate-name>%(aggr)s</containing-aggregate-name>
<junction-path>/%(volume)s</junction-path>
<name>%(volume)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
<type>rw</type>
</volume-id-attributes>
<volume-mirror-attributes>
<is-data-protection-mirror>false</is-data-protection-mirror>
<is-replica-volume>false</is-replica-volume>
</volume-mirror-attributes>
<volume-qos-attributes>
<policy-group-name>fake_qos_policy_group_name</policy-group-name>
</volume-qos-attributes>
<volume-space-attributes>
<is-space-guarantee-enabled>true</is-space-guarantee-enabled>
<space-guarantee>none</space-guarantee>
<percentage-snapshot-reserve>5</percentage-snapshot-reserve>
<size>12345</size>
</volume-space-attributes>
<volume-snapshot-attributes>
<snapshot-policy>default</snapshot-policy>
</volume-snapshot-attributes>
<volume-language-attributes>
<language-code>en_US</language-code>
</volume-language-attributes>
</volume-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
VOLUME_GET_ITER_SSC_RESPONSE_STR = """
<volume-attributes>
<volume-id-attributes>
<containing-aggregate-name>%(aggr)s</containing-aggregate-name>
<junction-path>/%(volume)s</junction-path>
<name>%(volume)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
<type>rw</type>
<style-extended>flexvol</style-extended>
</volume-id-attributes>
<volume-mirror-attributes>
<is-data-protection-mirror>false</is-data-protection-mirror>
<is-replica-volume>false</is-replica-volume>
</volume-mirror-attributes>
<volume-qos-attributes>
<policy-group-name>fake_qos_policy_group_name</policy-group-name>
</volume-qos-attributes>
<volume-space-attributes>
<is-space-guarantee-enabled>true</is-space-guarantee-enabled>
<space-guarantee>none</space-guarantee>
<percentage-snapshot-reserve>5</percentage-snapshot-reserve>
<size>12345</size>
</volume-space-attributes>
<volume-snapshot-attributes>
<snapshot-policy>default</snapshot-policy>
</volume-snapshot-attributes>
<volume-language-attributes>
<language-code>en_US</language-code>
</volume-language-attributes>
</volume-attributes>
""" % {
'aggr': VOLUME_AGGREGATE_NAMES[0],
'volume': VOLUME_NAMES[0],
'vserver': VOLUME_VSERVER_NAME,
}
VOLUME_GET_ITER_SSC_RESPONSE_ATTR = etree.XML(
VOLUME_GET_ITER_SSC_RESPONSE_STR)
VOLUME_GET_ITER_SSC_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>%(volume)s</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'volume': VOLUME_GET_ITER_SSC_RESPONSE_STR,
})
VOLUME_GET_ITER_SSC_RESPONSE_STR_FLEXGROUP = """
<volume-attributes>
<volume-id-attributes>
<aggr-list>
<aggr-name>%(aggr)s</aggr-name>
</aggr-list>
<junction-path>/%(volume)s</junction-path>
<name>%(volume)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
<type>rw</type>
<style-extended>flexgroup</style-extended>
</volume-id-attributes>
<volume-mirror-attributes>
<is-data-protection-mirror>false</is-data-protection-mirror>
<is-replica-volume>false</is-replica-volume>
</volume-mirror-attributes>
<volume-qos-attributes>
<policy-group-name>fake_qos_policy_group_name</policy-group-name>
</volume-qos-attributes>
<volume-space-attributes>
<is-space-guarantee-enabled>true</is-space-guarantee-enabled>
<space-guarantee>none</space-guarantee>
<percentage-snapshot-reserve>5</percentage-snapshot-reserve>
<size>12345</size>
</volume-space-attributes>
<volume-snapshot-attributes>
<snapshot-policy>default</snapshot-policy>
</volume-snapshot-attributes>
<volume-language-attributes>
<language-code>en_US</language-code>
</volume-language-attributes>
</volume-attributes>
""" % {
'aggr': VOLUME_AGGREGATE_NAMES[0],
'volume': VOLUME_NAMES[0],
'vserver': VOLUME_VSERVER_NAME,
}
VOLUME_GET_ITER_SSC_RESPONSE_ATTR_FLEXGROUP = etree.XML(
VOLUME_GET_ITER_SSC_RESPONSE_STR_FLEXGROUP)
VOLUME_GET_ITER_SSC_RESPONSE_FLEXGROUP = etree.XML("""
<results status="passed">
<attributes-list>%(volume)s</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'volume': VOLUME_GET_ITER_SSC_RESPONSE_STR_FLEXGROUP,
})
VOLUME_INFO_SSC = {
@ -759,6 +881,23 @@ VOLUME_INFO_SSC = {
'size': '12345',
'space-guarantee': 'none',
'qos-policy-group': 'fake_qos_policy_group_name',
'style-extended': 'flexvol',
}
VOLUME_INFO_SSC_FLEXGROUP = {
'name': VOLUME_NAMES[0],
'vserver': VOLUME_VSERVER_NAME,
'junction-path': '/%s' % VOLUME_NAMES[0],
'aggregate': [VOLUME_AGGREGATE_NAMES[0]],
'space-guarantee-enabled': True,
'language': 'en_US',
'percentage-snapshot-reserve': '5',
'snapshot-policy': 'default',
'type': 'rw',
'size': '12345',
'space-guarantee': 'none',
'qos-policy-group': 'fake_qos_policy_group_name',
'style-extended': 'flexgroup',
}
SIS_GET_ITER_SSC_RESPONSE = etree.XML("""

View File

@ -1432,23 +1432,28 @@ class NetAppCmodeClientTestCase(test.TestCase):
fake_vserver = 'fake_vserver'
fake_junc = 'fake_junction_path'
expected_flex_vol = 'fake_flex_vol'
volume_attr_str = ("""
<volume-attributes>
<volume-id-attributes>
<name>%(flex_vol)s</name>
</volume-id-attributes>
</volume-attributes>
""" % {'flex_vol': expected_flex_vol})
volume_attr = netapp_api.NaElement(etree.XML(volume_attr_str))
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<num-records>1</num-records>
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<name>%(flex_vol)s</name>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
</results>""" % {'flex_vol': expected_flex_vol}))
<attributes-list>%(vol)s</attributes-list>
</results>""" % {'vol': volume_attr_str}))
self.connection.invoke_successfully.return_value = response
mock_get_unique_vol = self.mock_object(
self.client, 'get_unique_volume', return_value=volume_attr)
actual_flex_vol = self.client.get_vol_by_junc_vserver(fake_vserver,
fake_junc)
self.assertEqual(expected_flex_vol, actual_flex_vol)
mock_get_unique_vol.assert_called_once_with(response)
def test_clone_file(self):
expected_flex_vol = "fake_flex_vol"
@ -1721,6 +1726,10 @@ class NetAppCmodeClientTestCase(test.TestCase):
fake_client.VOLUME_GET_ITER_CAPACITY_RESPONSE)
mock_send_iter_request = self.mock_object(
self.client, 'send_iter_request', return_value=api_response)
volume_response = netapp_api.NaElement(
fake_client.VOLUME_GET_ITER_CAPACITY_ATTR)
mock_get_unique_vol = self.mock_object(
self.client, 'get_unique_volume', return_value=volume_response)
capacity = self.client.get_flexvol_capacity(**kwargs)
@ -1738,6 +1747,9 @@ class NetAppCmodeClientTestCase(test.TestCase):
},
'desired-attributes': {
'volume-attributes': {
'volume-id-attributes': {
'style-extended': None,
},
'volume-space-attributes': {
'size-available': None,
'size-total': None,
@ -1747,6 +1759,7 @@ class NetAppCmodeClientTestCase(test.TestCase):
}
mock_send_iter_request.assert_called_once_with(
'volume-get-iter', volume_get_iter_args)
mock_get_unique_vol.assert_called_once_with(api_response)
self.assertEqual(fake_client.VOLUME_SIZE_TOTAL, capacity['size-total'])
self.assertEqual(fake_client.VOLUME_SIZE_AVAILABLE,
@ -1810,13 +1823,25 @@ class NetAppCmodeClientTestCase(test.TestCase):
self.assertEqual([], result)
def test_get_flexvol(self):
@ddt.data(False, True)
def test_get_flexvol(self, is_flexgroup):
if is_flexgroup:
api_response = netapp_api.NaElement(
fake_client.VOLUME_GET_ITER_SSC_RESPONSE_FLEXGROUP)
volume_response = netapp_api.NaElement(
fake_client.VOLUME_GET_ITER_SSC_RESPONSE_ATTR_FLEXGROUP)
else:
api_response = netapp_api.NaElement(
fake_client.VOLUME_GET_ITER_SSC_RESPONSE)
volume_response = netapp_api.NaElement(
fake_client.VOLUME_GET_ITER_SSC_RESPONSE_ATTR)
api_response = netapp_api.NaElement(
fake_client.VOLUME_GET_ITER_SSC_RESPONSE)
self.mock_object(self.client,
'send_iter_request',
return_value=api_response)
mock_get_unique_vol = self.mock_object(
self.client, 'get_unique_volume', return_value=volume_response)
result = self.client.get_flexvol(
flexvol_name=fake_client.VOLUME_NAMES[0],
@ -1846,7 +1871,11 @@ class NetAppCmodeClientTestCase(test.TestCase):
'owning-vserver-name': None,
'junction-path': None,
'type': None,
'aggr-list': {
'aggr-name': None,
},
'containing-aggregate-name': None,
'style-extended': None,
},
'volume-mirror-attributes': {
'is-data-protection-mirror': None,
@ -1872,19 +1901,12 @@ class NetAppCmodeClientTestCase(test.TestCase):
}
self.client.send_iter_request.assert_called_once_with(
'volume-get-iter', volume_get_iter_args)
self.assertEqual(fake_client.VOLUME_INFO_SSC, result)
mock_get_unique_vol.assert_called_once_with(api_response)
def test_get_flexvol_not_found(self):
api_response = netapp_api.NaElement(
fake_client.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_iter_request',
return_value=api_response)
self.assertRaises(exception.VolumeBackendAPIException,
self.client.get_flexvol,
flexvol_name=fake_client.VOLUME_NAMES[0])
if is_flexgroup:
self.assertEqual(fake_client.VOLUME_INFO_SSC_FLEXGROUP, result)
else:
self.assertEqual(fake_client.VOLUME_INFO_SSC, result)
def test_create_flexvol(self):
self.mock_object(self.client.connection, 'send_request')
@ -3811,7 +3833,8 @@ class NetAppCmodeClientTestCase(test.TestCase):
'snapshot_policy': 'default',
'snapshot_reserve': '5',
'space_guarantee_type': 'none',
'volume_type': 'rw'
'volume_type': 'rw',
'is_flexgroup': False,
}
actual_prov_opts = self.client.get_provisioning_options_from_flexvol(
@ -3914,3 +3937,22 @@ class NetAppCmodeClientTestCase(test.TestCase):
mock_name.assert_called_once_with(True, 'node')
self.assertFalse(result)
def test_get_unique_volume(self):
api_response = netapp_api.NaElement(
fake_client.VOLUME_GET_ITER_STYLE_RESPONSE)
volume_elem = netapp_api.NaElement(fake_client.VOLUME_FLEXGROUP_STYLE)
volume_id_attr = self.client.get_unique_volume(api_response)
xml_exp = str(volume_elem).replace(" ", "").replace("\n", "")
xml_res = str(volume_id_attr).replace(" ", "").replace("\n", "")
self.assertEqual(xml_exp, xml_res)
def test_get_unique_volume_raise_exception(self):
api_response = netapp_api.NaElement(
fake_client.VOLUME_GET_ITER_SAME_STYLE_RESPONSE)
self.assertRaises(exception.VolumeBackendAPIException,
self.client.get_unique_volume,
api_response)

View File

@ -334,7 +334,9 @@ SNAPSHOT = {
'volume_size': SIZE,
'volume_id': VOLUME_ID,
'volume_name': VOLUME_NAME,
'volume_type_id': 'fake_id',
'busy': False,
'id': 'fake_id'
}
VOLUME_REF = {'name': 'fake_vref_name', 'size': 42}
@ -616,6 +618,7 @@ VOLUME_GROUP = {
'id': VOLUME_GROUP_ID,
'status': 'fake_status',
'name': VG_GROUP_NAME,
'host': 'fake_host',
}
VG_CONTEXT = {}

View File

@ -54,6 +54,9 @@ class PerformanceCmodeLibraryTestCase(test.TestCase):
'pool3': {
'netapp_aggregate': 'aggr2',
},
'pool4': {
'netapp_aggregate': ['aggr1', 'aggr2'],
}
}
self.fake_aggrs = set(['aggr1', 'aggr2', 'aggr3'])
@ -165,7 +168,8 @@ class PerformanceCmodeLibraryTestCase(test.TestCase):
self.assertEqual(expected_performance_counters,
self.perf_library.performance_counters)
expected_pool_utilization = {'pool1': 25, 'pool2': 75, 'pool3': 75}
expected_pool_utilization = {'pool1': 25, 'pool2': 75, 'pool3': 75,
'pool4': perf_base.DEFAULT_UTILIZATION}
self.assertEqual(expected_pool_utilization,
self.perf_library.pool_utilization)
@ -202,6 +206,7 @@ class PerformanceCmodeLibraryTestCase(test.TestCase):
'pool1': perf_base.DEFAULT_UTILIZATION,
'pool2': perf_base.DEFAULT_UTILIZATION,
'pool3': perf_base.DEFAULT_UTILIZATION,
'pool4': perf_base.DEFAULT_UTILIZATION,
}
self.assertEqual(expected_pool_utilization,
self.perf_library.pool_utilization)
@ -244,6 +249,7 @@ class PerformanceCmodeLibraryTestCase(test.TestCase):
'pool1': perf_base.DEFAULT_UTILIZATION,
'pool2': perf_base.DEFAULT_UTILIZATION,
'pool3': perf_base.DEFAULT_UTILIZATION,
'pool4': perf_base.DEFAULT_UTILIZATION,
}
self.assertEqual(expected_pool_utilization,
self.perf_library.pool_utilization)
@ -285,6 +291,7 @@ class PerformanceCmodeLibraryTestCase(test.TestCase):
'pool1': perf_base.DEFAULT_UTILIZATION,
'pool2': perf_base.DEFAULT_UTILIZATION,
'pool3': perf_base.DEFAULT_UTILIZATION,
'pool4': perf_base.DEFAULT_UTILIZATION,
}
self.assertEqual(expected_pool_utilization,
self.perf_library.pool_utilization)

View File

@ -369,6 +369,7 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
'netapp_aggregate': 'aggr1',
'netapp_raid_type': 'raid_dp',
'netapp_disk_type': 'SSD',
'netapp_is_flexgroup': 'false',
},
}
mock_get_ssc = self.mock_object(self.library.ssc_library,
@ -435,6 +436,7 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
'netapp_disk_type': 'SSD',
'replication_enabled': False,
'online_extend_support': True,
'netapp_is_flexgroup': 'false',
}]
expected[0].update({'QoS_support': cluster_credentials})

View File

@ -129,6 +129,7 @@ class NetAppNfsDriverTestCase(test.TestCase):
{'replication_status': fields.ReplicationStatus.ENABLED})
def test_create_volume(self, model_update):
self.mock_object(self.driver, '_ensure_shares_mounted')
self.mock_object(self.driver, '_ensure_flexgroup_not_in_cg')
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(self.driver, '_do_create_volume')
self.mock_object(self.driver, '_do_qos_for_volume')
@ -146,6 +147,7 @@ class NetAppNfsDriverTestCase(test.TestCase):
volume = copy.deepcopy(fake.NFS_VOLUME)
volume['host'] = '%s@%s' % (fake.HOST_NAME, fake.BACKEND_NAME)
self.mock_object(self.driver, '_ensure_shares_mounted')
self.mock_object(self.driver, '_ensure_flexgroup_not_in_cg')
self.assertRaises(exception.InvalidHost,
self.driver.create_volume,
@ -153,6 +155,7 @@ class NetAppNfsDriverTestCase(test.TestCase):
def test_create_volume_exception(self):
self.mock_object(self.driver, '_ensure_shares_mounted')
self.mock_object(self.driver, '_ensure_flexgroup_not_in_cg')
self.mock_object(na_utils, 'get_volume_extra_specs')
mock_create = self.mock_object(self.driver, '_do_create_volume')
mock_create.side_effect = Exception
@ -263,7 +266,9 @@ class NetAppNfsDriverTestCase(test.TestCase):
fake.CLONE_SOURCE,
fake.NFS_VOLUME)
def test_create_volume_from_snapshot(self):
@ddt.data(True, False)
def test_create_volume_from_snapshot(self, is_flexgroup):
provider_location = fake.POOL_NAME
volume = fake.VOLUME
expected_source = {
'name': fake.SNAPSHOT_NAME,
@ -272,22 +277,86 @@ class NetAppNfsDriverTestCase(test.TestCase):
}
mock_clone_call = self.mock_object(
self.driver, '_clone_source_to_destination_volume',
return_value='fake')
return_value=provider_location)
self.mock_object(self.driver, '_ensure_flexgroup_not_in_cg')
self.mock_object(self.driver, '_is_flexgroup',
return_value=is_flexgroup)
mock_super_create = self.mock_object(
nfs.NfsDriver, 'create_volume_from_snapshot',
return_value=provider_location)
mock_do_qos = self.mock_object(
self.driver, '_do_qos_for_file_flexgroup',
return_value=provider_location)
retval = self.driver.create_volume_from_snapshot(volume, fake.SNAPSHOT)
self.assertEqual('fake', retval)
mock_clone_call.assert_called_once_with(expected_source, volume)
self.assertEqual(provider_location, retval)
if is_flexgroup:
mock_clone_call.assert_not_called()
mock_super_create.assert_called_once_with(volume, fake.SNAPSHOT)
mock_do_qos.assert_called_once_with(volume, provider_location)
else:
mock_clone_call.assert_called_once_with(expected_source, volume)
mock_do_qos.assert_not_called()
mock_super_create.not_called()
def test_create_cloned_volume(self):
@ddt.data(True, False)
def test_create_cloned_volume(self, is_flexgroup):
provider_location = fake.POOL_NAME
volume = fake.VOLUME
src_vref = fake.CLONE_SOURCE
self.mock_object(self.driver, '_clone_source_to_destination_volume',
return_value=provider_location)
mock_clone_call = self.mock_object(
self.driver, '_clone_source_to_destination_volume',
return_value=provider_location)
self.mock_object(self.driver, '_ensure_flexgroup_not_in_cg')
self.mock_object(self.driver, '_is_flexgroup',
return_value=is_flexgroup)
mock_super_create = self.mock_object(
nfs.NfsDriver, 'create_cloned_volume',
return_value=provider_location)
mock_do_qos = self.mock_object(
self.driver, '_do_qos_for_file_flexgroup',
return_value=provider_location)
result = self.driver.create_cloned_volume(volume, src_vref)
result = self.driver.create_cloned_volume(fake.NFS_VOLUME,
src_vref)
self.assertEqual(provider_location, result)
if is_flexgroup:
mock_clone_call.assert_not_called()
mock_super_create.assert_called_once_with(volume, src_vref)
mock_do_qos.assert_called_once_with(volume, provider_location)
else:
mock_clone_call.assert_called_once_with(src_vref, volume)
mock_do_qos.assert_not_called()
mock_super_create.not_called()
def test_do_qos_for_file_flexgroup(self):
volume = {'provider_location': 'fake'}
extra_specs = 'fake_extra'
model = {'provider_location': 'fake'}
vol_model = {'replication': 'fake'}
expected_model = {
'replication': vol_model['replication'],
'provider_location': model['provider_location'],
}
self.mock_object(self.driver, '_do_qos_for_volume')
self.mock_object(self.driver, '_get_volume_model_update',
return_value=vol_model)
mock_extra = self.mock_object(na_utils, 'get_volume_extra_specs',
return_value=extra_specs)
model_updated = self.driver._do_qos_for_file_flexgroup(volume, model)
self.assertEqual(model_updated, expected_model)
mock_extra.assert_called_once_with(volume)
def test_do_qos_for_file_flexgroup_error(self):
self.mock_object(na_utils, 'get_volume_extra_specs',
side_effect=exception.NotFound)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._do_qos_for_file_flexgroup,
fake.VOLUME, 'fake_model')
def test_do_qos_for_volume(self):
self.assertRaises(NotImplementedError,
@ -295,18 +364,81 @@ class NetAppNfsDriverTestCase(test.TestCase):
fake.NFS_VOLUME,
fake.EXTRA_SPECS)
def test_create_snapshot(self):
@ddt.data(True, False)
def test_create_snapshot(self, is_flexgroup):
self.mock_object(self.driver, '_is_flexgroup',
return_value=is_flexgroup)
mock_clone_backing_file_for_volume = self.mock_object(
self.driver, '_clone_backing_file_for_volume')
mock_snap_flexgroup = self.mock_object(
self.driver, '_create_snapshot_for_flexgroup')
self.driver.create_snapshot(fake.SNAPSHOT)
mock_clone_backing_file_for_volume.assert_called_once_with(
fake.SNAPSHOT['volume_name'], fake.SNAPSHOT['name'],
fake.SNAPSHOT['volume_id'], is_snapshot=True)
if is_flexgroup:
mock_snap_flexgroup.assert_called_once_with(fake.SNAPSHOT)
mock_clone_backing_file_for_volume.assert_not_called()
else:
mock_snap_flexgroup.assert_not_called()
mock_clone_backing_file_for_volume.assert_called_once_with(
fake.SNAPSHOT['volume_name'], fake.SNAPSHOT['name'],
fake.SNAPSHOT['volume_id'], is_snapshot=True)
def test_delete_snapshot(self):
def test_create_snapshot_for_flexgroup(self):
source_vol = {
'id': fake.SNAPSHOT['volume_id'],
'name': fake.SNAPSHOT['volume_name'],
'volume_type_id': fake.SNAPSHOT['volume_type_id'],
}
snap_vol = {
'name': '%s.%s' % (fake.SNAPSHOT['volume_name'],
fake.SNAPSHOT['id']),
'host': fake.HOST_NAME,
}
mock_super_snapshot = self.mock_object(nfs.NfsDriver,
'create_snapshot')
mock_extra_specs = self.mock_object(na_utils,
'get_volume_extra_specs')
mock_extra_specs.return_value = fake.EXTRA_SPECS
mock_get_info = self.mock_object(na_utils,
'get_valid_qos_policy_group_info')
mock_get_info.return_value = fake.QOS_POLICY_GROUP_INFO
mock_get_host = self.mock_object(self.driver,
'_get_volume_host')
mock_get_host.return_value = fake.HOST_NAME
mock_set_policy = self.mock_object(self.driver,
'_set_qos_policy_group_on_volume')
self.driver._create_snapshot_for_flexgroup(fake.SNAPSHOT)
mock_super_snapshot.assert_has_calls([
mock.call(fake.SNAPSHOT)])
mock_get_host.assert_has_calls([
mock.call(source_vol['id'])])
mock_extra_specs.assert_has_calls([
mock.call(source_vol)])
mock_get_info.assert_has_calls([
mock.call(source_vol, fake.EXTRA_SPECS)])
mock_set_policy.assert_has_calls([
mock.call(snap_vol, fake.QOS_POLICY_GROUP_INFO, False)])
def test_create_snapshot_for_flexgroup_error(self):
self.mock_object(nfs.NfsDriver, 'create_snapshot',
side_effect=exception.NotFound)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._create_snapshot_for_flexgroup,
fake.SNAPSHOT)
def test_set_qos_policy_group_on_volume(self):
self.assertRaises(NotImplementedError,
self.driver._set_qos_policy_group_on_volume,
fake.NFS_VOLUME,
fake.QOS_POLICY_GROUP_INFO,
False)
@ddt.data(True, False)
def test_delete_snapshot(self, is_flexgroup):
updates = {
'name': fake.SNAPSHOT_NAME,
'volume_size': fake.SIZE,
@ -316,11 +448,20 @@ class NetAppNfsDriverTestCase(test.TestCase):
}
snapshot = fake_snapshot.fake_snapshot_obj(self.ctxt, **updates)
self.mock_object(self.driver, '_delete_file')
self.mock_object(self.driver, '_is_flexgroup',
return_value=is_flexgroup)
mock_super_delete = self.mock_object(nfs.NfsDriver,
'delete_snapshot')
self.driver.delete_snapshot(snapshot)
self.driver._delete_file.assert_called_once_with(snapshot.volume_id,
snapshot.name)
if is_flexgroup:
mock_super_delete.assert_called_once_with(snapshot)
self.driver._delete_file.assert_not_called()
else:
mock_super_delete.assert_not_called()
self.driver._delete_file.assert_called_once_with(
snapshot.volume_id, snapshot.name)
@ddt.data(fake.NFS_SHARE, fake.NFS_SHARE_IPV6)
def test__get_volume_location(self, provider):
@ -339,6 +480,10 @@ class NetAppNfsDriverTestCase(test.TestCase):
fake.VOLUME_NAME, fake.CLONE_SOURCE_NAME,
fake.VOLUME_ID, share=None)
def test__is_flexgroup(self):
self.assertRaises(NotImplementedError,
self.driver._is_flexgroup)
def test__get_provider_location(self):
updates = {'provider_location': fake.PROVIDER_LOCATION}
volume = fake_volume.fake_volume_obj(self.ctxt, **updates)
@ -348,6 +493,15 @@ class NetAppNfsDriverTestCase(test.TestCase):
self.assertEqual(fake.PROVIDER_LOCATION, retval)
def test__get_volume_host(self):
updates = {'host': fake.HOST_NAME}
volume = fake_volume.fake_volume_obj(self.ctxt, **updates)
self.mock_object(self.driver.db, 'volume_get', return_value=volume)
retval = self.driver._get_volume_host(fake.VOLUME_ID)
self.assertEqual(fake.HOST_NAME, retval)
@ddt.data(None, processutils.ProcessExecutionError)
def test__volume_not_present(self, side_effect):
self.mock_object(self.driver, '_get_volume_path')
@ -382,6 +536,7 @@ class NetAppNfsDriverTestCase(test.TestCase):
def test_copy_image_to_volume_base_exception(self):
mock_info_log = self.mock_object(nfs_base.LOG, 'info')
self.mock_object(self.driver, '_ensure_flexgroup_not_in_cg')
self.mock_object(remotefs.RemoteFSDriver, 'copy_image_to_volume',
side_effect=exception.NfsException)
@ -393,6 +548,9 @@ class NetAppNfsDriverTestCase(test.TestCase):
def test_copy_image_to_volume(self):
mock_log = self.mock_object(nfs_base, 'LOG')
self.mock_object(self.driver, '_is_flexgroup',
return_value=False)
self.mock_object(self.driver, '_ensure_flexgroup_not_in_cg')
mock_copy_image = self.mock_object(
remotefs.RemoteFSDriver, 'copy_image_to_volume')
mock_register_image = self.mock_object(
@ -955,3 +1113,37 @@ class NetAppNfsDriverTestCase(test.TestCase):
self.assertFalse(cloned)
mock_call__is_share_clone_compatible.assert_not_called()
mock_call__do_clone_rel_img_cache.assert_not_called()
def test__find_share(self):
mock_extract = self.mock_object(volume_utils, 'extract_host',
return_value=fake.POOL_NAME)
pool_name = self.driver._find_share(fake.VOLUME)
self.assertEqual(pool_name, fake.POOL_NAME)
mock_extract.assert_called_once_with(fake.VOLUME['host'],
level='pool')
def test__find_share_error(self):
mock_extract = self.mock_object(volume_utils, 'extract_host',
return_value=None)
self.assertRaises(exception.InvalidHost,
self.driver._find_share,
fake.VOLUME)
mock_extract.assert_called_once_with(fake.VOLUME['host'],
level='pool')
def test__ensure_flexgroup_not_in_cg_raises(self):
self.mock_object(self.driver, '_is_flexgroup',
return_value=True)
self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
fake_v1 = {
'group': 'fake_group',
'host': 'fake_host',
'id': 'fake_id'
}
self.assertRaises(na_utils.NetAppDriverException,
self.driver._ensure_flexgroup_not_in_cg,
fake_v1)

View File

@ -155,11 +155,16 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.assertEqual(1, mock_debug_log.call_count)
self.assertEqual(expected_stats, self.driver._stats)
@ddt.data({'replication_backends': [], 'cluster_credentials': False},
@ddt.data({'replication_backends': [],
'cluster_credentials': False, 'is_fg': False},
{'replication_backends': ['target_1', 'target_2'],
'cluster_credentials': True})
'cluster_credentials': True, 'is_fg': False},
{'replication_backends': ['target_1', 'target_2'],
'cluster_credentials': True, 'is_fg': True}
)
@ddt.unpack
def test_get_pool_stats(self, replication_backends, cluster_credentials):
def test_get_pool_stats(self, replication_backends, cluster_credentials,
is_fg):
self.driver.using_cluster_credentials = cluster_credentials
self.driver.zapi_client = mock.Mock()
ssc = {
@ -171,10 +176,11 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
'netapp_compression': 'false',
'netapp_mirrored': 'false',
'netapp_dedup': 'true',
'netapp_aggregate': 'aggr1',
'netapp_raid_type': 'raid_dp',
'netapp_disk_type': 'SSD',
'netapp_aggregate': ['aggr1'] if is_fg else 'aggr1',
'netapp_raid_type': ['raid_dp'] if is_fg else 'raid_dp',
'netapp_disk_type': ['SSD'] if is_fg else 'SSD',
'consistent_group_snapshot_enabled': True,
'netapp_is_flexgroup': 'true' if is_fg else 'false',
},
}
mock_get_ssc = self.mock_object(self.driver.ssc_library,
@ -246,13 +252,14 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
'consistent_group_snapshot_enabled': True,
'replication_enabled': False,
'online_extend_support': False,
'netapp_is_flexgroup': 'false',
}]
expected[0].update({'QoS_support': cluster_credentials})
if not cluster_credentials:
expected[0].update({
'netapp_aggregate_used_percent': 0,
'netapp_dedupe_used_percent': 0
'netapp_dedupe_used_percent': 0,
})
if replication_backends:
@ -263,6 +270,17 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
'replication_type': 'async',
})
if is_fg:
expected[0].update({
'netapp_is_flexgroup': 'true',
'netapp_disk_type': ['SSD'],
'netapp_raid_type': ['raid_dp'],
'netapp_aggregate': ['aggr1'],
'netapp_dedupe_used_percent': 0,
'consistencygroup_support': False,
'consistent_group_snapshot_enabled': False,
})
self.assertEqual(expected, result)
mock_get_ssc.assert_called_once_with()
if cluster_credentials:
@ -538,15 +556,20 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mock_filer_delete = self.mock_object(self.driver, '_delete_file')
mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver,
'delete_volume')
mock_flexgroup = self.mock_object(self.driver, '_is_flexgroup',
return_value=False)
self.driver._delete_backing_file_for_volume(fake.NFS_VOLUME)
mock_flexgroup.assert_called_once_with(host=fake.NFS_VOLUME['host'])
mock_filer_delete.assert_called_once_with(
fake.NFS_VOLUME['id'], fake.NFS_VOLUME['name'])
self.assertEqual(0, mock_super_delete.call_count)
@ddt.data(True, False)
def test_delete_backing_file_for_volume_exception_path(self, super_exc):
mock_flexgroup = self.mock_object(self.driver, '_is_flexgroup',
return_value=False)
mock_exception_log = self.mock_object(nfs_cmode.LOG, 'exception')
exception_call_count = 2 if super_exc else 1
mock_filer_delete = self.mock_object(self.driver, '_delete_file')
@ -558,21 +581,28 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.driver._delete_backing_file_for_volume(fake.NFS_VOLUME)
mock_flexgroup.assert_called_once_with(host=fake.NFS_VOLUME['host'])
mock_filer_delete.assert_called_once_with(
fake.NFS_VOLUME['id'], fake.NFS_VOLUME['name'])
mock_super_delete.assert_called_once_with(fake.NFS_VOLUME)
self.assertEqual(exception_call_count, mock_exception_log.call_count)
def test_delete_snapshot(self):
mock_get_location = self.mock_object(self.driver,
'_get_provider_location')
mock_get_location.return_value = fake.PROVIDER_LOCATION
@ddt.data(True, False)
def test_delete_snapshot(self, is_flexgroup):
mock_delete_backing = self.mock_object(
self.driver, '_delete_backing_file_for_snapshot')
self.mock_object(self.driver, '_is_flexgroup',
return_value=is_flexgroup)
mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver,
'delete_snapshot')
self.driver.delete_snapshot(fake.test_snapshot)
mock_delete_backing.assert_called_once_with(fake.test_snapshot)
if is_flexgroup:
mock_super_delete.assert_called_once_with(fake.test_snapshot)
mock_delete_backing.assert_not_called()
else:
mock_super_delete.assert_not_called()
mock_delete_backing.assert_called_once_with(fake.test_snapshot)
def test_delete_backing_file_for_snapshot(self):
mock_filer_delete = self.mock_object(self.driver, '_delete_file')
@ -1199,6 +1229,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
nfs_base.NetAppNfsDriver._direct_nfs_clone = mock.Mock(
return_value=False)
drv._copy_from_cache = mock.Mock(return_value=True)
drv._is_flexgroup = mock.Mock(return_value=False)
drv.clone_image(context, volume, image_location, image_meta,
image_service)
@ -1206,6 +1237,20 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
drv._copy_from_cache.assert_called_once_with(
volume, image_id, [('share', 'img')])
drv.clone_image(context, volume, image_location, image_meta,
image_service)
def test_clone_image_flexgroup(self):
    """clone_image must be a no-op when the pool is a FlexGroup."""
    self.driver._is_flexgroup = mock.Mock(return_value=True)
    fake_volume = {'host': 'openstack@nfscmode#192.128.1.1:/mnt_point'}
    ctx = object()

    model_update, was_cloned = self.driver.clone_image(
        ctx, fake_volume, 'fake_loc', 'fake_img', 'fake_img_service')

    self.assertIsNone(model_update)
    self.assertFalse(was_cloned)
    self.driver._is_flexgroup.assert_called_once_with(
        host=fake_volume['host'])
def test_clone_image_copyoffload_from_img_service(self):
drv = self.driver
context = object()
@ -1225,6 +1270,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
nfs_base.NetAppNfsDriver._post_clone_image = mock.Mock(
return_value=True)
drv._copy_from_img_service = mock.Mock(return_value=True)
drv._is_flexgroup = mock.Mock(return_value=False)
retval = drv.clone_image(
context, volume, image_location, image_meta, image_service)
@ -1239,7 +1285,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mock_log = self.mock_object(nfs_cmode, 'LOG')
drv = self.driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
volume = {'id': 'vol_id', 'name': 'name', 'host': 'host'}
image_service = object()
image_id = 'image_id'
image_meta = {'id': image_id}
@ -1251,6 +1297,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
nfs_base.NetAppNfsDriver._direct_nfs_clone = mock.Mock(
return_value=False)
drv._copy_from_img_service = mock.Mock(side_effect=Exception())
drv._is_flexgroup = mock.Mock(return_value=False)
retval = drv.clone_image(
context, volume, image_location, image_meta, image_service)
@ -1477,19 +1524,60 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mock_ssc_library.assert_called_once_with()
def test_create_group(self):
    """A generic (non-CG) group on a regular pool becomes available."""
    mock_is_flexgroup = self.mock_object(self.driver, '_is_flexgroup',
                                         return_value=False)
    self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type',
                     return_value=False)

    model_update = self.driver.create_group(
        fake.VG_CONTEXT, fake.VOLUME_GROUP)

    self.assertEqual('available', model_update['status'])
    mock_is_flexgroup.assert_called_once_with(
        host=fake.VOLUME_GROUP['host'])
def test_create_group_raises(self):
    """Creating a CG-type group on a FlexGroup pool must fail."""
    mock_is_flexgroup = self.mock_object(self.driver, '_is_flexgroup',
                                         return_value=True)
    mock_is_cg = self.mock_object(volume_utils,
                                  'is_group_a_cg_snapshot_type',
                                  return_value=True)

    self.assertRaises(na_utils.NetAppDriverException,
                      self.driver.create_group,
                      fake.VG_CONTEXT, fake.VOLUME_GROUP)

    mock_is_flexgroup.assert_called_once_with(
        host=fake.VOLUME_GROUP['host'])
    mock_is_cg.assert_called_once_with(fake.VOLUME_GROUP)
def test_update_group(self):
    """update_group on a non-CG group returns empty volume updates."""
    mock_is_cg = self.mock_object(volume_utils,
                                  'is_group_a_cg_snapshot_type',
                                  return_value=False)

    model_update, add_update, remove_update = self.driver.update_group(
        fake.VG_CONTEXT, "foo")

    self.assertIsNone(add_update)
    self.assertIsNone(remove_update)
    mock_is_cg.assert_called_once_with("foo")
def test_update_group_raises(self):
    """Adding a FlexGroup volume to a CG-type group must fail."""
    mock_is_cg = self.mock_object(volume_utils,
                                  'is_group_a_cg_snapshot_type',
                                  return_value=True)
    mock_is_flexgroup = self.mock_object(self.driver, '_is_flexgroup',
                                         return_value=True)

    self.assertRaises(na_utils.NetAppDriverException,
                      self.driver.update_group,
                      fake.VG_CONTEXT, "foo",
                      add_volumes=[fake.VOLUME])

    mock_is_cg.assert_called_once_with("foo")
    mock_is_flexgroup.assert_called_once_with(host=fake.VOLUME['host'])
@ddt.data(None,
{'replication_status': fields.ReplicationStatus.ENABLED})
@ -1528,6 +1616,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.driver.zapi_client, 'wait_for_busy_snapshot')
self.mock_object(self.driver, '_get_volume_model_update',
return_value=volume_model_update)
mock_is_flexgroup = self.mock_object(self.driver, '_is_flexgroup',
return_value=False)
model_update, volumes_model_update = (
self.driver.create_group_from_src(
@ -1541,6 +1631,9 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
}]
if volume_model_update:
expected_volumes_model_updates[0].update(volume_model_update)
mock_is_flexgroup.assert_called_once_with(
host=fake.SOURCE_VG_VOLUME['host'])
mock_get_snapshot_flexvols.assert_called_once_with(
[fake.SOURCE_VG_VOLUME['host']])
self.driver.zapi_client.create_cg_snapshot.assert_called_once_with(
@ -1555,6 +1648,25 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.assertIsNone(model_update)
self.assertEqual(expected_volumes_model_updates, volumes_model_update)
@ddt.data(
    {'error': na_utils.NetAppDriverException, 'is_cg': True},
    {'error': NotImplementedError, 'is_cg': False})
@ddt.unpack
def test_create_group_from_src_raises(self, error, is_cg):
    """create_group_from_src fails on FlexGroup, for CG and non-CG."""
    self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type',
                     return_value=is_cg)
    mock_is_flexgroup = self.mock_object(self.driver, '_is_flexgroup',
                                         return_value=True)

    self.assertRaises(error,
                      self.driver.create_group_from_src,
                      fake.VG_CONTEXT, fake.VOLUME_GROUP, [fake.VG_VOLUME],
                      source_group=fake.VOLUME_GROUP,
                      sorted_source_vols=[fake.SOURCE_VG_VOLUME])

    mock_is_flexgroup.assert_called_once_with(
        host=fake.SOURCE_VG_VOLUME['host'])
def test_create_group_from_src_invalid_parms(self):
model_update, volumes_model_update = (
self.driver.create_group_from_src(
@ -1567,10 +1679,10 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
volume_utils, 'is_group_a_cg_snapshot_type', return_value=True)
mock__get_flexvol_names = self.mock_object(
self.driver, '_get_flexvol_names_from_hosts')
self.mock_object(self.driver, '_is_flexgroup', return_value=False)
self.mock_object(self.driver.zapi_client, 'create_cg_snapshot',
side_effect=netapp_api.NaApiError)
self.assertRaises(na_utils.NetAppDriverException,
self.driver.create_group_snapshot,
fake.VG_CONTEXT,
@ -1584,8 +1696,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
def test_create_group_snapshot(self):
mock_is_cg_snapshot = self.mock_object(
volume_utils, 'is_group_a_cg_snapshot_type', return_value=False)
mock__clone_backing_file_for_volume = self.mock_object(
self.driver, '_clone_backing_file_for_volume')
mock_create_snapshot = self.mock_object(
self.driver, 'create_snapshot')
model_update, snapshots_model_update = (
self.driver.create_group_snapshot(fake.VG_CONTEXT,
@ -1595,9 +1707,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.assertIsNone(model_update)
self.assertIsNone(snapshots_model_update)
mock_is_cg_snapshot.assert_called_once_with(fake.VOLUME_GROUP)
mock__clone_backing_file_for_volume.assert_called_once_with(
fake.SNAPSHOT['volume_name'], fake.SNAPSHOT['name'],
fake.SNAPSHOT['volume_id'], is_snapshot=True)
mock_create_snapshot.assert_called_once_with(fake.SNAPSHOT)
def test_create_consistent_group_snapshot(self):
mock_is_cg_snapshot = self.mock_object(
@ -1611,6 +1721,9 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.driver, '_clone_backing_file_for_volume')
mock_busy = self.mock_object(
self.driver.zapi_client, 'wait_for_busy_snapshot')
mock_is_flexgroup = self.mock_object(
self.driver, '_is_flexgroup')
mock_is_flexgroup.return_value = False
model_update, snapshots_model_update = (
self.driver.create_group_snapshot(fake.VG_CONTEXT,
@ -1619,6 +1732,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.assertIsNone(model_update)
self.assertIsNone(snapshots_model_update)
mock_is_flexgroup.assert_called_once_with(
host=fake.VG_SNAPSHOT['volume']['host'])
mock_is_cg_snapshot.assert_called_once_with(fake.VOLUME_GROUP)
mock_get_snapshot_flexvols.assert_called_once_with(
[fake.VG_SNAPSHOT['volume']['host']])
@ -1633,9 +1748,29 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.driver.zapi_client.delete_snapshot.assert_called_once_with(
fake.VG_POOL_NAME, fake.VOLUME_GROUP_ID)
def test_create_consistent_group_snapshot_flexgroup(self):
    """A CG snapshot on a FlexGroup pool must be rejected."""
    mock_is_cg_snapshot = self.mock_object(
        volume_utils, 'is_group_a_cg_snapshot_type', return_value=True)
    mock_is_flexgroup = self.mock_object(self.driver, '_is_flexgroup',
                                         return_value=True)

    self.assertRaises(na_utils.NetAppDriverException,
                      self.driver.create_group_snapshot,
                      fake.VG_CONTEXT, fake.VOLUME_GROUP,
                      [fake.VG_SNAPSHOT])

    mock_is_cg_snapshot.assert_called_once_with(fake.VOLUME_GROUP)
    mock_is_flexgroup.assert_called_once_with(
        host=fake.VG_SNAPSHOT['volume']['host'])
def test_create_group_snapshot_busy_snapshot(self):
self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
mock_is_flexgroup = self.mock_object(
self.driver, '_is_flexgroup')
mock_is_flexgroup.return_value = False
self.driver.zapi_client = mock.Mock()
snapshot = fake.VG_SNAPSHOT
snapshot['volume'] = fake.VG_VOLUME
@ -1655,6 +1790,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mock_get_snapshot_flexvols.assert_called_once_with(
[snapshot['volume']['host']])
mock_is_flexgroup.assert_called_once_with(
host=snapshot['volume']['host'])
self.driver.zapi_client.create_cg_snapshot.assert_called_once_with(
set([fake.VG_POOL_NAME]), fake.VG_SNAPSHOT_ID)
mock_clone_backing_file.assert_called_once_with(
@ -1667,7 +1804,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
fake.VG_POOL_NAME, fake.VG_SNAPSHOT_ID)
def test_delete_group_volume_delete_failure(self):
self.mock_object(self.driver, '_delete_file', side_effect=Exception)
self.mock_object(self.driver, 'delete_volume', side_effect=Exception)
model_update, volumes = self.driver.delete_group(
fake.VG_CONTEXT, fake.VOLUME_GROUP, [fake.VG_VOLUME])
@ -1677,12 +1814,11 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
def test_delete_group(self):
mock_delete_file = self.mock_object(
self.driver, '_delete_file')
self.driver, 'delete_volume')
model_update, volumes = self.driver.delete_group(
fake.VG_CONTEXT, fake.VOLUME_GROUP, [fake.VG_VOLUME])
self.assertEqual('deleted', model_update['status'])
self.assertEqual('deleted', volumes[0]['status'])
mock_delete_file.assert_called_once_with(
fake.VG_VOLUME_ID, fake.VG_VOLUME_NAME)
mock_delete_file.assert_called_once_with(fake.VG_VOLUME)

View File

@ -44,6 +44,7 @@ SSC = {
'netapp_flexvol_encryption': 'true',
'netapp_qos_min_support': 'true',
'pool_name': 'volume1',
'netapp_is_flexgroup': 'false',
},
'volume2': {
'thick_provisioning_support': False,
@ -59,6 +60,7 @@ SSC = {
'netapp_flexvol_encryption': 'false',
'netapp_qos_min_support': 'false',
'pool_name': 'volume2',
'netapp_is_flexgroup': 'false',
},
}
@ -68,12 +70,14 @@ SSC_FLEXVOL_INFO = {
'thin_provisioning_support': False,
'netapp_thin_provisioned': 'false',
'netapp_aggregate': 'aggr1',
'netapp_is_flexgroup': 'false',
},
'volume2': {
'thick_provisioning_support': False,
'thin_provisioning_support': True,
'netapp_thin_provisioned': 'true',
'netapp_aggregate': 'aggr2',
'netapp_is_flexgroup': 'false',
},
}
@ -140,6 +144,7 @@ PROVISIONING_OPTS = {
'snapshot_reserve': '12',
'volume_type': 'rw',
'size': 20,
'is_flexgroup': False,
}
ENCRYPTED_PROVISIONING_OPTS = {
@ -153,6 +158,7 @@ ENCRYPTED_PROVISIONING_OPTS = {
'volume_type': 'rw',
'size': 20,
'encrypt': 'true',
'is_flexgroup': False,
}

View File

@ -171,7 +171,8 @@ class CapabilitiesLibraryTestCase(test.TestCase):
mock_get_ssc_mirror_info.assert_has_calls([
mock.call('volume1'), mock.call('volume2')])
mock_get_ssc_aggregate_info.assert_has_calls([
mock.call('aggr1'), mock.call('aggr2')])
mock.call('aggr1', is_flexgroup=False),
mock.call('aggr2', is_flexgroup=False)])
mock_get_ssc_encryption_info.assert_has_calls([
mock.call('volume1'), mock.call('volume2')])
mock_get_ssc_qos_min_info.assert_has_calls([
@ -206,6 +207,7 @@ class CapabilitiesLibraryTestCase(test.TestCase):
'thick_provisioning_support': False,
'thin_provisioning_support': True,
'netapp_aggregate': 'fake_aggr1',
'netapp_is_flexgroup': 'false',
}
self.assertEqual(expected, result)
self.zapi_client.get_flexvol.assert_called_once_with(
@ -233,6 +235,7 @@ class CapabilitiesLibraryTestCase(test.TestCase):
'thick_provisioning_support': lun_space_guarantee,
'thin_provisioning_support': not lun_space_guarantee,
'netapp_aggregate': 'fake_aggr1',
'netapp_is_flexgroup': 'false',
}
self.assertEqual(expected, result)
self.zapi_client.get_flexvol.assert_called_once_with(
@ -258,6 +261,7 @@ class CapabilitiesLibraryTestCase(test.TestCase):
'thick_provisioning_support': False,
'thin_provisioning_support': True,
'netapp_aggregate': 'fake_aggr1',
'netapp_is_flexgroup': 'false',
}
self.assertEqual(expected, result)
self.zapi_client.get_flexvol.assert_called_once_with(
@ -286,6 +290,7 @@ class CapabilitiesLibraryTestCase(test.TestCase):
'thick_provisioning_support': not nfs_sparsed_volumes,
'thin_provisioning_support': nfs_sparsed_volumes,
'netapp_aggregate': 'fake_aggr1',
'netapp_is_flexgroup': 'false',
}
self.assertEqual(expected, result)
self.zapi_client.get_flexvol.assert_called_once_with(
@ -347,8 +352,12 @@ class CapabilitiesLibraryTestCase(test.TestCase):
self.zapi_client.is_flexvol_mirrored.assert_called_once_with(
fake_client.VOLUME_NAMES[0], fake.SSC_VSERVER)
@ddt.data([], ['netapp_raid_type'])
def test_get_ssc_aggregate_info(self, invalid_extra_specs):
@ddt.data({'invalid_extra_specs': [], 'is_fg': False},
{'invalid_extra_specs': ['netapp_raid_type'],
'is_fg': False},
{'invalid_extra_specs': [], 'is_fg': True})
@ddt.unpack
def test_get_ssc_aggregate_info(self, invalid_extra_specs, is_fg):
self.ssc_library.invalid_extra_specs = invalid_extra_specs
self.mock_object(
self.ssc_library.zapi_client, 'get_aggregate',
@ -357,8 +366,12 @@ class CapabilitiesLibraryTestCase(test.TestCase):
self.ssc_library.zapi_client, 'get_aggregate_disk_types',
return_value=fake_client.AGGREGATE_DISK_TYPES)
result = self.ssc_library._get_ssc_aggregate_info(
fake_client.VOLUME_AGGREGATE_NAME)
aggr_name = fake_client.VOLUME_AGGREGATE_NAME
if is_fg:
aggr_name = [fake_client.VOLUME_AGGREGATE_NAME]
result = self.ssc_library._get_ssc_aggregate_info(aggr_name,
is_flexgroup=is_fg)
if invalid_extra_specs:
expected = {
@ -376,6 +389,17 @@ class CapabilitiesLibraryTestCase(test.TestCase):
'netapp_hybrid_aggregate': 'true',
'netapp_node_name': fake_client.NODE_NAME,
}
if is_fg:
result['netapp_disk_type'] = sorted(
result['netapp_disk_type'])
expected['netapp_disk_type'] = sorted(
expected['netapp_disk_type'])
expected['netapp_raid_type'] = [
fake_client.AGGREGATE_RAID_TYPE]
expected['netapp_node_name'] = [
fake_client.NODE_NAME]
expected['netapp_hybrid_aggregate'] = ['true']
self.zapi_client.get_aggregate.assert_called_once_with(
fake_client.VOLUME_AGGREGATE_NAME)
self.zapi_client.get_aggregate_disk_types.assert_called_once_with(
@ -543,3 +567,19 @@ class CapabilitiesLibraryTestCase(test.TestCase):
self.assertEqual(expected, result)
self.zapi_client.is_qos_min_supported.assert_called_once_with(False,
'node')
@ddt.data(False, True)
def test_get_ssc_qos_min_info_flexgroup(self, qos_min_support):
self.mock_object(
self.ssc_library.zapi_client, 'is_qos_min_supported',
return_value=qos_min_support)
result = self.ssc_library._get_ssc_qos_min_info(['node'])
expected = {
'netapp_qos_min_support': 'true' if qos_min_support else 'false',
}
self.assertEqual(expected, result)
self.zapi_client.is_qos_min_supported.assert_called_once_with(False,
'node')

View File

@ -504,14 +504,21 @@ class NetAppCDOTDataMotionMixinTestCase(test.TestCase):
self.src_vserver, self.src_flexvol_name, self.dest_vserver,
self.dest_flexvol_name)
@ddt.data({'size': 1, 'aggr_map': {}},
{'size': 1, 'aggr_map': {'aggr02': 'aggr20'}},
{'size': None, 'aggr_map': {'aggr01': 'aggr10'}})
@ddt.data({'size': 1, 'aggr_map': {},
'is_flexgroup': False},
{'size': 1, 'aggr_map': {'aggr02': 'aggr20'},
'is_flexgroup': False},
{'size': None, 'aggr_map': {'aggr01': 'aggr10'},
'is_flexgroup': False},
{'size': 1, 'aggr_map': {'aggr01': 'aggr10'},
'is_flexgroup': True})
@ddt.unpack
def test_create_destination_flexvol_exception(self, size, aggr_map):
def test_create_destination_flexvol_exception(self, size, aggr_map,
is_flexgroup):
self.mock_object(
self.mock_src_client, 'get_provisioning_options_from_flexvol',
return_value={'size': size, 'aggregate': 'aggr01'})
return_value={'size': size, 'aggregate': 'aggr01',
'is_flexgroup': is_flexgroup})
self.mock_object(self.dm_mixin, '_get_replication_aggregate_map',
return_value=aggr_map)
mock_client_call = self.mock_object(
@ -521,10 +528,10 @@ class NetAppCDOTDataMotionMixinTestCase(test.TestCase):
self.dm_mixin.create_destination_flexvol,
self.src_backend, self.dest_backend,
self.src_flexvol_name, self.dest_flexvol_name)
if size:
if size and is_flexgroup is False:
self.dm_mixin._get_replication_aggregate_map.\
assert_called_once_with(self.src_backend, self.dest_backend)
else:
elif is_flexgroup is False:
self.assertFalse(
self.dm_mixin._get_replication_aggregate_map.called)
self.assertFalse(mock_client_call.called)
@ -539,6 +546,7 @@ class NetAppCDOTDataMotionMixinTestCase(test.TestCase):
expected_prov_opts.pop('volume_type', None)
expected_prov_opts.pop('size', None)
expected_prov_opts.pop('aggregate', None)
expected_prov_opts.pop('is_flexgroup', None)
mock_get_provisioning_opts_call = self.mock_object(
self.mock_src_client, 'get_provisioning_options_from_flexvol',
return_value=provisioning_opts)
@ -575,6 +583,7 @@ class NetAppCDOTDataMotionMixinTestCase(test.TestCase):
expected_prov_opts.pop('volume_type', None)
expected_prov_opts.pop('size', None)
expected_prov_opts.pop('aggregate', None)
expected_prov_opts.pop('is_flexgroup', None)
mock_get_provisioning_opts_call = self.mock_object(
self.mock_src_client, 'get_provisioning_options_from_flexvol',
return_value=provisioning_opts)

View File

@ -848,9 +848,8 @@ class Client(client_base.Client):
result = self._invoke_vserver_api(vol_iter, vserver)
num_records = result.get_child_content('num-records')
if num_records and int(num_records) >= 1:
attr_list = result.get_child_by_name('attributes-list')
vols = attr_list.get_children()
vol_id = vols[0].get_child_by_name('volume-id-attributes')
volume_attr = self.get_unique_volume(result)
vol_id = volume_attr.get_child_by_name('volume-id-attributes')
return vol_id.get_child_content('name')
msg_fmt = {'vserver': vserver, 'junction': junction}
raise exception.NotFound(_("No volume on cluster with vserver "
@ -1019,6 +1018,9 @@ class Client(client_base.Client):
},
'desired-attributes': {
'volume-attributes': {
'volume-id-attributes': {
'style-extended': None,
},
'volume-space-attributes': {
'size-available': None,
'size-total': None,
@ -1028,14 +1030,12 @@ class Client(client_base.Client):
}
result = self.send_iter_request('volume-get-iter', api_args)
if self._get_record_count(result) != 1:
if self._get_record_count(result) < 1:
msg = _('Volume %s not found.')
msg_args = flexvol_path or flexvol_name
raise na_utils.NetAppDriverException(msg % msg_args)
attributes_list = result.get_child_by_name('attributes-list')
volume_attributes = attributes_list.get_child_by_name(
'volume-attributes')
volume_attributes = self.get_unique_volume(result)
volume_space_attributes = volume_attributes.get_child_by_name(
'volume-space-attributes')
@ -1049,6 +1049,26 @@ class Client(client_base.Client):
'size-available': size_available,
}
def get_unique_volume(self, get_volume_result):
    """Get the unique FlexVol or FlexGroup volume from a get-volume reply.

    A volume-get-iter reply for a FlexGroup also lists its constituent
    members (style-extended 'flexgroup-constituent'), so only entries
    whose style is 'flexvol' or 'flexgroup' are considered.

    :param get_volume_result: result element of a volume-get-iter call.
    :returns: the single matching volume-attributes element.
    :raises VolumeBackendAPIException: if zero or more than one volume
        matches.
    """
    volume_list = []
    attributes_list = get_volume_result.get_child_by_name(
        'attributes-list') or netapp_api.NaElement('none')
    for volume_attributes in attributes_list.get_children():
        volume_id_attributes = volume_attributes.get_child_by_name(
            'volume-id-attributes') or netapp_api.NaElement('none')
        style = volume_id_attributes.get_child_content('style-extended')
        # Skip FlexGroup constituents; keep only the top-level volume.
        if style in ('flexvol', 'flexgroup'):
            volume_list.append(volume_attributes)
    if len(volume_list) != 1:
        # Message typo fixed: 'foud' -> 'found'.
        msg = _('Could not find unique volume. Volumes found: %(vol)s.')
        msg_args = {'vol': volume_list}
        raise exception.VolumeBackendAPIException(data=msg % msg_args)
    return volume_list[0]
def list_flexvols(self):
"""Returns the names of the flexvols on the controller."""
@ -1120,8 +1140,12 @@ class Client(client_base.Client):
'name': None,
'owning-vserver-name': None,
'junction-path': None,
'aggr-list': {
'aggr-name': None,
},
'containing-aggregate-name': None,
'type': None,
'style-extended': None,
},
'volume-mirror-attributes': {
'is-data-protection-mirror': None,
@ -1147,19 +1171,19 @@ class Client(client_base.Client):
}
result = self.send_iter_request('volume-get-iter', api_args)
if self._get_record_count(result) != 1:
msg = _('Could not find unique volume %(vol)s.')
msg_args = {'vol': flexvol_name}
raise exception.VolumeBackendAPIException(data=msg % msg_args)
attributes_list = result.get_child_by_name(
'attributes-list') or netapp_api.NaElement('none')
volume_attributes = attributes_list.get_child_by_name(
'volume-attributes') or netapp_api.NaElement('none')
volume_attributes = self.get_unique_volume(result)
volume_id_attributes = volume_attributes.get_child_by_name(
'volume-id-attributes') or netapp_api.NaElement('none')
aggr = volume_id_attributes.get_child_content(
'containing-aggregate-name')
if not aggr:
aggr_list_attr = volume_id_attributes.get_child_by_name(
'aggr-list') or netapp_api.NaElement('none')
aggr = [aggr_elem.get_content()
for aggr_elem in
aggr_list_attr.get_children()]
volume_space_attributes = volume_attributes.get_child_by_name(
'volume-space-attributes') or netapp_api.NaElement('none')
volume_qos_attributes = volume_attributes.get_child_by_name(
@ -1175,8 +1199,7 @@ class Client(client_base.Client):
'owning-vserver-name'),
'junction-path': volume_id_attributes.get_child_content(
'junction-path'),
'aggregate': volume_id_attributes.get_child_content(
'containing-aggregate-name'),
'aggregate': aggr,
'type': volume_id_attributes.get_child_content('type'),
'space-guarantee-enabled': strutils.bool_from_string(
volume_space_attributes.get_child_content(
@ -1193,6 +1216,9 @@ class Client(client_base.Client):
'snapshot-policy'),
'language': volume_language_attributes.get_child_content(
'language-code'),
'style-extended': volume_id_attributes.get_child_content(
'style-extended'),
}
return volume
@ -2387,6 +2413,7 @@ class Client(client_base.Client):
'snapshot_reserve': flexvol_info['percentage-snapshot-reserve'],
'volume_type': flexvol_info['type'],
'size': int(math.ceil(float(flexvol_info['size']) / units.Gi)),
'is_flexgroup': flexvol_info['style-extended'] == 'flexgroup',
}
return provisioning_opts

View File

@ -139,6 +139,7 @@ class NetAppNfsDriver(driver.ManageableVD,
:param volume: volume reference
"""
LOG.debug('create_volume on %s', volume['host'])
self._ensure_flexgroup_not_in_cg(volume)
self._ensure_shares_mounted()
# get share as pool name
@ -172,21 +173,62 @@ class NetAppNfsDriver(driver.ManageableVD,
'vol': volume['name'], 'pool': pool_name})
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
source = {
'name': snapshot['name'],
'size': snapshot['volume_size'],
'id': snapshot['volume_id'],
}
return self._clone_source_to_destination_volume(source, volume)
"""Creates a volume from a snapshot.
For a FlexGroup pool, the operation relies on the NFS generic driver
because the ONTAP clone file is not supported by FlexGroup yet.
"""
self._ensure_flexgroup_not_in_cg(volume)
if self._is_flexgroup(vol_id=snapshot['volume_id']):
model = super(NetAppNfsDriver, self).create_volume_from_snapshot(
volume, snapshot)
return self._do_qos_for_file_flexgroup(volume, model)
else:
source = {
'name': snapshot['name'],
'size': snapshot['volume_size'],
'id': snapshot['volume_id'],
}
return self._clone_source_to_destination_volume(source, volume)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
source = {'name': src_vref['name'],
'size': src_vref['size'],
'id': src_vref['id']}
"""Creates a clone of the specified volume.
return self._clone_source_to_destination_volume(source, volume)
For a FlexGroup pool, the operation relies on the NFS generic driver
because the ONTAP clone file is not supported by FlexGroup yet.
"""
self._ensure_flexgroup_not_in_cg(volume)
if self._is_flexgroup(vol_id=src_vref['id']):
model = super(NetAppNfsDriver, self).create_cloned_volume(
volume, src_vref)
return self._do_qos_for_file_flexgroup(volume, model)
else:
source = {'name': src_vref['name'],
'size': src_vref['size'],
'id': src_vref['id']}
return self._clone_source_to_destination_volume(source, volume)
def _do_qos_for_file_flexgroup(self, volume, model):
    """Creates the QoS for a file inside the FlexGroup."""
    try:
        extra_specs = na_utils.get_volume_extra_specs(volume)
        # The QoS helpers read the provider_location reported by the
        # generic driver, so propagate it before applying the policy.
        volume['provider_location'] = model['provider_location']
        self._do_qos_for_volume(volume, extra_specs)

        model_update = self._get_volume_model_update(volume) or {}
        model_update['provider_location'] = model['provider_location']
        return model_update
    except Exception as e:
        LOG.exception('Exception while setting the QoS for the %(vol_id)s'
                      ' volume inside a FlexGroup pool. Exception: '
                      ' %(exc)s',
                      {'vol_id': volume['id'], 'exc': e})
        msg = _("Volume %s could not set QoS.")
        raise exception.VolumeBackendAPIException(data=msg % volume['id'])
def _clone_source_to_destination_volume(self, source, destination_volume):
share = self._get_volume_location(source['id'])
@ -259,15 +301,69 @@ class NetAppNfsDriver(driver.ManageableVD,
raise NotImplementedError()
def create_snapshot(self, snapshot):
    """Creates a snapshot.

    For a FlexGroup pool, the operation relies on the NFS generic driver
    because the ONTAP clone file is not supported by FlexGroup yet.
    """
    if not self._is_flexgroup(vol_id=snapshot['volume_id']):
        # Regular FlexVol pool: snapshot via ONTAP file clone.
        self._clone_backing_file_for_volume(snapshot['volume_name'],
                                            snapshot['name'],
                                            snapshot['volume_id'],
                                            is_snapshot=True)
    else:
        self._create_snapshot_for_flexgroup(snapshot)
def _create_snapshot_for_flexgroup(self, snapshot):
    """Creates the snapshot falling back to the Generic NFS driver.

    The generic NFS driver snapshot creates a new file which becomes the
    active one (used to attach), so the QoS must also be assigned to that
    new file. It does not require creating a new policy group, though:
    the one created for the source volume is reused.

    :param snapshot: snapshot reference with 'id', 'volume_id',
        'volume_name' and 'volume_type_id' fields.
    :raises VolumeBackendAPIException: if either the generic snapshot
        creation or the QoS assignment fails.
    """
    try:
        # Create the snapshot file first via the generic NFS driver.
        super(NetAppNfsDriver, self).create_snapshot(snapshot)

        source_vol = {
            'id': snapshot['volume_id'],
            'name': snapshot['volume_name'],
            'volume_type_id': snapshot['volume_type_id'],
        }
        extra_specs = na_utils.get_volume_extra_specs(source_vol)
        qos_policy_group_is_adaptive = volume_utils.is_boolean_str(
            extra_specs.get('netapp:qos_policy_group_is_adaptive'))
        # Reuse the policy group already created for the source volume.
        qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
            source_vol, extra_specs)
        # NOTE(review): '<volume_name>.<snapshot_id>' is assumed to be
        # the backing-file name the generic driver uses -- confirm.
        snap_vol = {
            'name': '%s.%s' % (snapshot['volume_name'], snapshot['id']),
            'host': self._get_volume_host(source_vol['id'])
        }
        self._set_qos_policy_group_on_volume(snap_vol,
                                             qos_policy_group_info,
                                             qos_policy_group_is_adaptive)
    except Exception as e:
        LOG.exception('Exception while creating the %(snap_id)s snapshot'
                      ' of the %(vol_id)s volume inside a FlexGroup pool.'
                      ' Exception: %(exc)s',
                      {'snap_id': snapshot['id'],
                       'vol_id': snapshot['volume_id'],
                       'exc': e})
        msg = _("Snapshot could not be created on shares.")
        raise exception.VolumeBackendAPIException(data=msg)
def _set_qos_policy_group_on_volume(self, volume, qos_policy_group_info,
                                    qos_policy_group_is_adaptive):
    """Set the qos policy group for a volume.

    :param volume: volume dict identifying the backing file (callers pass
        at least 'name' and 'host').
    :param qos_policy_group_info: QoS policy group info to apply.
    :param qos_policy_group_is_adaptive: whether the policy group is an
        adaptive one.
    :raises NotImplementedError: subclasses must override.
    """
    raise NotImplementedError()
def delete_snapshot(self, snapshot):
    """Deletes a snapshot.

    On a FlexGroup pool the snapshot was created by the generic NFS
    driver, so its deletion is delegated back to that driver.
    """
    if not self._is_flexgroup(vol_id=snapshot.volume_id):
        self._delete_file(snapshot.volume_id, snapshot.name)
    else:
        super(NetAppNfsDriver, self).delete_snapshot(snapshot)
def _delete_file(self, file_id, file_name):
nfs_share = self._get_provider_location(file_id)
@ -299,6 +395,10 @@ class NetAppNfsDriver(driver.ManageableVD,
"""Clone backing file for Cinder volume."""
raise NotImplementedError()
def _is_flexgroup(self, vol_id=None, host=None):
    """Discover if a given volume is a FlexGroup or not.

    Callers supply either the volume id or the volume's host field to
    identify the pool.

    :param vol_id: id of the volume to check (optional).
    :param host: volume host field of the volume to check (optional).
    :raises NotImplementedError: subclasses must override.
    """
    raise NotImplementedError()
def _get_backing_flexvol_names(self):
    """Returns backing flexvol names.

    :raises NotImplementedError: subclasses must override.
    """
    raise NotImplementedError()
@ -312,6 +412,11 @@ class NetAppNfsDriver(driver.ManageableVD,
volume = self.db.volume_get(self._context, volume_id)
return volume.provider_location
def _get_volume_host(self, volume_id):
    """Returns volume host for given volume.

    :param volume_id: id of the volume to look up in the database.
    """
    return self.db.volume_get(self._context, volume_id).host
def _volume_not_present(self, nfs_mount, volume_name):
"""Check if volume exists."""
try:
@ -366,11 +471,16 @@ class NetAppNfsDriver(driver.ManageableVD,
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
self._ensure_flexgroup_not_in_cg(volume)
super(NetAppNfsDriver, self).copy_image_to_volume(
context, volume, image_service, image_id)
LOG.info('Copied image to volume %s using regular download.',
volume['id'])
self._register_image_in_cache(volume, image_id)
if not self._is_flexgroup(host=volume['host']):
# NOTE(felipe_rodrigues): FlexGroup does not support FlexClone
# file, so the NetApp image cache cannot be used.
self._register_image_in_cache(volume, image_id)
def _register_image_in_cache(self, volume, image_id):
"""Stores image in the cache."""
@ -531,6 +641,12 @@ class NetAppNfsDriver(driver.ManageableVD,
Returns a dict of volume properties eg. provider_location,
boolean indicating whether cloning occurred.
"""
if self._is_flexgroup(host=volume['host']):
# NOTE(felipe_rodrigues): FlexGroup does not support FlexClone
# file, so the clone_image cannot be used together with the Netapp
# cache. Instead, it can use the core cache implementation.
return None, False
image_id = image_meta['id']
cloned = False
post_clone = False
@ -1076,3 +1192,26 @@ class NetAppNfsDriver(driver.ManageableVD,
vol_path = os.path.join(volume['provider_location'], vol_str)
LOG.info('Cinder NFS volume with current path "%(cr)s" is '
'no longer being managed.', {'cr': vol_path})
def _find_share(self, volume):
    """Returns the NFS share for the created volume.

    The method is used by the base class to determine the
    provider_location share of the new volume.

    :param volume: the volume to be created.
    :raises InvalidHost: if the host field carries no pool component.
    """
    share = volume_utils.extract_host(volume['host'], level='pool')
    if share is None:
        raise exception.InvalidHost(
            reason=_("Pool is not available in the volume host field."))
    return share
def _ensure_flexgroup_not_in_cg(self, volume):
    """Rejects a FlexGroup volume that belongs to a CG-type group.

    Evaluation order matches the original short-circuit: the pool check
    runs first, then the group membership, then the group type.
    """
    if not self._is_flexgroup(host=volume['host']):
        return
    group = volume['group']
    if group and volume_utils.is_group_a_cg_snapshot_type(group):
        msg = _("Cannot create %s volume on FlexGroup pool with "
                "consistency group.")
        raise na_utils.NetAppDriverException(msg % volume['id'])

View File

@ -284,12 +284,17 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
pool['multiattach'] = True
pool['online_extend_support'] = False
is_flexgroup = ssc_vol_info.get('netapp_is_flexgroup') == 'true'
if is_flexgroup:
pool['consistencygroup_support'] = False
pool['consistent_group_snapshot_enabled'] = False
# Add up-to-date capacity info
nfs_share = ssc_vol_info['pool_name']
capacity = self._get_share_capacity_info(nfs_share)
pool.update(capacity)
if self.using_cluster_credentials:
if self.using_cluster_credentials and not is_flexgroup:
dedupe_used = self.zapi_client.get_flexvol_dedupe_used_percent(
ssc_vol_name)
else:
@ -298,9 +303,24 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
dedupe_used)
aggregate_name = ssc_vol_info.get('netapp_aggregate')
aggr_capacity = aggr_capacities.get(aggregate_name, {})
pool['netapp_aggregate_used_percent'] = aggr_capacity.get(
'percent-used', 0)
aggr_used = 0
if isinstance(aggregate_name, list):
# For FlexGroup, the aggregate percentage can be seen as the
# average of all aggregates.
aggr_used_total = 0
aggr_num = 0
for aggr in aggregate_name:
aggr_capacity = aggr_capacities.get(aggr, {})
aggr_used_total += aggr_capacity.get('percent-used', 0)
aggr_num += 1
if aggr_num:
aggr_used = aggr_used_total / aggr_num
else:
aggr_capacity = aggr_capacities.get(aggregate_name, {})
aggr_used = aggr_capacity.get('percent-used', 0)
pool['netapp_aggregate_used_percent'] = aggr_used
# Add utilization data
utilization = self.perf_library.get_node_utilization_for_pool(
@ -429,18 +449,26 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
def _delete_backing_file_for_volume(self, volume):
"""Deletes file on nfs share that backs a cinder volume."""
is_flexgroup = self._is_flexgroup(host=volume['host'])
try:
LOG.debug('Deleting backing file for volume %s.', volume['id'])
self._delete_file(volume['id'], volume['name'])
except Exception:
LOG.exception('Could not delete volume %s on backend, '
'falling back to exec of "rm" command.',
volume['id'])
try:
if is_flexgroup:
super(NetAppCmodeNfsDriver, self).delete_volume(volume)
except Exception:
else:
self._delete_file(volume['id'], volume['name'])
except Exception:
if is_flexgroup:
LOG.exception('Exec of "rm" command on backing file for '
'%s was unsuccessful.', volume['id'])
else:
LOG.exception('Could not delete volume %s on backend, '
'falling back to exec of "rm" command.',
volume['id'])
try:
super(NetAppCmodeNfsDriver, self).delete_volume(volume)
except Exception:
LOG.exception('Exec of "rm" command on backing file for '
'%s was unsuccessful.', volume['id'])
def _delete_file(self, file_id, file_name):
(host_ip, junction_path) = self._get_export_ip_path(volume_id=file_id)
@ -454,7 +482,10 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
self._delete_backing_file_for_snapshot(snapshot)
if self._is_flexgroup(snapshot['volume_id']):
super(NetAppCmodeNfsDriver, self).delete_snapshot(snapshot)
else:
self._delete_backing_file_for_snapshot(snapshot)
def _delete_backing_file_for_snapshot(self, snapshot):
"""Deletes file on nfs share that backs a cinder volume."""
@ -738,6 +769,11 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
:returns: Hard-coded model update for generic volume group model.
"""
model_update = {'status': fields.GroupStatus.AVAILABLE}
if (self._is_flexgroup(host=group['host']) and
volume_utils.is_group_a_cg_snapshot_type(group)):
msg = _("Cannot create %s consistency group on FlexGroup pool.")
raise na_utils.NetAppDriverException(msg % group['id'])
return model_update
def delete_group(self, context, group, volumes):
@ -750,7 +786,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
volumes_model_update = []
for volume in volumes:
try:
self._delete_file(volume['id'], volume['name'])
self.delete_volume(volume)
volumes_model_update.append(
{'id': volume['id'], 'status': 'deleted'})
except Exception:
@ -769,6 +805,12 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
necessary to update any metadata on the backend. Since this is a NO-OP,
there is guaranteed to be no change in any of the volumes' statuses.
"""
if volume_utils.is_group_a_cg_snapshot_type(group):
for vol in add_volumes:
if self._is_flexgroup(host=vol['host']):
msg = _("Cannot add volume from FlexGroup pool to "
"consistency group.")
raise na_utils.NetAppDriverException(msg)
return None, None, None
@ -793,13 +835,20 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
"""
try:
if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
# NOTE(felipe_rodrigues): ONTAP FlexGroup does not support
# consistency group snapshot, so all members must be inside
# a FlexVol pool.
for snapshot in snapshots:
if self._is_flexgroup(host=snapshot['volume']['host']):
msg = _("Cannot create consistency group snapshot with"
" volumes on a FlexGroup pool.")
raise na_utils.NetAppDriverException(msg)
self._create_consistent_group_snapshot(group_snapshot,
snapshots)
else:
for snapshot in snapshots:
self._clone_backing_file_for_volume(
snapshot['volume_name'], snapshot['name'],
snapshot['volume_id'], is_snapshot=True)
self.create_snapshot(snapshot)
except Exception as ex:
err_msg = (_("Create group snapshot failed (%s).") % ex)
LOG.exception(err_msg, resource=group_snapshot)
@ -853,7 +902,22 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
volumes_model_update.append(update)
elif source_group and sorted_source_vols:
hosts = [source_vol['host'] for source_vol in sorted_source_vols]
hosts = []
for source_vol in sorted_source_vols:
# NOTE(felipe_rodrigues): ONTAP FlexGroup does not support
# consistency group snapshot, so if any source volume is on a
# FlexGroup, the operation must be create from a not-cg,
# falling back to the generic group support.
if self._is_flexgroup(host=source_vol['host']):
if volume_utils.is_group_a_cg_snapshot_type(group):
msg = _("Cannot create consistency group with volume "
"on a FlexGroup pool.")
raise na_utils.NetAppDriverException(msg)
else:
# falls back to generic support
raise NotImplementedError()
hosts.append(source_vol['host'])
flexvols = self._get_flexvol_names_from_hosts(hosts)
# Create snapshot for backing flexvol
@ -885,3 +949,11 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
model_update = {'status': fields.GroupStatus.ERROR}
return model_update, volumes_model_update
def _is_flexgroup(self, vol_id=None, host=None):
    """Determine whether a volume resides on a FlexGroup pool.

    :param vol_id: cinder volume id, used to resolve the host string
        when ``host`` is not given
    :param host: volume host string ('host@backend#pool'); takes
        precedence over ``vol_id``
    :returns: True if the volume's pool is backed by a FlexGroup
    """
    resolved_host = (host if host is not None
                     else self._get_volume_host(vol_id))
    pool = volume_utils.extract_host(resolved_host, level='pool')
    return self.ssc_library.is_flexgroup(pool)

View File

@ -97,12 +97,15 @@ class PerformanceCmodeLibrary(perf_base.PerformanceLibrary):
pool_utilization = {}
for pool_name, pool_info in ssc_pools.items():
aggr_name = pool_info.get('netapp_aggregate', 'unknown')
node_name = aggr_node_map.get(aggr_name)
if node_name:
if isinstance(aggr_name, list):
# NOTE(felipe_rodrigues): for FlexGroup pools, the utilization
# is not calculated.
pool_utilization[pool_name] = perf_base.DEFAULT_UTILIZATION
else:
node_name = aggr_node_map.get(aggr_name)
pool_utilization[pool_name] = node_utilization.get(
node_name, perf_base.DEFAULT_UTILIZATION)
else:
pool_utilization[pool_name] = perf_base.DEFAULT_UTILIZATION
self.pool_utilization = pool_utilization
@ -121,7 +124,12 @@ class PerformanceCmodeLibrary(perf_base.PerformanceLibrary):
aggr_names = set()
for pool_name, pool_info in ssc_pools.items():
aggr_names.add(pool_info.get('netapp_aggregate'))
aggr = pool_info.get('netapp_aggregate')
if isinstance(aggr, list):
# NOTE(felipe_rodrigues): for FlexGroup pools, the utilization
# is not calculated.
continue
aggr_names.add(aggr)
return aggr_names
def _get_nodes_for_aggregates(self, aggr_names):

View File

@ -114,7 +114,11 @@ class CapabilitiesLibrary(object):
aggregates = set()
for __, flexvol_info in self.ssc.items():
if 'netapp_aggregate' in flexvol_info:
aggregates.add(flexvol_info['netapp_aggregate'])
aggr = flexvol_info['netapp_aggregate']
if isinstance(aggr, list):
aggregates.update(aggr)
else:
aggregates.add(aggr)
return list(aggregates)
def is_qos_min_supported(self, pool_name):
@ -152,7 +156,9 @@ class CapabilitiesLibrary(object):
# Get aggregate info
aggregate_name = ssc_volume.get('netapp_aggregate')
aggr_info = self._get_ssc_aggregate_info(aggregate_name)
is_flexgroup = isinstance(aggregate_name, list)
aggr_info = self._get_ssc_aggregate_info(
aggregate_name, is_flexgroup=is_flexgroup)
node_name = aggr_info.pop('netapp_node_name')
ssc_volume.update(aggr_info)
@ -176,12 +182,14 @@ class CapabilitiesLibrary(object):
(volume_info.get('space-guarantee') == 'file' or
volume_info.get('space-guarantee') == 'volume'))
thick = self._get_thick_provisioning_support(netapp_thick)
is_flexgroup = volume_info.get('style-extended') == 'flexgroup'
return {
'netapp_thin_provisioned': six.text_type(not netapp_thick).lower(),
'thick_provisioning_support': thick,
'thin_provisioning_support': not thick,
'netapp_aggregate': volume_info.get('aggregate'),
'netapp_is_flexgroup': six.text_type(is_flexgroup).lower(),
}
def _get_thick_provisioning_support(self, netapp_thick):
@ -227,8 +235,19 @@ class CapabilitiesLibrary(object):
def _get_ssc_qos_min_info(self, node_name):
"""Gather Qos minimum info and recast into SSC-style stats."""
supported = self.zapi_client.is_qos_min_supported(
self.protocol == 'nfs', node_name)
supported = True
is_nfs = self.protocol == 'nfs'
if isinstance(node_name, list):
# NOTE(felipe_rodrigues): it cannot be known which node the volume
# will be created on, so every node backing the pool must support
# QoS min for this feature to be enabled.
for n_name in node_name:
if not self.zapi_client.is_qos_min_supported(is_nfs, n_name):
supported = False
break
else:
supported = self.zapi_client.is_qos_min_supported(is_nfs,
node_name)
return {'netapp_qos_min_support': six.text_type(supported).lower()}
@ -240,14 +259,37 @@ class CapabilitiesLibrary(object):
return {'netapp_mirrored': six.text_type(mirrored).lower()}
def _get_ssc_aggregate_info(self, aggregate_name):
"""Gather aggregate info and recast into SSC-style volume stats."""
def _get_ssc_aggregate_info(self, aggregate_name, is_flexgroup=False):
"""Gather aggregate info and recast into SSC-style volume stats.
:param aggregate_name: a list of aggregate names for FlexGroup or
a single aggregate name for FlexVol
:param is_flexgroup: bool informing the type of aggregate_name param
"""
if 'netapp_raid_type' in self.invalid_extra_specs:
raid_type = None
hybrid = None
disk_types = None
node_name = None
elif is_flexgroup:
raid_type = set()
hybrid = set()
disk_types = set()
node_name = set()
for aggr in aggregate_name:
aggregate = self.zapi_client.get_aggregate(aggr)
node_name.add(aggregate.get('node-name'))
raid_type.add(aggregate.get('raid-type'))
hybrid.add((six.text_type(
aggregate.get('is-hybrid')).lower()
if 'is-hybrid' in aggregate else None))
disks = set(self.zapi_client.get_aggregate_disk_types(aggr))
disk_types = disk_types.union(disks)
node_name = list(node_name)
raid_type = list(raid_type)
hybrid = list(hybrid)
disk_types = list(disk_types)
else:
aggregate = self.zapi_client.get_aggregate(aggregate_name)
node_name = aggregate.get('node-name')
@ -327,3 +369,12 @@ class CapabilitiesLibrary(object):
modified_extra_specs[key] = False
return modified_extra_specs
def is_flexgroup(self, pool_name):
    """Report whether the named pool is backed by a FlexGroup volume.

    Scans the cached SSC entries for one whose ``pool_name`` matches
    and returns its ``netapp_is_flexgroup`` flag (stored as the string
    'true'/'false'). Entries missing either key are ignored.

    :param pool_name: name of the pool to look up
    :returns: True when the matching SSC entry is flagged as FlexGroup,
        False when it is not or no entry matches
    """
    for ssc_info in self.ssc.values():
        has_keys = ('netapp_is_flexgroup' in ssc_info and
                    'pool_name' in ssc_info)
        if has_keys and ssc_info['pool_name'] == pool_name:
            return ssc_info['netapp_is_flexgroup'] == 'true'
    return False

View File

@ -418,6 +418,11 @@ class DataMotionMixin(object):
src_flexvol_name)
)
if provisioning_options.pop('is_flexgroup', False):
msg = _("Destination volume cannot be created as FlexGroup for "
"replication, it must already exist there.")
raise na_utils.NetAppDriverException(msg)
# If the source is encrypted then the destination needs to be
# encrypted too. Using is_flexvol_encrypted because it includes
# a simple check to ensure that the NVE feature is supported.

View File

@ -0,0 +1,32 @@
---
features:
- |
NetApp ONTAP driver: added support for FlexGroup pool using the NFS
mode. There are several considerations for using the driver with it:
1. The FlexGroup pool has a different view of aggregate capabilities,
reporting them as a list of elements instead of a single element. They
are ``netapp_aggregate``, ``netapp_raid_type``, ``netapp_disk_type`` and
``netapp_hybrid_aggregate``. The ``netapp_aggregate_used_percent``
capability is an average of used percent of all FlexGroup's aggregates.
2. The ``utilization`` capability is not calculated for FlexGroup pools; it is
always set to the default of 50.
3. The driver does not support consistency groups with volumes that reside
on FlexGroup pools.
4. For volumes over the FlexGroup pool, the operations of clone volume,
create snapshot and create volume from an image are implemented as the NFS
generic driver. Hence, it does not rely on the ONTAP storage to perform
those operations.
5. A driver with FlexGroup pools has snapshot support disabled by default. To
enable, you must set ``nfs_snapshot_support`` to true in the backend's configuration
section of the cinder configuration file.
6. The driver image cache is not applied for volumes over FlexGroup pools.
It can use the core image cache for avoiding downloading twice, though.
7. Given that the FlexGroup pool may be on several cluster nodes, the QoS minimum
support is only enabled if all nodes support it.