NetApp ONTAP: Remove support for 7mode systems

The Unified Driver for NetApp Storage in Cinder supports two
families of ONTAP: 7-Mode and Clustered Data ONTAP.

Data ONTAP operating in 7-Mode is now officially nearing the end
of product support. The deprecation notice [1] for these drivers in
Cinder was issued in the Newton release, so it is time to remove
them from the tree.

[1] http://lists.openstack.org/pipermail/openstack-operators/2016-November/011957.html
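
For context, the driver family is selected per backend with the
netapp_storage_family option in cinder.conf. A minimal, illustrative
sketch of the kind of 7-mode backend stanza this change retires; the
backend name, hostname, and credentials below are placeholders, not
values taken from this commit:

  [netapp-7mode-iscsi]
  volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
  # 'ontap_7mode' is the value being removed; 'ontap_cluster' remains
  netapp_storage_family = ontap_7mode
  netapp_storage_protocol = iscsi
  netapp_server_hostname = filer.example.com
  netapp_login = admin
  netapp_password = secret

Deployments still configured with ontap_7mode must migrate to
ontap_cluster, or remain on a release that predates this removal.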

Implements: bp remove-netapp-7mode-drivers

Change-Id: I129ca060a89275ffd56481b8f64367b0d803cff5
Goutham Pacha Ravi 2017-10-04 12:38:06 -04:00
parent 0690f14707
commit 425f45a311
33 changed files with 45 additions and 5127 deletions


@@ -303,7 +303,6 @@ def list_opts():
cinder_volume_drivers_netapp_options.netapp_transport_opts,
cinder_volume_drivers_netapp_options.netapp_basicauth_opts,
cinder_volume_drivers_netapp_options.netapp_cluster_opts,
cinder_volume_drivers_netapp_options.netapp_7mode_opts,
cinder_volume_drivers_netapp_options.netapp_provisioning_opts,
cinder_volume_drivers_netapp_options.netapp_img_cache_opts,
cinder_volume_drivers_netapp_options.netapp_eseries_opts,


@@ -277,81 +277,6 @@ SNAPSHOT_INFO_FOR_PRESENT_BUSY_SNAPSHOT_CMODE = etree.XML("""
'vol_name': fake.SNAPSHOT['volume_id'],
})
SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_7MODE = etree.XML("""
<results status="passed">
<snapshots>
<snapshot-info>
<name>%(snapshot_name)s</name>
<busy>False</busy>
<volume>%(vol_name)s</volume>
<snapshot-instance-uuid>abcd-ef01-2345-6789</snapshot-instance-uuid>
</snapshot-info>
</snapshots>
</results>
""" % {
'snapshot_name': fake.SNAPSHOT['name'],
'vol_name': fake.SNAPSHOT['volume_id'],
})
SNAPSHOT_INFO_MARKED_FOR_DELETE_SNAPSHOT_7MODE = etree.XML("""
<results status="passed">
<snapshots>
<snapshot-info>
<name>deleted_cinder_%(snapshot_name)s</name>
<busy>False</busy>
<volume>%(vol_name)s</volume>
<snapshot-instance-uuid>abcd-ef01-2345-6789</snapshot-instance-uuid>
</snapshot-info>
</snapshots>
</results>
""" % {
'snapshot_name': fake.SNAPSHOT['name'],
'vol_name': fake.SNAPSHOT['volume_id'],
})
SNAPSHOT_INFO_MARKED_FOR_DELETE_SNAPSHOT_7MODE_BUSY = etree.XML("""
<results status="passed">
<snapshots>
<snapshot-info>
<name>deleted_cinder_busy_snapshot</name>
<busy>True</busy>
<volume>%(vol_name)s</volume>
<snapshot-instance-uuid>abcd-ef01-2345-6789</snapshot-instance-uuid>
</snapshot-info>
</snapshots>
</results>
""" % {
'snapshot_name': fake.SNAPSHOT['name'],
'vol_name': fake.SNAPSHOT['volume_id'],
})
SNAPSHOT_INFO_FOR_PRESENT_BUSY_SNAPSHOT_7MODE = etree.XML("""
<results status="passed">
<snapshots>
<snapshot-info>
<name>%(snapshot_name)s</name>
<busy>True</busy>
<volume>%(vol_name)s</volume>
</snapshot-info>
</snapshots>
</results>
""" % {
'snapshot_name': fake.SNAPSHOT['name'],
'vol_name': fake.SNAPSHOT['volume_id'],
})
SNAPSHOT_NOT_PRESENT_7MODE = etree.XML("""
<results status="passed">
<snapshots>
<snapshot-info>
<name>NOT_THE_RIGHT_SNAPSHOT</name>
<busy>false</busy>
<volume>%(vol_name)s</volume>
</snapshot-info>
</snapshots>
</results>
""" % {'vol_name': fake.SNAPSHOT['volume_id']})
NODE_NAME = 'fake_node1'
NODE_NAMES = ('fake_node1', 'fake_node2')
VOLUME_AGGREGATE_NAME = 'fake_aggr1'
@@ -1178,22 +1103,6 @@ PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_CMODE = etree.XML("""
</results>
""" % {'node1': NODE_NAMES[0], 'node2': NODE_NAMES[1]})
PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_7MODE = etree.XML("""
<results status="passed">
<timestamp>1454146292</timestamp>
<instances>
<instance-data>
<name>system</name>
<counters>
<counter-data>
<name>avg_processor_busy</name>
<value>13215732322</value>
</counter-data>
</counters>
</instance-data>
</instances>
</results>""")
PERF_OBJECT_INSTANCE_LIST_INFO_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>


@@ -1,863 +0,0 @@
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2015 Dustin Schoenbrun. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import ddt
from lxml import etree
import mock
import paramiko
import six
from cinder import exception
from cinder import ssh_utils
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
fakes as fake_client)
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_7mode
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp import utils as netapp_utils
CONNECTION_INFO = {'hostname': 'hostname',
'transport_type': 'https',
'port': 443,
'username': 'admin',
'password': 'passw0rd'}
@ddt.ddt
class NetApp7modeClientTestCase(test.TestCase):
def setUp(self):
super(NetApp7modeClientTestCase, self).setUp()
self.fake_volume = six.text_type(uuid.uuid4())
self.mock_object(client_7mode.Client, '_init_ssh_client')
with mock.patch.object(client_7mode.Client,
'get_ontapi_version',
return_value=(1, 20)):
self.client = client_7mode.Client([self.fake_volume],
**CONNECTION_INFO)
self.client.ssh_client = mock.MagicMock()
self.client.connection = mock.MagicMock()
self.connection = self.client.connection
self.fake_lun = six.text_type(uuid.uuid4())
def test_get_iscsi_target_details_no_targets(self):
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<iscsi-portal-list-entries>
</iscsi-portal-list-entries>
</results>"""))
self.connection.invoke_successfully.return_value = response
target_list = self.client.get_iscsi_target_details()
self.assertEqual([], target_list)
def test_get_iscsi_target_details(self):
expected_target = {
"address": "127.0.0.1",
"port": "1337",
"tpgroup-tag": "7777",
}
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<iscsi-portal-list-entries>
<iscsi-portal-list-entry-info>
<ip-address>%(address)s</ip-address>
<ip-port>%(port)s</ip-port>
<tpgroup-tag>%(tpgroup-tag)s</tpgroup-tag>
</iscsi-portal-list-entry-info>
</iscsi-portal-list-entries>
</results>""" % expected_target))
self.connection.invoke_successfully.return_value = response
target_list = self.client.get_iscsi_target_details()
self.assertEqual([expected_target], target_list)
def test_get_iscsi_service_details_with_no_iscsi_service(self):
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
</results>"""))
self.connection.invoke_successfully.return_value = response
iqn = self.client.get_iscsi_service_details()
self.assertIsNone(iqn)
def test_get_iscsi_service_details(self):
expected_iqn = 'iqn.1998-01.org.openstack.iscsi:name1'
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<node-name>%s</node-name>
</results>""" % expected_iqn))
self.connection.invoke_successfully.return_value = response
iqn = self.client.get_iscsi_service_details()
self.assertEqual(expected_iqn, iqn)
def test_get_lun_list(self):
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<luns>
<lun-info></lun-info>
<lun-info></lun-info>
</luns>
</results>"""))
self.connection.invoke_successfully.return_value = response
luns = self.client.get_lun_list()
self.assertEqual(2, len(luns))
def test_get_igroup_by_initiators_none_found(self):
initiators = fake.FC_FORMATTED_INITIATORS[0]
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<initiator-groups>
</initiator-groups>
</results>"""))
self.connection.invoke_successfully.return_value = response
igroup = self.client.get_igroup_by_initiators(initiators)
self.assertEqual([], igroup)
def test_get_igroup_by_initiators(self):
initiators = [fake.FC_FORMATTED_INITIATORS[0]]
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<initiator-groups>
<initiator-group-info>
<initiator-group-name>%(initiator-group-name)s</initiator-group-name>
<initiator-group-type>%(initiator-group-type)s</initiator-group-type>
<initiator-group-uuid>1477ee47-0e1f-4b35-a82c-dcca0b76fc44
</initiator-group-uuid>
<initiator-group-os-type>linux</initiator-group-os-type>
<initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
<initiator-group-throttle-borrow>false
</initiator-group-throttle-borrow>
<initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
<initiator-group-alua-enabled>true</initiator-group-alua-enabled>
<initiator-group-report-scsi-name-enabled>true
</initiator-group-report-scsi-name-enabled>
<initiator-group-use-partner>true</initiator-group-use-partner>
<initiators>
<initiator-info>
<initiator-name>21:00:00:24:ff:40:6c:c3</initiator-name>
</initiator-info>
</initiators>
</initiator-group-info>
</initiator-groups>
</results>""" % fake.IGROUP1))
self.connection.invoke_successfully.return_value = response
igroups = self.client.get_igroup_by_initiators(initiators)
# make these lists of dicts comparable using hashable dictionaries
igroups = set(
[netapp_utils.hashabledict(igroup) for igroup in igroups])
expected = set([netapp_utils.hashabledict(fake.IGROUP1)])
self.assertSetEqual(igroups, expected)
def test_get_igroup_by_initiators_multiple(self):
initiators = fake.FC_FORMATTED_INITIATORS
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<initiator-groups>
<initiator-group-info>
<initiator-group-name>%(initiator-group-name)s</initiator-group-name>
<initiator-group-type>%(initiator-group-type)s</initiator-group-type>
<initiator-group-uuid>1477ee47-0e1f-4b35-a82c-dcca0b76fc44
</initiator-group-uuid>
<initiator-group-os-type>linux</initiator-group-os-type>
<initiators>
<initiator-info>
<initiator-name>21:00:00:24:ff:40:6c:c3</initiator-name>
</initiator-info>
<initiator-info>
<initiator-name>21:00:00:24:ff:40:6c:c2</initiator-name>
</initiator-info>
</initiators>
</initiator-group-info>
<initiator-group-info>
<initiator-group-name>openstack-igroup2</initiator-group-name>
<initiator-group-type>fcp</initiator-group-type>
<initiator-group-uuid>1477ee47-0e1f-4b35-a82c-dcca0b76fc44
</initiator-group-uuid>
<initiator-group-os-type>linux</initiator-group-os-type>
<initiators>
<initiator-info>
<initiator-name>21:00:00:24:ff:40:6c:c2</initiator-name>
</initiator-info>
</initiators>
</initiator-group-info> </initiator-groups>
</results>""" % fake.IGROUP1))
self.connection.invoke_successfully.return_value = response
igroups = self.client.get_igroup_by_initiators(initiators)
# make these lists of dicts comparable using hashable dictionaries
igroups = set(
[netapp_utils.hashabledict(igroup) for igroup in igroups])
expected = set([netapp_utils.hashabledict(fake.IGROUP1)])
self.assertSetEqual(igroups, expected)
def test_clone_lun(self):
fake_clone_start = netapp_api.NaElement(
etree.XML("""<results status="passed">
<clone-id>
<clone-id-info>
<clone-op-id>1337</clone-op-id>
<volume-uuid>volume-uuid</volume-uuid>
</clone-id-info>
</clone-id>
</results>"""))
fake_clone_status = netapp_api.NaElement(
etree.XML("""<results status="passed">
<status>
<ops-info>
<clone-state>completed</clone-state>
</ops-info>
</status>
</results>"""))
self.connection.invoke_successfully.side_effect = [fake_clone_start,
fake_clone_status]
self.client.clone_lun('path', 'new_path', 'fakeLUN', 'newFakeLUN')
self.assertEqual(2, self.connection.invoke_successfully.call_count)
def test_clone_lun_api_error(self):
fake_clone_start = netapp_api.NaElement(
etree.XML("""<results status="passed">
<clone-id>
<clone-id-info>
<clone-op-id>1337</clone-op-id>
<volume-uuid>volume-uuid</volume-uuid>
</clone-id-info>
</clone-id>
</results>"""))
fake_clone_status = netapp_api.NaElement(
etree.XML("""<results status="passed">
<status>
<ops-info>
<clone-state>error</clone-state>
</ops-info>
</status>
</results>"""))
self.connection.invoke_successfully.side_effect = [fake_clone_start,
fake_clone_status]
self.assertRaises(netapp_api.NaApiError, self.client.clone_lun,
'path', 'new_path', 'fakeLUN', 'newFakeLUN')
def test_clone_lun_multiple_zapi_calls(self):
# Max block-ranges per call = 32, max blocks per range = 2^24
# Force 2 calls
bc = 2 ** 24 * 32 * 2
fake_clone_start = netapp_api.NaElement(
etree.XML("""<results status="passed">
<clone-id>
<clone-id-info>
<clone-op-id>1337</clone-op-id>
<volume-uuid>volume-uuid</volume-uuid>
</clone-id-info>
</clone-id>
</results>"""))
fake_clone_status = netapp_api.NaElement(
etree.XML("""<results status="passed">
<status>
<ops-info>
<clone-state>completed</clone-state>
</ops-info>
</status>
</results>"""))
self.connection.invoke_successfully.side_effect = [fake_clone_start,
fake_clone_status,
fake_clone_start,
fake_clone_status]
self.client.clone_lun('path', 'new_path', 'fakeLUN', 'newFakeLUN',
block_count=bc)
self.assertEqual(4, self.connection.invoke_successfully.call_count)
def test_clone_lun_wait_for_clone_to_finish(self):
# Max block-ranges per call = 32, max blocks per range = 2^24
# Force 2 calls
bc = 2 ** 24 * 32 * 2
fake_clone_start = netapp_api.NaElement(
etree.XML("""<results status="passed">
<clone-id>
<clone-id-info>
<clone-op-id>1337</clone-op-id>
<volume-uuid>volume-uuid</volume-uuid>
</clone-id-info>
</clone-id>
</results>"""))
fake_clone_status = netapp_api.NaElement(
etree.XML("""<results status="passed">
<status>
<ops-info>
<clone-state>running</clone-state>
</ops-info>
</status>
</results>"""))
fake_clone_status_completed = netapp_api.NaElement(
etree.XML("""<results status="passed">
<status>
<ops-info>
<clone-state>completed</clone-state>
</ops-info>
</status>
</results>"""))
fake_responses = [fake_clone_start,
fake_clone_status,
fake_clone_status_completed,
fake_clone_start,
fake_clone_status_completed]
self.connection.invoke_successfully.side_effect = fake_responses
with mock.patch('time.sleep') as mock_sleep:
self.client.clone_lun('path', 'new_path', 'fakeLUN',
'newFakeLUN', block_count=bc)
mock_sleep.assert_called_once_with(1)
self.assertEqual(5, self.connection.invoke_successfully.call_count)
def test_get_lun_by_args(self):
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<luns>
<lun-info></lun-info>
</luns>
</results>"""))
self.connection.invoke_successfully.return_value = response
luns = self.client.get_lun_by_args()
self.assertEqual(1, len(luns))
def test_get_lun_by_args_no_lun_found(self):
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<luns>
</luns>
</results>"""))
self.connection.invoke_successfully.return_value = response
luns = self.client.get_lun_by_args()
self.assertEqual(0, len(luns))
def test_get_lun_by_args_with_args_specified(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<luns>
<lun-info></lun-info>
</luns>
</results>"""))
self.connection.invoke_successfully.return_value = response
lun = self.client.get_lun_by_args(path=path)
__, _args, __ = self.connection.invoke_successfully.mock_calls[0]
actual_request = _args[0]
lun_info_args = actual_request.get_children()
# Assert request is made with correct arguments
self.assertEqual('path', lun_info_args[0].get_name())
self.assertEqual(path, lun_info_args[0].get_content())
self.assertEqual(1, len(lun))
def test_get_filer_volumes(self):
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<volumes>
<volume-info></volume-info>
</volumes>
</results>"""))
self.connection.invoke_successfully.return_value = response
volumes = self.client.get_filer_volumes()
self.assertEqual(1, len(volumes))
def test_get_filer_volumes_no_volumes(self):
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<volumes>
</volumes>
</results>"""))
self.connection.invoke_successfully.return_value = response
volumes = self.client.get_filer_volumes()
self.assertEqual([], volumes)
def test_get_lun_map(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
self.connection.invoke_successfully.return_value = mock.Mock()
self.client.get_lun_map(path=path)
__, _args, __ = self.connection.invoke_successfully.mock_calls[0]
actual_request = _args[0]
lun_info_args = actual_request.get_children()
# Assert request is made with correct arguments
self.assertEqual('path', lun_info_args[0].get_name())
self.assertEqual(path, lun_info_args[0].get_content())
def test_set_space_reserve(self):
path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
self.connection.invoke_successfully.return_value = mock.Mock()
self.client.set_space_reserve(path, 'true')
__, _args, __ = self.connection.invoke_successfully.mock_calls[0]
actual_request = _args[0]
lun_info_args = actual_request.get_children()
# The children list is not generated in a stable order,
# so figure out which entry is which.
if lun_info_args[0].get_name() == 'path':
path_arg = lun_info_args[0]
enable_arg = lun_info_args[1]
else:
path_arg = lun_info_args[1]
enable_arg = lun_info_args[0]
# Assert request is made with correct arguments
self.assertEqual('path', path_arg.get_name())
self.assertEqual(path, path_arg.get_content())
self.assertEqual('enable', enable_arg.get_name())
self.assertEqual('true', enable_arg.get_content())
def test_get_actual_path_for_export(self):
fake_export_path = 'fake_export_path'
expected_actual_pathname = 'fake_actual_pathname'
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<actual-pathname>%(path)s</actual-pathname>
</results>""" % {'path': expected_actual_pathname}))
self.connection.invoke_successfully.return_value = response
actual_pathname = self.client.get_actual_path_for_export(
fake_export_path)
__, __, _kwargs = self.connection.invoke_successfully.mock_calls[0]
enable_tunneling = _kwargs['enable_tunneling']
self.assertEqual(expected_actual_pathname, actual_pathname)
self.assertTrue(enable_tunneling)
def test_clone_file(self):
expected_src_path = "fake_src_path"
expected_dest_path = "fake_dest_path"
fake_volume_id = '0309c748-0d94-41f0-af46-4fbbd76686cf'
fake_clone_op_id = 'c22ad299-ecec-4ec0-8de4-352b887bfce2'
fake_clone_id_response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<clone-id>
<clone-id-info>
<volume-uuid>%(volume)s</volume-uuid>
<clone-op-id>%(clone_id)s</clone-op-id>
</clone-id-info>
</clone-id>
</results>""" % {'volume': fake_volume_id,
'clone_id': fake_clone_op_id}))
fake_clone_list_response = netapp_api.NaElement(
etree.XML("""<results>
<clone-list-status>
<clone-id-info>
<volume-uuid>%(volume)s</volume-uuid>
<clone-op-id>%(clone_id)s</clone-op-id>
</clone-id-info>
<clone-op-id>%(clone_id)s</clone-op-id>
</clone-list-status>
<status>
<ops-info>
<clone-state>completed</clone-state>
</ops-info>
</status>
</results>""" % {'volume': fake_volume_id,
'clone_id': fake_clone_op_id}))
self.connection.invoke_successfully.side_effect = [
fake_clone_id_response, fake_clone_list_response]
self.client.clone_file(expected_src_path,
expected_dest_path,
source_snapshot=fake.CG_SNAPSHOT_ID)
__, _args, _kwargs = self.connection.invoke_successfully.mock_calls[0]
actual_request = _args[0]
enable_tunneling = _kwargs['enable_tunneling']
actual_src_path = actual_request \
.get_child_by_name('source-path').get_content()
actual_dest_path = actual_request.get_child_by_name(
'destination-path').get_content()
self.assertEqual(expected_src_path, actual_src_path)
self.assertEqual(expected_dest_path, actual_dest_path)
self.assertEqual(
fake.CG_SNAPSHOT_ID,
actual_request.get_child_by_name('snapshot-name').get_content())
self.assertIsNone(actual_request.get_child_by_name(
'destination-exists'))
self.assertTrue(enable_tunneling)
def test_clone_file_when_clone_fails(self):
"""Ensure clone is cleaned up on failure."""
expected_src_path = "fake_src_path"
expected_dest_path = "fake_dest_path"
fake_volume_id = '0309c748-0d94-41f0-af46-4fbbd76686cf'
fake_clone_op_id = 'c22ad299-ecec-4ec0-8de4-352b887bfce2'
fake_clone_id_response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<clone-id>
<clone-id-info>
<volume-uuid>%(volume)s</volume-uuid>
<clone-op-id>%(clone_id)s</clone-op-id>
</clone-id-info>
</clone-id>
</results>""" % {'volume': fake_volume_id,
'clone_id': fake_clone_op_id}))
fake_clone_list_response = netapp_api.NaElement(
etree.XML("""<results>
<clone-list-status>
<clone-id-info>
<volume-uuid>%(volume)s</volume-uuid>
<clone-op-id>%(clone_id)s</clone-op-id>
</clone-id-info>
<clone-op-id>%(clone_id)s</clone-op-id>
</clone-list-status>
<status>
<ops-info>
<clone-state>failed</clone-state>
</ops-info>
</status>
</results>""" % {'volume': fake_volume_id,
'clone_id': fake_clone_op_id}))
fake_clone_clear_response = mock.Mock()
self.connection.invoke_successfully.side_effect = [
fake_clone_id_response, fake_clone_list_response,
fake_clone_clear_response]
self.assertRaises(netapp_api.NaApiError,
self.client.clone_file,
expected_src_path,
expected_dest_path)
__, _args, _kwargs = self.connection.invoke_successfully.mock_calls[0]
actual_request = _args[0]
enable_tunneling = _kwargs['enable_tunneling']
actual_src_path = actual_request \
.get_child_by_name('source-path').get_content()
actual_dest_path = actual_request.get_child_by_name(
'destination-path').get_content()
self.assertEqual(expected_src_path, actual_src_path)
self.assertEqual(expected_dest_path, actual_dest_path)
self.assertIsNone(actual_request.get_child_by_name(
'destination-exists'))
self.assertTrue(enable_tunneling)
__, _args, _kwargs = self.connection.invoke_successfully.mock_calls[1]
actual_request = _args[0]
enable_tunneling = _kwargs['enable_tunneling']
actual_clone_id = actual_request.get_child_by_name('clone-id')
actual_clone_id_info = actual_clone_id.get_child_by_name(
'clone-id-info')
actual_clone_op_id = actual_clone_id_info.get_child_by_name(
'clone-op-id').get_content()
actual_volume_uuid = actual_clone_id_info.get_child_by_name(
'volume-uuid').get_content()
self.assertEqual(fake_clone_op_id, actual_clone_op_id)
self.assertEqual(fake_volume_id, actual_volume_uuid)
self.assertTrue(enable_tunneling)
# Ensure that the clone-clear call is made upon error
__, _args, _kwargs = self.connection.invoke_successfully.mock_calls[2]
actual_request = _args[0]
enable_tunneling = _kwargs['enable_tunneling']
actual_clone_id = actual_request \
.get_child_by_name('clone-id').get_content()
self.assertEqual(fake_clone_op_id, actual_clone_id)
self.assertTrue(enable_tunneling)
def test_get_file_usage(self):
expected_bytes = "2048"
fake_path = 'fake_path'
response = netapp_api.NaElement(
etree.XML("""<results status="passed">
<unique-bytes>%(unique-bytes)s</unique-bytes>
</results>""" % {'unique-bytes': expected_bytes}))
self.connection.invoke_successfully.return_value = response
actual_bytes = self.client.get_file_usage(fake_path)
self.assertEqual(expected_bytes, actual_bytes)
def test_get_ifconfig(self):
expected_response = mock.Mock()
self.connection.invoke_successfully.return_value = expected_response
actual_response = self.client.get_ifconfig()
__, _args, __ = self.connection.invoke_successfully.mock_calls[0]
actual_request = _args[0]
self.assertEqual('net-ifconfig-get', actual_request.get_name())
self.assertEqual(expected_response, actual_response)
def test_get_fc_target_wwpns(self):
wwpn1 = '50:0a:09:81:90:fe:eb:a5'
wwpn2 = '50:0a:09:82:90:fe:eb:a5'
response = netapp_api.NaElement(
etree.XML("""
<results status="passed">
<fcp-port-names>
<fcp-port-name-info>
<port-name>%(wwpn1)s</port-name>
<is-used>true</is-used>
<fcp-adapter>1a</fcp-adapter>
</fcp-port-name-info>
<fcp-port-name-info>
<port-name>%(wwpn2)s</port-name>
<is-used>true</is-used>
<fcp-adapter>1b</fcp-adapter>
</fcp-port-name-info>
</fcp-port-names>
</results>""" % {'wwpn1': wwpn1, 'wwpn2': wwpn2}))
self.connection.invoke_successfully.return_value = response
wwpns = self.client.get_fc_target_wwpns()
self.assertSetEqual(set(wwpns), set([wwpn1, wwpn2]))
def test_get_flexvol_capacity(self):
expected_total_bytes = 1000
expected_available_bytes = 750
fake_flexvol_path = '/fake/vol'
response = netapp_api.NaElement(
etree.XML("""
<results status="passed">
<volumes>
<volume-info>
<size-total>%(total_bytes)s</size-total>
<size-available>%(available_bytes)s</size-available>
</volume-info>
</volumes>
</results>""" % {'total_bytes': expected_total_bytes,
'available_bytes': expected_available_bytes}))
self.connection.invoke_successfully.return_value = response
result = self.client.get_flexvol_capacity(fake_flexvol_path)
expected = {
'size-total': expected_total_bytes,
'size-available': expected_available_bytes,
}
self.assertEqual(expected, result)
def test_get_performance_instance_names(self):
mock_send_request = self.mock_object(self.client, 'send_request')
mock_send_request.return_value = netapp_api.NaElement(
fake_client.PERF_OBJECT_INSTANCE_LIST_INFO_RESPONSE)
result = self.client.get_performance_instance_names('processor')
expected = ['processor0', 'processor1']
self.assertEqual(expected, result)
perf_object_instance_list_info_args = {'objectname': 'processor'}
mock_send_request.assert_called_once_with(
'perf-object-instance-list-info',
perf_object_instance_list_info_args, enable_tunneling=False)
def test_get_performance_counters(self):
mock_send_request = self.mock_object(self.client, 'send_request')
mock_send_request.return_value = netapp_api.NaElement(
fake_client.PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_7MODE)
instance_names = ['system']
counter_names = ['avg_processor_busy']
result = self.client.get_performance_counters('system',
instance_names,
counter_names)
expected = [
{
'avg_processor_busy': '13215732322',
'instance-name': 'system',
'timestamp': '1454146292',
}
]
self.assertEqual(expected, result)
perf_object_get_instances_args = {
'objectname': 'system',
'instances': [
{'instance': instance} for instance in instance_names
],
'counters': [
{'counter': counter} for counter in counter_names
],
}
mock_send_request.assert_called_once_with(
'perf-object-get-instances', perf_object_get_instances_args,
enable_tunneling=False)
def test_get_system_name(self):
mock_send_request = self.mock_object(self.client, 'send_request')
mock_send_request.return_value = netapp_api.NaElement(
fake_client.SYSTEM_GET_INFO_RESPONSE)
result = self.client.get_system_name()
self.assertEqual(fake_client.NODE_NAME, result)
def test_check_iscsi_initiator_exists_when_no_initiator_exists(self):
self.connection.invoke_successfully = mock.Mock(
side_effect=netapp_api.NaApiError)
initiator = fake_client.INITIATOR_IQN
initiator_exists = self.client.check_iscsi_initiator_exists(initiator)
self.assertFalse(initiator_exists)
def test_check_iscsi_initiator_exists_when_initiator_exists(self):
self.connection.invoke_successfully = mock.Mock()
initiator = fake_client.INITIATOR_IQN
initiator_exists = self.client.check_iscsi_initiator_exists(initiator)
self.assertTrue(initiator_exists)
def test_set_iscsi_chap_authentication(self):
ssh = mock.Mock(paramiko.SSHClient)
sshpool = mock.Mock(ssh_utils.SSHPool)
self.client.ssh_client.ssh_pool = sshpool
self.mock_object(self.client.ssh_client, 'execute_command')
sshpool.item().__enter__ = mock.Mock(return_value=ssh)
sshpool.item().__exit__ = mock.Mock(return_value=False)
self.client.set_iscsi_chap_authentication(fake_client.INITIATOR_IQN,
fake_client.USER_NAME,
fake_client.PASSWORD)
command = ('iscsi security add -i iqn.2015-06.com.netapp:fake_iqn '
'-s CHAP -p passw0rd -n fake_user')
self.client.ssh_client.execute_command.assert_has_calls(
[mock.call(ssh, command)]
)
def test_get_snapshot_if_snapshot_present_not_busy(self):
expected_vol_name = fake.SNAPSHOT['volume_id']
expected_snapshot_name = fake.SNAPSHOT['name']
response = netapp_api.NaElement(
fake_client.SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_7MODE)
self.connection.invoke_successfully.return_value = response
snapshot = self.client.get_snapshot(expected_vol_name,
expected_snapshot_name)
self.assertEqual(expected_vol_name, snapshot['volume'])
self.assertEqual(expected_snapshot_name, snapshot['name'])
self.assertEqual(set([]), snapshot['owners'])
self.assertFalse(snapshot['busy'])
def test_get_snapshot_if_snapshot_present_busy(self):
expected_vol_name = fake.SNAPSHOT['volume_id']
expected_snapshot_name = fake.SNAPSHOT['name']
response = netapp_api.NaElement(
fake_client.SNAPSHOT_INFO_FOR_PRESENT_BUSY_SNAPSHOT_7MODE)
self.connection.invoke_successfully.return_value = response
snapshot = self.client.get_snapshot(expected_vol_name,
expected_snapshot_name)
self.assertEqual(expected_vol_name, snapshot['volume'])
self.assertEqual(expected_snapshot_name, snapshot['name'])
self.assertEqual(set([]), snapshot['owners'])
self.assertTrue(snapshot['busy'])
def test_get_snapshot_if_snapshot_not_present(self):
expected_vol_name = fake.SNAPSHOT['volume_id']
expected_snapshot_name = fake.SNAPSHOT['name']
response = netapp_api.NaElement(fake_client.SNAPSHOT_NOT_PRESENT_7MODE)
self.connection.invoke_successfully.return_value = response
self.assertRaises(exception.SnapshotNotFound, self.client.get_snapshot,
expected_vol_name, expected_snapshot_name)
@ddt.data({
'mock_return':
fake_client.SNAPSHOT_INFO_MARKED_FOR_DELETE_SNAPSHOT_7MODE,
'expected': [{
'name': client_base.DELETED_PREFIX + fake.SNAPSHOT_NAME,
'instance_id': 'abcd-ef01-2345-6789',
'volume_name': fake.SNAPSHOT['volume_id'],
}]
}, {
'mock_return': fake_client.NO_RECORDS_RESPONSE,
'expected': [],
}, {
'mock_return':
fake_client.SNAPSHOT_INFO_MARKED_FOR_DELETE_SNAPSHOT_7MODE_BUSY,
'expected': [],
})
@ddt.unpack
def test_get_snapshots_marked_for_deletion(self, mock_return, expected):
api_response = netapp_api.NaElement(mock_return)
volume_list = [fake.SNAPSHOT['volume_id']]
self.mock_object(self.client,
'send_request',
return_value=api_response)
result = self.client.get_snapshots_marked_for_deletion(volume_list)
api_args = {
'target-name': fake.SNAPSHOT['volume_id'],
'target-type': 'volume',
'terse': 'true',
}
self.client.send_request.assert_called_once_with(
'snapshot-list-info', api_args)
self.assertListEqual(expected, result)


@@ -311,23 +311,6 @@ FAKE_CMODE_POOL_MAP = {
},
}
FAKE_7MODE_VOLUME = {
'all': [
netapp_api.NaElement(
etree.XML("""<volume-info xmlns="http://www.netapp.com/filer/admin">
<name>open123</name>
</volume-info>""")),
netapp_api.NaElement(
etree.XML("""<volume-info xmlns="http://www.netapp.com/filer/admin">
<name>mixed3</name>
</volume-info>""")),
netapp_api.NaElement(
etree.XML("""<volume-info xmlns="http://www.netapp.com/filer/admin">
<name>open1234</name>
</volume-info>"""))
],
}
FILE_LIST = ['file1', 'file2', 'file3']
FAKE_LUN = netapp_api.NaElement.create_node_with_children(
@@ -355,35 +338,6 @@ FAKE_LUN = netapp_api.NaElement.create_node_with_children(
'volume': 'fakeLUN',
'vserver': 'fake_vserver'})
FAKE_7MODE_VOL1 = [netapp_api.NaElement(
etree.XML("""<volume-info xmlns="http://www.netapp.com/filer/admin">
<name>open123</name>
<state>online</state>
<size-total>0</size-total>
<size-used>0</size-used>
<size-available>0</size-available>
<is-inconsistent>false</is-inconsistent>
<is-invalid>false</is-invalid>
</volume-info>"""))]
FAKE_7MODE_POOLS = [
{
'pool_name': 'open123',
'consistencygroup_support': True,
'QoS_support': False,
'reserved_percentage': 0,
'total_capacity_gb': 0.0,
'free_capacity_gb': 0.0,
'max_over_subscription_ratio': 20.0,
'multiattach': False,
'thin_provisioning_support': False,
'thick_provisioning_support': True,
'utilization': 30.0,
'filter_function': 'filter',
'goodness_function': 'goodness',
}
]
CG_VOLUME_NAME = 'fake_cg_volume'
CG_GROUP_NAME = 'fake_consistency_group'
CG_POOL_NAME = 'cdot'


@@ -1,253 +0,0 @@
# Copyright (c) 2016 Clinton Knight
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.performance \
import fakes as fake
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode
from cinder.volume.drivers.netapp.dataontap.performance import perf_base
@ddt.ddt
class Performance7modeLibraryTestCase(test.TestCase):
def setUp(self):
super(Performance7modeLibraryTestCase, self).setUp()
with mock.patch.object(perf_7mode.Performance7modeLibrary,
'_init_counter_info'):
self.zapi_client = mock.Mock()
self.zapi_client.get_system_name.return_value = fake.NODE
self.perf_library = perf_7mode.Performance7modeLibrary(
self.zapi_client)
self.perf_library.system_object_name = 'system'
self.perf_library.avg_processor_busy_base_counter_name = (
'cpu_elapsed_time1')
def test_init_counter_info_not_supported(self):
self.zapi_client.features.SYSTEM_METRICS = False
mock_get_base_counter_name = self.mock_object(
self.perf_library, '_get_base_counter_name')
self.perf_library._init_counter_info()
self.assertIsNone(self.perf_library.system_object_name)
self.assertIsNone(
self.perf_library.avg_processor_busy_base_counter_name)
self.assertFalse(mock_get_base_counter_name.called)
def test_init_counter_info_api_error(self):
self.zapi_client.features.SYSTEM_METRICS = True
mock_get_base_counter_name = self.mock_object(
self.perf_library, '_get_base_counter_name',
side_effect=netapp_api.NaApiError)
self.perf_library._init_counter_info()
self.assertEqual('system', self.perf_library.system_object_name)
self.assertEqual(
'cpu_elapsed_time1',
self.perf_library.avg_processor_busy_base_counter_name)
mock_get_base_counter_name.assert_called_once_with(
'system', 'avg_processor_busy')
def test_init_counter_info_system(self):
self.zapi_client.features.SYSTEM_METRICS = True
mock_get_base_counter_name = self.mock_object(
self.perf_library, '_get_base_counter_name',
return_value='cpu_elapsed_time1')
self.perf_library._init_counter_info()
self.assertEqual('system', self.perf_library.system_object_name)
self.assertEqual(
'cpu_elapsed_time1',
self.perf_library.avg_processor_busy_base_counter_name)
mock_get_base_counter_name.assert_called_once_with(
'system', 'avg_processor_busy')
def test_update_performance_cache(self):
self.perf_library.performance_counters = list(range(11, 21))
mock_get_node_utilization_counters = self.mock_object(
self.perf_library, '_get_node_utilization_counters',
return_value=21)
mock_get_node_utilization = self.mock_object(
self.perf_library, '_get_node_utilization',
return_value=25)
self.perf_library.update_performance_cache()
self.assertEqual(list(range(12, 22)),
self.perf_library.performance_counters)
self.assertEqual(25, self.perf_library.utilization)
mock_get_node_utilization_counters.assert_called_once_with()
mock_get_node_utilization.assert_called_once_with(12, 21, fake.NODE)
def test_update_performance_cache_first_pass(self):
mock_get_node_utilization_counters = self.mock_object(
self.perf_library, '_get_node_utilization_counters',
return_value=11)
mock_get_node_utilization = self.mock_object(
self.perf_library, '_get_node_utilization', return_value=25)
self.perf_library.update_performance_cache()
self.assertEqual([11], self.perf_library.performance_counters)
mock_get_node_utilization_counters.assert_called_once_with()
self.assertFalse(mock_get_node_utilization.called)
def test_update_performance_cache_counters_unavailable(self):
self.perf_library.performance_counters = list(range(11, 21))
self.perf_library.utilization = 55.0
mock_get_node_utilization_counters = self.mock_object(
self.perf_library, '_get_node_utilization_counters',
return_value=None)
mock_get_node_utilization = self.mock_object(
self.perf_library, '_get_node_utilization', return_value=25)
self.perf_library.update_performance_cache()
self.assertEqual(list(range(11, 21)),
self.perf_library.performance_counters)
self.assertEqual(55.0, self.perf_library.utilization)
mock_get_node_utilization_counters.assert_called_once_with()
self.assertFalse(mock_get_node_utilization.called)
def test_update_performance_cache_not_supported(self):
self.zapi_client.features.SYSTEM_METRICS = False
mock_get_node_utilization_counters = self.mock_object(
self.perf_library, '_get_node_utilization_counters')
self.perf_library.update_performance_cache()
self.assertEqual([], self.perf_library.performance_counters)
self.assertEqual(perf_base.DEFAULT_UTILIZATION,
self.perf_library.utilization)
self.assertFalse(mock_get_node_utilization_counters.called)
def test_get_node_utilization(self):
self.perf_library.utilization = 47.1
result = self.perf_library.get_node_utilization()
self.assertEqual(47.1, result)
def test_get_node_utilization_counters(self):
mock_get_node_utilization_system_counters = self.mock_object(
self.perf_library, '_get_node_utilization_system_counters',
return_value=['A', 'B', 'C'])
mock_get_node_utilization_wafl_counters = self.mock_object(
self.perf_library, '_get_node_utilization_wafl_counters',
return_value=['D', 'E', 'F'])
mock_get_node_utilization_processor_counters = self.mock_object(
self.perf_library, '_get_node_utilization_processor_counters',
return_value=['G', 'H', 'I'])
result = self.perf_library._get_node_utilization_counters()
expected = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
self.assertEqual(expected, result)
mock_get_node_utilization_system_counters.assert_called_once_with()
mock_get_node_utilization_wafl_counters.assert_called_once_with()
mock_get_node_utilization_processor_counters.assert_called_once_with()
def test_get_node_utilization_counters_api_error(self):
self.mock_object(self.perf_library,
'_get_node_utilization_system_counters',
side_effect=netapp_api.NaApiError)
result = self.perf_library._get_node_utilization_counters()
self.assertIsNone(result)
def test_get_node_utilization_system_counters(self):
mock_get_performance_instance_names = self.mock_object(
self.zapi_client, 'get_performance_instance_names',
return_value=fake.SYSTEM_INSTANCE_NAMES)
mock_get_performance_counters = self.mock_object(
self.zapi_client, 'get_performance_counters',
return_value=fake.SYSTEM_COUNTERS)
result = self.perf_library._get_node_utilization_system_counters()
self.assertEqual(fake.SYSTEM_COUNTERS, result)
mock_get_performance_instance_names.assert_called_once_with('system')
mock_get_performance_counters.assert_called_once_with(
'system', fake.SYSTEM_INSTANCE_NAMES,
['avg_processor_busy', 'cpu_elapsed_time1', 'cpu_elapsed_time'])
def test_get_node_utilization_wafl_counters(self):
mock_get_performance_instance_names = self.mock_object(
self.zapi_client, 'get_performance_instance_names',
return_value=fake.WAFL_INSTANCE_NAMES)
mock_get_performance_counters = self.mock_object(
self.zapi_client, 'get_performance_counters',
return_value=fake.WAFL_COUNTERS)
mock_get_performance_counter_info = self.mock_object(
self.zapi_client, 'get_performance_counter_info',
return_value=fake.WAFL_CP_PHASE_TIMES_COUNTER_INFO)
result = self.perf_library._get_node_utilization_wafl_counters()
self.assertEqual(fake.EXPANDED_WAFL_COUNTERS, result)
mock_get_performance_instance_names.assert_called_once_with('wafl')
mock_get_performance_counters.assert_called_once_with(
'wafl', fake.WAFL_INSTANCE_NAMES,
['total_cp_msecs', 'cp_phase_times'])
mock_get_performance_counter_info.assert_called_once_with(
'wafl', 'cp_phase_times')
def test_get_node_utilization_processor_counters(self):
mock_get_performance_instance_names = self.mock_object(
self.zapi_client, 'get_performance_instance_names',
return_value=fake.PROCESSOR_INSTANCE_NAMES)
mock_get_performance_counters = self.mock_object(
self.zapi_client, 'get_performance_counters',
return_value=fake.PROCESSOR_COUNTERS)
self.mock_object(
self.zapi_client, 'get_performance_counter_info',
return_value=fake.PROCESSOR_DOMAIN_BUSY_COUNTER_INFO)
result = self.perf_library._get_node_utilization_processor_counters()
self.assertEqual(fake.EXPANDED_PROCESSOR_COUNTERS, result)
mock_get_performance_instance_names.assert_called_once_with(
'processor')
mock_get_performance_counters.assert_called_once_with(
'processor', fake.PROCESSOR_INSTANCE_NAMES,
['domain_busy', 'processor_elapsed_time'])


@@ -1,966 +0,0 @@
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Mock unit tests for the NetApp block storage 7-mode library
"""
import ddt
from lxml import etree
import mock
from oslo_utils import timeutils
from oslo_utils import units
from cinder import exception
from cinder.objects import fields
from cinder import test
import cinder.tests.unit.volume.drivers.netapp.dataontap.client.fakes \
as client_fakes
import cinder.tests.unit.volume.drivers.netapp.dataontap.fakes as fake
import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes
from cinder.volume.drivers.netapp.dataontap import block_7mode
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode
from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
@ddt.ddt
class NetAppBlockStorage7modeLibraryTestCase(test.TestCase):
"""Test case for NetApp's 7-Mode iSCSI library."""
def setUp(self):
super(NetAppBlockStorage7modeLibraryTestCase, self).setUp()
kwargs = {
'configuration': self.get_config_7mode(),
'host': 'openstack@7modeblock',
}
self.library = block_7mode.NetAppBlockStorage7modeLibrary(
'driver', 'protocol', **kwargs)
self.library.zapi_client = mock.Mock()
self.zapi_client = self.library.zapi_client
self.library.perf_library = mock.Mock()
self.library.vfiler = mock.Mock()
# Deprecated option
self.library.configuration.netapp_volume_list = None
def get_config_7mode(self):
config = na_fakes.create_configuration_7mode()
config.netapp_storage_protocol = 'iscsi'
config.netapp_login = 'admin'
config.netapp_password = 'pass'
config.netapp_server_hostname = '127.0.0.1'
config.netapp_transport_type = 'http'
config.netapp_server_port = '80'
return config
@mock.patch.object(perf_7mode, 'Performance7modeLibrary', mock.Mock())
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.MagicMock(return_value=(1, 20)))
@mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
'_get_root_volume_name')
@mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
'_do_partner_setup')
@mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup')
def test_do_setup(self, super_do_setup, mock_do_partner_setup,
mock_get_root_volume_name):
self.mock_object(client_base.Client, '_init_ssh_client')
mock_get_root_volume_name.return_value = 'vol0'
context = mock.Mock()
self.library.do_setup(context)
super_do_setup.assert_called_once_with(context)
mock_do_partner_setup.assert_called_once_with()
mock_get_root_volume_name.assert_called_once_with()
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.MagicMock(return_value=(1, 20)))
def test_do_partner_setup(self):
self.mock_object(client_base.Client, '_init_ssh_client')
self.library.configuration.netapp_partner_backend_name = 'partner'
self.library._do_partner_setup()
self.assertIsNotNone(self.library.partner_zapi_client)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.MagicMock(return_value=(1, 20)))
def test_do_partner_setup_no_partner(self):
self.mock_object(client_base.Client, '_init_ssh_client')
self.library._do_partner_setup()
self.assertFalse(hasattr(self.library, 'partner_zapi_client'))
@mock.patch.object(
block_base.NetAppBlockStorageLibrary, 'check_for_setup_error')
def test_check_for_setup_error(self, super_check_for_setup_error):
self.zapi_client.get_ontapi_version.return_value = (1, 9)
self.mock_object(self.library, '_refresh_volume_info')
self.library.volume_list = ['open1', 'open2']
mock_add_looping_tasks = self.mock_object(
self.library, '_add_looping_tasks')
self.library.check_for_setup_error()
mock_add_looping_tasks.assert_called_once_with()
super_check_for_setup_error.assert_called_once_with()
def test_check_for_setup_error_no_filtered_pools(self):
self.zapi_client.get_ontapi_version.return_value = (1, 9)
self.mock_object(self.library, '_refresh_volume_info')
self.library.volume_list = []
self.assertRaises(exception.NetAppDriverException,
self.library.check_for_setup_error)
@ddt.data(None, (1, 8))
def test_check_for_setup_error_unsupported_or_no_version(self, version):
self.zapi_client.get_ontapi_version.return_value = version
self.assertRaises(exception.VolumeBackendAPIException,
self.library.check_for_setup_error)
def test_handle_ems_logging(self):
self.library.volume_list = ['vol0', 'vol1', 'vol2']
self.mock_object(
dot_utils, 'build_ems_log_message_0',
return_value='fake_base_ems_log_message')
self.mock_object(
dot_utils, 'build_ems_log_message_1',
return_value='fake_pool_ems_log_message')
mock_send_ems_log_message = self.mock_object(
self.zapi_client, 'send_ems_log_message')
self.library._handle_ems_logging()
mock_send_ems_log_message.assert_has_calls([
mock.call('fake_base_ems_log_message'),
mock.call('fake_pool_ems_log_message'),
])
dot_utils.build_ems_log_message_0.assert_called_once_with(
self.library.driver_name, self.library.app_version,
self.library.driver_mode)
dot_utils.build_ems_log_message_1.assert_called_once_with(
self.library.driver_name, self.library.app_version, None,
self.library.volume_list, [])
def test__get_volume_model_update(self):
"""Driver is not expected to return a model update."""
self.assertIsNone(
self.library._get_volume_model_update(fake.VOLUME_REF))
@ddt.data(None, fake.VFILER)
def test__get_owner(self, vfiler):
self.library.configuration.netapp_server_hostname = 'openstack'
self.library.vfiler = vfiler
expected_owner = 'openstack'
retval = self.library._get_owner()
if vfiler:
expected_owner += ':' + vfiler
self.assertEqual(expected_owner, retval)
def test_find_mapped_lun_igroup(self):
response = netapp_api.NaElement(etree.XML("""
<results status="passed">
<initiator-groups>
<initiator-group-info>
<initiator-group-name>%(initiator-group-name)s</initiator-group-name>
<initiator-group-type>%(initiator-group-type)s</initiator-group-type>
<initiator-group-uuid>1477ee47-0e1f-4b35-a82c-dcca0b76fc44
</initiator-group-uuid>
<initiator-group-os-type>linux</initiator-group-os-type>
<initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
<initiator-group-throttle-borrow>false
</initiator-group-throttle-borrow>
<initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
<initiator-group-alua-enabled>true</initiator-group-alua-enabled>
<initiator-group-report-scsi-name-enabled>true
</initiator-group-report-scsi-name-enabled>
<initiator-group-use-partner>true</initiator-group-use-partner>
<initiators>
<initiator-info>
<initiator-name>21:00:00:24:ff:40:6c:c3</initiator-name>
</initiator-info>
<initiator-info>
<initiator-name>21:00:00:24:ff:40:6c:c2</initiator-name>
<initiator-alias-info>
<initiator-alias>Centos</initiator-alias>
</initiator-alias-info>
</initiator-info>
</initiators>
<lun-id>2</lun-id>
</initiator-group-info>
</initiator-groups>
</results>""" % fake.IGROUP1))
initiators = fake.FC_FORMATTED_INITIATORS
self.zapi_client.get_lun_map.return_value = response
(igroup, lun_id) = self.library._find_mapped_lun_igroup('path',
initiators)
self.assertEqual(fake.IGROUP1_NAME, igroup)
self.assertEqual('2', lun_id)
def test_find_mapped_lun_igroup_initiator_mismatch(self):
response = netapp_api.NaElement(etree.XML("""
<results status="passed">
<initiator-groups>
<initiator-group-info>
<initiator-group-name>openstack-igroup1</initiator-group-name>
<initiator-group-type>fcp</initiator-group-type>
<initiator-group-uuid>1477ee47-0e1f-4b35-a82c-dcca0b76fc44
</initiator-group-uuid>
<initiator-group-os-type>linux</initiator-group-os-type>
<initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
<initiator-group-throttle-borrow>false
</initiator-group-throttle-borrow>
<initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
<initiator-group-alua-enabled>true</initiator-group-alua-enabled>
<initiator-group-report-scsi-name-enabled>true
</initiator-group-report-scsi-name-enabled>
<initiator-group-use-partner>true</initiator-group-use-partner>
<initiators>
<initiator-info>
<initiator-name>21:00:00:24:ff:40:6c:c3</initiator-name>
</initiator-info>
</initiators>
<lun-id>2</lun-id>
</initiator-group-info>
</initiator-groups>
</results>"""))
initiators = fake.FC_FORMATTED_INITIATORS
self.zapi_client.get_lun_map.return_value = response
(igroup, lun_id) = self.library._find_mapped_lun_igroup('path',
initiators)
self.assertIsNone(igroup)
self.assertIsNone(lun_id)
def test_find_mapped_lun_igroup_no_igroups(self):
response = netapp_api.NaElement(etree.XML("""
<results status="passed">
<initiator-groups />
</results>"""))
initiators = fake.FC_FORMATTED_INITIATORS
self.zapi_client.get_lun_map.return_value = response
(igroup, lun_id) = self.library._find_mapped_lun_igroup('path',
initiators)
self.assertIsNone(igroup)
self.assertIsNone(lun_id)
def test_find_mapped_lun_igroup_raises(self):
self.zapi_client.get_lun_map.side_effect = netapp_api.NaApiError
initiators = fake.FC_FORMATTED_INITIATORS
self.assertRaises(netapp_api.NaApiError,
self.library._find_mapped_lun_igroup,
'path',
initiators)
def test_has_luns_mapped_to_initiators_local_map(self):
initiator_list = fake.FC_FORMATTED_INITIATORS
self.zapi_client.has_luns_mapped_to_initiators.return_value = True
self.library.partner_zapi_client = mock.Mock()
result = self.library._has_luns_mapped_to_initiators(initiator_list)
self.assertTrue(result)
self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
initiator_list)
self.assertEqual(0, self.library.partner_zapi_client.
has_luns_mapped_to_initiators.call_count)
def test_has_luns_mapped_to_initiators_partner_map(self):
initiator_list = fake.FC_FORMATTED_INITIATORS
self.zapi_client.has_luns_mapped_to_initiators.return_value = False
self.library.partner_zapi_client = mock.Mock()
self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
return_value = True
result = self.library._has_luns_mapped_to_initiators(initiator_list)
self.assertTrue(result)
self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
initiator_list)
self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
assert_called_with(initiator_list)
def test_has_luns_mapped_to_initiators_no_maps(self):
initiator_list = fake.FC_FORMATTED_INITIATORS
self.zapi_client.has_luns_mapped_to_initiators.return_value = False
self.library.partner_zapi_client = mock.Mock()
self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
return_value = False
result = self.library._has_luns_mapped_to_initiators(initiator_list)
self.assertFalse(result)
self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
initiator_list)
self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
assert_called_with(initiator_list)
def test_has_luns_mapped_to_initiators_no_partner(self):
initiator_list = fake.FC_FORMATTED_INITIATORS
self.zapi_client.has_luns_mapped_to_initiators.return_value = False
self.library.partner_zapi_client = mock.Mock()
self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
return_value = True
result = self.library._has_luns_mapped_to_initiators(
initiator_list, include_partner=False)
self.assertFalse(result)
self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
initiator_list)
self.assertEqual(0, self.library.partner_zapi_client.
has_luns_mapped_to_initiators.call_count)
@ddt.data(True, False)
def test_clone_lun_zero_block_count(self, is_snapshot):
"""Test for when clone lun is not passed a block count."""
self.library._get_lun_attr = mock.Mock(return_value={
'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'})
self.library.zapi_client = mock.Mock()
self.library.zapi_client.get_lun_by_args.return_value = [fake.FAKE_LUN]
self.library._add_lun_to_table = mock.Mock()
self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false',
is_snapshot=is_snapshot)
self.library.zapi_client.clone_lun.assert_called_once_with(
'/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN',
'newFakeLUN', 'false', block_count=0, dest_block=0,
source_snapshot=None, src_block=0)
def test_clone_lun_blocks(self):
"""Test for when clone lun is passed block information."""
block_count = 10
src_block = 10
dest_block = 30
self.library._get_lun_attr = mock.Mock(return_value={
'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'})
self.library.zapi_client = mock.Mock()
self.library.zapi_client.get_lun_by_args.return_value = [fake.FAKE_LUN]
self.library._add_lun_to_table = mock.Mock()
self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false',
block_count=block_count, src_block=src_block,
dest_block=dest_block)
self.library.zapi_client.clone_lun.assert_called_once_with(
'/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN',
'newFakeLUN', 'false', block_count=block_count,
dest_block=dest_block, src_block=src_block,
source_snapshot=None)
def test_clone_lun_no_space_reservation(self):
"""Test for when space_reservation is not passed."""
self.library._get_lun_attr = mock.Mock(return_value={
'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'})
self.library.lun_space_reservation = 'false'
self.library.zapi_client = mock.Mock()
self.library.zapi_client.get_lun_by_args.return_value = [fake.FAKE_LUN]
self.library._add_lun_to_table = mock.Mock()
self.library._clone_lun('fakeLUN', 'newFakeLUN')
self.library.zapi_client.clone_lun.assert_called_once_with(
'/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN',
'newFakeLUN', 'false', block_count=0, dest_block=0, src_block=0,
source_snapshot=None)
def test_clone_lun_qos_supplied(self):
"""Test for qos supplied in clone lun invocation."""
self.assertRaises(exception.VolumeDriverException,
self.library._clone_lun,
'fakeLUN',
'newFakeLUN',
qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
def test_get_fc_target_wwpns(self):
ports1 = [fake.FC_FORMATTED_TARGET_WWPNS[0],
fake.FC_FORMATTED_TARGET_WWPNS[1]]
ports2 = [fake.FC_FORMATTED_TARGET_WWPNS[2],
fake.FC_FORMATTED_TARGET_WWPNS[3]]
self.zapi_client.get_fc_target_wwpns.return_value = ports1
self.library.partner_zapi_client = mock.Mock()
self.library.partner_zapi_client.get_fc_target_wwpns.return_value = \
ports2
result = self.library._get_fc_target_wwpns()
self.assertSetEqual(set(fake.FC_FORMATTED_TARGET_WWPNS), set(result))
def test_get_fc_target_wwpns_no_partner(self):
ports1 = [fake.FC_FORMATTED_TARGET_WWPNS[0],
fake.FC_FORMATTED_TARGET_WWPNS[1]]
ports2 = [fake.FC_FORMATTED_TARGET_WWPNS[2],
fake.FC_FORMATTED_TARGET_WWPNS[3]]
self.zapi_client.get_fc_target_wwpns.return_value = ports1
self.library.partner_zapi_client = mock.Mock()
self.library.partner_zapi_client.get_fc_target_wwpns.return_value = \
ports2
result = self.library._get_fc_target_wwpns(include_partner=False)
self.assertSetEqual(set(ports1), set(result))
def test_create_lun(self):
self.library.vol_refresh_voluntary = False
self.library._create_lun(fake.VOLUME_ID, fake.LUN_ID,
fake.LUN_SIZE, fake.LUN_METADATA)
self.library.zapi_client.create_lun.assert_called_once_with(
fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA,
None)
self.assertTrue(self.library.vol_refresh_voluntary)
def test_create_lun_with_qos_policy_group(self):
self.assertRaises(exception.VolumeDriverException,
self.library._create_lun, fake.VOLUME_ID,
fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA,
qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
def test_check_volume_type_for_lun_legacy_qos_not_supported(self):
mock_get_volume_type = self.mock_object(na_utils,
'get_volume_type_from_volume')
self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
self.library._check_volume_type_for_lun,
na_fakes.VOLUME, {}, {}, na_fakes.LEGACY_EXTRA_SPECS)
self.assertEqual(0, mock_get_volume_type.call_count)
def test_check_volume_type_for_lun_no_volume_type(self):
mock_get_volume_type = self.mock_object(na_utils,
'get_volume_type_from_volume')
mock_get_volume_type.return_value = None
mock_get_backend_spec = self.mock_object(
na_utils, 'get_backend_qos_spec_from_volume_type')
self.library._check_volume_type_for_lun(na_fakes.VOLUME, {}, {}, None)
self.assertEqual(0, mock_get_backend_spec.call_count)
def test_check_volume_type_for_lun_qos_spec_not_supported(self):
mock_get_volume_type = self.mock_object(na_utils,
'get_volume_type_from_volume')
mock_get_volume_type.return_value = na_fakes.VOLUME_TYPE
mock_get_backend_spec = self.mock_object(
na_utils, 'get_backend_qos_spec_from_volume_type')
mock_get_backend_spec.return_value = na_fakes.QOS_SPEC
self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
self.library._check_volume_type_for_lun,
na_fakes.VOLUME, {}, {}, na_fakes.EXTRA_SPECS)
def test_get_preferred_target_from_list(self):
result = self.library._get_preferred_target_from_list(
fake.ISCSI_TARGET_DETAILS_LIST)
self.assertEqual(fake.ISCSI_TARGET_DETAILS_LIST[0], result)
def test_mark_qos_policy_group_for_deletion(self):
result = self.library._mark_qos_policy_group_for_deletion(
fake.QOS_POLICY_GROUP_INFO)
self.assertIsNone(result)
def test_setup_qos_for_volume(self):
result = self.library._setup_qos_for_volume(fake.VOLUME,
fake.EXTRA_SPECS)
self.assertIsNone(result)
def test_manage_existing_lun_same_name(self):
mock_lun = block_base.NetAppLun('handle', 'name', '1',
{'Path': '/vol/FAKE_CMODE_VOL1/name'})
self.library._get_existing_vol_with_manage_ref = mock.Mock(
return_value=mock_lun)
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(na_utils, 'log_extra_spec_warnings')
self.library._check_volume_type_for_lun = mock.Mock()
self.library._add_lun_to_table = mock.Mock()
self.zapi_client.move_lun = mock.Mock()
self.library.manage_existing({'name': 'name'}, {'ref': 'ref'})
self.library._get_existing_vol_with_manage_ref.assert_called_once_with(
{'ref': 'ref'})
self.assertEqual(1, self.library._check_volume_type_for_lun.call_count)
self.assertEqual(1, self.library._add_lun_to_table.call_count)
self.assertEqual(0, self.zapi_client.move_lun.call_count)
def test_manage_existing_lun_new_path(self):
mock_lun = block_base.NetAppLun(
'handle', 'name', '1', {'Path': '/vol/FAKE_CMODE_VOL1/name'})
self.library._get_existing_vol_with_manage_ref = mock.Mock(
return_value=mock_lun)
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(na_utils, 'log_extra_spec_warnings')
self.library._check_volume_type_for_lun = mock.Mock()
self.library._add_lun_to_table = mock.Mock()
self.zapi_client.move_lun = mock.Mock()
self.library.manage_existing({'name': 'volume'}, {'ref': 'ref'})
self.assertEqual(
2, self.library._get_existing_vol_with_manage_ref.call_count)
self.assertEqual(1, self.library._check_volume_type_for_lun.call_count)
self.assertEqual(1, self.library._add_lun_to_table.call_count)
self.zapi_client.move_lun.assert_called_once_with(
'/vol/FAKE_CMODE_VOL1/name', '/vol/FAKE_CMODE_VOL1/volume')
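
Taken together, test_manage_existing_lun_same_name and test_manage_existing_lun_new_path pin down the rename-on-manage behavior: a LUN whose name already matches the Cinder volume is only registered, while a mismatched name triggers a move_lun to a path built from the same flexvol and the new name. A minimal, self-contained sketch of that decision (the function name and the move_lun callable are hypothetical stand-ins, not the library's code):

import os


def manage_existing(volume_name, existing_lun_path, move_lun):
    # Adopt an existing LUN, renaming it only when the names differ.
    # ``move_lun(src, dst)`` stands in for the ZAPI call the real library
    # uses; ``existing_lun_path`` looks like '/vol/<flexvol>/<lun_name>'.
    current_name = os.path.basename(existing_lun_path)
    if current_name == volume_name:
        return existing_lun_path          # same name: nothing to move
    new_path = '%s/%s' % (os.path.dirname(existing_lun_path), volume_name)
    move_lun(existing_lun_path, new_path)
    return new_path


# manage_existing('volume', '/vol/FAKE_CMODE_VOL1/name', print)
# moves the LUN to '/vol/FAKE_CMODE_VOL1/volume', as the second test asserts.
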
def test_get_pool_stats_no_volumes(self):
self.library.vols = []
result = self.library._get_pool_stats()
self.assertListEqual([], result)
@ddt.data({'netapp_lun_space_reservation': 'enabled'},
{'netapp_lun_space_reservation': 'disabled'})
@ddt.unpack
def test_get_pool_stats(self, netapp_lun_space_reservation):
self.library.volume_list = ['vol0', 'vol1', 'vol2']
self.library.root_volume_name = 'vol0'
self.library.reserved_percentage = 5
self.library.max_over_subscription_ratio = 10.0
self.library.configuration.netapp_lun_space_reservation = (
netapp_lun_space_reservation)
self.library.vols = netapp_api.NaElement(
client_fakes.VOLUME_LIST_INFO_RESPONSE).get_child_by_name(
'volumes').get_children()
self.library.perf_library.get_node_utilization = (
mock.Mock(return_value=30.0))
thick = netapp_lun_space_reservation == 'enabled'
result = self.library._get_pool_stats(filter_function='filter',
goodness_function='goodness')
expected = [{
'pool_name': 'vol1',
'consistencygroup_support': True,
'QoS_support': False,
'thin_provisioning_support': not thick,
'thick_provisioning_support': thick,
'free_capacity_gb': 1339.27,
'total_capacity_gb': 1342.21,
'reserved_percentage': 5,
'max_over_subscription_ratio': 10.0,
'multiattach': False,
'utilization': 30.0,
'filter_function': 'filter',
'goodness_function': 'goodness',
}]
self.assertEqual(expected, result)
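
The capacity figures in the expected pool above come from converting the fake flexvol's byte counts to GiB and rounding down to two decimal places (the deleted library does this with na_utils.round_down). A tiny illustration of that conversion, not the actual helper:

def bytes_to_gib_rounded_down(size_bytes):
    # Convert bytes to GiB and truncate to two decimal places.
    gib = float(size_bytes) / (1024 ** 3)
    return int(gib * 100) / 100.0


print(bytes_to_gib_rounded_down(1610612736))  # 1.5
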
def test_get_filtered_pools_invalid_conf(self):
"""Verify an exception is raised if the regex pattern is invalid."""
self.library.configuration.netapp_pool_name_search_pattern = '(.+'
self.assertRaises(exception.InvalidConfigurationValue,
self.library._get_filtered_pools)
@ddt.data('.*?3$|mix.+', '(.+?[0-9]+) ', '^.+3$', '^[a-z].*?[^4]$')
def test_get_filtered_pools_match_select_pools(self, patterns):
self.library.vols = fake.FAKE_7MODE_VOLUME['all']
self.library.configuration.netapp_pool_name_search_pattern = patterns
filtered_pools = self.library._get_filtered_pools()
self.assertEqual(
fake.FAKE_7MODE_VOLUME['all'][0].get_child_content('name'),
filtered_pools[0]
)
self.assertEqual(
fake.FAKE_7MODE_VOLUME['all'][1].get_child_content('name'),
filtered_pools[1]
)
@ddt.data('', 'mix.+|open.+', '.+', 'open123, mixed3, open1234', '.+')
def test_get_filtered_pools_match_all_pools(self, patterns):
self.library.vols = fake.FAKE_7MODE_VOLUME['all']
self.library.configuration.netapp_pool_name_search_pattern = patterns
filtered_pools = self.library._get_filtered_pools()
self.assertEqual(
fake.FAKE_7MODE_VOLUME['all'][0].get_child_content('name'),
filtered_pools[0]
)
self.assertEqual(
fake.FAKE_7MODE_VOLUME['all'][1].get_child_content('name'),
filtered_pools[1]
)
self.assertEqual(
fake.FAKE_7MODE_VOLUME['all'][2].get_child_content('name'),
filtered_pools[2]
)
@ddt.data('abc|stackopen|openstack|abc.*', 'abc',
'stackopen, openstack, open', '^$')
def test_get_filtered_pools_non_matching_patterns(self, patterns):
self.library.vols = fake.FAKE_7MODE_VOLUME['all']
self.library.configuration.netapp_pool_name_search_pattern = patterns
filtered_pools = self.library._get_filtered_pools()
self.assertListEqual([], filtered_pools)
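
The four pool-filtering tests above only make sense if the configured pattern is treated as a fully anchored regular expression, with commas acting as alternation (so 'open123, mixed3, open1234' selects exactly those pools and 'abc' selects none). A standalone sketch of that interpretation, inferred from the test expectations rather than quoting na_utils.get_pool_name_filter_regex:

import re


def build_pool_filter(pattern):
    # Treat the configured value as a fully anchored regex, with commas
    # separating alternatives; an empty value selects every pool.
    if not pattern:
        pattern = '.+'
    alternatives = [p.strip() for p in pattern.split(',') if p.strip()]
    return re.compile(r'^(?:%s)$' % '|'.join(alternatives))


pools = ['open123', 'mixed3', 'open1234']
print([p for p in pools if build_pool_filter('.*?3$|mix.+').match(p)])
# ['open123', 'mixed3']
print([p for p in pools if build_pool_filter('abc').match(p)])
# []
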
def test_get_pool_stats_no_ssc_vols(self):
self.library.vols = {}
pools = self.library._get_pool_stats()
self.assertListEqual([], pools)
def test_get_pool_stats_with_filtered_pools(self):
self.library.vols = fake.FAKE_7MODE_VOL1
self.library.volume_list = [
fake.FAKE_7MODE_VOL1[0].get_child_content('name')
]
self.library.root_volume_name = ''
self.library.perf_library.get_node_utilization = (
mock.Mock(return_value=30.0))
pools = self.library._get_pool_stats(filter_function='filter',
goodness_function='goodness')
self.assertListEqual(fake.FAKE_7MODE_POOLS, pools)
def test_get_pool_stats_no_filtered_pools(self):
self.library.vols = fake.FAKE_7MODE_VOL1
self.library.volume_list = ['open1', 'open2']
self.library.root_volume_name = ''
pools = self.library._get_pool_stats()
self.assertListEqual([], pools)
@ddt.data((None, False, False),
(30, True, False),
(30, False, True))
@ddt.unpack
def test__refresh_volume_info_already_running(self,
vol_refresh_time,
vol_refresh_voluntary,
is_newer):
mock_warning_log = self.mock_object(block_7mode.LOG, 'warning')
self.library.vol_refresh_time = vol_refresh_time
self.library.vol_refresh_voluntary = vol_refresh_voluntary
self.library.vol_refresh_interval = 30
self.mock_object(timeutils, 'is_newer_than', return_value=is_newer)
self.mock_object(na_utils, 'set_safe_attr', return_value=False)
retval = self.library._refresh_volume_info()
self.assertIsNone(retval)
# Assert no values are unset by the method
self.assertEqual(vol_refresh_voluntary,
self.library.vol_refresh_voluntary)
self.assertEqual(vol_refresh_time, self.library.vol_refresh_time)
if timeutils.is_newer_than.called:
timeutils.is_newer_than.assert_called_once_with(
vol_refresh_time, self.library.vol_refresh_interval)
na_utils.set_safe_attr.assert_has_calls([
mock.call(self.library, 'vol_refresh_running', True),
mock.call(self.library, 'vol_refresh_running', False)])
self.assertEqual(1, mock_warning_log.call_count)
def test__refresh_volume_info(self):
mock_warning_log = self.mock_object(block_7mode.LOG, 'warning')
self.library.vol_refresh_time = None
self.library.vol_refresh_voluntary = True
self.mock_object(timeutils, 'is_newer_than')
self.mock_object(self.library.zapi_client, 'get_filer_volumes')
self.mock_object(self.library, '_get_filtered_pools',
return_value=['vol1', 'vol2'])
self.mock_object(na_utils, 'set_safe_attr', return_value=True)
retval = self.library._refresh_volume_info()
self.assertIsNone(retval)
self.assertEqual(False, self.library.vol_refresh_voluntary)
self.assertEqual(['vol1', 'vol2'], self.library.volume_list)
self.assertIsNotNone(self.library.vol_refresh_time)
na_utils.set_safe_attr.assert_has_calls([
mock.call(self.library, 'vol_refresh_running', True),
mock.call(self.library, 'vol_refresh_running', False)])
self.assertFalse(mock_warning_log.called)
def test__refresh_volume_info_exception(self):
mock_warning_log = self.mock_object(block_7mode.LOG, 'warning')
self.library.vol_refresh_time = None
self.library.vol_refresh_voluntary = True
self.mock_object(timeutils, 'is_newer_than')
self.mock_object(na_utils, 'set_safe_attr', return_value=True)
self.mock_object(self.library.zapi_client,
'get_filer_volumes',
side_effect=exception.NetAppDriverException)
self.mock_object(self.library, '_get_filtered_pools')
retval = self.library._refresh_volume_info()
self.assertIsNone(retval)
self.assertFalse(self.library._get_filtered_pools.called)
self.assertEqual(1, mock_warning_log.call_count)
def test_delete_volume(self):
self.library.vol_refresh_voluntary = False
mock_super_delete_volume = self.mock_object(
block_base.NetAppBlockStorageLibrary, 'delete_volume')
self.library.delete_volume(fake.VOLUME)
mock_super_delete_volume.assert_called_once_with(fake.VOLUME)
self.assertTrue(self.library.vol_refresh_voluntary)
def test_delete_snapshot(self):
self.library.vol_refresh_voluntary = False
mock_super_delete_snapshot = self.mock_object(
block_base.NetAppBlockStorageLibrary, 'delete_snapshot')
self.library.delete_snapshot(fake.SNAPSHOT)
mock_super_delete_snapshot.assert_called_once_with(fake.SNAPSHOT)
self.assertTrue(self.library.vol_refresh_voluntary)
def test_add_looping_tasks(self):
mock_super_add_looping_tasks = self.mock_object(
block_base.NetAppBlockStorageLibrary, '_add_looping_tasks')
self.library._add_looping_tasks()
mock_super_add_looping_tasks.assert_called_once_with()
def test_get_backing_flexvol_names(self):
self.library.volume_list = ['vol0', 'vol1', 'vol2']
result = self.library._get_backing_flexvol_names()
self.assertEqual('vol2', result[2])
def test_create_cgsnapshot(self):
snapshot = fake.CG_SNAPSHOT
snapshot['volume'] = fake.CG_VOLUME
mock_extract_host = self.mock_object(
volume_utils, 'extract_host', return_value=fake.POOL_NAME)
mock_clone_lun = self.mock_object(self.library, '_clone_lun')
mock_busy = self.mock_object(
self.zapi_client, 'wait_for_busy_snapshot')
mock_delete_snapshot = self.mock_object(
self.zapi_client, 'delete_snapshot')
self.library.create_cgsnapshot(fake.CG_SNAPSHOT, [snapshot])
mock_extract_host.assert_called_once_with(fake.CG_VOLUME['host'],
level='pool')
self.zapi_client.create_cg_snapshot.assert_called_once_with(
set([fake.POOL_NAME]), fake.CG_SNAPSHOT_ID)
mock_clone_lun.assert_called_once_with(
fake.CG_VOLUME_NAME, fake.CG_SNAPSHOT_NAME,
source_snapshot=fake.CG_SNAPSHOT_ID)
mock_busy.assert_called_once_with(fake.POOL_NAME, fake.CG_SNAPSHOT_ID)
mock_delete_snapshot.assert_called_once_with(
fake.POOL_NAME, fake.CG_SNAPSHOT_ID)
def test_create_cgsnapshot_busy_snapshot(self):
snapshot = fake.CG_SNAPSHOT
snapshot['volume'] = fake.CG_VOLUME
mock_extract_host = self.mock_object(
volume_utils, 'extract_host',
return_value=fake.POOL_NAME)
mock_clone_lun = self.mock_object(self.library, '_clone_lun')
mock_busy = self.mock_object(
self.zapi_client, 'wait_for_busy_snapshot')
mock_busy.side_effect = exception.SnapshotIsBusy(snapshot['name'])
mock_delete_snapshot = self.mock_object(
self.zapi_client, 'delete_snapshot')
mock_mark_snapshot_for_deletion = self.mock_object(
self.zapi_client, 'mark_snapshot_for_deletion')
self.library.create_cgsnapshot(fake.CG_SNAPSHOT, [snapshot])
mock_extract_host.assert_called_once_with(
fake.CG_VOLUME['host'], level='pool')
self.zapi_client.create_cg_snapshot.assert_called_once_with(
set([fake.POOL_NAME]), fake.CG_SNAPSHOT_ID)
mock_clone_lun.assert_called_once_with(
fake.CG_VOLUME_NAME, fake.CG_SNAPSHOT_NAME,
source_snapshot=fake.CG_SNAPSHOT_ID)
mock_delete_snapshot.assert_not_called()
mock_mark_snapshot_for_deletion.assert_called_once_with(
fake.POOL_NAME, fake.CG_SNAPSHOT_ID)
def test_delete_cgsnapshot(self):
mock_delete_snapshot = self.mock_object(
self.library, '_delete_lun')
self.library.delete_cgsnapshot(fake.CG_SNAPSHOT, [fake.CG_SNAPSHOT])
mock_delete_snapshot.assert_called_once_with(fake.CG_SNAPSHOT['name'])
def test_delete_cgsnapshot_not_found(self):
self.mock_object(block_base, 'LOG')
self.mock_object(self.library, '_get_lun_attr', return_value=None)
self.library.delete_cgsnapshot(fake.CG_SNAPSHOT, [fake.CG_SNAPSHOT])
self.assertEqual(0, block_base.LOG.error.call_count)
self.assertEqual(1, block_base.LOG.warning.call_count)
self.assertEqual(0, block_base.LOG.info.call_count)
def test_create_volume_with_cg(self):
volume_size_in_bytes = int(fake.CG_VOLUME_SIZE) * units.Gi
self._create_volume_test_helper()
self.library.create_volume(fake.CG_VOLUME)
self.library._create_lun.assert_called_once_with(
fake.POOL_NAME, fake.CG_VOLUME_NAME, volume_size_in_bytes,
fake.CG_LUN_METADATA, None)
self.library._get_volume_model_update.assert_called_once_with(
fake.CG_VOLUME)
self.assertEqual(0, self.library.
_mark_qos_policy_group_for_deletion.call_count)
self.assertEqual(0, block_base.LOG.error.call_count)
def _create_volume_test_helper(self):
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(na_utils, 'log_extra_spec_warnings')
self.mock_object(block_base, 'LOG')
self.mock_object(volume_utils, 'extract_host',
return_value=fake.POOL_NAME)
self.mock_object(self.library, '_setup_qos_for_volume',
return_value=None)
self.mock_object(self.library, '_create_lun')
self.mock_object(self.library, '_create_lun_handle')
self.mock_object(self.library, '_add_lun_to_table')
self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
self.mock_object(self.library, '_get_volume_model_update')
def test_create_consistency_group(self):
model_update = self.library.create_consistencygroup(
fake.CONSISTENCY_GROUP)
self.assertEqual('available', model_update['status'])
def test_delete_consistencygroup_volume_delete_failure(self):
self.mock_object(block_7mode, 'LOG')
self.mock_object(self.library, '_delete_lun', side_effect=Exception)
model_update, volumes = self.library.delete_consistencygroup(
fake.CONSISTENCY_GROUP, [fake.CG_VOLUME])
self.assertEqual('deleted', model_update['status'])
self.assertEqual('error_deleting', volumes[0]['status'])
self.assertEqual(1, block_7mode.LOG.exception.call_count)
def test_delete_consistencygroup_not_found(self):
self.mock_object(block_7mode, 'LOG')
self.mock_object(self.library, '_get_lun_attr', return_value=None)
model_update, volumes = self.library.delete_consistencygroup(
fake.CONSISTENCY_GROUP, [fake.CG_VOLUME])
self.assertEqual(0, block_7mode.LOG.error.call_count)
self.assertEqual(0, block_7mode.LOG.info.call_count)
self.assertEqual('deleted', model_update['status'])
self.assertEqual('deleted', volumes[0]['status'])
@ddt.data(None,
{'replication_status': fields.ReplicationStatus.ENABLED})
def test_create_consistencygroup_from_src_cg_snapshot(self,
volume_model_update):
mock_clone_source_to_destination = self.mock_object(
self.library, '_clone_source_to_destination',
return_value=volume_model_update)
actual_return_value = self.library.create_consistencygroup_from_src(
fake.CONSISTENCY_GROUP, [fake.VOLUME], cgsnapshot=fake.CG_SNAPSHOT,
snapshots=[fake.CG_VOLUME_SNAPSHOT])
clone_source_to_destination_args = {
'name': fake.CG_SNAPSHOT['name'],
'size': fake.CG_SNAPSHOT['volume_size'],
}
mock_clone_source_to_destination.assert_called_once_with(
clone_source_to_destination_args, fake.VOLUME)
if volume_model_update:
volume_model_update['id'] = fake.VOLUME['id']
expected_return_value = ((None, [volume_model_update])
if volume_model_update else (None, []))
self.assertEqual(expected_return_value, actual_return_value)
@ddt.data(None,
{'replication_status': fields.ReplicationStatus.ENABLED})
def test_create_consistencygroup_from_src_cg(self, volume_model_update):
lun_name = fake.SOURCE_CG_VOLUME['name']
mock_lun = block_base.NetAppLun(
lun_name, lun_name, '3', {'UUID': 'fake_uuid'})
self.mock_object(self.library, '_get_lun_from_table',
return_value=mock_lun)
mock_clone_source_to_destination = self.mock_object(
self.library, '_clone_source_to_destination',
return_value=volume_model_update)
actual_return_value = self.library.create_consistencygroup_from_src(
fake.CONSISTENCY_GROUP, [fake.VOLUME],
source_cg=fake.SOURCE_CONSISTENCY_GROUP,
source_vols=[fake.SOURCE_CG_VOLUME])
clone_source_to_destination_args = {
'name': fake.SOURCE_CG_VOLUME['name'],
'size': fake.SOURCE_CG_VOLUME['size'],
}
if volume_model_update:
volume_model_update['id'] = fake.VOLUME['id']
expected_return_value = ((None, [volume_model_update])
if volume_model_update else (None, []))
mock_clone_source_to_destination.assert_called_once_with(
clone_source_to_destination_args, fake.VOLUME)
self.assertEqual(expected_return_value, actual_return_value)


@ -183,8 +183,7 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
mock.call('fake_pool_ems_log_message'),
])
dot_utils.build_ems_log_message_0.assert_called_once_with(
self.library.driver_name, self.library.app_version,
self.library.driver_mode)
self.library.driver_name, self.library.app_version)
dot_utils.build_ems_log_message_1.assert_called_once_with(
self.library.driver_name, self.library.app_version,
self.library.vserver, volume_list, [])


@ -17,11 +17,8 @@ Mock unit tests for the NetApp block storage driver interfaces
from cinder import test
from cinder.volume.drivers.netapp.dataontap import block_7mode
from cinder.volume.drivers.netapp.dataontap import block_cmode
from cinder.volume.drivers.netapp.dataontap import fc_7mode
from cinder.volume.drivers.netapp.dataontap import fc_cmode
from cinder.volume.drivers.netapp.dataontap import iscsi_7mode
from cinder.volume.drivers.netapp.dataontap import iscsi_cmode
@ -33,20 +30,14 @@ class NetAppBlockStorageDriverInterfaceTestCase(test.TestCase):
self.mock_object(block_cmode.NetAppBlockStorageCmodeLibrary,
'__init__',
return_value=None)
self.mock_object(block_7mode.NetAppBlockStorage7modeLibrary,
'__init__',
return_value=None)
self.iscsi_7mode_driver = iscsi_7mode.NetApp7modeISCSIDriver()
self.iscsi_cmode_driver = iscsi_cmode.NetAppCmodeISCSIDriver()
self.fc_7mode_driver = fc_7mode.NetApp7modeFibreChannelDriver()
self.fc_cmode_driver = fc_cmode.NetAppCmodeFibreChannelDriver()
def test_driver_interfaces_match(self):
"""Ensure the NetApp block storage driver interfaces match.
The four block storage Cinder drivers from NetApp (iSCSI/FC,
7-mode/C-mode) are merely passthrough shim layers atop a common
The two block storage Cinder drivers from NetApp (iSCSI/FC)
are merely passthrough shim layers atop a common
block storage library. Bugs have been introduced when a Cinder
method was exposed via a subset of those driver shims. This test
ensures they remain in sync and the library features are uniformly

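
The docstring above frames the iSCSI and FC drivers as thin shims over one shared block storage library, and the test exists to catch a method exposed on only some of those shims. A small generic sketch of that style of interface check, using inspect and hypothetical classes rather than the real drivers:

import inspect


def public_methods(cls):
    # Collect the names of a class's public callables.
    return {name for name, member in inspect.getmembers(cls)
            if callable(member) and not name.startswith('_')}


class ShimA(object):
    def create_volume(self, volume):
        pass

    def delete_volume(self, volume):
        pass


class ShimB(object):
    def create_volume(self, volume):
        pass

    def delete_volume(self, volume):
        pass


# An empty symmetric difference means both shims expose the same interface.
assert public_methods(ShimA) ^ public_methods(ShimB) == set()
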

@ -1,425 +0,0 @@
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the NetApp 7mode NFS storage driver
"""
import ddt
import mock
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_utils import units
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder.tests.unit.volume.drivers.netapp import fakes as na_fakes
from cinder import utils
from cinder.volume.drivers.netapp.dataontap import nfs_7mode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils
from cinder.volume.drivers.netapp import utils as na_utils
@ddt.ddt
class NetApp7modeNfsDriverTestCase(test.TestCase):
def setUp(self):
super(NetApp7modeNfsDriverTestCase, self).setUp()
kwargs = {
'configuration': self.get_config_7mode(),
'host': 'openstack@7modenfs',
}
with mock.patch.object(utils, 'get_root_helper',
return_value=mock.Mock()):
with mock.patch.object(remotefs_brick, 'RemoteFsClient',
return_value=mock.Mock()):
self.driver = nfs_7mode.NetApp7modeNfsDriver(**kwargs)
self.driver._mounted_shares = [fake.NFS_SHARE]
self.driver.ssc_vols = True
self.driver.zapi_client = mock.Mock()
self.driver.perf_library = mock.Mock()
def get_config_7mode(self):
config = na_fakes.create_configuration_cmode()
config.netapp_storage_protocol = 'nfs'
config.netapp_login = 'root'
config.netapp_password = 'pass'
config.netapp_server_hostname = '127.0.0.1'
config.netapp_transport_type = 'http'
config.netapp_server_port = '80'
return config
@ddt.data({'share': None, 'is_snapshot': False},
{'share': None, 'is_snapshot': True},
{'share': 'fake_share', 'is_snapshot': False},
{'share': 'fake_share', 'is_snapshot': True})
@ddt.unpack
def test_clone_backing_file_for_volume(self, share, is_snapshot):
mock_get_export_ip_path = self.mock_object(
self.driver, '_get_export_ip_path',
return_value=(fake.SHARE_IP, fake.EXPORT_PATH))
mock_get_actual_path_for_export = self.mock_object(
self.driver.zapi_client, 'get_actual_path_for_export',
return_value='fake_path')
self.driver._clone_backing_file_for_volume(
fake.FLEXVOL, 'fake_clone', fake.VOLUME_ID, share=share,
is_snapshot=is_snapshot)
mock_get_export_ip_path.assert_called_once_with(
fake.VOLUME_ID, share)
mock_get_actual_path_for_export.assert_called_once_with(
fake.EXPORT_PATH)
self.driver.zapi_client.clone_file.assert_called_once_with(
'fake_path/' + fake.FLEXVOL, 'fake_path/fake_clone',
None)
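
The assertions above imply the 7-mode NFS clone path construction: resolve the export to its actual on-filer path, then build the source and destination file paths by joining that path with the backing-file and clone names. A tiny sketch of just that path building, under the same assumptions as the test:

def build_clone_paths(actual_export_path, source_name, clone_name):
    # ``actual_export_path`` is what the filer reports for the NFS export
    # (stubbed as 'fake_path' in the test); the names are the backing files.
    source = '%s/%s' % (actual_export_path, source_name)
    destination = '%s/%s' % (actual_export_path, clone_name)
    return source, destination


# build_clone_paths('fake_path', 'some_volume', 'fake_clone')
# -> ('fake_path/some_volume', 'fake_path/fake_clone')
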
@ddt.data({'nfs_sparsed_volumes': True},
{'nfs_sparsed_volumes': False})
@ddt.unpack
def test_get_pool_stats(self, nfs_sparsed_volumes):
self.driver.configuration.nfs_sparsed_volumes = nfs_sparsed_volumes
thick = not nfs_sparsed_volumes
total_capacity_gb = na_utils.round_down(
fake.TOTAL_BYTES // units.Gi, '0.01')
free_capacity_gb = na_utils.round_down(
fake.AVAILABLE_BYTES // units.Gi, '0.01')
capacity = {
'reserved_percentage': fake.RESERVED_PERCENTAGE,
'max_over_subscription_ratio': fake.MAX_OVER_SUBSCRIPTION_RATIO,
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb,
}
self.mock_object(self.driver,
'_get_share_capacity_info',
return_value=capacity)
self.mock_object(self.driver.perf_library,
'get_node_utilization',
return_value=30.0)
result = self.driver._get_pool_stats(filter_function='filter',
goodness_function='goodness')
expected = [{'pool_name': '192.168.99.24:/fake/export/path',
'QoS_support': False,
'consistencygroup_support': True,
'thick_provisioning_support': thick,
'thin_provisioning_support': not thick,
'free_capacity_gb': 12.0,
'total_capacity_gb': 4468.0,
'reserved_percentage': 7,
'max_over_subscription_ratio': 19.0,
'multiattach': False,
'utilization': 30.0,
'filter_function': 'filter',
'goodness_function': 'goodness'}]
self.assertEqual(expected, result)
def test_shortlist_del_eligible_files(self):
mock_get_path_for_export = self.mock_object(
self.driver.zapi_client, 'get_actual_path_for_export')
mock_get_path_for_export.return_value = fake.FLEXVOL
mock_get_file_usage = self.mock_object(
self.driver.zapi_client, 'get_file_usage')
mock_get_file_usage.return_value = fake.CAPACITY_VALUES[0]
expected = [(old_file, fake.CAPACITY_VALUES[0]) for old_file
in fake.FILE_LIST]
result = self.driver._shortlist_del_eligible_files(
fake.NFS_SHARE, fake.FILE_LIST)
self.assertEqual(expected, result)
def test_shortlist_del_eligible_files_empty_list(self):
mock_get_export_ip_path = self.mock_object(
self.driver, '_get_export_ip_path')
mock_get_export_ip_path.return_value = ('', '/export_path')
mock_get_path_for_export = self.mock_object(
self.driver.zapi_client, 'get_actual_path_for_export')
mock_get_path_for_export.return_value = fake.FLEXVOL
result = self.driver._shortlist_del_eligible_files(
fake.NFS_SHARE, [])
self.assertEqual([], result)
@ddt.data({'has_space': True, 'expected': True},
{'has_space': False, 'expected': False})
@ddt.unpack
def test_is_share_clone_compatible(self, has_space, expected):
mock_share_has_space_for_clone = self.mock_object(
self.driver, '_share_has_space_for_clone')
mock_share_has_space_for_clone.return_value = has_space
result = self.driver._is_share_clone_compatible(fake.VOLUME,
fake.NFS_SHARE)
self.assertEqual(expected, result)
def test__get_volume_model_update(self):
"""Driver is not expected to return a model update."""
self.assertIsNone(
self.driver._get_volume_model_update(fake.VOLUME_REF))
def test_delete_cgsnapshot(self):
mock_delete_file = self.mock_object(self.driver, '_delete_file')
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(
fake.CG_CONTEXT, fake.CG_SNAPSHOT, [fake.SNAPSHOT]))
mock_delete_file.assert_called_once_with(
fake.SNAPSHOT['volume_id'], fake.SNAPSHOT['name'])
self.assertIsNone(model_update)
self.assertIsNone(snapshots_model_update)
def test_get_snapshot_backing_flexvol_names(self):
snapshots = [
{'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}},
{'volume': {'host': 'hostA@192.168.1.01#/fake/volume2'}},
{'volume': {'host': 'hostA@192.168.99.25#/fake/volume3'}},
{'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}},
]
hosts = [snap['volume']['host'] for snap in snapshots]
flexvols = self.driver._get_flexvol_names_from_hosts(hosts)
self.assertEqual(3, len(flexvols))
self.assertIn('volume1', flexvols)
self.assertIn('volume2', flexvols)
self.assertIn('volume3', flexvols)
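
test_get_snapshot_backing_flexvol_names expects the flexvol name to be the last component of the pool part of each Cinder host string, with duplicates collapsed. A minimal sketch of that parsing (not the driver's actual helper):

def flexvol_names_from_hosts(hosts):
    # The pool portion follows '#'; the flexvol is its last path segment.
    flexvols = set()
    for host in hosts:
        pool = host.partition('#')[2]
        if pool:
            flexvols.add(pool.rstrip('/').rsplit('/', 1)[-1])
    return flexvols


print(flexvol_names_from_hosts(['hostA@192.168.99.25#/fake/volume1',
                                'hostA@192.168.99.25#/fake/volume1',
                                'hostA@192.168.1.01#/fake/volume2']))
# {'volume1', 'volume2'}
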
def test_check_for_setup_error(self):
mock_get_ontapi_version = self.mock_object(
self.driver.zapi_client, 'get_ontapi_version')
mock_get_ontapi_version.return_value = ['1', '10']
mock_add_looping_tasks = self.mock_object(
self.driver, '_add_looping_tasks')
mock_super_check_for_setup_error = self.mock_object(
nfs_base.NetAppNfsDriver, 'check_for_setup_error')
self.driver.check_for_setup_error()
mock_get_ontapi_version.assert_called_once_with()
mock_add_looping_tasks.assert_called_once_with()
mock_super_check_for_setup_error.assert_called_once_with()
def test_add_looping_tasks(self):
mock_super_add_looping_tasks = self.mock_object(
nfs_base.NetAppNfsDriver, '_add_looping_tasks')
self.driver._add_looping_tasks()
mock_super_add_looping_tasks.assert_called_once_with()
def test_handle_ems_logging(self):
volume_list = ['vol0', 'vol1', 'vol2']
self.mock_object(
self.driver, '_get_backing_flexvol_names',
return_value=volume_list)
self.mock_object(
dot_utils, 'build_ems_log_message_0',
return_value='fake_base_ems_log_message')
self.mock_object(
dot_utils, 'build_ems_log_message_1',
return_value='fake_pool_ems_log_message')
mock_send_ems_log_message = self.mock_object(
self.driver.zapi_client, 'send_ems_log_message')
self.driver._handle_ems_logging()
mock_send_ems_log_message.assert_has_calls([
mock.call('fake_base_ems_log_message'),
mock.call('fake_pool_ems_log_message'),
])
dot_utils.build_ems_log_message_0.assert_called_once_with(
self.driver.driver_name, self.driver.app_version,
self.driver.driver_mode)
dot_utils.build_ems_log_message_1.assert_called_once_with(
self.driver.driver_name, self.driver.app_version, None,
volume_list, [])
def test_get_backing_flexvol_names(self):
result = self.driver._get_backing_flexvol_names()
self.assertEqual('path', result[0])
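
The test above expects _get_backing_flexvol_names to reduce a mounted share such as '192.168.99.24:/fake/export/path' to the flexvol name 'path'. A standalone sketch consistent with that expectation (not the driver's actual helper):

def backing_flexvol_names(mounted_shares):
    # Shares look like 'host:/export/path'; keep the last path component.
    names = []
    for share in mounted_shares:
        export_path = share.split(':', 1)[-1]
        names.append(export_path.rstrip('/').rsplit('/', 1)[-1])
    return names


print(backing_flexvol_names(['192.168.99.24:/fake/export/path']))  # ['path']
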
def test_create_consistency_group(self):
model_update = self.driver.create_consistencygroup(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP)
self.assertEqual('available', model_update['status'])
def test_update_consistencygroup(self):
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(fake.CG_CONTEXT, "foo"))
self.assertIsNone(add_volumes_update)
self.assertIsNone(remove_volumes_update)
@ddt.data(None,
{'replication_status': fields.ReplicationStatus.ENABLED})
def test_create_consistencygroup_from_src(self, volume_model_update):
volume_model_update = volume_model_update or {}
volume_model_update.update(
{'provider_location': fake.PROVIDER_LOCATION})
mock_create_volume_from_snapshot = self.mock_object(
self.driver, 'create_volume_from_snapshot',
return_value=volume_model_update)
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.VOLUME],
cgsnapshot=fake.CG_SNAPSHOT, snapshots=[fake.SNAPSHOT]))
expected_volumes_model_updates = [{'id': fake.VOLUME['id']}]
expected_volumes_model_updates[0].update(volume_model_update)
mock_create_volume_from_snapshot.assert_called_once_with(
fake.VOLUME, fake.SNAPSHOT)
self.assertIsNone(model_update)
self.assertEqual(expected_volumes_model_updates, volumes_model_update)
@ddt.data(None,
{'replication_status': fields.ReplicationStatus.ENABLED})
def test_create_consistencygroup_from_src_source_vols(
self, volume_model_update):
mock_get_snapshot_flexvols = self.mock_object(
self.driver, '_get_flexvol_names_from_hosts')
mock_get_snapshot_flexvols.return_value = (set([fake.CG_POOL_NAME]))
mock_clone_backing_file = self.mock_object(
self.driver, '_clone_backing_file_for_volume')
fake_snapshot_name = 'snapshot-temp-' + fake.CONSISTENCY_GROUP['id']
mock_busy = self.mock_object(
self.driver.zapi_client, 'wait_for_busy_snapshot')
self.mock_object(self.driver, '_get_volume_model_update',
return_value=volume_model_update)
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.VOLUME],
source_cg=fake.CONSISTENCY_GROUP,
source_vols=[fake.NFS_VOLUME]))
expected_volumes_model_updates = [{
'id': fake.NFS_VOLUME['id'],
'provider_location': fake.PROVIDER_LOCATION,
}]
if volume_model_update:
expected_volumes_model_updates[0].update(volume_model_update)
mock_get_snapshot_flexvols.assert_called_once_with(
[fake.NFS_VOLUME['host']])
self.driver.zapi_client.create_cg_snapshot.assert_called_once_with(
set([fake.CG_POOL_NAME]), fake_snapshot_name)
mock_clone_backing_file.assert_called_once_with(
fake.NFS_VOLUME['name'], fake.VOLUME['name'],
fake.NFS_VOLUME['id'], source_snapshot=fake_snapshot_name)
mock_busy.assert_called_once_with(
fake.CG_POOL_NAME, fake_snapshot_name)
self.driver.zapi_client.delete_snapshot.assert_called_once_with(
fake.CG_POOL_NAME, fake_snapshot_name)
self.assertIsNone(model_update)
self.assertEqual(expected_volumes_model_updates, volumes_model_update)
def test_create_consistencygroup_from_src_invalid_parms(self):
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.VOLUME]))
self.assertIn('error', model_update['status'])
def test_create_cgsnapshot(self):
snapshot = fake.CG_SNAPSHOT
snapshot['volume'] = fake.CG_VOLUME
mock_get_snapshot_flexvols = self.mock_object(
self.driver, '_get_flexvol_names_from_hosts')
mock_get_snapshot_flexvols.return_value = (set([fake.CG_POOL_NAME]))
mock_clone_backing_file = self.mock_object(
self.driver, '_clone_backing_file_for_volume')
mock_busy = self.mock_object(
self.driver.zapi_client, 'wait_for_busy_snapshot')
self.driver.create_cgsnapshot(
fake.CG_CONTEXT, fake.CG_SNAPSHOT, [snapshot])
mock_get_snapshot_flexvols.assert_called_once_with(
[snapshot['volume']['host']])
self.driver.zapi_client.create_cg_snapshot.assert_called_once_with(
set([fake.CG_POOL_NAME]), fake.CG_SNAPSHOT_ID)
mock_clone_backing_file.assert_called_once_with(
snapshot['volume']['name'], snapshot['name'],
snapshot['volume']['id'], source_snapshot=fake.CG_SNAPSHOT_ID)
mock_busy.assert_called_once_with(
fake.CG_POOL_NAME, fake.CG_SNAPSHOT_ID)
self.driver.zapi_client.delete_snapshot.assert_called_once_with(
fake.CG_POOL_NAME, fake.CG_SNAPSHOT_ID)
def test_create_cgsnapshot_busy_snapshot(self):
snapshot = fake.CG_SNAPSHOT
snapshot['volume'] = fake.CG_VOLUME
mock_get_snapshot_flexvols = self.mock_object(
self.driver, '_get_flexvol_names_from_hosts')
mock_get_snapshot_flexvols.return_value = (set([fake.CG_POOL_NAME]))
mock_clone_backing_file = self.mock_object(
self.driver, '_clone_backing_file_for_volume')
mock_busy = self.mock_object(
self.driver.zapi_client, 'wait_for_busy_snapshot')
mock_busy.side_effect = exception.SnapshotIsBusy(snapshot['name'])
mock_mark_snapshot_for_deletion = self.mock_object(
self.driver.zapi_client, 'mark_snapshot_for_deletion')
self.driver.create_cgsnapshot(
fake.CG_CONTEXT, fake.CG_SNAPSHOT, [snapshot])
mock_get_snapshot_flexvols.assert_called_once_with(
[snapshot['volume']['host']])
self.driver.zapi_client.create_cg_snapshot.assert_called_once_with(
set([fake.CG_POOL_NAME]), fake.CG_SNAPSHOT_ID)
mock_clone_backing_file.assert_called_once_with(
snapshot['volume']['name'], snapshot['name'],
snapshot['volume']['id'], source_snapshot=fake.CG_SNAPSHOT_ID)
mock_busy.assert_called_once_with(
fake.CG_POOL_NAME, fake.CG_SNAPSHOT_ID)
self.driver.zapi_client.delete_snapshot.assert_not_called()
mock_mark_snapshot_for_deletion.assert_called_once_with(
fake.CG_POOL_NAME, fake.CG_SNAPSHOT_ID)
def test_delete_consistencygroup_volume_delete_failure(self):
self.mock_object(self.driver, '_delete_file', side_effect=Exception)
model_update, volumes = self.driver.delete_consistencygroup(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.CG_VOLUME])
self.assertEqual('deleted', model_update['status'])
self.assertEqual('error_deleting', volumes[0]['status'])
def test_delete_consistencygroup(self):
mock_delete_file = self.mock_object(
self.driver, '_delete_file')
model_update, volumes = self.driver.delete_consistencygroup(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.CG_VOLUME])
self.assertEqual('deleted', model_update['status'])
self.assertEqual('deleted', volumes[0]['status'])
mock_delete_file.assert_called_once_with(
fake.CG_VOLUME_ID, fake.CG_VOLUME_NAME)


@ -455,8 +455,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mock.call('fake_pool_ems_log_message'),
])
dot_utils.build_ems_log_message_0.assert_called_once_with(
self.driver.driver_name, self.driver.app_version,
self.driver.driver_mode)
self.driver.driver_name, self.driver.app_version)
dot_utils.build_ems_log_message_1.assert_called_once_with(
self.driver.driver_name, self.driver.app_version,
self.driver.vserver, volume_list, [])


@ -108,17 +108,14 @@ class NetAppCDOTDataMotionTestCase(test.TestCase):
@ddt.ddt
class NetAppDataOntapUtilsTestCase(test.TestCase):
@ddt.data('cluster', '7mode')
def test_build_ems_log_message_0(self, driver_mode):
def test_build_ems_log_message_0(self):
self.mock_object(
socket, 'gethostname', return_value='fake_hostname')
result = utils.build_ems_log_message_0(
'fake_driver_name', 'fake_app_version', driver_mode)
'fake_driver_name', 'fake_app_version')
dest = ('cluster node' if driver_mode == 'cluster'
else '7 mode controller')
expected = {
'computer-name': 'fake_hostname',
'event-source': 'Cinder driver fake_driver_name',
@ -127,7 +124,7 @@ class NetAppDataOntapUtilsTestCase(test.TestCase):
'log-level': '5',
'auto-support': 'false',
'event-id': '0',
'event-description': 'OpenStack Cinder connected to %s' % dest,
'event-description': 'OpenStack Cinder connected to cluster node',
}
self.assertEqual(expected, result)
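
With the driver_mode argument gone, the test above fixes the two-argument signature and the hard-coded 'cluster node' description. A minimal sketch of what the reworked helper might return, limited to the fields visible in this hunk (keys elided by the diff are deliberately omitted; this is not the actual dot_utils code):

import socket


def build_ems_log_message_0(driver_name, app_version):
    # app_version feeds a field elided by the hunk above, so it is unused
    # in this sketch; only the keys visible in the expected dict are shown.
    return {
        'computer-name': socket.gethostname(),
        'event-source': 'Cinder driver %s' % driver_name,
        'log-level': '5',
        'auto-support': 'false',
        'event-id': '0',
        'event-description': 'OpenStack Cinder connected to cluster node',
    }
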


@ -127,12 +127,6 @@ def create_configuration():
return config
def create_configuration_7mode():
config = create_configuration()
config.append_config_values(na_opts.netapp_7mode_opts)
return config
def create_configuration_cmode():
config = create_configuration()
config.append_config_values(na_opts.netapp_cluster_opts)


@ -132,4 +132,4 @@ class NetAppDriverFactoryTestCase(test.TestCase):
self.assertRaises(exception.InvalidInput,
na_common.NetAppDriver.create_driver,
'ontap_7mode', 'carrier_pigeon', **kwargs)
'ontap_cluster', 'carrier_pigeon', **kwargs)


@ -42,12 +42,6 @@ NETAPP_UNIFIED_DRIVER_REGISTRY = {
'nfs': DATAONTAP_PATH + '.nfs_cmode.NetAppCmodeNfsDriver',
'fc': DATAONTAP_PATH + '.fc_cmode.NetAppCmodeFibreChannelDriver'
},
'ontap_7mode':
{
'iscsi': DATAONTAP_PATH + '.iscsi_7mode.NetApp7modeISCSIDriver',
'nfs': DATAONTAP_PATH + '.nfs_7mode.NetApp7modeNfsDriver',
'fc': DATAONTAP_PATH + '.fc_7mode.NetApp7modeFibreChannelDriver'
},
'eseries':
{
'iscsi': ESERIES_PATH + '.iscsi_driver.NetAppEseriesISCSIDriver',

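
The NETAPP_UNIFIED_DRIVER_REGISTRY hunk above shows the remaining family-to-protocol mapping once the 'ontap_7mode' entries are dropped. A simplified, hypothetical lookup illustrating how such a registry is typically consulted to resolve a driver class path (not Cinder's actual create_driver, which raises InvalidInput on unknown input, as the factory test above checks):

REGISTRY = {
    'ontap_cluster': {
        'iscsi': 'cinder.volume.drivers.netapp.dataontap.'
                 'iscsi_cmode.NetAppCmodeISCSIDriver',
    },
}


def resolve_driver(family, protocol):
    # Hypothetical lookup: map (family, protocol) to a driver class path.
    try:
        return REGISTRY[family.lower()][protocol.lower()]
    except KeyError:
        raise ValueError('No NetApp driver found for %s/%s'
                         % (family, protocol))


print(resolve_driver('ontap_cluster', 'iscsi'))
# resolve_driver('ontap_cluster', 'carrier_pigeon') raises ValueError.
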

@ -1,608 +0,0 @@
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Andrew Kerr. All rights reserved.
# Copyright (c) 2014 Jeff Applewhite. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver library for NetApp 7-mode block storage systems.
"""
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _
from cinder.objects import fields
from cinder import utils
from cinder.volume import configuration
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.client import client_7mode
from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode
from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
@six.add_metaclass(utils.TraceWrapperMetaclass)
class NetAppBlockStorage7modeLibrary(block_base.NetAppBlockStorageLibrary):
"""NetApp block storage library for Data ONTAP (7-mode)."""
def __init__(self, driver_name, driver_protocol, **kwargs):
super(NetAppBlockStorage7modeLibrary, self).__init__(driver_name,
driver_protocol,
**kwargs)
self.configuration.append_config_values(na_opts.netapp_7mode_opts)
self.driver_mode = '7mode'
def do_setup(self, context):
super(NetAppBlockStorage7modeLibrary, self).do_setup(context)
self.volume_list = []
self.vfiler = self.configuration.netapp_vfiler
self.zapi_client = client_7mode.Client(
self.volume_list,
transport_type=self.configuration.netapp_transport_type,
username=self.configuration.netapp_login,
password=self.configuration.netapp_password,
hostname=self.configuration.netapp_server_hostname,
port=self.configuration.netapp_server_port,
vfiler=self.vfiler)
self._do_partner_setup()
self.vol_refresh_time = None
self.vol_refresh_interval = 1800
self.vol_refresh_running = False
self.vol_refresh_voluntary = False
self.root_volume_name = self._get_root_volume_name()
self.perf_library = perf_7mode.Performance7modeLibrary(
self.zapi_client)
# This driver has been marked 'deprecated' in the Ocata release and
# can be removed in Queens.
msg = _("The 7-mode Data ONTAP driver is deprecated and will be "
"removed in a future release.")
versionutils.report_deprecated_feature(LOG, msg)
def _do_partner_setup(self):
partner_backend = self.configuration.netapp_partner_backend_name
if partner_backend:
config = configuration.Configuration(na_opts.netapp_7mode_opts,
partner_backend)
config.append_config_values(na_opts.netapp_connection_opts)
config.append_config_values(na_opts.netapp_basicauth_opts)
config.append_config_values(na_opts.netapp_transport_opts)
self.partner_zapi_client = client_7mode.Client(
None,
transport_type=config.netapp_transport_type,
username=config.netapp_login,
password=config.netapp_password,
hostname=config.netapp_server_hostname,
port=config.netapp_server_port,
vfiler=None)
def check_for_setup_error(self):
"""Check that the driver is working and can communicate."""
api_version = self.zapi_client.get_ontapi_version()
if api_version:
major, minor = api_version
if major == 1 and minor < 9:
msg = _("Unsupported Data ONTAP version."
" Data ONTAP version 7.3.1 and above is supported.")
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _("API version could not be determined.")
raise exception.VolumeBackendAPIException(data=msg)
self._refresh_volume_info()
if not self.volume_list:
msg = _('No pools are available for provisioning volumes. '
'Ensure that the configuration option '
'netapp_pool_name_search_pattern is set correctly.')
raise exception.NetAppDriverException(msg)
self._add_looping_tasks()
super(NetAppBlockStorage7modeLibrary, self).check_for_setup_error()
def _add_looping_tasks(self):
"""Add tasks that need to be executed at a fixed interval."""
super(NetAppBlockStorage7modeLibrary, self)._add_looping_tasks()
def _handle_ems_logging(self):
"""Log autosupport messages."""
base_ems_message = dot_utils.build_ems_log_message_0(
self.driver_name, self.app_version, self.driver_mode)
self.zapi_client.send_ems_log_message(base_ems_message)
pool_ems_message = dot_utils.build_ems_log_message_1(
self.driver_name, self.app_version, None, self.volume_list, [])
self.zapi_client.send_ems_log_message(pool_ems_message)
def _get_volume_model_update(self, volume):
"""Provide any updates necessary for a volume being created/managed."""
def _create_lun(self, volume_name, lun_name, size,
metadata, qos_policy_group_name=None):
"""Creates a LUN, handling Data ONTAP differences as needed."""
if qos_policy_group_name is not None:
msg = _('Data ONTAP operating in 7-Mode does not support QoS '
'policy groups.')
raise exception.VolumeDriverException(msg)
self.zapi_client.create_lun(
volume_name, lun_name, size, metadata, qos_policy_group_name)
self.vol_refresh_voluntary = True
def _get_root_volume_name(self):
# switch to volume-get-root-name API when possible
vols = self.zapi_client.get_filer_volumes()
for vol in vols:
volume_name = vol.get_child_content('name')
if self._get_vol_option(volume_name, 'root') == 'true':
return volume_name
LOG.warning('Could not determine root volume name on %s.',
self._get_owner())
return None
def _get_owner(self):
if self.vfiler:
owner = '%s:%s' % (self.configuration.netapp_server_hostname,
self.vfiler)
else:
owner = self.configuration.netapp_server_hostname
return owner
def _create_lun_handle(self, metadata):
"""Returns LUN handle based on filer type."""
owner = self._get_owner()
return '%s:%s' % (owner, metadata['Path'])
def _find_mapped_lun_igroup(self, path, initiator_list):
"""Find an igroup for a LUN mapped to the given initiator(s)."""
initiator_set = set(initiator_list)
result = self.zapi_client.get_lun_map(path)
initiator_groups = result.get_child_by_name('initiator-groups')
if initiator_groups:
for initiator_group_info in initiator_groups.get_children():
initiator_set_for_igroup = set()
for initiator_info in initiator_group_info.get_child_by_name(
'initiators').get_children():
initiator_set_for_igroup.add(
initiator_info.get_child_content('initiator-name'))
if initiator_set == initiator_set_for_igroup:
igroup = initiator_group_info.get_child_content(
'initiator-group-name')
lun_id = initiator_group_info.get_child_content(
'lun-id')
return igroup, lun_id
return None, None
def _has_luns_mapped_to_initiators(self, initiator_list,
include_partner=True):
"""Checks whether any LUNs are mapped to the given initiator(s)."""
if self.zapi_client.has_luns_mapped_to_initiators(initiator_list):
return True
if include_partner and self.partner_zapi_client and \
self.partner_zapi_client.has_luns_mapped_to_initiators(
initiator_list):
return True
return False
def _clone_lun(self, name, new_name, space_reserved=None,
qos_policy_group_name=None, src_block=0, dest_block=0,
block_count=0, source_snapshot=None, is_snapshot=False):
"""Clone LUN with the given handle to the new name.
:param is_snapshot: Not used; present for method signature consistency
"""
if not space_reserved:
space_reserved = self.lun_space_reservation
if qos_policy_group_name is not None:
msg = _('Data ONTAP operating in 7-Mode does not support QoS '
'policy groups.')
raise exception.VolumeDriverException(msg)
metadata = self._get_lun_attr(name, 'metadata')
path = metadata['Path']
(parent, _splitter, name) = path.rpartition('/')
clone_path = '%s/%s' % (parent, new_name)
self.zapi_client.clone_lun(path, clone_path, name, new_name,
space_reserved, src_block=src_block,
dest_block=dest_block,
block_count=block_count,
source_snapshot=source_snapshot)
self.vol_refresh_voluntary = True
luns = self.zapi_client.get_lun_by_args(path=clone_path)
cloned_lun = luns[0]
self.zapi_client.set_space_reserve(clone_path, space_reserved)
clone_meta = self._create_lun_meta(cloned_lun)
handle = self._create_lun_handle(clone_meta)
self._add_lun_to_table(
block_base.NetAppLun(handle, new_name,
cloned_lun.get_child_content('size'),
clone_meta))
def _create_lun_meta(self, lun):
"""Creates LUN metadata dictionary."""
self.zapi_client.check_is_naelement(lun)
meta_dict = {}
meta_dict['Path'] = lun.get_child_content('path')
meta_dict['Volume'] = lun.get_child_content('path').split('/')[2]
meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
meta_dict['SpaceReserved'] = lun.get_child_content(
'is-space-reservation-enabled')
meta_dict['UUID'] = lun.get_child_content('uuid')
return meta_dict
def _get_fc_target_wwpns(self, include_partner=True):
wwpns = self.zapi_client.get_fc_target_wwpns()
if include_partner and self.partner_zapi_client:
wwpns.extend(self.partner_zapi_client.get_fc_target_wwpns())
return wwpns
def _update_volume_stats(self, filter_function=None,
goodness_function=None):
"""Retrieve stats info from filer."""
# ensure we get current data
self.vol_refresh_voluntary = True
self._refresh_volume_info()
LOG.debug('Updating volume stats')
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.driver_name
data['vendor_name'] = 'NetApp'
data['driver_version'] = self.VERSION
data['storage_protocol'] = self.driver_protocol
data['pools'] = self._get_pool_stats(
filter_function=filter_function,
goodness_function=goodness_function)
data['sparse_copy_volume'] = True
self._stats = data
def _get_pool_stats(self, filter_function=None, goodness_function=None):
"""Retrieve pool (i.e. Data ONTAP volume) stats info from volumes."""
pools = []
self.perf_library.update_performance_cache()
for vol in self.vols:
volume_name = vol.get_child_content('name')
# omit volumes not specified in the config
if self.volume_list and volume_name not in self.volume_list:
continue
# omit root volume
if volume_name == self.root_volume_name:
continue
# ensure good volume state
state = vol.get_child_content('state')
inconsistent = vol.get_child_content('is-inconsistent')
invalid = vol.get_child_content('is-invalid')
if (state != 'online' or
inconsistent != 'false' or
invalid != 'false'):
continue
pool = dict()
pool['pool_name'] = volume_name
pool['QoS_support'] = False
pool['multiattach'] = False
pool['reserved_percentage'] = (
self.reserved_percentage)
pool['max_over_subscription_ratio'] = (
self.max_over_subscription_ratio)
# convert sizes to GB
total = float(vol.get_child_content('size-total') or 0)
total /= units.Gi
pool['total_capacity_gb'] = na_utils.round_down(total, '0.01')
free = float(vol.get_child_content('size-available') or 0)
free /= units.Gi
pool['free_capacity_gb'] = na_utils.round_down(free, '0.01')
thick = (
self.configuration.netapp_lun_space_reservation == 'enabled')
pool['thick_provisioning_support'] = thick
pool['thin_provisioning_support'] = not thick
utilization = self.perf_library.get_node_utilization()
pool['utilization'] = na_utils.round_down(utilization, '0.01')
pool['filter_function'] = filter_function
pool['goodness_function'] = goodness_function
pool['consistencygroup_support'] = True
pools.append(pool)
return pools
def _get_filtered_pools(self):
"""Return available pools filtered by a pool name search pattern."""
# Inform deprecation of legacy option.
if self.configuration.safe_get('netapp_volume_list'):
msg = ("The option 'netapp_volume_list' is deprecated and "
"will be removed in the future releases. Please use "
"the option 'netapp_pool_name_search_pattern' instead.")
versionutils.report_deprecated_feature(LOG, msg)
pool_regex = na_utils.get_pool_name_filter_regex(self.configuration)
filtered_pools = []
for vol in self.vols:
vol_name = vol.get_child_content('name')
if pool_regex.match(vol_name):
msg = ("Volume '%(vol_name)s' matches against regular "
"expression: %(vol_pattern)s")
LOG.debug(msg, {'vol_name': vol_name,
'vol_pattern': pool_regex.pattern})
filtered_pools.append(vol_name)
else:
msg = ("Volume '%(vol_name)s' does not match against regular "
"expression: %(vol_pattern)s")
LOG.debug(msg, {'vol_name': vol_name,
'vol_pattern': pool_regex.pattern})
return filtered_pools
def _get_lun_block_count(self, path):
"""Gets block counts for the LUN."""
bs = super(NetAppBlockStorage7modeLibrary,
self)._get_lun_block_count(path)
api_version = self.zapi_client.get_ontapi_version()
if api_version:
major = api_version[0]
minor = api_version[1]
if major == 1 and minor < 15:
bs -= 1
return bs
def _refresh_volume_info(self):
"""Saves the volume information for the filer."""
if (self.vol_refresh_time is None or self.vol_refresh_voluntary or
timeutils.is_newer_than(self.vol_refresh_time,
self.vol_refresh_interval)):
try:
job_set = na_utils.set_safe_attr(self, 'vol_refresh_running',
True)
if not job_set:
LOG.warning("Volume refresh job already running. "
"Returning...")
return
self.vol_refresh_voluntary = False
self.vols = self.zapi_client.get_filer_volumes()
self.volume_list = self._get_filtered_pools()
self.vol_refresh_time = timeutils.utcnow()
except Exception as e:
LOG.warning("Error refreshing volume info. Message: %s",
e)
finally:
na_utils.set_safe_attr(self, 'vol_refresh_running', False)
def delete_volume(self, volume):
"""Driver entry point for destroying existing volumes."""
super(NetAppBlockStorage7modeLibrary, self).delete_volume(volume)
self.vol_refresh_voluntary = True
LOG.debug('Deleted LUN with name %s', volume['name'])
def delete_snapshot(self, snapshot):
"""Driver entry point for deleting a snapshot."""
super(NetAppBlockStorage7modeLibrary, self).delete_snapshot(snapshot)
self.vol_refresh_voluntary = True
def _is_lun_valid_on_storage(self, lun):
"""Validate LUN specific to storage system."""
if self.volume_list:
lun_vol = lun.get_metadata_property('Volume')
if lun_vol not in self.volume_list:
return False
return True
def _check_volume_type_for_lun(self, volume, lun, existing_ref,
extra_specs):
"""Check if LUN satisfies volume type."""
if extra_specs:
legacy_policy = extra_specs.get('netapp:qos_policy_group')
if legacy_policy is not None:
raise exception.ManageExistingVolumeTypeMismatch(
reason=_("Setting LUN QoS policy group is not supported "
"on this storage family and ONTAP version."))
volume_type = na_utils.get_volume_type_from_volume(volume)
if volume_type is None:
return
spec = na_utils.get_backend_qos_spec_from_volume_type(volume_type)
if spec is not None:
raise exception.ManageExistingVolumeTypeMismatch(
reason=_("Back-end QoS specs are not supported on this "
"storage family and ONTAP version."))
def _get_preferred_target_from_list(self, target_details_list,
filter=None):
# 7-mode iSCSI LIFs migrate from controller to controller
# in failover and flap operational state in transit, so
# we don't filter these on operational state.
return (super(NetAppBlockStorage7modeLibrary, self)
._get_preferred_target_from_list(target_details_list))
def _get_backing_flexvol_names(self):
"""Returns a list of backing flexvol names."""
return self.volume_list or []
def create_consistencygroup(self, group):
"""Driver entry point for creating a consistency group.
ONTAP does not maintain an actual CG construct. As a result, no
communication to the backend is necessary for consistency group
creation.
:returns: Hard-coded model update for consistency group model.
"""
model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
return model_update
def delete_consistencygroup(self, group, volumes):
"""Driver entry point for deleting a consistency group.
:returns: Updated consistency group model and list of volume models
for the volumes that were deleted.
"""
model_update = {'status': fields.ConsistencyGroupStatus.DELETED}
volumes_model_update = []
for volume in volumes:
try:
self._delete_lun(volume['name'])
volumes_model_update.append(
{'id': volume['id'], 'status': 'deleted'})
except Exception:
volumes_model_update.append(
{'id': volume['id'],
'status': 'error_deleting'})
LOG.exception("Volume %(vol)s in the consistency group "
"could not be deleted.", {'vol': volume})
return model_update, volumes_model_update
def update_consistencygroup(self, group, add_volumes=None,
remove_volumes=None):
"""Driver entry point for updating a consistency group.
Since no actual CG construct is ever created in ONTAP, it is not
necessary to update any metadata on the backend. Since this is a NO-OP,
there is guaranteed to be no change in any of the volumes' statuses.
"""
return None, None, None
def create_cgsnapshot(self, cgsnapshot, snapshots):
"""Creates a Cinder cgsnapshot object.
The Cinder cgsnapshot object is created by making use of an
ephemeral ONTAP CG in order to provide write-order consistency for a
set of flexvol snapshots. First, a list of the flexvols backing the
given Cinder CG must be gathered. An ONTAP cg-snapshot of these
flexvols will create a snapshot copy of all the Cinder volumes in the
CG. For each Cinder volume in the CG, it is then necessary to
clone its backing LUN from the ONTAP cg-snapshot. The naming convention
used for the clones is what indicates the clone's role as a Cinder
snapshot and its inclusion in a Cinder CG. The ONTAP CG-snapshot of
the flexvols is no longer required after having cloned the LUNs
backing the Cinder volumes in the Cinder CG.
:returns: An implicit update for cgsnapshot and snapshots models that
is interpreted by the manager to set their models to
available.
"""
flexvols = set()
for snapshot in snapshots:
flexvols.add(volume_utils.extract_host(snapshot['volume']['host'],
level='pool'))
self.zapi_client.create_cg_snapshot(flexvols, cgsnapshot['id'])
for snapshot in snapshots:
self._clone_lun(snapshot['volume']['name'], snapshot['name'],
source_snapshot=cgsnapshot['id'])
for flexvol in flexvols:
try:
self.zapi_client.wait_for_busy_snapshot(
flexvol, cgsnapshot['id'])
self.zapi_client.delete_snapshot(
flexvol, cgsnapshot['id'])
except exception.SnapshotIsBusy:
self.zapi_client.mark_snapshot_for_deletion(
flexvol, cgsnapshot['id'])
return None, None
def delete_cgsnapshot(self, cgsnapshot, snapshots):
"""Delete LUNs backing each snapshot in the cgsnapshot.
:returns: An implicit update for snapshots models that is interpreted
by the manager to set their models to deleted.
"""
for snapshot in snapshots:
self._delete_lun(snapshot['name'])
LOG.debug("Snapshot %s deletion successful", snapshot['name'])
return None, None
def create_consistencygroup_from_src(self, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
"""Creates a CG from a either a cgsnapshot or group of cinder vols.
:returns: An implicit update for the volumes model that is
interpreted by the manager as a successful operation.
"""
LOG.debug("VOLUMES %s ", ', '.join([vol['id'] for vol in volumes]))
volume_model_updates = []
if cgsnapshot:
vols = zip(volumes, snapshots)
for volume, snapshot in vols:
source = {
'name': snapshot['name'],
'size': snapshot['volume_size'],
}
volume_model_update = self._clone_source_to_destination(
source, volume)
if volume_model_update is not None:
volume_model_update['id'] = volume['id']
volume_model_updates.append(volume_model_update)
else:
vols = zip(volumes, source_vols)
for volume, old_src_vref in vols:
src_lun = self._get_lun_from_table(old_src_vref['name'])
source = {'name': src_lun.name, 'size': old_src_vref['size']}
volume_model_update = self._clone_source_to_destination(
source, volume)
if volume_model_update is not None:
volume_model_update['id'] = volume['id']
volume_model_updates.append(volume_model_update)
return None, volume_model_updates
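(Not part of this change.) The positional pairing above, where new volumes are zipped with their source snapshots or source volumes, and the implicit model-update return convention can be easy to misread. The following standalone sketch uses hypothetical volume and snapshot dicts and a stub in place of _clone_source_to_destination, purely to show the shape of what the method returns.

# Illustrative sketch only -- not driver code.
volumes = [{'id': 'new-vol-1'}, {'id': 'new-vol-2'}]            # hypothetical
snapshots = [{'name': 'snap-1', 'volume_size': 10},             # hypothetical
             {'name': 'snap-2', 'volume_size': 20}]

def clone_source_to_destination_stub(source, volume):
    # Stand-in for _clone_source_to_destination(); the real call clones the
    # backing LUN and may return None or a dict of model updates.
    return {'size': source['size']}

volume_model_updates = []
for volume, snapshot in zip(volumes, snapshots):
    source = {'name': snapshot['name'], 'size': snapshot['volume_size']}
    update = clone_source_to_destination_stub(source, volume)
    if update is not None:
        update['id'] = volume['id']
        volume_model_updates.append(update)

# The manager interprets (None, volume_model_updates) as a successful operation.
print((None, volume_model_updates))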

View File

@ -155,7 +155,7 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
"""Log autosupport messages."""
base_ems_message = dot_utils.build_ems_log_message_0(
self.driver_name, self.app_version, self.driver_mode)
self.driver_name, self.app_version)
self.zapi_client.send_ems_log_message(base_ems_message)
pool_ems_message = dot_utils.build_ems_log_message_1(

View File

@ -1,604 +0,0 @@
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import math
import time
from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base
from oslo_utils import strutils
LOG = logging.getLogger(__name__)
@six.add_metaclass(utils.TraceWrapperMetaclass)
class Client(client_base.Client):
def __init__(self, volume_list=None, **kwargs):
super(Client, self).__init__(**kwargs)
vfiler = kwargs.get('vfiler', None)
self.connection.set_vfiler(vfiler)
(major, minor) = self.get_ontapi_version(cached=False)
self.connection.set_api_version(major, minor)
self.volume_list = volume_list
self._init_features()
def _init_features(self):
super(Client, self)._init_features()
ontapi_version = self.get_ontapi_version() # major, minor
ontapi_1_20 = ontapi_version >= (1, 20)
self.features.add_feature('SYSTEM_METRICS', supported=ontapi_1_20)
def send_ems_log_message(self, message_dict):
"""Sends a message to the Data ONTAP EMS log."""
# NOTE(cknight): Cannot use deepcopy on the connection context
node_client = copy.copy(self)
node_client.connection = copy.copy(self.connection)
node_client.connection.set_timeout(25)
try:
node_client.connection.set_vfiler(None)
node_client.send_request('ems-autosupport-log', message_dict)
LOG.debug('EMS executed successfully.')
except netapp_api.NaApiError as e:
LOG.warning('Failed to invoke EMS. %s', e)
def get_iscsi_target_details(self):
"""Gets the iSCSI target portal details."""
iscsi_if_iter = netapp_api.NaElement('iscsi-portal-list-info')
result = self.connection.invoke_successfully(iscsi_if_iter, True)
tgt_list = []
portal_list_entries = result.get_child_by_name(
'iscsi-portal-list-entries')
if portal_list_entries:
portal_list = portal_list_entries.get_children()
for iscsi_if in portal_list:
d = dict()
d['address'] = iscsi_if.get_child_content('ip-address')
d['port'] = iscsi_if.get_child_content('ip-port')
d['tpgroup-tag'] = iscsi_if.get_child_content('tpgroup-tag')
tgt_list.append(d)
return tgt_list
def check_iscsi_initiator_exists(self, iqn):
"""Returns True if initiator exists."""
initiator_exists = True
try:
auth_list = netapp_api.NaElement('iscsi-initiator-auth-list-info')
auth_list.add_new_child('initiator', iqn)
self.connection.invoke_successfully(auth_list, True)
except netapp_api.NaApiError:
initiator_exists = False
return initiator_exists
def get_fc_target_wwpns(self):
"""Gets the FC target details."""
wwpns = []
port_name_list_api = netapp_api.NaElement('fcp-port-name-list-info')
result = self.connection.invoke_successfully(port_name_list_api)
port_names = result.get_child_by_name('fcp-port-names')
if port_names:
for port_name_info in port_names.get_children():
wwpn = port_name_info.get_child_content('port-name').lower()
wwpns.append(wwpn)
return wwpns
def get_iscsi_service_details(self):
"""Returns iscsi iqn."""
iscsi_service_iter = netapp_api.NaElement('iscsi-node-get-name')
result = self.connection.invoke_successfully(iscsi_service_iter, True)
return result.get_child_content('node-name')
def set_iscsi_chap_authentication(self, iqn, username, password):
"""Provides NetApp host's CHAP credentials to the backend."""
command = ("iscsi security add -i %(iqn)s -s CHAP "
"-p %(password)s -n %(username)s") % {
'iqn': iqn,
'password': password,
'username': username,
}
LOG.debug('Updating CHAP authentication for %(iqn)s.', {'iqn': iqn})
try:
ssh_pool = self.ssh_client.ssh_pool
with ssh_pool.item() as ssh:
self.ssh_client.execute_command(ssh, command)
except Exception as e:
msg = _('Failed to set CHAP authentication for target IQN '
'%(iqn)s. Details: %(ex)s') % {
'iqn': iqn,
'ex': e,
}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def get_lun_list(self):
"""Gets the list of LUNs on filer."""
lun_list = []
if self.volume_list:
for vol in self.volume_list:
try:
luns = self._get_vol_luns(vol)
if luns:
lun_list.extend(luns)
except netapp_api.NaApiError:
LOG.warning("Error finding LUNs for volume %s."
" Verify volume exists.", vol)
else:
luns = self._get_vol_luns(None)
lun_list.extend(luns)
return lun_list
def _get_vol_luns(self, vol_name):
"""Gets the LUNs for a volume."""
api = netapp_api.NaElement('lun-list-info')
if vol_name:
api.add_new_child('volume-name', vol_name)
result = self.connection.invoke_successfully(api, True)
luns = result.get_child_by_name('luns')
return luns.get_children()
def get_igroup_by_initiators(self, initiator_list):
"""Get igroups exactly matching a set of initiators."""
igroup_list = []
if not initiator_list:
return igroup_list
initiator_set = set(initiator_list)
igroup_list_info = netapp_api.NaElement('igroup-list-info')
result = self.connection.invoke_successfully(igroup_list_info, True)
initiator_groups = result.get_child_by_name(
'initiator-groups') or netapp_api.NaElement('none')
for initiator_group_info in initiator_groups.get_children():
initiator_set_for_igroup = set()
initiators = initiator_group_info.get_child_by_name(
'initiators') or netapp_api.NaElement('none')
for initiator_info in initiators.get_children():
initiator_set_for_igroup.add(
initiator_info.get_child_content('initiator-name'))
if initiator_set == initiator_set_for_igroup:
igroup = {'initiator-group-os-type':
initiator_group_info.get_child_content(
'initiator-group-os-type'),
'initiator-group-type':
initiator_group_info.get_child_content(
'initiator-group-type'),
'initiator-group-name':
initiator_group_info.get_child_content(
'initiator-group-name')}
igroup_list.append(igroup)
return igroup_list
def clone_lun(self, path, clone_path, name, new_name,
space_reserved='true', src_block=0,
dest_block=0, block_count=0, source_snapshot=None):
# zAPI can only handle 2^24 blocks per range
bc_limit = 2 ** 24 # 8GB
# zAPI can only handle 32 block ranges per call
br_limit = 32
z_limit = br_limit * bc_limit # 256 GB
z_calls = int(math.ceil(block_count / float(z_limit)))
zbc = block_count
if z_calls == 0:
z_calls = 1
for _call in range(0, z_calls):
if zbc > z_limit:
block_count = z_limit
zbc -= z_limit
else:
block_count = zbc
zapi_args = {
'source-path': path,
'destination-path': clone_path,
'no-snap': 'true',
}
if source_snapshot:
zapi_args['snapshot-name'] = source_snapshot
clone_start = netapp_api.NaElement.create_node_with_children(
'clone-start', **zapi_args)
if block_count > 0:
block_ranges = netapp_api.NaElement("block-ranges")
# zAPI can only handle 2^24 block ranges
bc_limit = 2 ** 24 # 8GB
segments = int(math.ceil(block_count / float(bc_limit)))
bc = block_count
for _segment in range(0, segments):
if bc > bc_limit:
block_count = bc_limit
bc -= bc_limit
else:
block_count = bc
block_range =\
netapp_api.NaElement.create_node_with_children(
'block-range',
**{'source-block-number':
six.text_type(src_block),
'destination-block-number':
six.text_type(dest_block),
'block-count':
six.text_type(block_count)})
block_ranges.add_child_elem(block_range)
src_block += int(block_count)
dest_block += int(block_count)
clone_start.add_child_elem(block_ranges)
result = self.connection.invoke_successfully(clone_start, True)
clone_id_el = result.get_child_by_name('clone-id')
cl_id_info = clone_id_el.get_child_by_name('clone-id-info')
vol_uuid = cl_id_info.get_child_content('volume-uuid')
clone_id = cl_id_info.get_child_content('clone-op-id')
if vol_uuid:
self._check_clone_status(clone_id, vol_uuid, name, new_name)
def _check_clone_status(self, clone_id, vol_uuid, name, new_name):
"""Checks for the job till completed."""
clone_status = netapp_api.NaElement('clone-list-status')
cl_id = netapp_api.NaElement('clone-id')
clone_status.add_child_elem(cl_id)
cl_id.add_node_with_children('clone-id-info',
**{'clone-op-id': clone_id,
'volume-uuid': vol_uuid})
running = True
clone_ops_info = None
while running:
result = self.connection.invoke_successfully(clone_status, True)
status = result.get_child_by_name('status')
ops_info = status.get_children()
if ops_info:
for info in ops_info:
if info.get_child_content('clone-state') == 'running':
time.sleep(1)
break
else:
running = False
clone_ops_info = info
break
else:
if clone_ops_info:
fmt = {'name': name, 'new_name': new_name}
if clone_ops_info.get_child_content('clone-state')\
== 'completed':
LOG.debug("Clone operation with src %(name)s"
" and dest %(new_name)s completed", fmt)
else:
LOG.debug("Clone operation with src %(name)s"
" and dest %(new_name)s failed", fmt)
raise netapp_api.NaApiError(
clone_ops_info.get_child_content('error'),
clone_ops_info.get_child_content('reason'))
def get_lun_by_args(self, **args):
"""Retrieves LUNs with specified args."""
lun_info = netapp_api.NaElement.create_node_with_children(
'lun-list-info', **args)
result = self.connection.invoke_successfully(lun_info, True)
luns = result.get_child_by_name('luns')
return luns.get_children()
def get_filer_volumes(self, volume=None):
"""Returns list of filer volumes in API format."""
vol_request = netapp_api.NaElement('volume-list-info')
res = self.connection.invoke_successfully(vol_request, True)
volumes = res.get_child_by_name('volumes')
if volumes:
return volumes.get_children()
return []
def get_lun_map(self, path):
lun_map_list = netapp_api.NaElement.create_node_with_children(
'lun-map-list-info',
**{'path': path})
return self.connection.invoke_successfully(lun_map_list, True)
def set_space_reserve(self, path, enable):
"""Sets the space reserve info."""
space_res = netapp_api.NaElement.create_node_with_children(
'lun-set-space-reservation-info',
**{'path': path, 'enable': enable})
self.connection.invoke_successfully(space_res, True)
def get_actual_path_for_export(self, export_path):
"""Gets the actual path on the filer for export path."""
storage_path = netapp_api.NaElement.create_node_with_children(
'nfs-exportfs-storage-path', **{'pathname': export_path})
result = self.connection.invoke_successfully(storage_path,
enable_tunneling=True)
if result.get_child_content('actual-pathname'):
return result.get_child_content('actual-pathname')
raise exception.NotFound(_('No storage path found for export path %s')
% (export_path))
def clone_file(self, src_path, dest_path, source_snapshot=None):
LOG.debug("Cloning with src %(src_path)s, dest %(dest_path)s",
{'src_path': src_path, 'dest_path': dest_path})
zapi_args = {
'source-path': src_path,
'destination-path': dest_path,
'no-snap': 'true',
}
if source_snapshot:
zapi_args['snapshot-name'] = source_snapshot
clone_start = netapp_api.NaElement.create_node_with_children(
'clone-start', **zapi_args)
result = self.connection.invoke_successfully(clone_start,
enable_tunneling=True)
clone_id_el = result.get_child_by_name('clone-id')
cl_id_info = clone_id_el.get_child_by_name('clone-id-info')
vol_uuid = cl_id_info.get_child_content('volume-uuid')
clone_id = cl_id_info.get_child_content('clone-op-id')
if vol_uuid:
try:
self._wait_for_clone_finish(clone_id, vol_uuid)
except netapp_api.NaApiError as e:
if e.code != 'UnknownCloneId':
self._clear_clone(clone_id)
raise
def _wait_for_clone_finish(self, clone_op_id, vol_uuid):
"""Waits till a clone operation is complete or errored out."""
clone_ls_st = netapp_api.NaElement('clone-list-status')
clone_id = netapp_api.NaElement('clone-id')
clone_ls_st.add_child_elem(clone_id)
clone_id.add_node_with_children('clone-id-info',
**{'clone-op-id': clone_op_id,
'volume-uuid': vol_uuid})
task_running = True
while task_running:
result = self.connection.invoke_successfully(clone_ls_st,
enable_tunneling=True)
status = result.get_child_by_name('status')
ops_info = status.get_children()
if ops_info:
state = ops_info[0].get_child_content('clone-state')
if state == 'completed':
task_running = False
elif state == 'failed':
code = ops_info[0].get_child_content('error')
reason = ops_info[0].get_child_content('reason')
raise netapp_api.NaApiError(code, reason)
else:
time.sleep(1)
else:
raise netapp_api.NaApiError(
'UnknownCloneId',
'No clone operation for clone id %s found on the filer'
% (clone_id))
def _clear_clone(self, clone_id):
"""Clear the clone information.
Invoke this in case of failed clone.
"""
clone_clear = netapp_api.NaElement.create_node_with_children(
'clone-clear',
**{'clone-id': clone_id})
retry = 3
while retry:
try:
self.connection.invoke_successfully(clone_clear,
enable_tunneling=True)
break
except netapp_api.NaApiError:
# Filer might be rebooting
time.sleep(5)
retry = retry - 1
def get_file_usage(self, path):
"""Gets the file unique bytes."""
LOG.debug('Getting file usage for %s', path)
file_use = netapp_api.NaElement.create_node_with_children(
'file-usage-get', **{'path': path})
res = self.connection.invoke_successfully(file_use)
bytes = res.get_child_content('unique-bytes')
LOG.debug('file-usage for path %(path)s is %(bytes)s',
{'path': path, 'bytes': bytes})
return bytes
def get_ifconfig(self):
ifconfig = netapp_api.NaElement('net-ifconfig-get')
return self.connection.invoke_successfully(ifconfig)
def get_flexvol_capacity(self, flexvol_path):
"""Gets total capacity and free capacity, in bytes, of the flexvol."""
api_args = {'volume': flexvol_path, 'verbose': 'false'}
result = self.send_request('volume-list-info', api_args)
flexvol_info_list = result.get_child_by_name('volumes')
flexvol_info = flexvol_info_list.get_children()[0]
size_total = float(flexvol_info.get_child_content('size-total'))
size_available = float(
flexvol_info.get_child_content('size-available'))
return {
'size-total': size_total,
'size-available': size_available,
}
def get_performance_instance_names(self, object_name):
"""Get names of performance instances for a node."""
api_args = {'objectname': object_name}
result = self.send_request('perf-object-instance-list-info',
api_args,
enable_tunneling=False)
instance_names = []
instances = result.get_child_by_name(
'instances') or netapp_api.NaElement('None')
for instance_info in instances.get_children():
instance_names.append(instance_info.get_child_content('name'))
return instance_names
def get_performance_counters(self, object_name, instance_names,
counter_names):
"""Gets or or more 7-mode Data ONTAP performance counters."""
api_args = {
'objectname': object_name,
'instances': [
{'instance': instance} for instance in instance_names
],
'counters': [
{'counter': counter} for counter in counter_names
],
}
result = self.send_request('perf-object-get-instances',
api_args,
enable_tunneling=False)
counter_data = []
timestamp = result.get_child_content('timestamp')
instances = result.get_child_by_name(
'instances') or netapp_api.NaElement('None')
for instance in instances.get_children():
instance_name = instance.get_child_content('name')
counters = instance.get_child_by_name(
'counters') or netapp_api.NaElement('None')
for counter in counters.get_children():
counter_name = counter.get_child_content('name')
counter_value = counter.get_child_content('value')
counter_data.append({
'instance-name': instance_name,
'timestamp': timestamp,
counter_name: counter_value,
})
return counter_data
def get_system_name(self):
"""Get the name of the 7-mode Data ONTAP controller."""
result = self.send_request('system-get-info',
{},
enable_tunneling=False)
system_info = result.get_child_by_name('system-info')
system_name = system_info.get_child_content('system-name')
return system_name
def get_snapshot(self, volume_name, snapshot_name):
"""Gets a single snapshot."""
snapshot_list_info = netapp_api.NaElement('snapshot-list-info')
snapshot_list_info.add_new_child('volume', volume_name)
result = self.connection.invoke_successfully(snapshot_list_info,
enable_tunneling=True)
snapshots = result.get_child_by_name('snapshots')
if not snapshots:
msg = _('No snapshots could be found on volume %s.')
raise exception.VolumeBackendAPIException(data=msg % volume_name)
snapshot_list = snapshots.get_children()
snapshot = None
for s in snapshot_list:
if (snapshot_name == s.get_child_content('name')) and (snapshot
is None):
snapshot = {
'name': s.get_child_content('name'),
'volume': s.get_child_content('volume'),
'busy': strutils.bool_from_string(
s.get_child_content('busy')),
}
snapshot_owners_list = s.get_child_by_name(
'snapshot-owners-list') or netapp_api.NaElement('none')
snapshot_owners = set([snapshot_owner.get_child_content(
'owner') for snapshot_owner in
snapshot_owners_list.get_children()])
snapshot['owners'] = snapshot_owners
elif (snapshot_name == s.get_child_content('name')) and (
snapshot is not None):
msg = _('Could not find unique snapshot %(snap)s on '
'volume %(vol)s.')
msg_args = {'snap': snapshot_name, 'vol': volume_name}
raise exception.VolumeBackendAPIException(data=msg % msg_args)
if not snapshot:
raise exception.SnapshotNotFound(snapshot_id=snapshot_name)
return snapshot
def get_snapshots_marked_for_deletion(self, volume_list=None):
"""Get a list of snapshots marked for deletion."""
snapshots = []
for volume_name in volume_list:
api_args = {
'target-name': volume_name,
'target-type': 'volume',
'terse': 'true',
}
result = self.send_request('snapshot-list-info', api_args)
snapshots.extend(
self._parse_snapshot_list_info_result(result, volume_name))
return snapshots
def _parse_snapshot_list_info_result(self, result, volume_name):
snapshots = []
snapshots_elem = result.get_child_by_name(
'snapshots') or netapp_api.NaElement('none')
snapshot_info_list = snapshots_elem.get_children()
for snapshot_info in snapshot_info_list:
snapshot_name = snapshot_info.get_child_content('name')
snapshot_busy = strutils.bool_from_string(
snapshot_info.get_child_content('busy'))
snapshot_id = snapshot_info.get_child_content(
'snapshot-instance-uuid')
if (not snapshot_busy and
snapshot_name.startswith(client_base.DELETED_PREFIX)):
snapshots.append({
'name': snapshot_name,
'instance_id': snapshot_id,
'volume_name': volume_name,
})
return snapshots

View File

@ -1660,12 +1660,8 @@ class Client(client_base.Client):
return counter_data
def get_snapshots_marked_for_deletion(self, volume_list=None):
"""Get a list of snapshots marked for deletion.
:param volume_list: Placeholder parameter to match 7mode client method
signature.
"""
def get_snapshots_marked_for_deletion(self):
"""Get a list of snapshots marked for deletion."""
api_args = {
'query': {

View File

@ -1,134 +0,0 @@
# Copyright (c) - 2014, Clinton Knight. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp Data ONTAP (7-mode) FibreChannel storage systems.
"""
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap import block_7mode
from cinder.zonemanager import utils as fczm_utils
@interface.volumedriver
class NetApp7modeFibreChannelDriver(driver.BaseVD,
driver.ManageableVD):
"""NetApp 7-mode FibreChannel volume driver."""
DRIVER_NAME = 'NetApp_FibreChannel_7mode_direct'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "NetApp_CI"
VERSION = block_7mode.NetAppBlockStorage7modeLibrary.VERSION
def __init__(self, *args, **kwargs):
super(NetApp7modeFibreChannelDriver, self).__init__(*args, **kwargs)
self.library = block_7mode.NetAppBlockStorage7modeLibrary(
self.DRIVER_NAME, 'FC', **kwargs)
def do_setup(self, context):
self.library.do_setup(context)
def check_for_setup_error(self):
self.library.check_for_setup_error()
def create_volume(self, volume):
return self.library.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
return self.library.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
return self.library.create_cloned_volume(volume, src_vref)
def delete_volume(self, volume):
self.library.delete_volume(volume)
def create_snapshot(self, snapshot):
self.library.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
self.library.delete_snapshot(snapshot)
def get_volume_stats(self, refresh=False):
return self.library.get_volume_stats(refresh,
self.get_filter_function(),
self.get_goodness_function())
def get_default_filter_function(self):
return self.library.get_default_filter_function()
def get_default_goodness_function(self):
return self.library.get_default_goodness_function()
def extend_volume(self, volume, new_size):
self.library.extend_volume(volume, new_size)
def ensure_export(self, context, volume):
return self.library.ensure_export(context, volume)
def create_export(self, context, volume, connector):
return self.library.create_export(context, volume)
def remove_export(self, context, volume):
self.library.remove_export(context, volume)
def manage_existing(self, volume, existing_ref):
return self.library.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
return self.library.manage_existing_get_size(volume, existing_ref)
def unmanage(self, volume):
return self.library.unmanage(volume)
@fczm_utils.add_fc_zone
def initialize_connection(self, volume, connector):
return self.library.initialize_connection_fc(volume, connector)
@fczm_utils.remove_fc_zone
def terminate_connection(self, volume, connector, **kwargs):
return self.library.terminate_connection_fc(volume, connector,
**kwargs)
def get_pool(self, volume):
return self.library.get_pool(volume)
def create_consistencygroup(self, context, group):
return self.library.create_consistencygroup(group)
def delete_consistencygroup(self, context, group, volumes):
return self.library.delete_consistencygroup(group, volumes)
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
return self.library.update_consistencygroup(group, add_volumes=None,
remove_volumes=None)
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
return self.library.create_cgsnapshot(cgsnapshot, snapshots)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
return self.library.delete_cgsnapshot(cgsnapshot, snapshots)
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
return self.library.create_consistencygroup_from_src(
group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots,
source_cg=source_cg, source_vols=source_vols)
def failover_host(self, context, volumes, secondary_id=None, groups=None):
raise NotImplementedError()

View File

@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp Data ONTAP (C-mode) FibreChannel storage systems.
Volume driver for NetApp Data ONTAP FibreChannel storage systems.
"""
from cinder import interface

View File

@ -1,131 +0,0 @@
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp Data ONTAP (7-mode) iSCSI storage systems.
"""
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap import block_7mode
@interface.volumedriver
class NetApp7modeISCSIDriver(driver.BaseVD,
driver.ManageableVD):
"""NetApp 7-mode iSCSI volume driver."""
DRIVER_NAME = 'NetApp_iSCSI_7mode_direct'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "NetApp_CI"
VERSION = block_7mode.NetAppBlockStorage7modeLibrary.VERSION
def __init__(self, *args, **kwargs):
super(NetApp7modeISCSIDriver, self).__init__(*args, **kwargs)
self.library = block_7mode.NetAppBlockStorage7modeLibrary(
self.DRIVER_NAME, 'iSCSI', **kwargs)
def do_setup(self, context):
self.library.do_setup(context)
def check_for_setup_error(self):
self.library.check_for_setup_error()
def create_volume(self, volume):
return self.library.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
return self.library.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
return self.library.create_cloned_volume(volume, src_vref)
def delete_volume(self, volume):
self.library.delete_volume(volume)
def create_snapshot(self, snapshot):
self.library.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
self.library.delete_snapshot(snapshot)
def get_volume_stats(self, refresh=False):
return self.library.get_volume_stats(refresh,
self.get_filter_function(),
self.get_goodness_function())
def get_default_filter_function(self):
return self.library.get_default_filter_function()
def get_default_goodness_function(self):
return self.library.get_default_goodness_function()
def extend_volume(self, volume, new_size):
self.library.extend_volume(volume, new_size)
def ensure_export(self, context, volume):
return self.library.ensure_export(context, volume)
def create_export(self, context, volume, connector):
return self.library.create_export(context, volume)
def remove_export(self, context, volume):
self.library.remove_export(context, volume)
def manage_existing(self, volume, existing_ref):
return self.library.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
return self.library.manage_existing_get_size(volume, existing_ref)
def unmanage(self, volume):
return self.library.unmanage(volume)
def initialize_connection(self, volume, connector):
return self.library.initialize_connection_iscsi(volume, connector)
def terminate_connection(self, volume, connector, **kwargs):
return self.library.terminate_connection_iscsi(volume, connector,
**kwargs)
def get_pool(self, volume):
return self.library.get_pool(volume)
def create_consistencygroup(self, context, group):
return self.library.create_consistencygroup(group)
def delete_consistencygroup(self, context, group, volumes):
return self.library.delete_consistencygroup(group, volumes)
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
return self.library.update_consistencygroup(group, add_volumes=None,
remove_volumes=None)
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
return self.library.create_cgsnapshot(cgsnapshot, snapshots)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
return self.library.delete_cgsnapshot(cgsnapshot, snapshots)
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
return self.library.create_consistencygroup_from_src(
group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots,
source_cg=source_cg, source_vols=source_vols)
def failover_host(self, context, volumes, secondary_id=None, groups=None):
raise NotImplementedError()

View File

@ -1,432 +0,0 @@
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Bob Callaway. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp NFS storage.
"""
import os
from oslo_log import log as logging
from oslo_log import versionutils
import six
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder.objects import fields
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import client_7mode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode
from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
@six.add_metaclass(utils.TraceWrapperWithABCMetaclass)
@interface.volumedriver
class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
"""NetApp NFS driver for Data ONTAP (7-mode)."""
# ThirdPartySystems wiki page
CI_WIKI_NAME = "NetApp_CI"
def __init__(self, *args, **kwargs):
super(NetApp7modeNfsDriver, self).__init__(*args, **kwargs)
self.driver_name = 'NetApp_NFS_7mode_direct'
self.driver_mode = '7mode'
self.configuration.append_config_values(na_opts.netapp_7mode_opts)
def do_setup(self, context):
"""Do the customized set up on client if any for 7 mode."""
super(NetApp7modeNfsDriver, self).do_setup(context)
self.zapi_client = client_7mode.Client(
transport_type=self.configuration.netapp_transport_type,
username=self.configuration.netapp_login,
password=self.configuration.netapp_password,
hostname=self.configuration.netapp_server_hostname,
port=self.configuration.netapp_server_port,
vfiler=self.configuration.netapp_vfiler)
self.perf_library = perf_7mode.Performance7modeLibrary(
self.zapi_client)
# This driver has been marked 'deprecated' in the Ocata release and
# can be removed in Queens.
msg = _("The 7-mode Data ONTAP driver is deprecated and will be "
"removed in a future release.")
versionutils.report_deprecated_feature(LOG, msg)
def check_for_setup_error(self):
"""Checks if setup occurred properly."""
api_version = self.zapi_client.get_ontapi_version()
if api_version:
major, minor = api_version
if major == 1 and minor < 9:
msg = _("Unsupported Data ONTAP version."
" Data ONTAP version 7.3.1 and above is supported.")
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _("Data ONTAP API version could not be determined.")
raise exception.VolumeBackendAPIException(data=msg)
self._add_looping_tasks()
super(NetApp7modeNfsDriver, self).check_for_setup_error()
def _add_looping_tasks(self):
"""Add tasks that need to be executed at a fixed interval."""
super(NetApp7modeNfsDriver, self)._add_looping_tasks()
def _handle_ems_logging(self):
"""Log autosupport messages."""
base_ems_message = dot_utils.build_ems_log_message_0(
self.driver_name, self.app_version, self.driver_mode)
self.zapi_client.send_ems_log_message(base_ems_message)
pool_ems_message = dot_utils.build_ems_log_message_1(
self.driver_name, self.app_version, None,
self._get_backing_flexvol_names(), [])
self.zapi_client.send_ems_log_message(pool_ems_message)
def _clone_backing_file_for_volume(self, volume_name, clone_name,
volume_id, share=None,
is_snapshot=False,
source_snapshot=None):
"""Clone backing file for Cinder volume.
:param is_snapshot: Not used, present for method signature consistency
"""
(_host_ip, export_path) = self._get_export_ip_path(volume_id, share)
storage_path = self.zapi_client.get_actual_path_for_export(export_path)
target_path = '%s/%s' % (storage_path, clone_name)
self.zapi_client.clone_file('%s/%s' % (storage_path, volume_name),
target_path, source_snapshot)
def _update_volume_stats(self):
"""Retrieve stats info from vserver."""
self._ensure_shares_mounted()
LOG.debug('Updating volume stats')
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.driver_name
data['vendor_name'] = 'NetApp'
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'nfs'
data['pools'] = self._get_pool_stats(
filter_function=self.get_filter_function(),
goodness_function=self.get_goodness_function())
data['sparse_copy_volume'] = True
self._spawn_clean_cache_job()
self._stats = data
def _get_pool_stats(self, filter_function=None, goodness_function=None):
"""Retrieve pool (i.e. NFS share) stats info from SSC volumes."""
pools = []
self.perf_library.update_performance_cache()
for nfs_share in self._mounted_shares:
capacity = self._get_share_capacity_info(nfs_share)
pool = dict()
pool['pool_name'] = nfs_share
pool['QoS_support'] = False
pool['multiattach'] = False
pool.update(capacity)
thick = not self.configuration.nfs_sparsed_volumes
pool['thick_provisioning_support'] = thick
pool['thin_provisioning_support'] = not thick
utilization = self.perf_library.get_node_utilization()
pool['utilization'] = na_utils.round_down(utilization, '0.01')
pool['filter_function'] = filter_function
pool['goodness_function'] = goodness_function
pool['consistencygroup_support'] = True
pools.append(pool)
return pools
def _shortlist_del_eligible_files(self, share, old_files):
"""Prepares list of eligible files to be deleted from cache."""
file_list = []
(_, export_path) = self._get_export_ip_path(share=share)
exported_volume = self.zapi_client.get_actual_path_for_export(
export_path)
for old_file in old_files:
path = os.path.join(exported_volume, old_file)
u_bytes = self.zapi_client.get_file_usage(path)
file_list.append((old_file, u_bytes))
LOG.debug('Shortlisted files eligible for deletion: %s', file_list)
return file_list
def _is_filer_ip(self, ip):
"""Checks whether ip is on the same filer."""
try:
ifconfig = self.zapi_client.get_ifconfig()
if_info = ifconfig.get_child_by_name('interface-config-info')
if if_info:
ifs = if_info.get_children()
for intf in ifs:
v4_addr = intf.get_child_by_name('v4-primary-address')
if v4_addr:
ip_info = v4_addr.get_child_by_name('ip-address-info')
if ip_info:
address = ip_info.get_child_content('address')
if ip == address:
return True
else:
continue
except Exception:
return False
return False
def _share_match_for_ip(self, ip, shares):
"""Returns the share that is served by ip.
Multiple shares can have the same directory path but
be served from different IPs. This finds the
share that is served by the given IP on the same NFS server.
"""
if self._is_filer_ip(ip) and shares:
for share in shares:
ip_sh = share.split(':')[0]
if self._is_filer_ip(ip_sh):
LOG.debug('Share match found for ip %s', ip)
return share
LOG.debug('No share match found for ip %s', ip)
return None
def _is_share_clone_compatible(self, volume, share):
"""Checks if share is compatible with volume to host its clone."""
thin = self.configuration.nfs_sparsed_volumes
return self._share_has_space_for_clone(share, volume['size'], thin)
def _check_volume_type(self, volume, share, file_name, extra_specs):
"""Matches a volume type for share file."""
qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
if extra_specs else None
if qos_policy_group:
raise exception.ManageExistingVolumeTypeMismatch(
reason=(_("Setting file qos policy group is not supported"
" on this storage family and ontap version.")))
volume_type = na_utils.get_volume_type_from_volume(volume)
if volume_type and 'qos_spec_id' in volume_type:
raise exception.ManageExistingVolumeTypeMismatch(
reason=_("QoS specs are not supported"
" on this storage family and ONTAP version."))
def _do_qos_for_volume(self, volume, extra_specs, cleanup=False):
"""Set QoS policy on backend from volume type information."""
# 7-mode DOT does not support QoS.
return
def _get_volume_model_update(self, volume):
"""Provide any updates necessary for a volume being created/managed."""
def _get_backing_flexvol_names(self):
"""Returns a list of backing flexvol names."""
flexvol_names = []
for nfs_share in self._mounted_shares:
flexvol_name = nfs_share.rsplit('/', 1)[1]
flexvol_names.append(flexvol_name)
LOG.debug("Found flexvol %s", flexvol_name)
return flexvol_names
def _get_flexvol_names_from_hosts(self, hosts):
"""Returns a set of flexvol names."""
flexvols = set()
for host in hosts:
pool_name = volume_utils.extract_host(host, level='pool')
flexvol_name = pool_name.rsplit('/', 1)[1]
flexvols.add(flexvol_name)
return flexvols
@utils.trace_method
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Delete files backing each snapshot in the cgsnapshot.
:return: An implicit update of snapshot models that the manager will
interpret and subsequently set the model state to deleted.
"""
for snapshot in snapshots:
self._delete_file(snapshot['volume_id'], snapshot['name'])
LOG.debug("Snapshot %s deletion successful", snapshot['name'])
return None, None
@utils.trace_method
def create_consistencygroup(self, context, group):
"""Driver entry point for creating a consistency group.
ONTAP does not maintain an actual CG construct. As a result, no
communication to the backend is necessary for consistency group
creation.
:returns: Hard-coded model update for consistency group model.
"""
model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
return model_update
@utils.trace_method
def delete_consistencygroup(self, context, group, volumes):
"""Driver entry point for deleting a consistency group.
:returns: Updated consistency group model and list of volume models
for the volumes that were deleted.
"""
model_update = {'status': fields.ConsistencyGroupStatus.DELETED}
volumes_model_update = []
for volume in volumes:
try:
self._delete_file(volume['id'], volume['name'])
volumes_model_update.append(
{'id': volume['id'], 'status': 'deleted'})
except Exception:
volumes_model_update.append(
{'id': volume['id'],
'status': 'error_deleting'})
LOG.exception("Volume %(vol)s in the consistency group "
"could not be deleted.", {'vol': volume})
return model_update, volumes_model_update
@utils.trace_method
def update_consistencygroup(self, context, group, add_volumes=None,
remove_volumes=None):
"""Driver entry point for updating a consistency group.
Since no actual CG construct is ever created in ONTAP, it is not
necessary to update any metadata on the backend. Since this is a NO-OP,
there is guaranteed to be no change in any of the volumes' statuses.
"""
return None, None, None
@utils.trace_method
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a Cinder cgsnapshot object.
The Cinder cgsnapshot object is created by making use of an ONTAP CG
snapshot in order to provide write-order consistency for a set of
backing flexvols. First, a list of the flexvols backing the given
Cinder volumes in the CG is determined. An ONTAP CG snapshot of the
flexvols creates a write-order consistent snapshot of each backing
flexvol. For each Cinder volume in the CG, it is then necessary to
clone its volume from the ONTAP CG snapshot. The naming convention
used to create the clones indicates the clone's role as a Cinder
snapshot and its inclusion in a Cinder CG snapshot. The ONTAP CG
snapshots, of each backing flexvol, are deleted after the cloning
operation is completed.
:returns: An implicit update for the cgsnapshot and snapshot models
that is then used by the manager to set the models to
available.
"""
hosts = [snapshot['volume']['host'] for snapshot in snapshots]
flexvols = self._get_flexvol_names_from_hosts(hosts)
# Create snapshot for backing flexvol
self.zapi_client.create_cg_snapshot(flexvols, cgsnapshot['id'])
# Start clone process for snapshot files
for snapshot in snapshots:
self._clone_backing_file_for_volume(
snapshot['volume']['name'], snapshot['name'],
snapshot['volume']['id'], source_snapshot=cgsnapshot['id'])
# Delete backing flexvol snapshots
for flexvol_name in flexvols:
try:
self.zapi_client.wait_for_busy_snapshot(
flexvol_name, cgsnapshot['id'])
self.zapi_client.delete_snapshot(
flexvol_name, cgsnapshot['id'])
except exception.SnapshotIsBusy:
self.zapi_client.mark_snapshot_for_deletion(
flexvol_name, cgsnapshot['id'])
return None, None
@utils.trace_method
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
"""Creates a CG from a either a cgsnapshot or group of cinder vols.
:returns: An implicit update for the volumes model that is
interpreted by the manager as a successful operation.
"""
LOG.debug("VOLUMES %s ", ', '.join([vol['id'] for vol in volumes]))
model_update = None
volumes_model_update = []
if cgsnapshot:
vols = zip(volumes, snapshots)
for volume, snapshot in vols:
update = self.create_volume_from_snapshot(
volume, snapshot)
update['id'] = volume['id']
volumes_model_update.append(update)
elif source_cg and source_vols:
hosts = [source_vol['host'] for source_vol in source_vols]
flexvols = self._get_flexvol_names_from_hosts(hosts)
# Create snapshot for backing flexvol
snapshot_name = 'snapshot-temp-' + source_cg['id']
self.zapi_client.create_cg_snapshot(flexvols, snapshot_name)
# Start clone process for new volumes
vols = zip(volumes, source_vols)
for volume, source_vol in vols:
self._clone_backing_file_for_volume(
source_vol['name'], volume['name'],
source_vol['id'], source_snapshot=snapshot_name)
volume_model_update = (
self._get_volume_model_update(volume) or {})
volume_model_update.update({
'id': volume['id'],
'provider_location': source_vol['provider_location'],
})
volumes_model_update.append(volume_model_update)
# Delete backing flexvol snapshots
for flexvol_name in flexvols:
self.zapi_client.wait_for_busy_snapshot(
flexvol_name, snapshot_name)
self.zapi_client.delete_snapshot(flexvol_name, snapshot_name)
else:
LOG.error("Unexpected set of parameters received when "
"creating consistency group from source.")
model_update = {'status': fields.ConsistencyGroupStatus.ERROR}
return model_update, volumes_model_update

View File

@ -127,7 +127,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
"""Log autosupport messages."""
base_ems_message = dot_utils.build_ems_log_message_0(
self.driver_name, self.app_version, self.driver_mode)
self.driver_name, self.app_version)
self.zapi_client.send_ems_log_message(base_ems_message)
pool_ems_message = dot_utils.build_ems_log_message_1(

View File

@ -1,148 +0,0 @@
# Copyright (c) 2016 Clinton Knight
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Performance metrics functions and cache for NetApp 7-mode Data ONTAP systems.
"""
from oslo_log import log as logging
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.performance import perf_base
LOG = logging.getLogger(__name__)
class Performance7modeLibrary(perf_base.PerformanceLibrary):
def __init__(self, zapi_client):
super(Performance7modeLibrary, self).__init__(zapi_client)
self.performance_counters = []
self.utilization = perf_base.DEFAULT_UTILIZATION
self.node_name = self.zapi_client.get_system_name()
def _init_counter_info(self):
"""Set a few counter names based on Data ONTAP version."""
super(Performance7modeLibrary, self)._init_counter_info()
if self.zapi_client.features.SYSTEM_METRICS:
self.system_object_name = 'system'
try:
self.avg_processor_busy_base_counter_name = (
self._get_base_counter_name('system',
'avg_processor_busy'))
except netapp_api.NaApiError:
self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1'
LOG.exception('Could not get performance base counter '
'name. Performance-based scheduler '
'functions may not be available.')
def update_performance_cache(self):
"""Called periodically to update node utilization metrics."""
# Nothing to do on older systems
if not self.zapi_client.features.SYSTEM_METRICS:
return
# Get new performance counters and save only the last 10
counters = self._get_node_utilization_counters()
if not counters:
return
self.performance_counters.append(counters)
self.performance_counters = self.performance_counters[-10:]
# Update utilization using newest & oldest sample
if len(self.performance_counters) < 2:
self.utilization = perf_base.DEFAULT_UTILIZATION
else:
self.utilization = self._get_node_utilization(
self.performance_counters[0], self.performance_counters[-1],
self.node_name)
def get_node_utilization(self):
"""Get the node utilization, if available."""
return self.utilization
def _get_node_utilization_counters(self):
"""Get all performance counters for calculating node utilization."""
try:
return (self._get_node_utilization_system_counters() +
self._get_node_utilization_wafl_counters() +
self._get_node_utilization_processor_counters())
except netapp_api.NaApiError:
LOG.exception('Could not get utilization counters from node '
'%s', self.node_name)
return None
def _get_node_utilization_system_counters(self):
"""Get the system counters for calculating node utilization."""
system_instance_names = (
self.zapi_client.get_performance_instance_names(
self.system_object_name))
system_counter_names = [
'avg_processor_busy',
self.avg_processor_busy_base_counter_name,
]
if 'cpu_elapsed_time1' in system_counter_names:
system_counter_names.append('cpu_elapsed_time')
system_counters = self.zapi_client.get_performance_counters(
self.system_object_name, system_instance_names,
system_counter_names)
return system_counters
def _get_node_utilization_wafl_counters(self):
"""Get the WAFL counters for calculating node utilization."""
wafl_instance_names = self.zapi_client.get_performance_instance_names(
'wafl')
wafl_counter_names = ['total_cp_msecs', 'cp_phase_times']
wafl_counters = self.zapi_client.get_performance_counters(
'wafl', wafl_instance_names, wafl_counter_names)
# Expand array data so we can use wafl:cp_phase_times[P2_FLUSH]
for counter in wafl_counters:
if 'cp_phase_times' in counter:
self._expand_performance_array(
'wafl', 'cp_phase_times', counter)
return wafl_counters
def _get_node_utilization_processor_counters(self):
"""Get the processor counters for calculating node utilization."""
processor_instance_names = (
self.zapi_client.get_performance_instance_names('processor'))
processor_counter_names = ['domain_busy', 'processor_elapsed_time']
processor_counters = self.zapi_client.get_performance_counters(
'processor', processor_instance_names, processor_counter_names)
# Expand array data so we can use processor:domain_busy[kahuna]
for counter in processor_counters:
if 'domain_busy' in counter:
self._expand_performance_array(
'processor', 'domain_busy', counter)
return processor_counters

View File

@ -90,14 +90,11 @@ def _build_base_ems_log_message(driver_name, app_version):
return ems_log
def build_ems_log_message_0(driver_name, app_version, driver_mode):
def build_ems_log_message_0(driver_name, app_version):
"""Construct EMS Autosupport log message with deployment info."""
dest = 'cluster node' if driver_mode == 'cluster' else '7 mode controller'
ems_log = _build_base_ems_log_message(driver_name, app_version)
ems_log['event-id'] = '0'
ems_log['event-description'] = 'OpenStack Cinder connected to %s' % dest
ems_log['event-description'] = 'OpenStack Cinder connected to cluster node'
return ems_log
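(Not part of this change.) To make the effect of the simplified signature concrete, here is a small self-contained sketch. The base-message builder below is a stand-in, since the real _build_base_ems_log_message adds further autosupport fields, and the driver name and version strings are illustrative.

# Sketch only: a stand-in base builder plus the post-change message shape.
def _build_base_ems_log_message_stub(driver_name, app_version):
    # The real helper populates additional autosupport fields.
    return {'event-source': driver_name, 'app-version': app_version}

def build_ems_log_message_0(driver_name, app_version):
    ems_log = _build_base_ems_log_message_stub(driver_name, app_version)
    ems_log['event-id'] = '0'
    ems_log['event-description'] = 'OpenStack Cinder connected to cluster node'
    return ems_log

message = build_ems_log_message_0('NetApp_iSCSI_Cluster_direct', '12.0.0')
# A driver would then hand this dict to zapi_client.send_ems_log_message(message).
print(message)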

View File

@ -34,11 +34,10 @@ NETAPP_SIZE_MULTIPLIER_DEFAULT = 1.2
netapp_proxy_opts = [
cfg.StrOpt('netapp_storage_family',
default='ontap_cluster',
choices=['ontap_7mode', 'ontap_cluster', 'eseries'],
choices=['ontap_cluster', 'eseries'],
help=('The storage family type used on the storage system; '
'valid values are ontap_7mode for using Data ONTAP '
'operating in 7-Mode, ontap_cluster for using '
'clustered Data ONTAP, or eseries for using E-Series.')),
'valid values are ontap_cluster for using clustered '
'Data ONTAP, or eseries for using E-Series.')),
cfg.StrOpt('netapp_storage_protocol',
choices=['iscsi', 'fc', 'nfs'],
help=('The storage protocol to be used on the data path with '
@ -93,21 +92,6 @@ netapp_cluster_opts = [
'(Vserver) name on the storage cluster on which '
'provisioning of block storage volumes should occur.')), ]
netapp_7mode_opts = [
cfg.StrOpt('netapp_vfiler',
help=('The vFiler unit on which provisioning of block storage '
'volumes will be done. This option is only used by the '
'driver when connecting to an instance with a storage '
'family of Data ONTAP operating in 7-Mode. Only use this '
'option when utilizing the MultiStore feature on the '
'NetApp storage system.')),
cfg.StrOpt('netapp_partner_backend_name',
help=('The name of the config.conf stanza for a Data ONTAP '
'(7-mode) HA partner. This option is only used by the '
'driver when connecting to an instance with a storage '
'family of Data ONTAP operating in 7-Mode, and it is '
'required if the storage protocol selected is FC.')), ]
netapp_img_cache_opts = [
cfg.IntOpt('thres_avl_size_perc_start',
default=20,
@ -220,7 +204,6 @@ CONF.register_opts(netapp_connection_opts, group=conf.SHARED_CONF_GROUP)
CONF.register_opts(netapp_transport_opts, group=conf.SHARED_CONF_GROUP)
CONF.register_opts(netapp_basicauth_opts, group=conf.SHARED_CONF_GROUP)
CONF.register_opts(netapp_cluster_opts, group=conf.SHARED_CONF_GROUP)
CONF.register_opts(netapp_7mode_opts, group=conf.SHARED_CONF_GROUP)
CONF.register_opts(netapp_provisioning_opts, group=conf.SHARED_CONF_GROUP)
CONF.register_opts(netapp_img_cache_opts, group=conf.SHARED_CONF_GROUP)
CONF.register_opts(netapp_eseries_opts, group=conf.SHARED_CONF_GROUP)
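(Not part of this change.) As a quick illustration of how the narrowed choices list behaves, here is a minimal sketch using oslo.config directly; the backend group name is hypothetical. Selecting the removed ontap_7mode family is now rejected by option validation.

from oslo_config import cfg

conf = cfg.ConfigOpts()
conf.register_opts([
    cfg.StrOpt('netapp_storage_family',
               default='ontap_cluster',
               choices=['ontap_cluster', 'eseries'],
               help='Storage family type used on the storage system.'),
], group='netapp_backend')   # hypothetical group name

conf([])   # parse with no CLI args or config files
print(conf.netapp_backend.netapp_storage_family)   # -> ontap_cluster

# A cinder.conf that sets netapp_storage_family = ontap_7mode in this group
# now fails oslo.config's choices validation when the option is read.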

View File

@ -4,32 +4,19 @@ NetApp unified driver
The NetApp unified driver is a Block Storage driver that supports
multiple storage families and protocols. A storage family corresponds to
storage systems built on different NetApp technologies such as clustered
Data ONTAP, Data ONTAP operating in 7-Mode, and E-Series. The storage
protocol refers to the protocol used to initiate data storage and access
operations on those storage systems like iSCSI and NFS. The NetApp
unified driver can be configured to provision and manage OpenStack
volumes on a given storage family using a specified storage protocol.
storage systems built on either clustered Data ONTAP or E-Series. The
storage protocol refers to the protocol used to initiate data storage and
access operations on those storage systems like iSCSI and NFS. The NetApp
unified driver can be configured to provision and manage OpenStack volumes
on a given storage family using a specified storage protocol.
Also, the NetApp unified driver supports over subscription or over
provisioning when thin provisioned Block Storage volumes are in use
on an E-Series backend. The OpenStack volumes can then be used for
provisioning when thin provisioned Block Storage volumes are in use.
The OpenStack volumes can then be used for
accessing and storing data using the storage protocol on the storage
family system. The NetApp unified driver is an extensible interface
that can support new storage families and protocols.
.. important::
The NetApp unified driver in cinder currently provides integration for
two major generations of the ONTAP operating system: the current
clustered ONTAP and the legacy 7-mode. NetApp's full support for
7-mode ended in August of 2015 and the current limited support period
will end in February of 2017.
The 7-mode components of the cinder NetApp unified driver have now been
marked deprecated and will be removed in the Queens release. This will
apply to all three protocols currently supported in this driver: iSCSI,
FC and NFS.
.. note::
With the Juno release of OpenStack, Block Storage has
@ -114,9 +101,8 @@ setting the ``volume_driver``, ``netapp_storage_family`` and
.. tip::
For more information on these options and other deployment and
operational scenarios, visit the `NetApp OpenStack Deployment and
Operations
Guide <http://netapp.github.io/openstack-deploy-ops-guide/>`__.
operational scenarios, visit the `NetApp OpenStack website
<http://netapp.io/openstack/>`_.
NetApp NFS configuration for clustered Data ONTAP
-------------------------------------------------
@ -237,8 +223,8 @@ To use this feature, you must configure the Block Storage service, as follows:
.. tip::
For more information on these options and other deployment and operational
scenarios, visit the `NetApp OpenStack Deployment and Operations Guide
<http://netapp.github.io/openstack-deploy-ops-guide/>`__.
scenarios, visit the `NetApp OpenStack website
<http://netapp.io/openstack/>`_.
NetApp-supported extra specs for clustered Data ONTAP
-----------------------------------------------------
@ -264,117 +250,6 @@ type set` command.
.. include:: ../../tables/manual/cinder-netapp_cdot_extraspecs.inc
NetApp Data ONTAP operating in 7-Mode storage family
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The NetApp Data ONTAP operating in 7-Mode storage family represents a
configuration group which provides Compute instances access to 7-Mode
storage systems. At present it can be configured in Block Storage to
work with iSCSI and NFS storage protocols.
NetApp iSCSI configuration for Data ONTAP operating in 7-Mode
-------------------------------------------------------------
The NetApp iSCSI configuration for Data ONTAP operating in 7-Mode is an
interface from OpenStack to Data ONTAP operating in 7-Mode storage systems for
provisioning and managing the SAN block storage entity, that is, a LUN which
can be accessed using iSCSI protocol.
The iSCSI configuration for Data ONTAP operating in 7-Mode is a direct
interface from OpenStack to Data ONTAP operating in 7-Mode storage system and
it does not require additional management software to achieve the desired
functionality. It uses NetApp ONTAPI to interact with the Data ONTAP operating
in 7-Mode storage system.
**Configuration options**
Configure the volume driver, storage family and storage protocol to the NetApp
unified driver, Data ONTAP operating in 7-Mode, and iSCSI respectively by
setting the ``volume_driver``, ``netapp_storage_family`` and
``netapp_storage_protocol`` options in the ``cinder.conf`` file as follows:
.. code-block:: ini
volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
netapp_storage_family = ontap_7mode
netapp_storage_protocol = iscsi
netapp_server_hostname = myhostname
netapp_server_port = 80
netapp_login = username
netapp_password = password
.. note::
To use the iSCSI protocol, you must override the default value of
``netapp_storage_protocol`` with ``iscsi``.
.. include:: ../../tables/cinder-netapp_7mode_iscsi.inc
.. note::
The driver supports iSCSI CHAP uni-directional authentication.
To enable it, set the ``use_chap_auth`` option to ``True``.
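For example, a minimal sketch of the backend stanza above with CHAP enabled;
the hostname and credential options shown earlier are omitted for brevity:
.. code-block:: ini
# Hypothetical fragment; only the CHAP line is new compared to the stanza above.
volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
netapp_storage_family = ontap_7mode
netapp_storage_protocol = iscsi
use_chap_auth = True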
.. tip::
For more information on these options and other deployment and
operational scenarios, visit the `NetApp OpenStack Deployment and
Operations
Guide <http://netapp.github.io/openstack-deploy-ops-guide/>`__.
NetApp NFS configuration for Data ONTAP operating in 7-Mode
-----------------------------------------------------------
The NetApp NFS configuration for Data ONTAP operating in 7-Mode is an interface
from OpenStack to the Data ONTAP operating in 7-Mode storage system for
provisioning and managing OpenStack volumes on NFS exports provided by that
storage system, which can then be accessed using the NFS protocol.
The NFS configuration for Data ONTAP operating in 7-Mode is a direct interface
from Block Storage to the Data ONTAP operating in 7-Mode instance and
as such does not require any additional management software to achieve the
desired functionality. It uses NetApp ONTAPI to interact with the Data ONTAP
operating in 7-Mode storage system.
.. important::
Support for 7-mode configuration has been deprecated in the Ocata release
and will be removed in the Queens release of OpenStack.
**Configuration options**
Configure the volume driver, storage family, and storage protocol to the NetApp
unified driver, Data ONTAP operating in 7-Mode, and NFS respectively by setting
the ``volume_driver``, ``netapp_storage_family`` and
``netapp_storage_protocol`` options in the ``cinder.conf`` file as follows:
.. code-block:: ini
volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
netapp_storage_family = ontap_7mode
netapp_storage_protocol = nfs
netapp_server_hostname = myhostname
netapp_server_port = 80
netapp_login = username
netapp_password = password
nfs_shares_config = /etc/cinder/nfs_shares
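The ``nfs_shares_config`` option points to a plain-text file listing the NFS
exports on which the driver provisions OpenStack volumes. A minimal sketch of
such a file, using hypothetical addresses and export paths:
.. code-block:: ini
# /etc/cinder/nfs_shares -- one export per line (hypothetical values).
192.168.1.100:/vol_cinder_1
192.168.1.100:/vol_cinder_2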
.. include:: ../../tables/cinder-netapp_7mode_nfs.inc
.. note::
Additional NetApp NFS configuration options are shared with the
generic NFS driver. For a description of these, see
:ref:`cinder-storage_nfs`.
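As a sketch of that overlap, two of the generic NFS driver options can be set
in the same backend stanza; the values here are placeholders, not
recommendations:
.. code-block:: ini
# Generic NFS driver options also honored by this configuration (placeholder values).
nfs_mount_point_base = /var/lib/cinder/mnt
nfs_mount_options = lookupcache=pos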
.. tip::
For more information on these options and other deployment and
operational scenarios, visit the `NetApp OpenStack Deployment and
Operations
Guide <http://netapp.github.io/openstack-deploy-ops-guide/>`__.
NetApp E-Series storage family
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -442,9 +317,8 @@ NetApp unified driver, E-Series, and iSCSI respectively by setting the
.. tip::
For more information on these options and other deployment and
operational scenarios, visit the `NetApp OpenStack Deployment and
Operations
Guide <http://netapp.github.io/openstack-deploy-ops-guide/>`__.
operational scenarios, visit the `NetApp OpenStack website
<http://netapp.io/openstack/>`_.
NetApp-supported extra specs for E-Series
-----------------------------------------
@ -476,115 +350,3 @@ type set` command.
- Boolean
- Limit the candidate volume list to only the ones that support thin
provisioning on the storage controller.
Upgrading prior NetApp drivers to the NetApp unified driver
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
NetApp introduced a new unified block storage driver in Havana for configuring
different storage families and storage protocols. This requires defining an
upgrade path for NetApp drivers that existed in releases prior to Havana. This
section covers how to upgrade the configuration of those NetApp drivers to the
new unified format and lists the deprecated NetApp drivers.
Upgraded NetApp drivers
-----------------------
This section describes how to update the Block Storage configuration from
a pre-Havana release to the unified driver format (a complete example backend
stanza is sketched after the list below).
- NetApp iSCSI direct driver for Clustered Data ONTAP in Grizzly (or earlier):
.. code-block:: ini
volume_driver = cinder.volume.drivers.netapp.iscsi.NetAppDirectCmodeISCSIDriver
NetApp unified driver configuration:
.. code-block:: ini
volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
netapp_storage_family = ontap_cluster
netapp_storage_protocol = iscsi
- NetApp NFS direct driver for Clustered Data ONTAP in Grizzly (or
earlier):
.. code-block:: ini
volume_driver = cinder.volume.drivers.netapp.nfs.NetAppDirectCmodeNfsDriver
NetApp unified driver configuration:
.. code-block:: ini
volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
netapp_storage_family = ontap_cluster
netapp_storage_protocol = nfs
- NetApp iSCSI direct driver for Data ONTAP operating in 7-Mode storage
controller in Grizzly (or earlier):
.. code-block:: ini
volume_driver = cinder.volume.drivers.netapp.iscsi.NetAppDirect7modeISCSIDriver
NetApp unified driver configuration:
.. code-block:: ini
volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
netapp_storage_family = ontap_7mode
netapp_storage_protocol = iscsi
- NetApp NFS direct driver for Data ONTAP operating in 7-Mode storage
controller in Grizzly (or earlier):
.. code-block:: ini
volume_driver = cinder.volume.drivers.netapp.nfs.NetAppDirect7modeNfsDriver
NetApp unified driver configuration:
.. code-block:: ini
volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
netapp_storage_family = ontap_7mode
netapp_storage_protocol = nfs
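For reference, a sketch of a complete unified-driver backend stanza for
clustered Data ONTAP after such an upgrade; the section name, hostname,
credentials, and ``netapp_vserver`` value are placeholders:
.. code-block:: ini
[ontap-cluster-nfs]
# Hypothetical backend section; all values below are placeholders.
volume_backend_name = ontap-cluster-nfs
volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
netapp_storage_family = ontap_cluster
netapp_storage_protocol = nfs
netapp_server_hostname = myhostname
netapp_server_port = 80
netapp_login = username
netapp_password = password
netapp_vserver = svm_cinder
nfs_shares_config = /etc/cinder/nfs_shares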
Deprecated NetApp drivers
-------------------------
This section lists the NetApp drivers from earlier releases that were
deprecated in Havana.
- NetApp iSCSI driver for clustered Data ONTAP:
.. code-block:: ini
volume_driver = cinder.volume.drivers.netapp.iscsi.NetAppCmodeISCSIDriver
- NetApp NFS driver for clustered Data ONTAP:
.. code-block:: ini
volume_driver = cinder.volume.drivers.netapp.nfs.NetAppCmodeNfsDriver
- NetApp iSCSI driver for Data ONTAP operating in 7-Mode storage
controller:
.. code-block:: ini
volume_driver = cinder.volume.drivers.netapp.iscsi.NetAppISCSIDriver
- NetApp NFS driver for Data ONTAP operating in 7-Mode storage
controller:
.. code-block:: ini
volume_driver = cinder.volume.drivers.netapp.nfs.NetAppNFSDriver
.. note::
For support information on deprecated NetApp drivers in the Havana
release, visit the `NetApp OpenStack Deployment and Operations
Guide <http://netapp.github.io/openstack-deploy-ops-guide/>`__.
View File
@ -1,46 +0,0 @@
..
Warning: Do not edit this file. It is automatically generated from the
software project's code and your changes will be overwritten.
The tool to generate this file lives in openstack-doc-tools repository.
Please make any changes needed in the code, then run the
autogenerate-config-doc tool from the openstack-doc-tools repository, or
ask for help on the documentation mailing list, IRC channel or meeting.
.. _cinder-netapp_7mode_iscsi:
.. list-table:: Description of NetApp 7-Mode iSCSI driver configuration options
:header-rows: 1
:class: config-ref-table
* - Configuration option = Default value
- Description
* - **[DEFAULT]**
-
* - ``netapp_login`` = ``None``
- (String) Administrative user account name used to access the storage system or proxy server.
* - ``netapp_partner_backend_name`` = ``None``
- (String) The name of the config.conf stanza for a Data ONTAP (7-mode) HA partner. This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode, and it is required if the storage protocol selected is FC.
* - ``netapp_password`` = ``None``
- (String) Password for the administrative user account specified in the netapp_login option.
* - ``netapp_pool_name_search_pattern`` = ``(.+)``
- (String) This option is used to restrict provisioning to the specified pools. Specify the value of this option to be a regular expression which will be applied to the names of objects from the storage backend which represent pools in Cinder. This option is only utilized when the storage protocol is configured to use iSCSI or FC.
* - ``netapp_replication_aggregate_map`` = ``None``
- (Unknown) Multi opt of dictionaries to represent the aggregate mapping between source and destination back ends when using whole back end replication. For every source aggregate associated with a cinder pool (NetApp FlexVol), you would need to specify the destination aggregate on the replication target device. A replication target device is configured with the configuration option replication_device. Specify this option as many times as you have replication devices. Each entry takes the standard dict config form: netapp_replication_aggregate_map = backend_id:<name_of_replication_device_section>,src_aggr_name1:dest_aggr_name1,src_aggr_name2:dest_aggr_name2,...
* - ``netapp_server_hostname`` = ``None``
- (String) The hostname (or IP address) for the storage system or proxy server.
* - ``netapp_server_port`` = ``None``
- (Integer) The TCP port to use for communication with the storage system or proxy server. If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS.
* - ``netapp_size_multiplier`` = ``1.2``
- (Floating point) The quantity to be multiplied by the requested volume size to ensure enough space is available on the virtual storage server (Vserver) to fulfill the volume creation request. Note: this option is deprecated and will be removed in favor of "reserved_percentage" in the Mitaka release.
* - ``netapp_snapmirror_quiesce_timeout`` = ``3600``
- (Integer) The maximum time in seconds to wait for existing SnapMirror transfers to complete before aborting during a failover.
* - ``netapp_storage_family`` = ``ontap_cluster``
- (String) The storage family type used on the storage system; valid values are ontap_7mode for using Data ONTAP operating in 7-Mode, ontap_cluster for using clustered Data ONTAP, or eseries for using E-Series.
* - ``netapp_storage_protocol`` = ``None``
- (String) The storage protocol to be used on the data path with the storage system.
* - ``netapp_transport_type`` = ``http``
- (String) The transport protocol used when communicating with the storage system or proxy server.
* - ``netapp_vfiler`` = ``None``
- (String) The vFiler unit on which provisioning of block storage volumes will be done. This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode. Only use this option when utilizing the MultiStore feature on the NetApp storage system.
View File
@ -1,50 +0,0 @@
..
Warning: Do not edit this file. It is automatically generated from the
software project's code and your changes will be overwritten.
The tool to generate this file lives in openstack-doc-tools repository.
Please make any changes needed in the code, then run the
autogenerate-config-doc tool from the openstack-doc-tools repository, or
ask for help on the documentation mailing list, IRC channel or meeting.
.. _cinder-netapp_7mode_nfs:
.. list-table:: Description of NetApp 7-Mode NFS driver configuration options
:header-rows: 1
:class: config-ref-table
* - Configuration option = Default value
- Description
* - **[DEFAULT]**
-
* - ``expiry_thres_minutes`` = ``720``
- (Integer) This option specifies the threshold for last access time for images in the NFS image cache. When a cache cleaning cycle begins, images in the cache that have not been accessed in the last M minutes, where M is the value of this parameter, will be deleted from the cache to create free space on the NFS share.
* - ``netapp_login`` = ``None``
- (String) Administrative user account name used to access the storage system or proxy server.
* - ``netapp_partner_backend_name`` = ``None``
- (String) The name of the config.conf stanza for a Data ONTAP (7-mode) HA partner. This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode, and it is required if the storage protocol selected is FC.
* - ``netapp_password`` = ``None``
- (String) Password for the administrative user account specified in the netapp_login option.
* - ``netapp_pool_name_search_pattern`` = ``(.+)``
- (String) This option is used to restrict provisioning to the specified pools. Specify the value of this option to be a regular expression which will be applied to the names of objects from the storage backend which represent pools in Cinder. This option is only utilized when the storage protocol is configured to use iSCSI or FC.
* - ``netapp_replication_aggregate_map`` = ``None``
- (Unknown) Multi opt of dictionaries to represent the aggregate mapping between source and destination back ends when using whole back end replication. For every source aggregate associated with a cinder pool (NetApp FlexVol), you would need to specify the destination aggregate on the replication target device. A replication target device is configured with the configuration option replication_device. Specify this option as many times as you have replication devices. Each entry takes the standard dict config form: netapp_replication_aggregate_map = backend_id:<name_of_replication_device_section>,src_aggr_name1:dest_aggr_name1,src_aggr_name2:dest_aggr_name2,...
* - ``netapp_server_hostname`` = ``None``
- (String) The hostname (or IP address) for the storage system or proxy server.
* - ``netapp_server_port`` = ``None``
- (Integer) The TCP port to use for communication with the storage system or proxy server. If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS.
* - ``netapp_snapmirror_quiesce_timeout`` = ``3600``
- (Integer) The maximum time in seconds to wait for existing SnapMirror transfers to complete before aborting during a failover.
* - ``netapp_storage_family`` = ``ontap_cluster``
- (String) The storage family type used on the storage system; valid values are ontap_7mode for using Data ONTAP operating in 7-Mode, ontap_cluster for using clustered Data ONTAP, or eseries for using E-Series.
* - ``netapp_storage_protocol`` = ``None``
- (String) The storage protocol to be used on the data path with the storage system.
* - ``netapp_transport_type`` = ``http``
- (String) The transport protocol used when communicating with the storage system or proxy server.
* - ``netapp_vfiler`` = ``None``
- (String) The vFiler unit on which provisioning of block storage volumes will be done. This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode. Only use this option when utilizing the MultiStore feature on the NetApp storage system.
* - ``thres_avl_size_perc_start`` = ``20``
- (Integer) If the percentage of available space for an NFS share has dropped below the value specified by this option, the NFS image cache will be cleaned.
* - ``thres_avl_size_perc_stop`` = ``60``
- (Integer) When the percentage of available space on an NFS share has reached the percentage specified by this option, the driver will stop clearing files from the NFS image cache that have not been accessed in the last M minutes, where M is the value of the expiry_thres_minutes configuration option.
View File
@ -24,8 +24,6 @@
- (String) This option defines the type of operating system that will access a LUN exported from Data ONTAP; it is assigned to the LUN at the time it is created.
* - ``netapp_lun_space_reservation`` = ``enabled``
- (String) This option determines if storage space is reserved for LUN allocation. If enabled, LUNs are thick provisioned. If space reservation is disabled, storage space is allocated on demand.
* - ``netapp_partner_backend_name`` = ``None``
- (String) The name of the config.conf stanza for a Data ONTAP (7-mode) HA partner. This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode, and it is required if the storage protocol selected is FC.
* - ``netapp_password`` = ``None``
- (String) Password for the administrative user account specified in the netapp_login option.
* - ``netapp_pool_name_search_pattern`` = ``(.+)``
@ -41,7 +39,7 @@
* - ``netapp_snapmirror_quiesce_timeout`` = ``3600``
- (Integer) The maximum time in seconds to wait for existing SnapMirror transfers to complete before aborting during a failover.
* - ``netapp_storage_family`` = ``ontap_cluster``
- (String) The storage family type used on the storage system; valid values are ontap_7mode for using Data ONTAP operating in 7-Mode, ontap_cluster for using clustered Data ONTAP, or eseries for using E-Series.
- (String) The storage family type used on the storage system; valid values are ontap_cluster for using clustered Data ONTAP, or eseries for using E-Series.
* - ``netapp_storage_protocol`` = ``None``
- (String) The storage protocol to be used on the data path with the storage system.
* - ``netapp_transport_type`` = ``http``
View File
@ -30,8 +30,6 @@
- (String) Administrative user account name used to access the storage system or proxy server.
* - ``netapp_lun_ostype`` = ``None``
- (String) This option defines the type of operating system that will access a LUN exported from Data ONTAP; it is assigned to the LUN at the time it is created.
* - ``netapp_partner_backend_name`` = ``None``
- (String) The name of the config.conf stanza for a Data ONTAP (7-mode) HA partner. This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode, and it is required if the storage protocol selected is FC.
* - ``netapp_password`` = ``None``
- (String) Password for the administrative user account specified in the netapp_login option.
* - ``netapp_pool_name_search_pattern`` = ``(.+)``
@ -45,7 +43,7 @@
* - ``netapp_snapmirror_quiesce_timeout`` = ``3600``
- (Integer) The maximum time in seconds to wait for existing SnapMirror transfers to complete before aborting during a failover.
* - ``netapp_storage_family`` = ``ontap_cluster``
- (String) The storage family type used on the storage system; valid values are ontap_7mode for using Data ONTAP operating in 7-Mode, ontap_cluster for using clustered Data ONTAP, or eseries for using E-Series.
- (String) The storage family type used on the storage system; valid values are ontap_cluster for using clustered Data ONTAP, or eseries for using E-Series.
* - ``netapp_storage_protocol`` = ``None``
- (String) The storage protocol to be used on the data path with the storage system.
* - ``netapp_transport_type`` = ``http``
View File
@ -26,8 +26,6 @@
- (String) This option defines the type of operating system for all initiators that can access a LUN. This information is used when mapping LUNs to individual hosts or groups of hosts.
* - ``netapp_login`` = ``None``
- (String) Administrative user account name used to access the storage system or proxy server.
* - ``netapp_partner_backend_name`` = ``None``
- (String) The name of the config.conf stanza for a Data ONTAP (7-mode) HA partner. This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode, and it is required if the storage protocol selected is FC.
* - ``netapp_password`` = ``None``
- (String) Password for the administrative user account specified in the netapp_login option.
* - ``netapp_pool_name_search_pattern`` = ``(.+)``
@ -43,7 +41,7 @@
* - ``netapp_snapmirror_quiesce_timeout`` = ``3600``
- (Integer) The maximum time in seconds to wait for existing SnapMirror transfers to complete before aborting during a failover.
* - ``netapp_storage_family`` = ``ontap_cluster``
- (String) The storage family type used on the storage system; valid values are ontap_7mode for using Data ONTAP operating in 7-Mode, ontap_cluster for using clustered Data ONTAP, or eseries for using E-Series.
- (String) The storage family type used on the storage system; valid values are ontap_cluster for using clustered Data ONTAP, or eseries for using E-Series.
* - ``netapp_transport_type`` = ``http``
- (String) The transport protocol used when communicating with the storage system or proxy server.
* - ``netapp_webservice_path`` = ``/devmgr/v2``
View File
@ -0,0 +1,10 @@
---
upgrade:
- Support for NetApp ONTAP 7 (previously known as "Data ONTAP operating in
7-Mode") has been removed. The NetApp Unified driver can now only be used
with NetApp Clustered Data ONTAP and NetApp E-Series storage systems.
This removal affects all three storage protocols that were supported
for ONTAP 7: iSCSI, NFS, and FC. Deployers are advised to consult the
`migration support <https://mysupport.netapp.com/info/web/ECMP1658253.html>`_
provided to transition from ONTAP 7 to the Clustered
Data ONTAP operating system.