NetApp: Replace SSC for cDOT block & file drivers

This commit continues the storage service catalog (SSC)
replacement epic.  The new SSC code is added here, the
iSCSI/FC and NFS drivers are updated to use the new
capabilities library, the old SSC module and tests are
removed entirely, and all new or modified code is 100%
covered by unit tests.

Partially implements: blueprint replace-netapp-cdot-ssc-module

Change-Id: Iba688ac670f3e3671a4115c40cf34431e59989b1
This commit is contained in:
Clinton Knight 2016-04-19 16:38:35 -04:00
parent a26117f6ae
commit a5746b62b5
19 changed files with 1921 additions and 2202 deletions

View File

@ -33,7 +33,6 @@ from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
from cinder.volume.drivers.netapp.dataontap.utils import capabilities
from cinder.volume.drivers.netapp import options
from cinder.volume.drivers.netapp import utils
@ -540,36 +539,12 @@ class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None, 'host': 'hostname@backend#vol1'}
vol1 = ssc_cmode.NetAppVolume('lun1', 'openstack')
vol1.state['vserver_root'] = False
vol1.state['status'] = 'online'
vol1.state['junction_active'] = True
vol1.space['size_avl_bytes'] = '4000000000'
vol1.space['size_total_bytes'] = '5000000000'
vol1.space['space-guarantee-enabled'] = False
vol1.space['space-guarantee'] = 'file'
vol1.space['thin_provisioned'] = True
vol1.mirror['mirrored'] = True
vol1.qos['qos_policy_group'] = None
vol1.aggr['name'] = 'aggr1'
vol1.aggr['junction'] = '/vola'
vol1.sis['dedup'] = True
vol1.sis['compression'] = True
vol1.aggr['raid_type'] = 'raiddp'
vol1.aggr['ha_policy'] = 'cfo'
vol1.aggr['disk_type'] = 'SSD'
ssc_map = {'mirrored': set([vol1]), 'dedup': set([vol1]),
'compression': set([vol1]),
'thin': set([vol1]), 'all': set([vol1])}
def setUp(self):
super(NetAppDirectCmodeISCSIDriverTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
self.stubs.Set(
ssc_cmode, 'refresh_cluster_ssc',
lambda a, b, c, synchronous: None)
self.mock_object(utils, 'OpenStackInfo')
self.mock_object(perf_7mode, 'Performance7modeLibrary')
self.mock_object(capabilities, 'CapabilitiesLibrary')
@ -582,7 +557,6 @@ class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
driver.do_setup(context='')
self.driver = driver
self.mock_object(self.driver.library.zapi_client, '_init_ssh_client')
self.driver.ssc_vols = self.ssc_map
def _set_config(self, configuration):
configuration.netapp_storage_protocol = 'iscsi'
@ -599,8 +573,8 @@ class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
self.driver.library.zapi_client.get_ontapi_version.return_value = \
(1, 20)
self.mock_object(block_cmode.NetAppBlockStorageCmodeLibrary,
'_get_filtered_pools',
mock.Mock(return_value=fakes.FAKE_CMODE_POOLS))
'_get_flexvol_to_pool_map',
mock.Mock(return_value=fakes.FAKE_CMODE_POOL_MAP))
self.driver.check_for_setup_error()
def test_do_setup_all_default(self):
@ -696,7 +670,7 @@ class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
self.mock_object(common.na_utils, 'get_iscsi_connection_properties',
mock.Mock(return_value=FAKE_CONN_PROPERTIES))
self.mock_object(client_cmode.Client,
'get_operational_network_interface_addresses',
'get_operational_lif_addresses',
mock.Mock(return_value=[]))
self.driver.create_volume(self.volume)
updates = self.driver.create_export(None, self.volume, {})
@ -728,7 +702,7 @@ class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
self.mock_object(client_cmode.Client, 'get_igroup_by_initiators',
mock.Mock(return_value=[FAKE_IGROUP_INFO]))
self.mock_object(client_cmode.Client,
'get_operational_network_interface_addresses',
'get_operational_lif_addresses',
mock.Mock(return_value=[]))
self.mock_object(client_cmode.Client, 'get_iscsi_target_details')
self.mock_object(client_cmode.Client, 'get_iscsi_service_details')

View File

@ -224,10 +224,14 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
@mock.patch.object(nfs_base.NetAppNfsDriver, 'check_for_setup_error')
def test_check_for_setup_error(self, mock_super_check_for_setup_error):
self._driver.zapi_client = mock.Mock()
self._driver._start_periodic_tasks = mock.Mock()
self._driver.check_for_setup_error()
(self._driver.ssc_library.check_api_permissions.
assert_called_once_with())
mock_super_check_for_setup_error.assert_called_once_with()
self._driver._start_periodic_tasks.assert_called_once_with()
def _prepare_clone_mock(self, status):
drv = self._driver
@ -239,7 +243,6 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
drv.zapi_client = mox.CreateMockAnything()
mox.StubOutWithMock(drv, '_get_host_ip')
mox.StubOutWithMock(drv, '_get_export_path')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
drv.zapi_client.get_if_info_by_ip('127.0.0.1').AndReturn(
self._prepare_info_by_ip_response())
@ -249,7 +252,6 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
'openstack')
drv._get_host_ip(mox_lib.IgnoreArg()).AndReturn('127.0.0.1')
drv._get_export_path(mox_lib.IgnoreArg()).AndReturn('/nfs')
drv._post_prov_deprov_in_ssc(mox_lib.IgnoreArg())
return mox
def _prepare_info_by_ip_response(self):
@ -1174,7 +1176,6 @@ class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase):
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
self._driver = netapp_nfs_cmode.NetAppCmodeNfsDriver(**kwargs)
self._driver.ssc_enabled = True
self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path'
self._driver.zapi_client = mock.Mock()
self.mock_object(netapp_nfs_cmode, 'LOG')
@ -1194,7 +1195,6 @@ class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase):
@mock.patch.object(utils, 'LOG', mock.Mock())
def test_create_volume(self):
drv = self._driver
drv.ssc_enabled = False
fake_extra_specs = {}
fake_share = 'localhost:myshare'
host = 'hostname@backend#' + fake_share
@ -1213,7 +1213,6 @@ class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase):
def test_create_volume_no_pool_specified(self):
drv = self._driver
drv.ssc_enabled = False
host = 'hostname@backend' # missing pool
with mock.patch.object(drv, '_ensure_shares_mounted'):
self.assertRaises(exception.InvalidHost,
@ -1221,7 +1220,6 @@ class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase):
def test_create_volume_with_legacy_qos_policy(self):
drv = self._driver
drv.ssc_enabled = False
fake_extra_specs = {'netapp:qos_policy_group': 'qos_policy_1'}
fake_share = 'localhost:myshare'
host = 'hostname@backend#' + fake_share
@ -1252,14 +1250,12 @@ class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase):
drv._copy_from_img_service = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value=volume.id)
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(self.context, volume, image_service, image_id)
drv._copy_from_img_service.assert_called_once_with(self.context,
volume,
image_service,
image_id)
drv._update_stale_vols.assert_called_once_with(volume.id)
def test_copy_img_to_vol_copyoffload_failure(self):
drv = self._driver
@ -1272,7 +1268,6 @@ class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase):
nfs_base.NetAppNfsDriver.copy_image_to_volume = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value=volume.id)
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(self.context, volume, image_service, image_id)
drv._copy_from_img_service.assert_called_once_with(self.context,
@ -1280,9 +1275,8 @@ class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase):
image_service,
image_id)
nfs_base.NetAppNfsDriver.copy_image_to_volume. \
assert_called_once_with(self.context, volume,
image_service, image_id)
drv._update_stale_vols.assert_called_once_with(volume.id)
assert_called_once_with(
self.context, volume, image_service, image_id)
def test_copy_img_to_vol_copyoffload_nonexistent_binary_path(self):
drv = self._driver
@ -1432,7 +1426,6 @@ class NetApp7modeNfsDriverTestCase(NetAppCmodeNfsDriverTestCase):
def test_create_volume_no_pool_specified(self):
drv = self._driver
drv.ssc_enabled = False
host = 'hostname@backend' # missing pool
with mock.patch.object(drv, '_ensure_shares_mounted'):
self.assertRaises(exception.InvalidHost,

View File

@ -1,728 +0,0 @@
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NetApp-specific ssc module."""
import copy
import ddt
from lxml import etree
import mock
from mox3 import mox
import six
from six.moves import BaseHTTPServer
from six.moves import http_client
from cinder import exception
from cinder import test
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
class FakeHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""HTTP handler that doesn't spam the log."""
def log_message(self, format, *args):
pass
class FakeHttplibSocket(object):
    """A fake socket implementation for http_client.HTTPResponse."""

    def __init__(self, value):
        # Incoming data is served from the read buffer; anything the
        # "peer" writes accumulates in the write buffer.
        self._rbuffer = six.StringIO(value)
        self._wbuffer = six.StringIO('')
        original_close = self._wbuffer.close

        def capture_and_close():
            # Snapshot everything written before the buffer is discarded,
            # so callers can inspect it via ``self.result`` after close().
            self.result = self._wbuffer.getvalue()
            original_close()

        self._wbuffer.close = capture_and_close

    def makefile(self, mode, _other):
        """Returns the socket's internal buffer"""
        if mode in ('r', 'rb'):
            return self._rbuffer
        if mode in ('w', 'wb'):
            return self._wbuffer
# Canned XML framing used to assemble fake ZAPI HTTP responses: the cDOT
# XML prologue, the opening <netapp> element, and its closing tag.
RESPONSE_PREFIX_DIRECT_CMODE = """<?xml version='1.0' encoding='UTF-8' ?>
<!DOCTYPE netapp SYSTEM 'file:/etc/netapp_gx.dtd'>"""

RESPONSE_PREFIX_DIRECT = """
<netapp version='1.15' xmlns='http://www.netapp.com/filer/admin'>"""

RESPONSE_SUFFIX_DIRECT = """</netapp>"""
class FakeDirectCMODEServerHandler(FakeHTTPRequestHandler):
    """HTTP handler that fakes enough stuff to allow the driver to run."""

    def do_GET(s):
        """Respond to a GET request."""
        # Only the ZAPI servlet path is served; anything else is a 404.
        if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path:
            s.send_response(404)
            # NOTE(review): missing '()' -- end_headers is referenced but
            # never actually called here; confirm whether this is intended.
            s.end_headers
            return
        s.send_response(200)
        s.send_header("Content-Type", "text/xml; charset=utf-8")
        s.end_headers()
        out = s.wfile
        # ZAPI is POST-only, so a GET is always answered with a failure body.
        out.write('<netapp version="1.15">'
                  '<results reason="Not supported method type"'
                  ' status="failed" errno="Not_Allowed"/></netapp>')

    def do_POST(s):
        """Respond to a POST request."""
        if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path:
            s.send_response(404)
            # NOTE(review): missing '()' here as well -- see do_GET.
            s.end_headers
            return
        request_xml = s.rfile.read(int(s.headers['Content-Length']))
        root = etree.fromstring(request_xml)
        body = [x for x in root.iterchildren()]
        request = body[0]
        tag = request.tag
        # Strip any XML namespace so the API name can be matched directly.
        localname = etree.QName(tag).localname or tag
        if 'volume-get-iter' == localname:
            # Four volumes: iscsi, nfsvol, nfsvol2 (inconsistent/invalid)
            # and nfsvol3 (no capacity attributes).
            body = """<results status="passed"><attributes-list>
            <volume-attributes>
            <volume-id-attributes>
            <name>iscsi</name>
            <owning-vserver-name>Openstack</owning-vserver-name>
            <containing-aggregate-name>aggr0
            </containing-aggregate-name>
            <junction-path>/iscsi</junction-path>
            <type>rw</type>
            </volume-id-attributes>
            <volume-space-attributes>
            <size-available>214748364</size-available>
            <size-total>224748364</size-total>
            <space-guarantee-enabled>enabled</space-guarantee-enabled>
            <space-guarantee>file</space-guarantee>
            </volume-space-attributes>
            <volume-state-attributes>
            <is-cluster-volume>true
            </is-cluster-volume>
            <is-vserver-root>false</is-vserver-root>
            <state>online</state>
            <is-inconsistent>false</is-inconsistent>
            <is-invalid>false</is-invalid>
            <is-junction-active>true</is-junction-active>
            </volume-state-attributes>
            </volume-attributes>
            <volume-attributes>
            <volume-id-attributes>
            <name>nfsvol</name>
            <owning-vserver-name>Openstack
            </owning-vserver-name>
            <containing-aggregate-name>aggr0
            </containing-aggregate-name>
            <junction-path>/nfs</junction-path>
            <type>rw</type>
            </volume-id-attributes>
            <volume-space-attributes>
            <size-available>14748364</size-available>
            <size-total>24748364</size-total>
            <space-guarantee-enabled>enabled
            </space-guarantee-enabled>
            <space-guarantee>volume</space-guarantee>
            </volume-space-attributes>
            <volume-state-attributes>
            <is-cluster-volume>true
            </is-cluster-volume>
            <is-vserver-root>false</is-vserver-root>
            <state>online</state>
            <is-inconsistent>false</is-inconsistent>
            <is-invalid>false</is-invalid>
            <is-junction-active>true</is-junction-active>
            </volume-state-attributes>
            </volume-attributes>
            <volume-attributes>
            <volume-id-attributes>
            <name>nfsvol2</name>
            <owning-vserver-name>Openstack
            </owning-vserver-name>
            <containing-aggregate-name>aggr0
            </containing-aggregate-name>
            <junction-path>/nfs2</junction-path>
            <type>rw</type>
            </volume-id-attributes>
            <volume-space-attributes>
            <size-available>14748364</size-available>
            <size-total>24748364</size-total>
            <space-guarantee-enabled>enabled
            </space-guarantee-enabled>
            <space-guarantee>volume</space-guarantee>
            </volume-space-attributes>
            <volume-state-attributes>
            <is-cluster-volume>true
            </is-cluster-volume>
            <is-vserver-root>false</is-vserver-root>
            <state>online</state>
            <is-inconsistent>true</is-inconsistent>
            <is-invalid>true</is-invalid>
            <is-junction-active>true</is-junction-active>
            </volume-state-attributes>
            </volume-attributes>
            <volume-attributes>
            <volume-id-attributes>
            <name>nfsvol3</name>
            <owning-vserver-name>Openstack
            </owning-vserver-name>
            <containing-aggregate-name>aggr0
            </containing-aggregate-name>
            <junction-path>/nfs3</junction-path>
            <type>rw</type>
            </volume-id-attributes>
            <volume-space-attributes>
            <space-guarantee-enabled>enabled
            </space-guarantee-enabled>
            <space-guarantee>volume
            </space-guarantee>
            </volume-space-attributes>
            <volume-state-attributes>
            <is-cluster-volume>true
            </is-cluster-volume>
            <is-vserver-root>false</is-vserver-root>
            <state>online</state>
            <is-inconsistent>false</is-inconsistent>
            <is-invalid>false</is-invalid>
            <is-junction-active>true</is-junction-active>
            </volume-state-attributes>
            </volume-attributes>
            </attributes-list>
            <num-records>4</num-records></results>"""
        elif 'aggr-options-list-info' == localname:
            body = """<results status="passed">
            <options>
            <aggr-option-info>
            <name>ha_policy</name>
            <value>cfo</value>
            </aggr-option-info>
            <aggr-option-info>
            <name>raidtype</name>
            <value>raid_dp</value>
            </aggr-option-info>
            </options>
            </results>"""
        elif 'sis-get-iter' == localname:
            body = """<results status="passed">
            <attributes-list>
            <sis-status-info>
            <path>/vol/iscsi</path>
            <is-compression-enabled>
            true
            </is-compression-enabled>
            <state>enabled</state>
            </sis-status-info>
            </attributes-list>
            </results>"""
        elif 'storage-disk-get-iter' == localname:
            body = """<results status="passed">
            <attributes-list>
            <storage-disk-info>
            <disk-raid-info>
            <effective-disk-type>SATA</effective-disk-type>
            </disk-raid-info>
            </storage-disk-info>
            </attributes-list>
            </results>"""
        else:
            # Unknown API
            s.send_response(500)
            # NOTE(review): missing '()' again -- end_headers not called.
            s.end_headers
            return
        s.send_response(200)
        s.send_header("Content-Type", "text/xml; charset=utf-8")
        s.end_headers()
        # Frame the canned body with the standard ZAPI prologue/epilogue.
        s.wfile.write(RESPONSE_PREFIX_DIRECT_CMODE)
        s.wfile.write(RESPONSE_PREFIX_DIRECT)
        s.wfile.write(body)
        s.wfile.write(RESPONSE_SUFFIX_DIRECT)
class FakeDirectCmodeHTTPConnection(object):
    """A fake http_client.HTTPConnection for netapp tests.

    Requests made via this connection actually get translated and routed
    into the fake direct handler above, we then turn the response into
    the http_client.HTTPResponse that the caller expects.
    """

    def __init__(self, host, timeout=None):
        self.host = host

    def request(self, method, path, data=None, headers=None):
        """Serialize the request, run it through the fake handler."""
        if not headers:
            headers = {}
        req_str = '%s %s HTTP/1.1\r\n' % (method, path)
        for key, value in headers.items():
            req_str += "%s: %s\r\n" % (key, value)
        if data:
            req_str += '\r\n%s' % data
        # NOTE(vish): normally the http transport normalizes from unicode.
        # NOTE(review): str.decode() exists only on Python 2; this line
        # would raise AttributeError on Python 3 -- confirm the intended
        # runtime before reusing this fake.
        sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8"))
        # NOTE(vish): stop the server from trying to look up address from
        # the fake socket
        FakeDirectCMODEServerHandler.address_string = lambda x: '127.0.0.1'
        self.app = FakeDirectCMODEServerHandler(sock, '127.0.0.1:80', None)
        # The handler wrote its reply into ``sock.result``; feed that back
        # through a second fake socket so HTTPResponse can parse it.
        self.sock = FakeHttplibSocket(sock.result)
        self.http_response = http_client.HTTPResponse(self.sock)

    def set_debuglevel(self, level):
        # No-op: debug output is irrelevant for the fake transport.
        pass

    def getresponse(self):
        """Parse and return the buffered fake response."""
        self.http_response.begin()
        return self.http_response

    def getresponsebody(self):
        """Return the raw (unparsed) response text."""
        return self.sock.result
def createNetAppVolume(**kwargs):
    """Build a NetAppVolume test fixture from keyword arguments.

    'name' and 'vs' are required; every other attribute is looked up with
    dict.get() and therefore defaults to None when its keyword is absent.
    """
    vol = ssc_cmode.NetAppVolume(kwargs['name'], kwargs['vs'])

    # (target attribute dict, key in that dict, source kwarg name)
    attribute_layout = (
        (vol.state, 'vserver_root', 'vs_root'),
        (vol.state, 'status', 'status'),
        (vol.state, 'junction_active', 'junc_active'),
        (vol.space, 'size_avl_bytes', 'avl_byt'),
        (vol.space, 'size_total_bytes', 'total_byt'),
        (vol.space, 'space-guarantee-enabled', 'sg_enabled'),
        (vol.space, 'space-guarantee', 'sg'),
        (vol.space, 'thin_provisioned', 'thin'),
        (vol.mirror, 'mirrored', 'mirrored'),
        (vol.qos, 'qos_policy_group', 'qos'),
        (vol.aggr, 'name', 'aggr_name'),
        (vol.aggr, 'junction', 'junction'),
        (vol.sis, 'dedup', 'dedup'),
        (vol.sis, 'compression', 'compression'),
        (vol.aggr, 'raid_type', 'raid'),
        (vol.aggr, 'ha_policy', 'ha'),
        (vol.aggr, 'disk_type', 'disk'),
    )
    for target, key, source in attribute_layout:
        target[key] = kwargs.get(source)
    return vol
@ddt.ddt
class SscUtilsTestCase(test.TestCase):
    """Test SSC utils."""

    # Five fixture volumes spanning the dedup/compression/thin/mirrored/
    # raid-type/disk-type combinations the SSC filters distinguish.
    vol1 = createNetAppVolume(name='vola', vs='openstack',
                              vs_root=False, status='online',
                              junc_active=True,
                              avl_byt='1000', total_byt='1500',
                              sg_enabled=False,
                              sg='file', thin=False, mirrored=False,
                              qos=None, aggr_name='aggr1', junction='/vola',
                              dedup=False, compression=False,
                              raid='raiddp', ha='cfo', disk='SSD')

    vol2 = createNetAppVolume(name='volb', vs='openstack',
                              vs_root=False, status='online',
                              junc_active=True,
                              avl_byt='2000', total_byt='2500',
                              sg_enabled=True,
                              sg='file', thin=True, mirrored=False,
                              qos=None, aggr_name='aggr2', junction='/volb',
                              dedup=True, compression=False,
                              raid='raid4', ha='cfo', disk='SSD')

    vol3 = createNetAppVolume(name='volc', vs='openstack',
                              vs_root=False, status='online',
                              junc_active=True,
                              avl_byt='3000', total_byt='3500',
                              sg_enabled=True,
                              sg='volume', thin=True, mirrored=False,
                              qos=None, aggr_name='aggr1', junction='/volc',
                              dedup=True, compression=True,
                              raid='raiddp', ha='cfo', disk='SAS')

    vol4 = createNetAppVolume(name='vold', vs='openstack',
                              vs_root=False, status='online',
                              junc_active=True,
                              avl_byt='4000', total_byt='4500',
                              sg_enabled=False,
                              sg='none', thin=False, mirrored=False,
                              qos=None, aggr_name='aggr1', junction='/vold',
                              dedup=False, compression=False,
                              raid='raiddp', ha='cfo', disk='SSD')

    vol5 = createNetAppVolume(name='vole', vs='openstack',
                              vs_root=False, status='online',
                              junc_active=True,
                              avl_byt='5000', total_byt='5500',
                              sg_enabled=True,
                              sg='none', thin=False, mirrored=True,
                              qos=None, aggr_name='aggr2', junction='/vole',
                              dedup=True, compression=False,
                              raid='raid4', ha='cfo', disk='SAS')

    test_vols = {vol1, vol2, vol3, vol4, vol5}

    # Expected capability buckets for the fixtures above.
    ssc_map = {
        'mirrored': {vol1},
        'dedup': {vol1, vol2, vol3},
        'compression': {vol3, vol4},
        'thin': {vol5, vol2},
        'all': test_vols
    }

    def setUp(self):
        super(SscUtilsTestCase, self).setUp()
        # All HTTP traffic in these tests goes through the fake connection.
        self.stubs.Set(http_client, 'HTTPConnection',
                       FakeDirectCmodeHTTPConnection)

    @ddt.data({'na_server_exists': False, 'volume': None},
              {'na_server_exists': True, 'volume': 'vol'},
              {'na_server_exists': True, 'volume': None})
    @ddt.unpack
    def test_query_cluster_vols_for_ssc(self, na_server_exists, volume):
        if na_server_exists:
            na_server = netapp_api.NaServer('127.0.0.1')
            fake_api_return = mock.Mock(return_value=[])
            self.mock_object(ssc_cmode.netapp_api, 'invoke_api',
                             new_attr=fake_api_return)
            ssc_cmode.query_cluster_vols_for_ssc(na_server, 'vserver',
                                                 volume)
        else:
            # Without a server the query is expected to blow up with
            # KeyError before the (error-raising) API is even reached.
            na_server = None
            fake_api_error = mock.Mock(side_effect=exception.InvalidInput)
            self.mock_object(ssc_cmode.netapp_api, 'invoke_api',
                             new_attr=fake_api_error)
            self.assertRaises(KeyError, ssc_cmode.query_cluster_vols_for_ssc,
                              na_server, 'vserver', volume)

    def test_cl_vols_ssc_all(self):
        """Test cluster ssc for all vols."""
        na_server = netapp_api.NaServer('127.0.0.1')
        vserver = 'openstack'
        test_vols = set([copy.deepcopy(self.vol1),
                         copy.deepcopy(self.vol2), copy.deepcopy(self.vol3)])
        # Note: 'volc' deliberately has no sis entry, so its dedup and
        # compression flags should come back disabled.
        sis = {'vola': {'dedup': False, 'compression': False},
               'volb': {'dedup': True, 'compression': False}}
        mirrored = {'vola': [{'dest_loc': 'openstack1:vol1',
                              'rel_type': 'data_protection',
                              'mirr_state': 'broken'},
                             {'dest_loc': 'openstack2:vol2',
                              'rel_type': 'data_protection',
                              'mirr_state': 'snapmirrored'}],
                    'volb': [{'dest_loc': 'openstack1:vol2',
                              'rel_type': 'data_protection',
                              'mirr_state': 'broken'}]}

        # mox record phase: the helper queries are stubbed in the exact
        # order get_cluster_vols_with_ssc is expected to invoke them.
        self.mox.StubOutWithMock(ssc_cmode, 'query_cluster_vols_for_ssc')
        self.mox.StubOutWithMock(ssc_cmode, 'get_sis_vol_dict')
        self.mox.StubOutWithMock(ssc_cmode, 'get_snapmirror_vol_dict')
        self.mox.StubOutWithMock(ssc_cmode, 'query_aggr_options')
        self.mox.StubOutWithMock(ssc_cmode, 'query_aggr_storage_disk')
        ssc_cmode.query_cluster_vols_for_ssc(
            na_server, vserver, None).AndReturn(test_vols)
        ssc_cmode.get_sis_vol_dict(na_server, vserver, None).AndReturn(sis)
        ssc_cmode.get_snapmirror_vol_dict(na_server, vserver, None).AndReturn(
            mirrored)
        raiddp = {'ha_policy': 'cfo', 'raid_type': 'raiddp'}
        ssc_cmode.query_aggr_options(
            na_server, mox.IgnoreArg()).AndReturn(raiddp)
        ssc_cmode.query_aggr_storage_disk(
            na_server, mox.IgnoreArg()).AndReturn('SSD')
        raid4 = {'ha_policy': 'cfo', 'raid_type': 'raid4'}
        ssc_cmode.query_aggr_options(
            na_server, mox.IgnoreArg()).AndReturn(raid4)
        ssc_cmode.query_aggr_storage_disk(
            na_server, mox.IgnoreArg()).AndReturn('SAS')
        self.mox.ReplayAll()

        res_vols = ssc_cmode.get_cluster_vols_with_ssc(
            na_server, vserver, volume=None)

        self.mox.VerifyAll()
        for vol in res_vols:
            if vol.id['name'] == 'volc':
                self.assertEqual(False, vol.sis['compression'])
                self.assertEqual(False, vol.sis['dedup'])
            else:
                # Other volumes' flags are driven directly by the stubbed
                # sis dict and need no extra assertion here.
                pass

    def test_cl_vols_ssc_single(self):
        """Test cluster ssc for single vol."""
        na_server = netapp_api.NaServer('127.0.0.1')
        vserver = 'openstack'
        test_vols = set([copy.deepcopy(self.vol1)])
        sis = {'vola': {'dedup': False, 'compression': False}}
        mirrored = {'vola': [{'dest_loc': 'openstack1:vol1',
                              'rel_type': 'data_protection',
                              'mirr_state': 'broken'},
                             {'dest_loc': 'openstack2:vol2',
                              'rel_type': 'data_protection',
                              'mirr_state': 'snapmirrored'}]}

        self.mox.StubOutWithMock(ssc_cmode, 'query_cluster_vols_for_ssc')
        self.mox.StubOutWithMock(ssc_cmode, 'get_sis_vol_dict')
        self.mox.StubOutWithMock(ssc_cmode, 'get_snapmirror_vol_dict')
        self.mox.StubOutWithMock(ssc_cmode, 'query_aggr_options')
        self.mox.StubOutWithMock(ssc_cmode, 'query_aggr_storage_disk')
        ssc_cmode.query_cluster_vols_for_ssc(
            na_server, vserver, 'vola').AndReturn(test_vols)
        ssc_cmode.get_sis_vol_dict(
            na_server, vserver, 'vola').AndReturn(sis)
        ssc_cmode.get_snapmirror_vol_dict(
            na_server, vserver, 'vola').AndReturn(mirrored)
        raiddp = {'ha_policy': 'cfo', 'raid_type': 'raiddp'}
        ssc_cmode.query_aggr_options(
            na_server, 'aggr1').AndReturn(raiddp)
        ssc_cmode.query_aggr_storage_disk(na_server, 'aggr1').AndReturn('SSD')
        self.mox.ReplayAll()

        res_vols = ssc_cmode.get_cluster_vols_with_ssc(
            na_server, vserver, volume='vola')

        self.mox.VerifyAll()
        self.assertEqual(1, len(res_vols))

    def test_get_cluster_ssc(self):
        """Test get cluster ssc map."""
        na_server = netapp_api.NaServer('127.0.0.1')
        vserver = 'openstack'
        test_vols = set(
            [self.vol1, self.vol2, self.vol3, self.vol4, self.vol5])

        self.mox.StubOutWithMock(ssc_cmode, 'get_cluster_vols_with_ssc')
        ssc_cmode.get_cluster_vols_with_ssc(
            na_server, vserver).AndReturn(test_vols)
        self.mox.ReplayAll()

        res_map = ssc_cmode.get_cluster_ssc(na_server, vserver)

        self.mox.VerifyAll()
        # Bucket sizes mirror the fixture flags set at class level.
        self.assertEqual(1, len(res_map['mirrored']))
        self.assertEqual(3, len(res_map['dedup']))
        self.assertEqual(1, len(res_map['compression']))
        self.assertEqual(2, len(res_map['thin']))
        self.assertEqual(5, len(res_map['all']))

    def test_vols_for_boolean_specs(self):
        """Test ssc for boolean specs."""
        test_vols = set(
            [self.vol1, self.vol2, self.vol3, self.vol4, self.vol5])
        ssc_map = {'mirrored': set([self.vol1]),
                   'dedup': set([self.vol1, self.vol2, self.vol3]),
                   'compression': set([self.vol3, self.vol4]),
                   'thin': set([self.vol5, self.vol2]), 'all': test_vols}
        test_map = {'mirrored': ('netapp_mirrored', 'netapp_unmirrored'),
                    'dedup': ('netapp_dedup', 'netapp_nodedup'),
                    'compression': ('netapp_compression',
                                    'netapp_nocompression'),
                    'thin': ('netapp_thin_provisioned',
                             'netapp_thick_provisioned')}
        # NOTE(review): 'type' shadows the builtin; harmless here but worth
        # renaming if this test is ever resurrected.
        for type in test_map.keys():
            # type
            extra_specs = {test_map[type][0]: 'true'}
            res = ssc_cmode.get_volumes_for_specs(ssc_map, extra_specs)
            self.assertEqual(len(ssc_map[type]), len(res))
            # opposite type
            extra_specs = {test_map[type][1]: 'true'}
            res = ssc_cmode.get_volumes_for_specs(ssc_map, extra_specs)
            self.assertEqual(len(ssc_map['all'] - ssc_map[type]), len(res))
            # both types
            extra_specs =\
                {test_map[type][0]: 'true', test_map[type][1]: 'true'}
            res = ssc_cmode.get_volumes_for_specs(ssc_map, extra_specs)
            self.assertEqual(len(ssc_map['all']), len(res))

    def test_vols_for_optional_specs(self):
        """Test ssc for optional specs."""
        extra_specs =\
            {'netapp_dedup': 'true',
             'netapp:raid_type': 'raid4', 'netapp:disk_type': 'SSD'}
        res = ssc_cmode.get_volumes_for_specs(self.ssc_map, extra_specs)
        # Only vol2 (dedup + raid4 + SSD) satisfies all three specs.
        self.assertEqual(1, len(res))

    def test_get_volumes_for_specs_none_specs(self):
        # None specs means "no filtering": every volume comes back.
        none_specs = None
        expected = self.ssc_map['all']

        result = ssc_cmode.get_volumes_for_specs(self.ssc_map, none_specs)

        self.assertEqual(expected, result)

    def test_get_volumes_for_specs_empty_dict(self):
        # An empty spec dict is likewise treated as "no filtering".
        empty_dict = {}
        expected = self.ssc_map['all']

        result = ssc_cmode.get_volumes_for_specs(
            self.ssc_map, empty_dict)

        self.assertEqual(expected, result)

    def test_get_volumes_for_specs_not_a_dict(self):
        # Non-dict specs are ignored rather than raising.
        not_a_dict = False
        expected = self.ssc_map['all']

        result = ssc_cmode.get_volumes_for_specs(
            self.ssc_map, not_a_dict)

        self.assertEqual(expected, result)

    def test_query_cl_vols_for_ssc(self):
        na_server = netapp_api.NaServer('127.0.0.1')
        # Four volumes in the reply; nfsvol2 is inconsistent/invalid and is
        # expected to be filtered out, leaving three results.
        body = etree.XML("""<results status="passed"><attributes-list>
        <volume-attributes>
        <volume-id-attributes>
        <name>iscsi</name>
        <owning-vserver-name>Openstack</owning-vserver-name>
        <containing-aggregate-name>aggr0
        </containing-aggregate-name>
        <junction-path>/iscsi</junction-path>
        <type>rw</type>
        </volume-id-attributes>
        <volume-space-attributes>
        <space-guarantee-enabled>enabled</space-guarantee-enabled>
        <space-guarantee>file</space-guarantee>
        </volume-space-attributes>
        <volume-state-attributes>
        <is-cluster-volume>true
        </is-cluster-volume>
        <is-vserver-root>false</is-vserver-root>
        <state>online</state>
        <is-inconsistent>false</is-inconsistent>
        <is-invalid>false</is-invalid>
        <is-junction-active>true</is-junction-active>
        </volume-state-attributes>
        </volume-attributes>
        <volume-attributes>
        <volume-id-attributes>
        <name>nfsvol</name>
        <owning-vserver-name>Openstack
        </owning-vserver-name>
        <containing-aggregate-name>aggr0
        </containing-aggregate-name>
        <junction-path>/nfs</junction-path>
        <type>rw</type>
        </volume-id-attributes>
        <volume-space-attributes>
        <space-guarantee-enabled>enabled
        </space-guarantee-enabled>
        <space-guarantee>volume</space-guarantee>
        </volume-space-attributes>
        <volume-state-attributes>
        <is-cluster-volume>true
        </is-cluster-volume>
        <is-vserver-root>false</is-vserver-root>
        <state>online</state>
        <is-inconsistent>false</is-inconsistent>
        <is-invalid>false</is-invalid>
        <is-junction-active>true</is-junction-active>
        </volume-state-attributes>
        </volume-attributes>
        <volume-attributes>
        <volume-id-attributes>
        <name>nfsvol2</name>
        <owning-vserver-name>Openstack
        </owning-vserver-name>
        <containing-aggregate-name>aggr0
        </containing-aggregate-name>
        <junction-path>/nfs2</junction-path>
        <type>rw</type>
        </volume-id-attributes>
        <volume-space-attributes>
        <space-guarantee-enabled>enabled
        </space-guarantee-enabled>
        <space-guarantee>volume</space-guarantee>
        </volume-space-attributes>
        <volume-state-attributes>
        <is-cluster-volume>true
        </is-cluster-volume>
        <is-vserver-root>false</is-vserver-root>
        <state>online</state>
        <is-inconsistent>true</is-inconsistent>
        <is-invalid>true</is-invalid>
        <is-junction-active>true</is-junction-active>
        </volume-state-attributes>
        </volume-attributes>
        <volume-attributes>
        <volume-id-attributes>
        <name>nfsvol3</name>
        <owning-vserver-name>Openstack
        </owning-vserver-name>
        <containing-aggregate-name>aggr0
        </containing-aggregate-name>
        <junction-path>/nfs3</junction-path>
        <type>rw</type>
        </volume-id-attributes>
        <volume-space-attributes>
        <space-guarantee-enabled>enabled
        </space-guarantee-enabled>
        <space-guarantee>volume
        </space-guarantee>
        </volume-space-attributes>
        <volume-state-attributes>
        <is-cluster-volume>true
        </is-cluster-volume>
        <is-vserver-root>false</is-vserver-root>
        <state>online</state>
        <is-inconsistent>false</is-inconsistent>
        <is-invalid>false</is-invalid>
        <is-junction-active>true</is-junction-active>
        </volume-state-attributes>
        </volume-attributes>
        </attributes-list>
        <num-records>4</num-records></results>""")

        self.mock_object(ssc_cmode.netapp_api, 'invoke_api', mock.Mock(
            return_value=[netapp_api.NaElement(body)]))

        vols = ssc_cmode.query_cluster_vols_for_ssc(na_server, 'Openstack')

        self.assertEqual(3, len(vols))
        for vol in vols:
            # NOTE(review): this condition (x != a or x != b) is always
            # True, so the else branch can never run -- the name check is
            # effectively a no-op. TODO confirm the intended assertion.
            if vol.id['name'] != 'iscsi' or vol.id['name'] != 'nfsvol':
                pass
            else:
                raise exception.InvalidVolume('Invalid volume returned.')

    def test_query_aggr_options(self):
        na_server = netapp_api.NaServer('127.0.0.1')
        body = etree.XML("""<results status="passed">
        <options>
        <aggr-option-info>
        <name>ha_policy</name>
        <value>cfo</value>
        </aggr-option-info>
        <aggr-option-info>
        <name>raidtype</name>
        <value>raid_dp</value>
        </aggr-option-info>
        </options>
        </results>""")

        self.mock_object(ssc_cmode.netapp_api, 'invoke_api', mock.Mock(
            return_value=[netapp_api.NaElement(body)]))

        aggr_attribs = ssc_cmode.query_aggr_options(na_server, 'aggr0')

        if aggr_attribs:
            self.assertEqual('cfo', aggr_attribs['ha_policy'])
            self.assertEqual('raid_dp', aggr_attribs['raid_type'])
        else:
            raise exception.InvalidParameterValue("Incorrect aggr options")

    def test_query_aggr_storage_disk(self):
        na_server = netapp_api.NaServer('127.0.0.1')
        body = etree.XML("""<results status="passed">
        <attributes-list>
        <storage-disk-info>
        <disk-raid-info>
        <effective-disk-type>SATA</effective-disk-type>
        </disk-raid-info>
        </storage-disk-info>
        </attributes-list>
        </results>""")

        self.mock_object(ssc_cmode.netapp_api, 'invoke_api',
                         mock.Mock(return_value=[netapp_api.NaElement(body)]))

        eff_disk_type = ssc_cmode.query_aggr_storage_disk(na_server, 'aggr0')

        self.assertEqual('SATA', eff_disk_type)

View File

@ -118,7 +118,13 @@ INVALID_GET_ITER_RESPONSE_NO_RECORDS = etree.XML("""
</results>
""")
GET_OPERATIONAL_NETWORK_INTERFACE_ADDRESSES_RESPONSE = etree.XML("""
INVALID_RESPONSE = etree.XML("""
<results status="passed">
<num-records>1</num-records>
</results>
""")
GET_OPERATIONAL_LIF_ADDRESSES_RESPONSE = etree.XML("""
<results status="passed">
<num-records>2</num-records>
<attributes-list>
@ -616,9 +622,39 @@ AGGR_GET_NODE_RESPONSE = etree.XML("""
'node': NODE_NAME,
})
AGGR_RAID_TYPE = 'raid_dp'
AGGR_GET_ITER_SSC_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<aggr-attributes>
<aggr-raid-attributes>
<plexes>
<plex-attributes>
<plex-name>/%(aggr)s/plex0</plex-name>
<raidgroups>
<raidgroup-attributes>
<raidgroup-name>/%(aggr)s/plex0/rg0</raidgroup-name>
</raidgroup-attributes>
</raidgroups>
</plex-attributes>
</plexes>
<raid-type>%(raid)s</raid-type>
</aggr-raid-attributes>
<aggregate-name>%(aggr)s</aggregate-name>
</aggr-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'aggr': VOLUME_AGGREGATE_NAME, 'raid': AGGR_RAID_TYPE})
AGGR_INFO_SSC = {
'name': VOLUME_AGGREGATE_NAME,
'raid-type': AGGR_RAID_TYPE,
}
# Capacity figures (bytes) echoed by the volume capacity response below.
VOLUME_SIZE_TOTAL = 19922944
VOLUME_SIZE_AVAILABLE = 19791872
VOLUME_GET_ITER_RESPONSE = etree.XML("""
VOLUME_GET_ITER_CAPACITY_RESPONSE = etree.XML("""
<results status="passed">
<num-records>1</num-records>
<attributes-list>
@ -635,6 +671,112 @@ VOLUME_GET_ITER_RESPONSE = etree.XML("""
'total_size': VOLUME_SIZE_TOTAL,
})
VOLUME_VSERVER_NAME = 'fake_vserver'
VOLUME_NAMES = ('volume1', 'volume2')
VOLUME_GET_ITER_LIST_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<name>%(volume1)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
<volume-attributes>
<volume-id-attributes>
<name>%(volume2)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
<num-records>2</num-records>
</results>
""" % {
'volume1': VOLUME_NAMES[0],
'volume2': VOLUME_NAMES[1],
'vserver': VOLUME_VSERVER_NAME,
})
VOLUME_GET_ITER_SSC_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<containing-aggregate-name>%(aggr)s</containing-aggregate-name>
<junction-path>/%(volume)s</junction-path>
<name>%(volume)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
</volume-id-attributes>
<volume-mirror-attributes>
<is-data-protection-mirror>false</is-data-protection-mirror>
<is-replica-volume>false</is-replica-volume>
</volume-mirror-attributes>
<volume-qos-attributes>
<policy-group-name>fake_qos_policy_group_name</policy-group-name>
</volume-qos-attributes>
<volume-space-attributes>
<is-space-guarantee-enabled>true</is-space-guarantee-enabled>
<space-guarantee>none</space-guarantee>
</volume-space-attributes>
</volume-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'aggr': VOLUME_AGGREGATE_NAMES[0],
'volume': VOLUME_NAMES[0],
'vserver': VOLUME_VSERVER_NAME,
})
VOLUME_INFO_SSC = {
'name': VOLUME_NAMES[0],
'vserver': VOLUME_VSERVER_NAME,
'junction-path': '/%s' % VOLUME_NAMES[0],
'aggregate': VOLUME_AGGREGATE_NAMES[0],
'space-guarantee-enabled': True,
'space-guarantee': 'none',
'qos-policy-group': 'fake_qos_policy_group_name',
}
SIS_GET_ITER_SSC_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<sis-status-info>
<is-compression-enabled>false</is-compression-enabled>
<state>enabled</state>
</sis-status-info>
</attributes-list>
<num-records>1</num-records>
</results>
""")
VOLUME_DEDUPE_INFO_SSC = {
'compression': False,
'dedupe': True,
}
SNAPMIRROR_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<snapmirror-info>
<destination-location>%(vserver)s:%(volume2)s</destination-location>
<destination-volume>%(volume2)s</destination-volume>
<destination-vserver>%(vserver)s</destination-vserver>
<source-location>%(vserver)s:%(volume1)s</source-location>
<source-volume>%(volume1)s</source-volume>
<source-vserver>%(vserver)s</source-vserver>
</snapmirror-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
'volume1': VOLUME_NAMES[0],
'volume2': VOLUME_NAMES[1],
'vserver': VOLUME_VSERVER_NAME,
})
STORAGE_DISK_GET_ITER_RESPONSE_PAGE_1 = etree.XML("""
<results status="passed">
<attributes-list>
@ -745,7 +887,7 @@ STORAGE_DISK_GET_ITER_RESPONSE_PAGE_3 = etree.XML("""
</results>
""")
AGGREGATE_DISK_TYPE = 'FCAL'
AGGR_DISK_TYPE = 'FCAL'
STORAGE_DISK_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
@ -758,7 +900,7 @@ STORAGE_DISK_GET_ITER_RESPONSE = etree.XML("""
</attributes-list>
<num-records>1</num-records>
</results>
""" % AGGREGATE_DISK_TYPE)
""" % AGGR_DISK_TYPE)
SYSTEM_USER_CAPABILITY_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">

View File

@ -1173,14 +1173,30 @@ class NetAppCmodeClientTestCase(test.TestCase):
self.client._check_cluster_api_legacy,
'fake_api')
def test_get_operational_network_interface_addresses(self):
def test_get_operational_lif_addresses(self):
expected_result = ['1.2.3.4', '99.98.97.96']
api_response = netapp_api.NaElement(
fake_client.GET_OPERATIONAL_NETWORK_INTERFACE_ADDRESSES_RESPONSE)
self.mock_send_request.return_value = api_response
fake_client.GET_OPERATIONAL_LIF_ADDRESSES_RESPONSE)
self.mock_object(self.client,
'send_iter_request',
mock.Mock(return_value=api_response))
address_list = (
self.client.get_operational_network_interface_addresses())
address_list = self.client.get_operational_lif_addresses()
net_interface_get_iter_args = {
'query': {
'net-interface-info': {
'operational-status': 'up'
}
},
'desired-attributes': {
'net-interface-info': {
'address': None,
}
}
}
self.client.send_iter_request.assert_called_once_with(
'net-interface-get-iter', net_interface_get_iter_args)
self.assertEqual(expected_result, address_list)
@ -1190,8 +1206,10 @@ class NetAppCmodeClientTestCase(test.TestCase):
def test_get_flexvol_capacity(self, kwargs):
api_response = netapp_api.NaElement(
fake_client.VOLUME_GET_ITER_RESPONSE)
self.mock_send_request.return_value = api_response
fake_client.VOLUME_GET_ITER_CAPACITY_RESPONSE)
mock_send_iter_request = self.mock_object(
self.client, 'send_iter_request',
mock.Mock(return_value=api_response))
capacity = self.client.get_flexvol_capacity(**kwargs)
@ -1216,7 +1234,7 @@ class NetAppCmodeClientTestCase(test.TestCase):
}
},
}
self.mock_send_request.assert_called_once_with(
mock_send_iter_request.assert_called_once_with(
'volume-get-iter', volume_get_iter_args)
self.assertEqual(fake_client.VOLUME_SIZE_TOTAL, capacity['size-total'])
@ -1232,6 +1250,228 @@ class NetAppCmodeClientTestCase(test.TestCase):
self.client.get_flexvol_capacity,
flexvol_path='fake_path')
def test_list_flexvols(self):
api_response = netapp_api.NaElement(
fake_client.VOLUME_GET_ITER_LIST_RESPONSE)
self.mock_object(self.client,
'send_iter_request',
mock.Mock(return_value=api_response))
result = self.client.list_flexvols()
volume_get_iter_args = {
'query': {
'volume-attributes': {
'volume-id-attributes': {
'type': 'rw',
'style': 'flex',
},
'volume-state-attributes': {
'is-vserver-root': 'false',
'is-inconsistent': 'false',
'is-invalid': 'false',
'state': 'online',
},
},
},
'desired-attributes': {
'volume-attributes': {
'volume-id-attributes': {
'name': None,
},
},
},
}
self.client.send_iter_request.assert_called_once_with(
'volume-get-iter', volume_get_iter_args)
self.assertEqual(list(fake_client.VOLUME_NAMES), result)
def test_list_flexvols_not_found(self):
api_response = netapp_api.NaElement(
fake_client.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_iter_request',
mock.Mock(return_value=api_response))
result = self.client.list_flexvols()
self.assertEqual([], result)
def test_get_flexvol(self):
api_response = netapp_api.NaElement(
fake_client.VOLUME_GET_ITER_SSC_RESPONSE)
self.mock_object(self.client,
'send_iter_request',
mock.Mock(return_value=api_response))
result = self.client.get_flexvol(
flexvol_name=fake_client.VOLUME_NAMES[0],
flexvol_path='/%s' % fake_client.VOLUME_NAMES[0])
volume_get_iter_args = {
'query': {
'volume-attributes': {
'volume-id-attributes': {
'name': fake_client.VOLUME_NAMES[0],
'junction-path': '/' + fake_client.VOLUME_NAMES[0],
'type': 'rw',
'style': 'flex',
},
'volume-state-attributes': {
'is-vserver-root': 'false',
'is-inconsistent': 'false',
'is-invalid': 'false',
'state': 'online',
},
},
},
'desired-attributes': {
'volume-attributes': {
'volume-id-attributes': {
'name': None,
'owning-vserver-name': None,
'junction-path': None,
'containing-aggregate-name': None,
},
'volume-mirror-attributes': {
'is-data-protection-mirror': None,
'is-replica-volume': None,
},
'volume-space-attributes': {
'is-space-guarantee-enabled': None,
'space-guarantee': None,
},
'volume-qos-attributes': {
'policy-group-name': None,
}
},
},
}
self.client.send_iter_request.assert_called_once_with(
'volume-get-iter', volume_get_iter_args)
self.assertEqual(fake_client.VOLUME_INFO_SSC, result)
def test_get_flexvol_not_found(self):
api_response = netapp_api.NaElement(
fake_client.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_iter_request',
mock.Mock(return_value=api_response))
self.assertRaises(exception.VolumeBackendAPIException,
self.client.get_flexvol,
flexvol_name=fake_client.VOLUME_NAMES[0])
def test_get_flexvol_dedupe_info(self):
api_response = netapp_api.NaElement(
fake_client.SIS_GET_ITER_SSC_RESPONSE)
self.mock_object(self.client,
'send_iter_request',
mock.Mock(return_value=api_response))
result = self.client.get_flexvol_dedupe_info(
fake_client.VOLUME_NAMES[0])
sis_get_iter_args = {
'query': {
'sis-status-info': {
'path': '/vol/%s' % fake_client.VOLUME_NAMES[0],
},
},
'desired-attributes': {
'sis-status-info': {
'state': None,
'is-compression-enabled': None,
},
},
}
self.client.send_iter_request.assert_called_once_with(
'sis-get-iter', sis_get_iter_args)
self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC, result)
def test_get_flexvol_dedupe_info_not_found(self):
api_response = netapp_api.NaElement(
fake_client.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_iter_request',
mock.Mock(return_value=api_response))
result = self.client.get_flexvol_dedupe_info(
fake_client.VOLUME_NAMES[0])
expected = {'compression': False, 'dedupe': False}
self.assertEqual(expected, result)
def test_get_flexvol_dedupe_info_api_error(self):
self.mock_object(self.client,
'send_iter_request',
mock.Mock(side_effect=self._mock_api_error()))
result = self.client.get_flexvol_dedupe_info(
fake_client.VOLUME_NAMES[0])
expected = {'compression': False, 'dedupe': False}
self.assertEqual(expected, result)
def test_is_flexvol_mirrored(self):
api_response = netapp_api.NaElement(
fake_client.SNAPMIRROR_GET_ITER_RESPONSE)
self.mock_object(self.client,
'send_iter_request',
mock.Mock(return_value=api_response))
result = self.client.is_flexvol_mirrored(
fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME)
snapmirror_get_iter_args = {
'query': {
'snapmirror-info': {
'source-vserver': fake_client.VOLUME_VSERVER_NAME,
'source-volume': fake_client.VOLUME_NAMES[0],
'mirror-state': 'snapmirrored',
'relationship-type': 'data_protection',
},
},
'desired-attributes': {
'snapmirror-info': None,
},
}
self.client.send_iter_request.assert_called_once_with(
'snapmirror-get-iter', snapmirror_get_iter_args)
self.assertTrue(result)
def test_is_flexvol_mirrored_not_mirrored(self):
api_response = netapp_api.NaElement(
fake_client.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.is_flexvol_mirrored(
fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME)
self.assertFalse(result)
def test_is_flexvol_mirrored_api_error(self):
self.mock_object(self.client,
'send_request',
mock.Mock(side_effect=self._mock_api_error()))
result = self.client.is_flexvol_mirrored(
fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME)
self.assertFalse(result)
def test_get_aggregates(self):
api_response = netapp_api.NaElement(
@ -1368,6 +1608,123 @@ class NetAppCmodeClientTestCase(test.TestCase):
self.assertIsNone(result)
def test_get_aggregate_none_specified(self):
result = self.client.get_aggregate('')
self.assertEqual({}, result)
def test_get_aggregate(self):
api_response = netapp_api.NaElement(
fake_client.AGGR_GET_ITER_SSC_RESPONSE).get_child_by_name(
'attributes-list').get_children()
self.mock_object(self.client,
'_get_aggregates',
mock.Mock(return_value=api_response))
result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME)
desired_attributes = {
'aggr-attributes': {
'aggregate-name': None,
'aggr-raid-attributes': {
'raid-type': None,
},
},
}
self.client._get_aggregates.assert_has_calls([
mock.call(
aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME],
desired_attributes=desired_attributes)])
expected = {
'name': fake_client.VOLUME_AGGREGATE_NAME,
'raid-type': 'raid_dp',
}
self.assertEqual(expected, result)
def test_get_aggregate_not_found(self):
api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME)
self.assertEqual({}, result)
def test_get_aggregate_api_error(self):
self.mock_object(self.client,
'send_request',
mock.Mock(side_effect=self._mock_api_error()))
result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME)
self.assertEqual({}, result)
def test_get_aggregate_disk_type(self):
api_response = netapp_api.NaElement(
fake_client.STORAGE_DISK_GET_ITER_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_aggregate_disk_type(
fake_client.VOLUME_AGGREGATE_NAME)
storage_disk_get_iter_args = {
'max-records': 1,
'query': {
'storage-disk-info': {
'disk-raid-info': {
'disk-aggregate-info': {
'aggregate-name':
fake_client.VOLUME_AGGREGATE_NAME,
},
},
},
},
'desired-attributes': {
'storage-disk-info': {
'disk-raid-info': {
'effective-disk-type': None,
},
},
},
}
self.client.send_request.assert_called_once_with(
'storage-disk-get-iter', storage_disk_get_iter_args,
enable_tunneling=False)
self.assertEqual(fake_client.AGGR_DISK_TYPE, result)
@ddt.data(fake_client.NO_RECORDS_RESPONSE, fake_client.INVALID_RESPONSE)
def test_get_aggregate_disk_type_not_found(self, response):
api_response = netapp_api.NaElement(response)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_aggregate_disk_type(
fake_client.VOLUME_AGGREGATE_NAME)
self.assertEqual('unknown', result)
def test_get_aggregate_disk_type_api_error(self):
self.mock_object(self.client,
'send_request',
mock.Mock(side_effect=self._mock_api_error()))
result = self.client.get_aggregate_disk_type(
fake_client.VOLUME_AGGREGATE_NAME)
self.assertEqual('unknown', result)
def test_get_performance_instance_uuids(self):
self.mock_send_request.return_value = netapp_api.NaElement(

View File

@ -17,7 +17,6 @@
from lxml import etree
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
VOLUME_ID = 'f10d1a84-9b7b-427e-8fec-63c48b509a56'
@ -224,38 +223,17 @@ SNAPSHOT = {
VOLUME_REF = {'name': 'fake_vref_name', 'size': 42}
FAKE_CMODE_POOLS = [
{
'QoS_support': True,
'consistencygroup_support': True,
'free_capacity_gb': 3.72,
'netapp_compression': u'true',
'netapp_dedup': u'true',
'netapp_disk_type': 'SSD',
'netapp_mirrored': u'true',
'netapp_nocompression': u'false',
'netapp_nodedup': u'false',
'netapp_raid_type': 'raiddp',
'netapp_thick_provisioned': u'false',
'netapp_thin_provisioned': u'true',
'netapp_unmirrored': u'false',
FAKE_CMODE_VOLUMES = ['open123', 'mixed', 'open321']
FAKE_CMODE_POOL_MAP = {
'open123': {
'pool_name': 'open123',
'reserved_percentage': 0,
'total_capacity_gb': 4.65,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'provisioned_capacity_gb': 0.93,
'max_over_subscription_ratio': 20.0,
'utilization': 30.0,
'filter_function': 'filter',
'goodness_function': 'goodness',
}
]
FAKE_CMODE_VOLUME = {
'all': [ssc_cmode.NetAppVolume(name='open123', vserver='vs'),
ssc_cmode.NetAppVolume(name='mixed', vserver='vs'),
ssc_cmode.NetAppVolume(name='open321', vserver='vs')],
},
'mixed': {
'pool_name': 'mixed',
},
'open321': {
'pool_name': 'open321',
},
}
FAKE_7MODE_VOLUME = {
@ -275,31 +253,6 @@ FAKE_7MODE_VOLUME = {
],
}
FAKE_CMODE_VOL1 = ssc_cmode.NetAppVolume(name='open123', vserver='openstack')
FAKE_CMODE_VOL1.state['vserver_root'] = False
FAKE_CMODE_VOL1.state['status'] = 'online'
FAKE_CMODE_VOL1.state['junction_active'] = True
FAKE_CMODE_VOL1.space['space-guarantee-enabled'] = False
FAKE_CMODE_VOL1.space['space-guarantee'] = 'file'
FAKE_CMODE_VOL1.space['thin_provisioned'] = True
FAKE_CMODE_VOL1.mirror['mirrored'] = True
FAKE_CMODE_VOL1.qos['qos_policy_group'] = None
FAKE_CMODE_VOL1.aggr['name'] = 'aggr1'
FAKE_CMODE_VOL1.aggr['junction'] = '/vola'
FAKE_CMODE_VOL1.sis['dedup'] = True
FAKE_CMODE_VOL1.sis['compression'] = True
FAKE_CMODE_VOL1.aggr['raid_type'] = 'raiddp'
FAKE_CMODE_VOL1.aggr['ha_policy'] = 'cfo'
FAKE_CMODE_VOL1.aggr['disk_type'] = 'SSD'
ssc_map = {
'mirrored': [FAKE_CMODE_VOL1],
'dedup': [FAKE_CMODE_VOL1],
'compression': [FAKE_CMODE_VOL1],
'thin': [FAKE_CMODE_VOL1],
'all': [FAKE_CMODE_VOL1],
}
FILE_LIST = ['file1', 'file2', 'file3']
FAKE_LUN = netapp_api.NaElement.create_node_with_children(

View File

@ -43,20 +43,17 @@ class PerformanceCmodeLibraryTestCase(test.TestCase):
def _set_up_fake_pools(self):
class test_volume(object):
self.id = None
self.aggr = None
volume1 = test_volume()
volume1.id = {'name': 'pool1'}
volume1.aggr = {'name': 'aggr1'}
volume2 = test_volume()
volume2.id = {'name': 'pool2'}
volume2.aggr = {'name': 'aggr2'}
volume3 = test_volume()
volume3.id = {'name': 'pool3'}
volume3.aggr = {'name': 'aggr2'}
self.fake_volumes = [volume1, volume2, volume3]
self.fake_volumes = {
'pool1': {
'aggregate': 'aggr1',
},
'pool2': {
'aggregate': 'aggr2',
},
'pool3': {
'aggregate': 'aggr2',
},
}
self.fake_aggrs = set(['aggr1', 'aggr2', 'aggr3'])
self.fake_nodes = set(['node1', 'node2'])
@ -336,18 +333,7 @@ class PerformanceCmodeLibraryTestCase(test.TestCase):
def test_get_aggregates_for_pools(self):
class test_volume(object):
self.aggr = None
volume1 = test_volume()
volume1.aggr = {'name': 'aggr1'}
volume2 = test_volume()
volume2.aggr = {'name': 'aggr2'}
volume3 = test_volume()
volume3.aggr = {'name': 'aggr2'}
volumes = [volume1, volume2, volume3]
result = self.perf_library._get_aggregates_for_pools(volumes)
result = self.perf_library._get_aggregates_for_pools(self.fake_volumes)
expected_aggregate_names = set(['aggr1', 'aggr2'])
self.assertEqual(expected_aggregate_names, result)

View File

@ -31,7 +31,6 @@ from cinder.volume.drivers.netapp.dataontap import block_cmode
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
from cinder.volume.drivers.netapp import utils as na_utils
@ -51,7 +50,6 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.library.perf_library = mock.Mock()
self.library.ssc_library = mock.Mock()
self.library.vserver = mock.Mock()
self.library.ssc_vols = None
self.fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_NAME,
fake.SIZE, None)
self.fake_snapshot_lun = block_base.NetAppLun(
@ -98,15 +96,16 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.library.ssc_library, 'check_api_permissions')
mock_start_periodic_tasks = self.mock_object(
self.library, '_start_periodic_tasks')
self.mock_object(ssc_cmode, 'refresh_cluster_ssc')
self.mock_object(self.library, '_get_filtered_pools',
mock.Mock(return_value=fake.FAKE_CMODE_POOLS))
mock_get_pool_map = self.mock_object(
self.library, '_get_flexvol_to_pool_map',
mock.Mock(return_value={'fake_map': None}))
self.library.check_for_setup_error()
self.assertEqual(1, super_check_for_setup_error.call_count)
mock_check_api_permissions.assert_called_once_with()
self.assertEqual(1, mock_start_periodic_tasks.call_count)
mock_get_pool_map.assert_called_once_with()
def test_check_for_setup_error_no_filtered_pools(self):
self.mock_object(block_base.NetAppBlockStorageLibrary,
@ -114,9 +113,9 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
mock_check_api_permissions = self.mock_object(
self.library.ssc_library, 'check_api_permissions')
self.mock_object(self.library, '_start_periodic_tasks')
self.mock_object(ssc_cmode, 'refresh_cluster_ssc')
self.mock_object(self.library, '_get_filtered_pools',
mock.Mock(return_value=[]))
self.mock_object(
self.library, '_get_flexvol_to_pool_map',
mock.Mock(return_value={}))
self.assertRaises(exception.NetAppDriverException,
self.library.check_for_setup_error)
@ -262,7 +261,6 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.assertSetEqual(set(ports), set(result))
@mock.patch.object(ssc_cmode, 'refresh_cluster_ssc', mock.Mock())
@mock.patch.object(block_cmode.NetAppBlockStorageCmodeLibrary,
'_get_pool_stats', mock.Mock())
def test_vol_stats_calls_provide_ems(self):
@ -273,22 +271,19 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.assertEqual(1, self.library.zapi_client.provide_ems.call_count)
def test_create_lun(self):
self.library._update_stale_vols = mock.Mock()
self.library._create_lun(fake.VOLUME_ID, fake.LUN_ID,
fake.LUN_SIZE, fake.LUN_METADATA)
self.library._create_lun(
fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA)
self.library.zapi_client.create_lun.assert_called_once_with(
fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA,
None)
self.assertEqual(1, self.library._update_stale_vols.call_count)
def test_get_preferred_target_from_list(self):
target_details_list = fake.ISCSI_TARGET_DETAILS_LIST
operational_addresses = [
target['address']
for target in target_details_list[2:]]
self.zapi_client.get_operational_network_interface_addresses = (
self.zapi_client.get_operational_lif_addresses = (
mock.Mock(return_value=operational_addresses))
result = self.library._get_preferred_target_from_list(
@ -296,59 +291,28 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.assertEqual(target_details_list[2], result)
def test_get_pool_stats_no_volumes(self):
def test_get_pool_stats(self):
self.library.ssc_vols = []
result = self.library._get_pool_stats()
self.assertListEqual([], result)
@ddt.data({'thin': True, 'netapp_lun_space_reservation': 'enabled'},
{'thin': True, 'netapp_lun_space_reservation': 'disabled'},
{'thin': False, 'netapp_lun_space_reservation': 'enabled'},
{'thin': False, 'netapp_lun_space_reservation': 'disabled'})
@ddt.unpack
def test_get_pool_stats(self, thin, netapp_lun_space_reservation):
class test_volume(object):
self.id = None
self.aggr = None
test_volume = test_volume()
test_volume.id = {'vserver': 'openstack', 'name': 'vola'}
test_volume.aggr = {
'disk_type': 'SSD',
'ha_policy': 'cfo',
'junction': '/vola',
'name': 'aggr1',
'raid_type': 'raiddp'
ssc = {
'vola': {
'pool_name': 'vola',
'thick_provisioning_support': True,
'thin_provisioning_support': False,
'netapp_thin_provisioned': 'false',
'netapp_compression': 'false',
'netapp_mirrored': 'false',
'netapp_dedup': 'true',
'aggregate': 'aggr1',
'netapp_raid_type': 'raid_dp',
'netapp_disk_type': 'SSD',
},
}
test_volume.space = {
'space-guarantee': 'file',
'space-guarantee-enabled': False,
'thin_provisioned': False
}
test_volume.sis = {'dedup': False, 'compression': False}
test_volume.state = {
'status': 'online',
'vserver_root': False,
'junction_active': True
}
test_volume.qos = {'qos_policy_group': None}
mock_get_ssc = self.mock_object(self.library.ssc_library,
'get_ssc',
mock.Mock(return_value=ssc))
ssc_map = {
'mirrored': {},
'dedup': {},
'compression': {},
'thin': {test_volume if thin else None},
'all': [test_volume]
}
self.library.ssc_vols = ssc_map
self.library.reserved_percentage = 5
self.library.max_over_subscription_ratio = 10
self.library.configuration.netapp_lun_space_reservation = (
netapp_lun_space_reservation)
self.library.perf_library.get_node_utilization_for_pool = (
mock.Mock(return_value=30.0))
mock_capacities = {
@ -359,44 +323,118 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.zapi_client, 'get_flexvol_capacity',
mock.Mock(return_value=mock_capacities))
netapp_thin = 'true' if thin else 'false'
netapp_thick = 'false' if thin else 'true'
thick = not thin and (netapp_lun_space_reservation == 'enabled')
result = self.library._get_pool_stats(filter_function='filter',
goodness_function='goodness')
expected = [{'pool_name': 'vola',
'consistencygroup_support': True,
'netapp_unmirrored': 'true',
'QoS_support': True,
'thin_provisioning_support': not thick,
'thick_provisioning_support': thick,
'provisioned_capacity_gb': 8.0,
'netapp_thick_provisioned': netapp_thick,
'netapp_nocompression': 'true',
'free_capacity_gb': 2.0,
'netapp_thin_provisioned': netapp_thin,
'total_capacity_gb': 10.0,
'netapp_compression': 'false',
'netapp_mirrored': 'false',
'netapp_dedup': 'false',
'reserved_percentage': 5,
'max_over_subscription_ratio': 10.0,
'netapp_raid_type': 'raiddp',
'netapp_disk_type': 'SSD',
'netapp_nodedup': 'true',
'utilization': 30.0,
'filter_function': 'filter',
'goodness_function': 'goodness'}]
expected = [{
'pool_name': 'vola',
'QoS_support': True,
'consistencygroup_support': True,
'reserved_percentage': 5,
'max_over_subscription_ratio': 10.0,
'total_capacity_gb': 10.0,
'free_capacity_gb': 2.0,
'provisioned_capacity_gb': 8.0,
'utilization': 30.0,
'filter_function': 'filter',
'goodness_function': 'goodness',
'thick_provisioning_support': True,
'thin_provisioning_support': False,
'netapp_thin_provisioned': 'false',
'netapp_compression': 'false',
'netapp_mirrored': 'false',
'netapp_dedup': 'true',
'aggregate': 'aggr1',
'netapp_raid_type': 'raid_dp',
'netapp_disk_type': 'SSD',
}]
self.assertEqual(expected, result)
mock_get_ssc.assert_called_once_with()
@ddt.data({}, None)
def test_get_pool_stats_no_ssc_vols(self, ssc):
mock_get_ssc = self.mock_object(self.library.ssc_library,
'get_ssc',
mock.Mock(return_value=ssc))
pools = self.library._get_pool_stats()
self.assertListEqual([], pools)
mock_get_ssc.assert_called_once_with()
@ddt.data('open+|demix+', 'open.+', '.+\d', '^((?!mix+).)*$',
'open123, open321')
def test_get_pool_map_match_selected_pools(self, patterns):
self.library.configuration.netapp_pool_name_search_pattern = patterns
mock_list_flexvols = self.mock_object(
self.zapi_client, 'list_flexvols',
mock.Mock(return_value=fake.FAKE_CMODE_VOLUMES))
result = self.library._get_flexvol_to_pool_map()
expected = {
'open123': {
'pool_name': 'open123',
},
'open321': {
'pool_name': 'open321',
},
}
self.assertEqual(expected, result)
mock_list_flexvols.assert_called_once_with()
@ddt.data('', 'mix.+|open.+', '.+', 'open123, mixed, open321',
'.*?')
def test_get_pool_map_match_all_pools(self, patterns):
self.library.configuration.netapp_pool_name_search_pattern = patterns
mock_list_flexvols = self.mock_object(
self.zapi_client, 'list_flexvols',
mock.Mock(return_value=fake.FAKE_CMODE_VOLUMES))
result = self.library._get_flexvol_to_pool_map()
self.assertEqual(fake.FAKE_CMODE_POOL_MAP, result)
mock_list_flexvols.assert_called_once_with()
def test_get_pool_map_invalid_conf(self):
"""Verify an exception is raised if the regex pattern is invalid"""
self.library.configuration.netapp_pool_name_search_pattern = '(.+'
self.assertRaises(exception.InvalidConfigurationValue,
self.library._get_flexvol_to_pool_map)
@ddt.data('abc|stackopen|openstack|abc*', 'abc', 'stackopen', 'openstack',
'abc*', '^$')
def test_get_pool_map_non_matching_patterns(self, patterns):
self.library.configuration.netapp_pool_name_search_pattern = patterns
mock_list_flexvols = self.mock_object(
self.zapi_client, 'list_flexvols',
mock.Mock(return_value=fake.FAKE_CMODE_VOLUMES))
result = self.library._get_flexvol_to_pool_map()
self.assertEqual({}, result)
mock_list_flexvols.assert_called_once_with()
def test_update_ssc(self):
mock_get_pool_map = self.mock_object(
self.library, '_get_flexvol_to_pool_map',
mock.Mock(return_value=fake.FAKE_CMODE_VOLUMES))
result = self.library._update_ssc()
self.assertIsNone(result)
mock_get_pool_map.assert_called_once_with()
self.library.ssc_library.update_ssc.assert_called_once_with(
fake.FAKE_CMODE_VOLUMES)
def test_delete_volume(self):
self.mock_object(block_base.NetAppLun, 'get_metadata_property',
mock.Mock(return_value=fake.POOL_NAME))
self.mock_object(self.library, '_update_stale_vols')
self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
mock.Mock(
return_value=fake.QOS_POLICY_GROUP_INFO))
@ -404,76 +442,24 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.library.delete_volume(fake.VOLUME)
self.assertEqual(1,
block_base.NetAppLun.get_metadata_property.call_count)
block_base.NetAppBlockStorageLibrary.delete_volume\
.assert_called_once_with(fake.VOLUME)
(block_base.NetAppBlockStorageLibrary.delete_volume.
assert_called_once_with(fake.VOLUME))
na_utils.get_valid_qos_policy_group_info.assert_called_once_with(
fake.VOLUME)
self.library._mark_qos_policy_group_for_deletion\
.assert_called_once_with(fake.QOS_POLICY_GROUP_INFO)
self.assertEqual(1, self.library._update_stale_vols.call_count)
def test_delete_volume_no_netapp_vol(self):
self.mock_object(block_base.NetAppLun, 'get_metadata_property',
mock.Mock(return_value=None))
self.mock_object(self.library, '_update_stale_vols')
self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
mock.Mock(
return_value=fake.QOS_POLICY_GROUP_INFO))
self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
self.library.delete_volume(fake.VOLUME)
block_base.NetAppLun.get_metadata_property.assert_called_once_with(
'Volume')
block_base.NetAppBlockStorageLibrary.delete_volume\
.assert_called_once_with(fake.VOLUME)
self.library._mark_qos_policy_group_for_deletion\
.assert_called_once_with(fake.QOS_POLICY_GROUP_INFO)
self.assertEqual(0, self.library._update_stale_vols.call_count)
(self.library._mark_qos_policy_group_for_deletion.
assert_called_once_with(fake.QOS_POLICY_GROUP_INFO))
def test_delete_volume_get_valid_qos_policy_group_info_exception(self):
self.mock_object(block_base.NetAppLun, 'get_metadata_property',
mock.Mock(return_value=fake.NETAPP_VOLUME))
self.mock_object(self.library, '_update_stale_vols')
self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
mock.Mock(side_effect=exception.Invalid))
self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
self.library.delete_volume(fake.VOLUME)
block_base.NetAppLun.get_metadata_property.assert_called_once_with(
'Volume')
block_base.NetAppBlockStorageLibrary.delete_volume\
.assert_called_once_with(fake.VOLUME)
self.library._mark_qos_policy_group_for_deletion\
.assert_called_once_with(None)
self.assertEqual(1, self.library._update_stale_vols.call_count)
def test_delete_snapshot(self):
self.mock_object(block_base.NetAppLun, 'get_metadata_property',
mock.Mock(return_value=fake.NETAPP_VOLUME))
mock_super_delete_snapshot = self.mock_object(
block_base.NetAppBlockStorageLibrary, 'delete_snapshot')
mock_update_stale_vols = self.mock_object(self.library,
'_update_stale_vols')
self.library.delete_snapshot(fake.SNAPSHOT)
mock_super_delete_snapshot.assert_called_once_with(fake.SNAPSHOT)
self.assertTrue(mock_update_stale_vols.called)
def test_delete_snapshot_no_netapp_vol(self):
self.mock_object(block_base.NetAppLun, 'get_metadata_property',
mock.Mock(return_value=None))
mock_super_delete_snapshot = self.mock_object(
block_base.NetAppBlockStorageLibrary, 'delete_snapshot')
mock_update_stale_vols = self.mock_object(self.library,
'_update_stale_vols')
self.library.delete_snapshot(fake.SNAPSHOT)
mock_super_delete_snapshot.assert_called_once_with(fake.SNAPSHOT)
self.assertFalse(mock_update_stale_vols.called)
(block_base.NetAppBlockStorageLibrary.delete_volume.
assert_called_once_with(fake.VOLUME))
(self.library._mark_qos_policy_group_for_deletion.
assert_called_once_with(None))
def test_setup_qos_for_volume(self):
self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
@ -589,105 +575,23 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
def test_start_periodic_tasks(self):
mock_update_ssc = self.mock_object(
self.library, '_update_ssc')
mock_remove_unused_qos_policy_groups = self.mock_object(
self.zapi_client,
'remove_unused_qos_policy_groups')
self.zapi_client, 'remove_unused_qos_policy_groups')
update_ssc_periodic_task = mock.Mock()
harvest_qos_periodic_task = mock.Mock()
side_effect = [update_ssc_periodic_task, harvest_qos_periodic_task]
mock_loopingcall = self.mock_object(
loopingcall,
'FixedIntervalLoopingCall',
mock.Mock(side_effect=[harvest_qos_periodic_task]))
loopingcall, 'FixedIntervalLoopingCall',
mock.Mock(side_effect=side_effect))
self.library._start_periodic_tasks()
mock_loopingcall.assert_has_calls([
mock.call(mock_update_ssc),
mock.call(mock_remove_unused_qos_policy_groups)])
self.assertTrue(update_ssc_periodic_task.start.called)
self.assertTrue(harvest_qos_periodic_task.start.called)
@ddt.data('open+|demix+', 'open.+', '.+\d', '^((?!mix+).)*$',
'open123, open321')
def test_get_filtered_pools_match_selected_pools(self, patterns):
self.library.ssc_vols = fake.FAKE_CMODE_VOLUME
self.library.configuration.netapp_pool_name_search_pattern = patterns
filtered_pools = self.library._get_filtered_pools()
self.assertEqual(fake.FAKE_CMODE_VOLUME['all'][0].id['name'],
filtered_pools[0].id['name'])
self.assertEqual(fake.FAKE_CMODE_VOLUME['all'][2].id['name'],
filtered_pools[1].id['name'])
@ddt.data('', 'mix.+|open.+', '.+', 'open123, mixed, open321',
'.*?')
def test_get_filtered_pools_match_all_pools(self, patterns):
self.library.ssc_vols = fake.FAKE_CMODE_VOLUME
self.library.configuration.netapp_pool_name_search_pattern = patterns
filtered_pools = self.library._get_filtered_pools()
self.assertEqual(fake.FAKE_CMODE_VOLUME['all'][0].id['name'],
filtered_pools[0].id['name'])
self.assertEqual(fake.FAKE_CMODE_VOLUME['all'][1].id['name'],
filtered_pools[1].id['name'])
self.assertEqual(fake.FAKE_CMODE_VOLUME['all'][2].id['name'],
filtered_pools[2].id['name'])
def test_get_filtered_pools_invalid_conf(self):
"""Verify an exception is raised if the regex pattern is invalid"""
self.library.configuration.netapp_pool_name_search_pattern = '(.+'
self.assertRaises(exception.InvalidConfigurationValue,
self.library._get_filtered_pools)
@ddt.data('abc|stackopen|openstack|abc*', 'abc', 'stackopen', 'openstack',
'abc*', '^$')
def test_get_filtered_pools_non_matching_patterns(self, patterns):
self.library.ssc_vols = fake.FAKE_CMODE_VOLUME
self.library.configuration.netapp_pool_name_search_pattern = patterns
filtered_pools = self.library._get_filtered_pools()
self.assertListEqual([], filtered_pools)
@ddt.data({}, None)
def test_get_pool_stats_no_ssc_vols(self, vols):
self.library.ssc_vols = vols
pools = self.library._get_pool_stats()
self.assertListEqual([], pools)
def test_get_pool_stats_with_filtered_pools(self):
self.library.ssc_vols = fake.ssc_map
self.mock_object(self.library, '_get_filtered_pools',
mock.Mock(return_value=[fake.FAKE_CMODE_VOL1]))
self.library.perf_library.get_node_utilization_for_pool = (
mock.Mock(return_value=30.0))
mock_capacities = {
'size-total': 5000000000.0,
'size-available': 4000000000.0,
}
self.mock_object(
self.zapi_client, 'get_flexvol_capacity',
mock.Mock(return_value=mock_capacities))
pools = self.library._get_pool_stats(filter_function='filter',
goodness_function='goodness')
self.assertListEqual(fake.FAKE_CMODE_POOLS, pools)
def test_get_pool_stats_no_filtered_pools(self):
self.library.ssc_vols = fake.ssc_map
self.mock_object(self.library, '_get_filtered_pools',
mock.Mock(return_value=[]))
pools = self.library._get_pool_stats()
self.assertListEqual([], pools)
mock_update_ssc.assert_called_once_with()

View File

@ -52,7 +52,6 @@ class NetAppNfsDriverTestCase(test.TestCase):
with mock.patch.object(remotefs_brick, 'RemoteFsClient',
return_value=mock.Mock()):
self.driver = nfs_base.NetAppNfsDriver(**kwargs)
self.driver.ssc_enabled = False
self.driver.db = mock.Mock()
@mock.patch.object(nfs.NfsDriver, 'do_setup')
@ -111,13 +110,11 @@ class NetAppNfsDriverTestCase(test.TestCase):
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(self.driver, '_do_create_volume')
self.mock_object(self.driver, '_do_qos_for_volume')
update_ssc = self.mock_object(self.driver, '_update_stale_vols')
expected = {'provider_location': fake.NFS_SHARE}
result = self.driver.create_volume(fake.NFS_VOLUME)
self.assertEqual(expected, result)
self.assertEqual(0, update_ssc.call_count)
def test_create_volume_no_pool(self):
volume = copy.deepcopy(fake.NFS_VOLUME)
@ -133,14 +130,11 @@ class NetAppNfsDriverTestCase(test.TestCase):
self.mock_object(na_utils, 'get_volume_extra_specs')
mock_create = self.mock_object(self.driver, '_do_create_volume')
mock_create.side_effect = Exception
update_ssc = self.mock_object(self.driver, '_update_stale_vols')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume,
fake.NFS_VOLUME)
self.assertEqual(0, update_ssc.call_count)
def test_create_volume_from_snapshot(self):
provider_location = fake.POOL_NAME
snapshot = fake.CLONE_SOURCE
@ -287,11 +281,6 @@ class NetAppNfsDriverTestCase(test.TestCase):
self.assertEqual(0, mock_delete.call_count)
def test_get_vol_for_share(self):
self.assertRaises(NotImplementedError,
self.driver._get_vol_for_share,
fake.NFS_SHARE)
def test_get_export_ip_path_volume_id_provided(self):
mock_get_host_ip = self.mock_object(self.driver, '_get_host_ip')
mock_get_host_ip.return_value = fake.IPV4_ADDRESS

View File

@ -25,6 +25,8 @@ from oslo_utils import units
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes as \
fake_ssc
from cinder.tests.unit.volume.drivers.netapp import fakes as na_fakes
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
@ -79,49 +81,25 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.assertTrue(mock_check_flags.called)
self.assertTrue(mock_super_do_setup.called)
@ddt.data({'thin': True, 'nfs_sparsed_volumes': True},
{'thin': True, 'nfs_sparsed_volumes': False},
{'thin': False, 'nfs_sparsed_volumes': True},
{'thin': False, 'nfs_sparsed_volumes': False})
@ddt.unpack
def test_get_pool_stats(self, thin, nfs_sparsed_volumes):
def test_get_pool_stats(self):
class test_volume(object):
pass
test_volume = test_volume()
test_volume.id = {'vserver': 'openstack', 'name': 'vola'}
test_volume.aggr = {
'disk_type': 'SSD',
'ha_policy': 'cfo',
'junction': '/vola',
'name': 'aggr1',
'raid_type': 'raiddp',
ssc = {
'vola': {
'pool_name': '10.10.10.10:/vola',
'thick_provisioning_support': True,
'thin_provisioning_support': False,
'netapp_thin_provisioned': 'false',
'netapp_compression': 'false',
'netapp_mirrored': 'false',
'netapp_dedup': 'true',
'aggregate': 'aggr1',
'netapp_raid_type': 'raid_dp',
'netapp_disk_type': 'SSD',
},
}
test_volume.export = {'path': fake.NFS_SHARE}
test_volume.sis = {'dedup': False, 'compression': False}
test_volume.state = {
'status': 'online',
'vserver_root': False,
'junction_active': True,
}
test_volume.qos = {'qos_policy_group': None}
ssc_map = {
'mirrored': {},
'dedup': {},
'compression': {},
'thin': {test_volume if thin else None},
'all': [test_volume],
}
self.driver.ssc_vols = ssc_map
self.driver.configuration.nfs_sparsed_volumes = nfs_sparsed_volumes
netapp_thin = 'true' if thin else 'false'
netapp_thick = 'false' if thin else 'true'
thick = not thin and not nfs_sparsed_volumes
mock_get_ssc = self.mock_object(self.driver.ssc_library,
'get_ssc',
mock.Mock(return_value=ssc))
total_capacity_gb = na_utils.round_down(
fake.TOTAL_BYTES // units.Gi, '0.01')
@ -138,36 +116,123 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.mock_object(self.driver,
'_get_share_capacity_info',
mock.Mock(return_value=capacity))
self.driver.perf_library.get_node_utilization_for_pool = (
mock.Mock(return_value=30.0))
result = self.driver._get_pool_stats(filter_function='filter',
goodness_function='goodness')
expected = [{'pool_name': '192.168.99.24:/fake/export/path',
'netapp_unmirrored': 'true',
'QoS_support': True,
'thick_provisioning_support': thick,
'netapp_thick_provisioned': netapp_thick,
'netapp_nocompression': 'true',
'thin_provisioning_support': not thick,
'free_capacity_gb': 12.0,
'netapp_thin_provisioned': netapp_thin,
'total_capacity_gb': 4468.0,
'netapp_compression': 'false',
'netapp_mirrored': 'false',
'netapp_dedup': 'false',
'reserved_percentage': 7,
'netapp_raid_type': 'raiddp',
'netapp_disk_type': 'SSD',
'netapp_nodedup': 'true',
'max_over_subscription_ratio': 19.0,
'provisioned_capacity_gb': 4456.0,
'utilization': 30.0,
'filter_function': 'filter',
'goodness_function': 'goodness'}]
expected = [{
'pool_name': '10.10.10.10:/vola',
'QoS_support': True,
'reserved_percentage': fake.RESERVED_PERCENTAGE,
'max_over_subscription_ratio': fake.MAX_OVER_SUBSCRIPTION_RATIO,
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb,
'provisioned_capacity_gb': provisioned_capacity_gb,
'utilization': 30.0,
'filter_function': 'filter',
'goodness_function': 'goodness',
'thick_provisioning_support': True,
'thin_provisioning_support': False,
'netapp_thin_provisioned': 'false',
'netapp_compression': 'false',
'netapp_mirrored': 'false',
'netapp_dedup': 'true',
'aggregate': 'aggr1',
'netapp_raid_type': 'raid_dp',
'netapp_disk_type': 'SSD',
}]
self.assertEqual(expected, result)
mock_get_ssc.assert_called_once_with()
@ddt.data({}, None)
def test_get_pool_stats_no_ssc_vols(self, ssc):
    """An empty or absent SSC map must yield an empty pool list."""
    get_ssc = self.mock_object(
        self.driver.ssc_library, 'get_ssc', mock.Mock(return_value=ssc))

    result = self.driver._get_pool_stats()

    self.assertListEqual([], result)
    get_ssc.assert_called_once_with()
def test_update_ssc(self):
    """_update_ssc mounts shares, builds the pool map, and refreshes SSC."""
    ensure_mounted = self.mock_object(self.driver,
                                      '_ensure_shares_mounted')
    get_pool_map = self.mock_object(self.driver,
                                    '_get_flexvol_to_pool_map',
                                    mock.Mock(return_value='fake_map'))
    update_ssc = self.mock_object(self.driver.ssc_library, 'update_ssc')

    self.assertIsNone(self.driver._update_ssc())

    ensure_mounted.assert_called_once_with()
    get_pool_map.assert_called_once_with()
    update_ssc.assert_called_once_with('fake_map')
def test_get_pool_map(self):
    """A share resolving to an operational LIF maps flexvol name -> pool."""
    self.driver.zapi_client = mock.Mock()
    get_lif_addresses = self.mock_object(
        self.driver.zapi_client, 'get_operational_lif_addresses',
        mock.Mock(return_value=[fake.SHARE_IP]))
    resolve_hostname = self.mock_object(
        na_utils, 'resolve_hostname',
        mock.Mock(return_value=fake.SHARE_IP))
    get_flexvol = self.mock_object(
        self.driver.zapi_client, 'get_flexvol',
        mock.Mock(return_value={'name': fake.NETAPP_VOLUME}))

    pool_map = self.driver._get_flexvol_to_pool_map()

    self.assertEqual(
        {fake.NETAPP_VOLUME: {'pool_name': fake.NFS_SHARE}}, pool_map)
    get_lif_addresses.assert_called_once_with()
    resolve_hostname.assert_called_once_with(fake.SHARE_IP)
    get_flexvol.assert_called_once_with(flexvol_path=fake.EXPORT_PATH)
def test_get_pool_map_address_not_found(self):
    """If no LIF address matches the share host, the map comes back empty."""
    self.driver.zapi_client = mock.Mock()
    # No operational LIF addresses at all.
    self.mock_object(self.driver.zapi_client,
                     'get_operational_lif_addresses',
                     mock.Mock(return_value=[]))
    self.mock_object(na_utils, 'resolve_hostname',
                     mock.Mock(return_value=fake.SHARE_IP))

    self.assertEqual({}, self.driver._get_flexvol_to_pool_map())
def test_get_pool_map_flexvol_not_found(self):
    """A backend error during flexvol lookup leaves the share unmapped."""
    self.driver.zapi_client = mock.Mock()
    self.mock_object(self.driver.zapi_client,
                     'get_operational_lif_addresses',
                     mock.Mock(return_value=[fake.SHARE_IP]))
    self.mock_object(na_utils, 'resolve_hostname',
                     mock.Mock(return_value=fake.SHARE_IP))
    # The flexvol lookup raises instead of returning volume info.
    self.mock_object(
        self.driver.zapi_client, 'get_flexvol',
        mock.Mock(side_effect=exception.VolumeBackendAPIException(
            data='fake_data')))

    self.assertEqual({}, self.driver._get_flexvol_to_pool_map())
def test_check_for_setup_error(self):
super_check_for_setup_error = self.mock_object(
@ -187,26 +252,36 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
fake_provider_location = 'fake_provider_location'
fake_volume = {'provider_location': fake_provider_location}
self.mock_object(self.driver, '_delete_backing_file_for_volume')
self.mock_object(na_utils, 'get_valid_qos_policy_group_info')
mock_prov_deprov = self.mock_object(self.driver,
'_post_prov_deprov_in_ssc')
self.mock_object(na_utils,
'get_valid_qos_policy_group_info',
mock.Mock(return_value='fake_qos_policy_group_info'))
self.driver.delete_volume(fake_volume)
mock_prov_deprov.assert_called_once_with(fake_provider_location)
self.driver._delete_backing_file_for_volume.assert_called_once_with(
fake_volume)
na_utils.get_valid_qos_policy_group_info.assert_called_once_with(
fake_volume)
(self.driver.zapi_client.mark_qos_policy_group_for_deletion.
assert_called_once_with('fake_qos_policy_group_info'))
def test_delete_volume_exception_path(self):
fake_provider_location = 'fake_provider_location'
fake_volume = {'provider_location': fake_provider_location}
self.mock_object(self.driver, '_delete_backing_file_for_volume')
self.mock_object(na_utils, 'get_valid_qos_policy_group_info')
self.driver.zapi_client = mock.Mock(side_effect=[Exception])
mock_prov_deprov = self.mock_object(self.driver,
'_post_prov_deprov_in_ssc')
self.mock_object(na_utils,
'get_valid_qos_policy_group_info',
mock.Mock(return_value='fake_qos_policy_group_info'))
self.driver.zapi_client = mock.Mock(side_effect=Exception)
self.driver.delete_volume(fake_volume)
mock_prov_deprov.assert_called_once_with(fake_provider_location)
self.driver._delete_backing_file_for_volume.assert_called_once_with(
fake_volume)
na_utils.get_valid_qos_policy_group_info.assert_called_once_with(
fake_volume)
(self.driver.zapi_client.mark_qos_policy_group_for_deletion.
assert_called_once_with('fake_qos_policy_group_info'))
def test_delete_backing_file_for_volume(self):
mock_filer_delete = self.mock_object(self.driver,
@ -245,17 +320,12 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mock_get_location = self.mock_object(self.driver,
'_get_provider_location')
mock_get_location.return_value = fake.PROVIDER_LOCATION
mock_delete_backing = self.mock_object(
self.driver, '_delete_backing_file_for_snapshot')
mock_prov_deprov = self.mock_object(self.driver,
'_post_prov_deprov_in_ssc')
self.driver.delete_snapshot(fake.test_snapshot)
mock_delete_backing.assert_called_once_with(fake.test_snapshot)
mock_prov_deprov.assert_called_once_with(fake.PROVIDER_LOCATION)
def test_delete_backing_file_for_snapshot(self):
mock_filer_delete = self.mock_object(
@ -467,146 +537,123 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mock_get_info.assert_has_calls([mock.call(fake.NFS_VOLUME)])
super_unmanage.assert_has_calls([mock.call(fake.NFS_VOLUME)])
def test_create_volume(self):
self.mock_object(self.driver, '_ensure_shares_mounted')
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(self.driver, '_do_create_volume')
self.mock_object(self.driver, '_do_qos_for_volume')
update_ssc = self.mock_object(self.driver, '_update_stale_vols')
self.mock_object(self.driver, '_get_vol_for_share')
expected = {'provider_location': fake.NFS_SHARE}
result = self.driver.create_volume(fake.NFS_VOLUME)
self.assertEqual(expected, result)
self.assertEqual(1, update_ssc.call_count)
def test_create_volume_exception(self):
self.mock_object(self.driver, '_ensure_shares_mounted')
self.mock_object(na_utils, 'get_volume_extra_specs')
mock_create = self.mock_object(self.driver, '_do_create_volume')
mock_create.side_effect = Exception
update_ssc = self.mock_object(self.driver, '_update_stale_vols')
self.mock_object(self.driver, '_get_vol_for_share')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume,
fake.NFS_VOLUME)
self.assertEqual(1, update_ssc.call_count)
def test_start_periodic_tasks(self):
mock_update_ssc = self.mock_object(self.driver, '_update_ssc')
mock_remove_unused_qos_policy_groups = self.mock_object(
self.driver.zapi_client,
'remove_unused_qos_policy_groups')
update_ssc_periodic_task = mock.Mock()
harvest_qos_periodic_task = mock.Mock()
side_effect = [update_ssc_periodic_task, harvest_qos_periodic_task]
mock_loopingcall = self.mock_object(
loopingcall,
'FixedIntervalLoopingCall',
mock.Mock(side_effect=[harvest_qos_periodic_task]))
loopingcall, 'FixedIntervalLoopingCall',
mock.Mock(side_effect=side_effect))
self.driver._start_periodic_tasks()
mock_loopingcall.assert_has_calls([
mock.call(mock_update_ssc),
mock.call(mock_remove_unused_qos_policy_groups)])
self.assertTrue(update_ssc_periodic_task.start.called)
self.assertTrue(harvest_qos_periodic_task.start.called)
mock_update_ssc.assert_called_once_with()
@ddt.data(
{'space': True, 'ssc': True, 'match': True, 'expected': True},
{'space': True, 'ssc': True, 'match': False, 'expected': False},
{'space': True, 'ssc': False, 'match': True, 'expected': True},
{'space': True, 'ssc': False, 'match': False, 'expected': True},
{'space': False, 'ssc': True, 'match': True, 'expected': False},
{'space': False, 'ssc': True, 'match': False, 'expected': False},
{'space': False, 'ssc': False, 'match': True, 'expected': False},
{'space': False, 'ssc': False, 'match': False, 'expected': False},
)
@ddt.data({'has_space': True, 'type_match': True, 'expected': True},
{'has_space': True, 'type_match': False, 'expected': False},
{'has_space': False, 'type_match': True, 'expected': False},
{'has_space': False, 'type_match': False, 'expected': False})
@ddt.unpack
@mock.patch.object(nfs_cmode.NetAppCmodeNfsDriver,
'_is_share_vol_type_match')
@mock.patch.object(nfs_cmode.NetAppCmodeNfsDriver,
'_share_has_space_for_clone')
@mock.patch.object(nfs_cmode.NetAppCmodeNfsDriver,
'_is_volume_thin_provisioned')
def test_is_share_clone_compatible(self,
mock_is_volume_thin_provisioned,
mock_share_has_space_for_clone,
mock_is_share_vol_type_match,
space, ssc, match, expected):
mock_share_has_space_for_clone.return_value = space
mock_is_share_vol_type_match.return_value = match
def test_is_share_clone_compatible(self, has_space, type_match, expected):
with mock.patch.object(self.driver, 'ssc_enabled', ssc):
result = self.driver._is_share_clone_compatible(fake.VOLUME,
fake.NFS_SHARE)
self.assertEqual(expected, result)
mock_get_flexvol_name_for_share = self.mock_object(
self.driver, '_get_flexvol_name_for_share',
mock.Mock(return_value='fake_flexvol'))
mock_is_volume_thin_provisioned = self.mock_object(
self.driver, '_is_volume_thin_provisioned',
mock.Mock(return_value='thin'))
mock_share_has_space_for_clone = self.mock_object(
self.driver, '_share_has_space_for_clone',
mock.Mock(return_value=has_space))
mock_is_share_vol_type_match = self.mock_object(
self.driver, '_is_share_vol_type_match',
mock.Mock(return_value=type_match))
@ddt.data(
{'sparsed': True, 'ssc': True, 'vol_thin': True, 'expected': True},
{'sparsed': True, 'ssc': True, 'vol_thin': False, 'expected': True},
{'sparsed': True, 'ssc': False, 'vol_thin': True, 'expected': True},
{'sparsed': True, 'ssc': False, 'vol_thin': False, 'expected': True},
{'sparsed': False, 'ssc': True, 'vol_thin': True, 'expected': True},
{'sparsed': False, 'ssc': True, 'vol_thin': False, 'expected': False},
{'sparsed': False, 'ssc': False, 'vol_thin': True, 'expected': False},
{'sparsed': False, 'ssc': False, 'vol_thin': False, 'expected': False},
)
@ddt.unpack
def test_is_volume_thin_provisioned(
self, sparsed, ssc, vol_thin, expected):
fake_volume = object()
ssc_vols = {'thin': {fake_volume if vol_thin else None}}
with mock.patch.object(self.driver, 'ssc_enabled', ssc):
with mock.patch.object(self.driver, 'ssc_vols', ssc_vols):
with mock.patch.object(self.driver.configuration,
'nfs_sparsed_volumes',
sparsed):
result = self.driver._is_volume_thin_provisioned(
fake_volume)
result = self.driver._is_share_clone_compatible(fake.VOLUME,
fake.NFS_SHARE)
self.assertEqual(expected, result)
mock_get_flexvol_name_for_share.assert_called_once_with(fake.NFS_SHARE)
mock_is_volume_thin_provisioned.assert_called_once_with('fake_flexvol')
mock_share_has_space_for_clone.assert_called_once_with(
fake.NFS_SHARE, fake.SIZE, 'thin')
if has_space:
mock_is_share_vol_type_match.assert_called_once_with(
fake.VOLUME, fake.NFS_SHARE, 'fake_flexvol')
@ddt.data(
{'ssc': True, 'share': fake.NFS_SHARE, 'vol': fake.test_volume},
{'ssc': True, 'share': fake.NFS_SHARE, 'vol': None},
{'ssc': True, 'share': None, 'vol': fake.test_volume},
{'ssc': True, 'share': None, 'vol': None},
{'ssc': False, 'share': fake.NFS_SHARE, 'vol': fake.test_volume},
{'ssc': False, 'share': fake.NFS_SHARE, 'vol': None},
{'ssc': False, 'share': None, 'vol': fake.test_volume},
{'ssc': False, 'share': None, 'vol': None},
)
@ddt.data({'thin': True, 'expected': True},
{'thin': False, 'expected': False},
{'thin': None, 'expected': False})
@ddt.unpack
def test_post_prov_deprov_in_ssc(self, ssc, share, vol):
def test_is_volume_thin_provisioned(self, thin, expected):
with mock.patch.object(self.driver, 'ssc_enabled', ssc):
with mock.patch.object(
self.driver, '_get_vol_for_share') as mock_get_vol:
with mock.patch.object(
self.driver, '_update_stale_vols') as mock_update:
mock_get_vol.return_value = vol
self.driver._post_prov_deprov_in_ssc(share)
ssc_data = {'thin_provisioning_support': thin}
mock_get_ssc_for_flexvol = self.mock_object(
self.driver.ssc_library, 'get_ssc_for_flexvol',
mock.Mock(return_value=ssc_data))
if ssc and share and vol:
mock_update.assert_called_once_with(volume=vol)
else:
self.assertEqual(0, mock_update.call_count)
result = self.driver._is_volume_thin_provisioned('fake_flexvol')
def test_get_vol_for_share(self):
fake_volume = fake.test_volume
ssc_vols = {'all': {fake_volume}}
self.assertEqual(expected, result)
mock_get_ssc_for_flexvol.assert_called_once_with('fake_flexvol')
with mock.patch.object(self.driver, 'ssc_vols', ssc_vols):
result = self.driver._get_vol_for_share(fake.NFS_SHARE)
@ddt.data({'flexvols': ['volume1', 'volume2'], 'expected': True},
{'flexvols': ['volume3', 'volume4'], 'expected': False},
{'flexvols': [], 'expected': False})
@ddt.unpack
def test_is_share_vol_type_match(self, flexvols, expected):
self.assertEqual(fake.test_volume, result)
mock_get_volume_extra_specs = self.mock_object(
na_utils, 'get_volume_extra_specs',
mock.Mock(return_value='fake_extra_specs'))
mock_get_matching_flexvols_for_extra_specs = self.mock_object(
self.driver.ssc_library, 'get_matching_flexvols_for_extra_specs',
mock.Mock(return_value=flexvols))
def test_get_vol_for_share_no_ssc_vols(self):
with mock.patch.object(self.driver, 'ssc_vols', None):
self.assertIsNone(self.driver._get_vol_for_share(fake.NFS_SHARE))
result = self.driver._is_share_vol_type_match(fake.VOLUME,
fake.NFS_SHARE,
'volume1')
self.assertEqual(expected, result)
mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME)
mock_get_matching_flexvols_for_extra_specs.assert_called_once_with(
'fake_extra_specs')
@ddt.data({'share': 'volume1', 'expected': 'volume1'},
          {'share': 'volume3', 'expected': None})
@ddt.unpack
def test_get_flexvol_name_for_share(self, share, expected):
    """The share is matched against the pool_name entries in the SSC."""
    get_ssc = self.mock_object(
        self.driver.ssc_library, 'get_ssc',
        mock.Mock(return_value=fake_ssc.SSC))

    flexvol_name = self.driver._get_flexvol_name_for_share(share)

    self.assertEqual(expected, flexvol_name)
    get_ssc.assert_called_once_with()
def test_get_flexvol_name_for_share_no_ssc_vols(self):
    """With no SSC data there is nothing to match, so None is returned."""
    get_ssc = self.mock_object(
        self.driver.ssc_library, 'get_ssc', mock.Mock(return_value={}))

    self.assertIsNone(
        self.driver._get_flexvol_name_for_share('fake_share'))
    get_ssc.assert_called_once_with()
def test_find_image_location_with_local_copy(self):
local_share = '/share'

View File

@ -0,0 +1,99 @@
# Copyright (c) 2016 Clinton Knight
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
SSC_VSERVER = 'fake_vserver'
SSC_VOLUMES = ('volume1', 'volume2')
SSC_VOLUME_MAP = {
SSC_VOLUMES[0]: {
'pool_name': SSC_VOLUMES[0],
},
SSC_VOLUMES[1]: {
'pool_name': SSC_VOLUMES[1],
},
}
SSC_AGGREGATES = ('aggr1', 'aggr2')
SSC = {
'volume1': {
'thick_provisioning_support': True,
'thin_provisioning_support': False,
'netapp_thin_provisioned': 'false',
'aggregate': 'aggr1',
'netapp_compression': 'false',
'netapp_dedup': 'true',
'netapp_mirrored': 'false',
'netapp_raid_type': 'raid_dp',
'netapp_disk_type': 'SSD',
'pool_name': 'volume1',
},
'volume2': {
'thick_provisioning_support': False,
'thin_provisioning_support': True,
'netapp_thin_provisioned': 'true',
'aggregate': 'aggr2',
'netapp_compression': 'true',
'netapp_dedup': 'true',
'netapp_mirrored': 'true',
'netapp_raid_type': 'raid_dp',
'netapp_disk_type': 'FCAL',
'pool_name': 'volume2',
},
}
SSC_FLEXVOL_INFO = {
'volume1': {
'thick_provisioning_support': True,
'thin_provisioning_support': False,
'netapp_thin_provisioned': 'false',
'aggregate': 'aggr1',
},
'volume2': {
'thick_provisioning_support': False,
'thin_provisioning_support': True,
'netapp_thin_provisioned': 'true',
'aggregate': 'aggr2',
},
}
SSC_DEDUPE_INFO = {
'volume1': {
'netapp_dedup': 'true',
'netapp_compression': 'false',
},
'volume2': {
'netapp_dedup': 'true',
'netapp_compression': 'true',
},
}
SSC_MIRROR_INFO = {
'volume1': {
'netapp_mirrored': 'false',
},
'volume2': {
'netapp_mirrored': 'true',
},
}
SSC_AGGREGATE_INFO = {
'volume1': {
'netapp_disk_type': 'SSD',
'netapp_raid_type': 'raid_dp',
},
'volume2': {
'netapp_disk_type': 'FCAL',
'netapp_raid_type': 'raid_dp',
},
}

View File

@ -13,11 +13,18 @@
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import ddt
import mock
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
fakes as fake_client)
import cinder.tests.unit.volume.drivers.netapp.dataontap.utils.fakes as fake
import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes
from cinder.volume.drivers.netapp.dataontap.utils import capabilities
@ -28,7 +35,15 @@ class CapabilitiesLibraryTestCase(test.TestCase):
super(CapabilitiesLibraryTestCase, self).setUp()
self.zapi_client = mock.Mock()
self.ssc_library = capabilities.CapabilitiesLibrary(self.zapi_client)
self.configuration = self.get_config_cmode()
self.ssc_library = capabilities.CapabilitiesLibrary(
'iSCSI', fake.SSC_VSERVER, self.zapi_client, self.configuration)
self.ssc_library.ssc = fake.SSC
def get_config_cmode(self):
    """Build a cDOT configuration fixture with a fixed backend name."""
    cmode_config = na_fakes.create_configuration_cmode()
    cmode_config.volume_backend_name = 'fake_backend'
    return cmode_config
def test_check_api_permissions(self):
@ -68,3 +83,256 @@ class CapabilitiesLibraryTestCase(test.TestCase):
self.ssc_library.check_api_permissions)
self.assertEqual(0, mock_log.call_count)
def test_get_ssc(self):
    """get_ssc returns a defensive copy of the cached SSC data."""
    ssc_copy = self.ssc_library.get_ssc()

    self.assertEqual(fake.SSC, ssc_copy)
    # Equal content, but not the same object as the fake template.
    self.assertIsNot(fake.SSC, ssc_copy)
def test_get_ssc_for_flexvol(self):
    """Per-flexvol lookup returns a copy of that flexvol's SSC entry."""
    flexvol = fake.SSC_VOLUMES[0]

    entry = self.ssc_library.get_ssc_for_flexvol(flexvol)

    self.assertEqual(fake.SSC.get(flexvol), entry)
    self.assertIsNot(fake.SSC.get(flexvol), entry)
def test_get_ssc_for_flexvol_not_found(self):
    """An unknown flexvol name yields an empty SSC entry."""
    self.assertEqual({}, self.ssc_library.get_ssc_for_flexvol('invalid'))
def test_update_ssc(self):
    """update_ssc merges per-category info into one SSC entry per flexvol.

    Each private gatherer (_get_ssc_flexvol_info, _get_ssc_dedupe_info,
    _get_ssc_mirror_info, _get_ssc_aggregate_info) is mocked to return
    the per-volume fakes; the merged result must equal fake.SSC.
    """
    # side_effect lists return volume1's info on the first call and
    # volume2's on the second, so iteration order matters below.
    mock_get_ssc_flexvol_info = self.mock_object(
        self.ssc_library, '_get_ssc_flexvol_info',
        mock.Mock(side_effect=[
            fake.SSC_FLEXVOL_INFO['volume1'],
            fake.SSC_FLEXVOL_INFO['volume2']
        ]))
    mock_get_ssc_dedupe_info = self.mock_object(
        self.ssc_library, '_get_ssc_dedupe_info',
        mock.Mock(side_effect=[
            fake.SSC_DEDUPE_INFO['volume1'],
            fake.SSC_DEDUPE_INFO['volume2']
        ]))
    mock_get_ssc_mirror_info = self.mock_object(
        self.ssc_library, '_get_ssc_mirror_info',
        mock.Mock(side_effect=[
            fake.SSC_MIRROR_INFO['volume1'],
            fake.SSC_MIRROR_INFO['volume2']
        ]))
    mock_get_ssc_aggregate_info = self.mock_object(
        self.ssc_library, '_get_ssc_aggregate_info',
        mock.Mock(side_effect=[
            fake.SSC_AGGREGATE_INFO['volume1'],
            fake.SSC_AGGREGATE_INFO['volume2']
        ]))
    # OrderedDict pins the flexvol iteration order to match the
    # side_effect sequences above.
    ordered_ssc = collections.OrderedDict()
    ordered_ssc['volume1'] = fake.SSC_VOLUME_MAP['volume1']
    ordered_ssc['volume2'] = fake.SSC_VOLUME_MAP['volume2']
    result = self.ssc_library.update_ssc(ordered_ssc)
    self.assertIsNone(result)
    self.assertEqual(fake.SSC, self.ssc_library.ssc)
    mock_get_ssc_flexvol_info.assert_has_calls([
        mock.call('volume1'), mock.call('volume2')])
    mock_get_ssc_dedupe_info.assert_has_calls([
        mock.call('volume1'), mock.call('volume2')])
    mock_get_ssc_mirror_info.assert_has_calls([
        mock.call('volume1'), mock.call('volume2')])
    # Aggregate info is gathered per hosting aggregate, not per flexvol.
    mock_get_ssc_aggregate_info.assert_has_calls([
        mock.call('aggr1'), mock.call('aggr2')])
@ddt.data({'lun_space_guarantee': True},
          {'lun_space_guarantee': False})
@ddt.unpack
def test_get_ssc_flexvol_info_thin_block(self, lun_space_guarantee):
    """A thin flexvol reports thin support for block protocols.

    The expected values do not vary with lun_space_guarantee here —
    presumably VOLUME_INFO_SSC describes a flexvol with no space
    guarantee, making it thin regardless of the LUN reservation
    setting.  TODO(review): confirm against fake_client.VOLUME_INFO_SSC.
    """
    self.ssc_library.configuration.netapp_lun_space_reservation = \
        'enabled' if lun_space_guarantee else 'disabled'
    self.mock_object(self.ssc_library.zapi_client,
                     'get_flexvol',
                     mock.Mock(return_value=fake_client.VOLUME_INFO_SSC))
    result = self.ssc_library._get_ssc_flexvol_info(
        fake_client.VOLUME_NAMES[0])
    expected = {
        'netapp_thin_provisioned': 'true',
        'thick_provisioning_support': False,
        'thin_provisioning_support': True,
        'aggregate': 'fake_aggr1',
    }
    self.assertEqual(expected, result)
    self.zapi_client.get_flexvol.assert_called_once_with(
        flexvol_name=fake_client.VOLUME_NAMES[0])
@ddt.data({'vol_space_guarantee': 'file', 'lun_space_guarantee': True},
          {'vol_space_guarantee': 'volume', 'lun_space_guarantee': True})
@ddt.unpack
def test_get_ssc_flexvol_info_thick_block(self, vol_space_guarantee,
                                          lun_space_guarantee):
    """A space-guaranteed flexvol defers thick/thin to LUN reservation.

    NOTE(review): both ddt cases supply lun_space_guarantee=True even
    though the expected dict is parameterized on it; the False branch
    (thin support on a thick flexvol) is never exercised — consider
    adding those cases if the production behavior supports them.
    """
    self.ssc_library.configuration.netapp_lun_space_reservation = \
        'enabled' if lun_space_guarantee else 'disabled'
    # Copy the fake so the shared template is not mutated across cases.
    fake_volume_info_ssc = copy.deepcopy(fake_client.VOLUME_INFO_SSC)
    fake_volume_info_ssc['space-guarantee'] = vol_space_guarantee
    self.mock_object(self.ssc_library.zapi_client,
                     'get_flexvol',
                     mock.Mock(return_value=fake_volume_info_ssc))
    result = self.ssc_library._get_ssc_flexvol_info(
        fake_client.VOLUME_NAMES[0])
    expected = {
        'netapp_thin_provisioned': 'false',
        'thick_provisioning_support': lun_space_guarantee,
        'thin_provisioning_support': not lun_space_guarantee,
        'aggregate': 'fake_aggr1',
    }
    self.assertEqual(expected, result)
    self.zapi_client.get_flexvol.assert_called_once_with(
        flexvol_name=fake_client.VOLUME_NAMES[0])
@ddt.data({'nfs_sparsed_volumes': True},
          {'nfs_sparsed_volumes': False})
@ddt.unpack
def test_get_ssc_flexvol_info_thin_file(self, nfs_sparsed_volumes):
    """A thin flexvol reports thin support for the NFS protocol.

    Expected values do not vary with nfs_sparsed_volumes — presumably
    VOLUME_INFO_SSC carries no space guarantee, so the flexvol is thin
    regardless of sparse-file configuration.  TODO(review): confirm.
    """
    self.ssc_library.protocol = 'nfs'
    self.ssc_library.configuration.nfs_sparsed_volumes = \
        nfs_sparsed_volumes
    self.mock_object(self.ssc_library.zapi_client,
                     'get_flexvol',
                     mock.Mock(return_value=fake_client.VOLUME_INFO_SSC))
    result = self.ssc_library._get_ssc_flexvol_info(
        fake_client.VOLUME_NAMES[0])
    expected = {
        'netapp_thin_provisioned': 'true',
        'thick_provisioning_support': False,
        'thin_provisioning_support': True,
        'aggregate': 'fake_aggr1',
    }
    self.assertEqual(expected, result)
    self.zapi_client.get_flexvol.assert_called_once_with(
        flexvol_name=fake_client.VOLUME_NAMES[0])
@ddt.data({'vol_space_guarantee': 'file', 'nfs_sparsed_volumes': True},
          {'vol_space_guarantee': 'volume', 'nfs_sparsed_volumes': False})
@ddt.unpack
def test_get_ssc_flexvol_info_thick_file(self, vol_space_guarantee,
                                         nfs_sparsed_volumes):
    """A space-guaranteed flexvol defers thick/thin to sparse-file config.

    With an NFS protocol and a 'file'/'volume' space guarantee, thick
    support is expected to be the inverse of nfs_sparsed_volumes.
    """
    self.ssc_library.protocol = 'nfs'
    self.ssc_library.configuration.nfs_sparsed_volumes = \
        nfs_sparsed_volumes
    # Copy the fake so the shared template is not mutated across cases.
    fake_volume_info_ssc = copy.deepcopy(fake_client.VOLUME_INFO_SSC)
    fake_volume_info_ssc['space-guarantee'] = vol_space_guarantee
    self.mock_object(self.ssc_library.zapi_client,
                     'get_flexvol',
                     mock.Mock(return_value=fake_volume_info_ssc))
    result = self.ssc_library._get_ssc_flexvol_info(
        fake_client.VOLUME_NAMES[0])
    expected = {
        'netapp_thin_provisioned': 'false',
        'thick_provisioning_support': not nfs_sparsed_volumes,
        'thin_provisioning_support': nfs_sparsed_volumes,
        'aggregate': 'fake_aggr1',
    }
    self.assertEqual(expected, result)
    self.zapi_client.get_flexvol.assert_called_once_with(
        flexvol_name=fake_client.VOLUME_NAMES[0])
def test_get_ssc_dedupe_info(self):
    """Dedupe and compression flags come from the ZAPI dedupe query."""
    self.mock_object(
        self.ssc_library.zapi_client, 'get_flexvol_dedupe_info',
        mock.Mock(return_value=fake_client.VOLUME_DEDUPE_INFO_SSC))
    flexvol = fake_client.VOLUME_NAMES[0]

    result = self.ssc_library._get_ssc_dedupe_info(flexvol)

    self.assertEqual(
        {'netapp_dedup': 'true', 'netapp_compression': 'false'}, result)
    self.zapi_client.get_flexvol_dedupe_info.assert_called_once_with(
        flexvol)
@ddt.data(True, False)
def test_get_ssc_mirror_info(self, mirrored):
    """netapp_mirrored reflects the client's SnapMirror answer."""
    self.mock_object(
        self.ssc_library.zapi_client, 'is_flexvol_mirrored',
        mock.Mock(return_value=mirrored))

    result = self.ssc_library._get_ssc_mirror_info(
        fake_client.VOLUME_NAMES[0])

    expected_flag = 'true' if mirrored else 'false'
    self.assertEqual({'netapp_mirrored': expected_flag}, result)
    self.zapi_client.is_flexvol_mirrored.assert_called_once_with(
        fake_client.VOLUME_NAMES[0], fake.SSC_VSERVER)
def test_get_ssc_aggregate_info(self):
    """Aggregate disk type and RAID type populate the SSC entry."""
    self.mock_object(
        self.ssc_library.zapi_client, 'get_aggregate_disk_type',
        mock.Mock(return_value=fake_client.AGGR_DISK_TYPE))
    self.mock_object(
        self.ssc_library.zapi_client, 'get_aggregate',
        mock.Mock(return_value=fake_client.AGGR_INFO_SSC))

    result = self.ssc_library._get_ssc_aggregate_info(
        fake_client.VOLUME_AGGREGATE_NAME)

    self.assertEqual({
        'netapp_disk_type': fake_client.AGGR_DISK_TYPE,
        'netapp_raid_type': fake_client.AGGR_RAID_TYPE,
    }, result)
    self.zapi_client.get_aggregate_disk_type.assert_called_once_with(
        fake_client.VOLUME_AGGREGATE_NAME)
    self.zapi_client.get_aggregate.assert_called_once_with(
        fake_client.VOLUME_AGGREGATE_NAME)
def test_get_matching_flexvols_for_extra_specs(self):
    """Only flexvols satisfying every extra spec are returned."""
    extra_specs = {
        'thick_provisioning_support': '<is> False',
        'netapp_compression': 'true',
        'netapp_dedup': 'true',
        'netapp_mirrored': 'true',
        'netapp_raid_type': 'raid_dp',
        'netapp_disk_type': 'FCAL',
    }

    matches = self.ssc_library.get_matching_flexvols_for_extra_specs(
        extra_specs)

    self.assertEqual(['volume2'], matches)
def test_modify_extra_specs_for_comparison(self):
    """'<is>'-prefixed boolean spec strings become real booleans."""
    raw_specs = {
        'thick_provisioning_support': '<is> False',
        'thin_provisioning_support': '<is> true',
        'netapp_compression': 'true',
    }

    modified_specs = self.ssc_library._modify_extra_specs_for_comparison(
        raw_specs)

    self.assertEqual({
        'thick_provisioning_support': False,
        'thin_provisioning_support': True,
        'netapp_compression': 'true',
    }, modified_specs)

View File

@ -24,8 +24,6 @@
Volume driver library for NetApp C-mode block storage systems.
"""
import copy
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
@ -37,7 +35,6 @@ from cinder import utils
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
from cinder.volume.drivers.netapp.dataontap.utils import capabilities
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
@ -45,6 +42,7 @@ from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
QOS_CLEANUP_INTERVAL_SECONDS = 60
SSC_UPDATE_INTERVAL_SECONDS = 3600 # hourly
@six.add_metaclass(utils.TraceWrapperMetaclass)
@ -74,26 +72,38 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
port=self.configuration.netapp_server_port,
vserver=self.vserver)
self.ssc_vols = {}
self.stale_vols = set()
self.perf_library = perf_cmode.PerformanceCmodeLibrary(
self.zapi_client)
self.ssc_library = capabilities.CapabilitiesLibrary(self.zapi_client)
self.ssc_library = capabilities.CapabilitiesLibrary(
self.driver_protocol, self.vserver, self.zapi_client,
self.configuration)
def check_for_setup_error(self):
"""Check that the driver is working and can communicate."""
self.ssc_library.check_api_permissions()
ssc_cmode.refresh_cluster_ssc(self, self.zapi_client.get_connection(),
self.vserver, synchronous=True)
if not self._get_filtered_pools():
if not self._get_flexvol_to_pool_map():
msg = _('No pools are available for provisioning volumes. '
'Ensure that the configuration option '
'netapp_pool_name_search_pattern is set correctly.')
raise exception.NetAppDriverException(msg)
super(NetAppBlockStorageCmodeLibrary, self).check_for_setup_error()
self._start_periodic_tasks()
def _start_periodic_tasks(self):
# Note(cknight): Run the task once in the current thread to prevent a
# race with the first invocation of _update_volume_stats.
self._update_ssc()
# Start the task that updates the slow-changing storage service catalog
ssc_periodic_task = loopingcall.FixedIntervalLoopingCall(
self._update_ssc)
ssc_periodic_task.start(
interval=SSC_UPDATE_INTERVAL_SECONDS,
initial_delay=SSC_UPDATE_INTERVAL_SECONDS)
# Start the task that harvests soft-deleted QoS policy groups.
harvest_qos_periodic_task = loopingcall.FixedIntervalLoopingCall(
self.zapi_client.remove_unused_qos_policy_groups)
@ -108,9 +118,6 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
self.zapi_client.create_lun(
volume_name, lun_name, size, metadata, qos_policy_group_name)
self._update_stale_vols(
volume=ssc_cmode.NetAppVolume(volume_name, self.vserver))
def _create_lun_handle(self, metadata):
"""Returns LUN handle based on filer type."""
return '%s:%s' % (self.vserver, metadata['Path'])
@ -158,8 +165,6 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
new_name,
lun[0].get_child_content('size'),
clone_meta))
self._update_stale_vols(
volume=ssc_cmode.NetAppVolume(volume, self.vserver))
def _create_lun_meta(self, lun):
"""Creates LUN metadata dictionary."""
@ -178,21 +183,10 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
def _get_fc_target_wwpns(self, include_partner=True):
return self.zapi_client.get_fc_target_wwpns()
def _configure_tunneling(self, do_tunneling=False):
"""Configures tunneling for Data ONTAP cluster."""
if do_tunneling:
self.zapi_client.set_vserver(self.vserver)
else:
self.zapi_client.set_vserver(None)
def _update_volume_stats(self, filter_function=None,
goodness_function=None):
"""Retrieve stats info from vserver."""
sync = True if self.ssc_vols is None else False
ssc_cmode.refresh_cluster_ssc(self, self.zapi_client.get_connection(),
self.vserver, synchronous=sync)
LOG.debug('Updating volume stats')
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
@ -209,30 +203,38 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
self._stats = data
def _get_pool_stats(self, filter_function=None, goodness_function=None):
"""Retrieve pool (Data ONTAP volume) stats info from SSC volumes."""
"""Retrieve pool (Data ONTAP flexvol) stats.
Pool statistics are assembled from static driver capabilities, the
Storage Service Catalog of flexvol attributes, and real-time capacity
and controller utilization metrics. The pool name is the flexvol name.
"""
pools = []
if not self.ssc_vols:
ssc = self.ssc_library.get_ssc()
if not ssc:
return pools
filtered_pools = self._get_filtered_pools()
self.perf_library.update_performance_cache(filtered_pools)
self.perf_library.update_performance_cache(ssc)
for vol in filtered_pools:
pool_name = vol.id['name']
for ssc_vol_name, ssc_vol_info in ssc.items():
pool = dict()
pool['pool_name'] = pool_name
# Add storage service catalog data
pool.update(ssc_vol_info)
# Add driver capabilities and config info
pool['QoS_support'] = True
pool['reserved_percentage'] = (
self.reserved_percentage)
pool['consistencygroup_support'] = True
pool['reserved_percentage'] = self.reserved_percentage
pool['max_over_subscription_ratio'] = (
self.max_over_subscription_ratio)
# Get capacity info and convert to GB
# Add up-to-date capacity info
capacity = self.zapi_client.get_flexvol_capacity(
flexvol_name=pool_name)
flexvol_name=ssc_vol_name)
size_total_gb = capacity['size-total'] / units.Gi
pool['total_capacity_gb'] = na_utils.round_down(size_total_gb)
@ -240,88 +242,56 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
size_available_gb = capacity['size-available'] / units.Gi
pool['free_capacity_gb'] = na_utils.round_down(size_available_gb)
pool['provisioned_capacity_gb'] = (round(
pool['total_capacity_gb'] - pool['free_capacity_gb'], 2))
pool['netapp_raid_type'] = vol.aggr['raid_type']
pool['netapp_disk_type'] = vol.aggr['disk_type']
mirrored = vol in self.ssc_vols['mirrored']
pool['netapp_mirrored'] = six.text_type(mirrored).lower()
pool['netapp_unmirrored'] = six.text_type(not mirrored).lower()
dedup = vol in self.ssc_vols['dedup']
pool['netapp_dedup'] = six.text_type(dedup).lower()
pool['netapp_nodedup'] = six.text_type(not dedup).lower()
compression = vol in self.ssc_vols['compression']
pool['netapp_compression'] = six.text_type(compression).lower()
pool['netapp_nocompression'] = six.text_type(
not compression).lower()
thin = vol in self.ssc_vols['thin']
pool['netapp_thin_provisioned'] = six.text_type(thin).lower()
pool['netapp_thick_provisioned'] = six.text_type(not thin).lower()
thick = (not thin and
self.configuration.netapp_lun_space_reservation
== 'enabled')
pool['thick_provisioning_support'] = thick
pool['thin_provisioning_support'] = not thick
pool['provisioned_capacity_gb'] = round(
pool['total_capacity_gb'] - pool['free_capacity_gb'], 2)
# Add utilization data
utilization = self.perf_library.get_node_utilization_for_pool(
pool_name)
pool['utilization'] = na_utils.round_down(utilization, '0.01')
ssc_vol_name)
pool['utilization'] = na_utils.round_down(utilization)
pool['filter_function'] = filter_function
pool['goodness_function'] = goodness_function
pool['consistencygroup_support'] = True
pools.append(pool)
return pools
def _get_filtered_pools(self):
"""Return filtered pools given a pool name search pattern."""
def _update_ssc(self):
"""Refresh the storage service catalog with the latest set of pools."""
self.ssc_library.update_ssc(self._get_flexvol_to_pool_map())
def _get_flexvol_to_pool_map(self):
"""Get the flexvols that match the pool name search pattern.
The map is of the format suitable for seeding the storage service
catalog: {<flexvol_name> : {'pool_name': <flexvol_name>}}
"""
pool_regex = na_utils.get_pool_name_filter_regex(self.configuration)
filtered_pools = []
for vol in self.ssc_vols.get('all', []):
vol_name = vol.id['name']
if pool_regex.match(vol_name):
msg = ("Volume '%(vol_name)s' matches against regular "
"expression: %(vol_pattern)s")
LOG.debug(msg, {'vol_name': vol_name,
'vol_pattern': pool_regex.pattern})
filtered_pools.append(vol)
pools = {}
flexvol_names = self.zapi_client.list_flexvols()
for flexvol_name in flexvol_names:
msg_args = {
'flexvol': flexvol_name,
'vol_pattern': pool_regex.pattern,
}
if pool_regex.match(flexvol_name):
msg = "Volume '%(flexvol)s' matches %(vol_pattern)s"
LOG.debug(msg, msg_args)
pools[flexvol_name] = {'pool_name': flexvol_name}
else:
msg = ("Volume '%(vol_name)s' does not match against regular "
"expression: %(vol_pattern)s")
LOG.debug(msg, {'vol_name': vol_name,
'vol_pattern': pool_regex.pattern})
msg = "Volume '%(flexvol)s' does not match %(vol_pattern)s"
LOG.debug(msg, msg_args)
return filtered_pools
@utils.synchronized('update_stale')
def _update_stale_vols(self, volume=None, reset=False):
"""Populates stale vols with vol and returns set copy if reset."""
if volume:
self.stale_vols.add(volume)
if reset:
set_copy = copy.deepcopy(self.stale_vols)
self.stale_vols.clear()
return set_copy
@utils.synchronized("refresh_ssc_vols")
def refresh_ssc_vols(self, vols):
"""Refreshes ssc_vols with latest entries."""
self.ssc_vols = vols
return pools
def delete_volume(self, volume):
"""Driver entry point for destroying existing volumes."""
lun = self.lun_table.get(volume['name'])
netapp_vol = None
if lun:
netapp_vol = lun.get_metadata_property('Volume')
super(NetAppBlockStorageCmodeLibrary, self).delete_volume(volume)
try:
qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
@ -331,23 +301,10 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
# volume.
qos_policy_group_info = None
self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
if netapp_vol:
self._update_stale_vols(
volume=ssc_cmode.NetAppVolume(netapp_vol, self.vserver))
msg = 'Deleted LUN with name %(name)s and QoS info %(qos)s'
LOG.debug(msg, {'name': volume['name'], 'qos': qos_policy_group_info})
def delete_snapshot(self, snapshot):
"""Driver entry point for deleting a snapshot."""
lun = self.lun_table.get(snapshot['name'])
netapp_vol = lun.get_metadata_property('Volume') if lun else None
super(NetAppBlockStorageCmodeLibrary, self).delete_snapshot(snapshot)
if netapp_vol:
self._update_stale_vols(
volume=ssc_cmode.NetAppVolume(netapp_vol, self.vserver))
def _get_preferred_target_from_list(self, target_details_list,
filter=None):
# cDOT iSCSI LIFs do not migrate from controller to controller
@ -365,7 +322,7 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
# Nova uses the target.
operational_addresses = (
self.zapi_client.get_operational_network_interface_addresses())
self.zapi_client.get_operational_lif_addresses())
return (super(NetAppBlockStorageCmodeLibrary, self)
._get_preferred_target_from_list(target_details_list,

View File

@ -651,20 +651,6 @@ class Client(client_base.Client):
{'path': path, 'bytes': unique_bytes})
return unique_bytes
def get_vserver_ips(self, vserver):
"""Get ips for the vserver."""
result = netapp_api.invoke_api(
self.connection, api_name='net-interface-get-iter',
is_iter=True, tunnel=vserver)
if_list = []
for res in result:
records = res.get_child_content('num-records')
if records > 0:
attr_list = res['attributes-list']
ifs = attr_list.get_children()
if_list.extend(ifs)
return if_list
def check_cluster_api(self, object_name, operation_name, api):
"""Checks the availability of a cluster API.
@ -749,10 +735,10 @@ class Client(client_base.Client):
return True
def get_operational_network_interface_addresses(self):
def get_operational_lif_addresses(self):
"""Gets the IP addresses of operational LIFs on the vserver."""
api_args = {
net_interface_get_iter_args = {
'query': {
'net-interface-info': {
'operational-status': 'up'
@ -764,7 +750,8 @@ class Client(client_base.Client):
}
}
}
result = self.send_request('net-interface-get-iter', api_args)
result = self.send_iter_request('net-interface-get-iter',
net_interface_get_iter_args)
lif_info_list = result.get_child_by_name(
'attributes-list') or netapp_api.NaElement('none')
@ -797,7 +784,7 @@ class Client(client_base.Client):
},
}
result = self.send_request('volume-get-iter', api_args)
result = self.send_iter_request('volume-get-iter', api_args)
if self._get_record_count(result) != 1:
msg = _('Volume %s not found.')
msg_args = flexvol_path or flexvol_name
@ -819,6 +806,203 @@ class Client(client_base.Client):
'size-available': size_available,
}
def list_flexvols(self):
    """Returns the names of the flexvols on the controller."""
    # Restrict the query to healthy, online, non-root RW flexvols and
    # ask only for the volume name to keep the reply small.
    api_args = {
        'query': {
            'volume-attributes': {
                'volume-id-attributes': {
                    'type': 'rw',
                    'style': 'flex',
                },
                'volume-state-attributes': {
                    'is-vserver-root': 'false',
                    'is-inconsistent': 'false',
                    'is-invalid': 'false',
                    'state': 'online',
                },
            },
        },
        'desired-attributes': {
            'volume-attributes': {
                'volume-id-attributes': {
                    'name': None,
                },
            },
        },
    }
    result = self.send_iter_request('volume-get-iter', api_args)
    if not self._has_records(result):
        return []

    attributes_list = result.get_child_by_name(
        'attributes-list') or netapp_api.NaElement('none')

    flexvol_names = []
    for record in attributes_list.get_children():
        id_attributes = record.get_child_by_name(
            'volume-id-attributes') or netapp_api.NaElement('none')
        flexvol_names.append(id_attributes.get_child_content('name'))
    return flexvol_names
def get_flexvol(self, flexvol_path=None, flexvol_name=None):
    """Get flexvol attributes needed for the storage service catalog."""
    # Narrow the query by junction path and/or name, and to healthy,
    # online, non-root RW flexvols only.
    id_attrs_query = {'type': 'rw', 'style': 'flex'}
    if flexvol_path:
        id_attrs_query['junction-path'] = flexvol_path
    if flexvol_name:
        id_attrs_query['name'] = flexvol_name

    api_args = {
        'query': {
            'volume-attributes': {
                'volume-id-attributes': id_attrs_query,
                'volume-state-attributes': {
                    'is-vserver-root': 'false',
                    'is-inconsistent': 'false',
                    'is-invalid': 'false',
                    'state': 'online',
                },
            },
        },
        'desired-attributes': {
            'volume-attributes': {
                'volume-id-attributes': {
                    'name': None,
                    'owning-vserver-name': None,
                    'junction-path': None,
                    'containing-aggregate-name': None,
                },
                'volume-mirror-attributes': {
                    'is-data-protection-mirror': None,
                    'is-replica-volume': None,
                },
                'volume-space-attributes': {
                    'is-space-guarantee-enabled': None,
                    'space-guarantee': None,
                },
                'volume-qos-attributes': {
                    'policy-group-name': None,
                }
            },
        },
    }

    result = self.send_iter_request('volume-get-iter', api_args)
    # The query must identify exactly one flexvol.
    if self._get_record_count(result) != 1:
        msg = _('Could not find unique volume %(vol)s.')
        msg_args = {'vol': flexvol_name}
        raise exception.VolumeBackendAPIException(data=msg % msg_args)

    attrs_list = result.get_child_by_name(
        'attributes-list') or netapp_api.NaElement('none')
    vol_attrs = attrs_list.get_child_by_name(
        'volume-attributes') or netapp_api.NaElement('none')
    id_attrs = vol_attrs.get_child_by_name(
        'volume-id-attributes') or netapp_api.NaElement('none')
    space_attrs = vol_attrs.get_child_by_name(
        'volume-space-attributes') or netapp_api.NaElement('none')
    qos_attrs = vol_attrs.get_child_by_name(
        'volume-qos-attributes') or netapp_api.NaElement('none')

    return {
        'name': id_attrs.get_child_content('name'),
        'vserver': id_attrs.get_child_content('owning-vserver-name'),
        'junction-path': id_attrs.get_child_content('junction-path'),
        'aggregate': id_attrs.get_child_content(
            'containing-aggregate-name'),
        'space-guarantee-enabled': strutils.bool_from_string(
            space_attrs.get_child_content('is-space-guarantee-enabled')),
        'space-guarantee': space_attrs.get_child_content(
            'space-guarantee'),
        'qos-policy-group': qos_attrs.get_child_content(
            'policy-group-name')
    }
def get_flexvol_dedupe_info(self, flexvol_name):
    """Get dedupe attributes needed for the storage service catalog.

    :param flexvol_name: name of the flexvol to query
    :returns: dict with boolean 'compression' and 'dedupe' keys; both
              False when the API call fails or does not match exactly
              one record (best effort, never raises NaApiError)
    """
    # Single fallback object instead of two duplicated literals.
    no_dedupe_response = {'compression': False, 'dedupe': False}
    api_args = {
        'query': {
            'sis-status-info': {
                'path': '/vol/%s' % flexvol_name,
            },
        },
        'desired-attributes': {
            'sis-status-info': {
                'state': None,
                'is-compression-enabled': None,
            },
        },
    }
    try:
        result = self.send_iter_request('sis-get-iter', api_args)
    except netapp_api.NaApiError:
        # Lazy %-args: let the logging layer do the interpolation.
        LOG.exception(_('Failed to get dedupe info for volume %s.'),
                      flexvol_name)
        return no_dedupe_response

    if self._get_record_count(result) != 1:
        return no_dedupe_response

    attributes_list = result.get_child_by_name(
        'attributes-list') or netapp_api.NaElement('none')
    sis_status_info = attributes_list.get_child_by_name(
        'sis-status-info') or netapp_api.NaElement('none')

    return {
        'compression': strutils.bool_from_string(
            sis_status_info.get_child_content('is-compression-enabled')),
        'dedupe': na_utils.to_bool(
            sis_status_info.get_child_content('state')),
    }
def is_flexvol_mirrored(self, flexvol_name, vserver_name):
    """Check if flexvol is a SnapMirror source.

    :param flexvol_name: name of the flexvol to check
    :param vserver_name: vserver that owns the flexvol
    :returns: True if at least one snapmirrored data-protection
              relationship has the flexvol as its source; False on any
              API error (best effort, never raises NaApiError)
    """
    api_args = {
        'query': {
            'snapmirror-info': {
                'source-vserver': vserver_name,
                'source-volume': flexvol_name,
                'mirror-state': 'snapmirrored',
                'relationship-type': 'data_protection',
            },
        },
        'desired-attributes': {
            'snapmirror-info': None,
        },
    }
    try:
        result = self.send_iter_request('snapmirror-get-iter', api_args)
    except netapp_api.NaApiError:
        # Lazy %-args: let the logging layer do the interpolation.
        LOG.exception(_('Failed to get SnapMirror info for volume %s.'),
                      flexvol_name)
        return False

    # Mirrored iff the filtered query matched any relationship.
    return self._has_records(result)
@utils.trace_method
def delete_file(self, path_to_file):
"""Delete file at path."""
@ -888,6 +1072,91 @@ class Client(client_base.Client):
'aggr-ownership-attributes') or netapp_api.NaElement('none')
return aggr_ownership_attrs.get_child_content('home-name')
def get_aggregate(self, aggregate_name):
    """Get aggregate attributes needed for the storage service catalog.

    :param aggregate_name: name of the aggregate to query
    :returns: dict with 'name' and 'raid-type' keys; empty dict when no
              name is given, the API call fails, or no aggregate matches
    """
    if not aggregate_name:
        return {}

    desired_attributes = {
        'aggr-attributes': {
            'aggregate-name': None,
            'aggr-raid-attributes': {
                'raid-type': None,
            },
        },
    }

    try:
        aggrs = self._get_aggregates(aggregate_names=[aggregate_name],
                                     desired_attributes=desired_attributes)
    except netapp_api.NaApiError:
        # Lazy %-args: let the logging layer do the interpolation.
        LOG.exception(_('Failed to get info for aggregate %s.'),
                      aggregate_name)
        return {}

    if not aggrs:
        return {}

    aggr_attributes = aggrs[0]
    aggr_raid_attrs = aggr_attributes.get_child_by_name(
        'aggr-raid-attributes') or netapp_api.NaElement('none')

    return {
        'name': aggr_attributes.get_child_content('aggregate-name'),
        'raid-type': aggr_raid_attrs.get_child_content('raid-type'),
    }
def get_aggregate_disk_type(self, aggregate_name):
    """Get the disk type of an aggregate.

    :param aggregate_name: name of the aggregate to query
    :returns: the effective disk type of the first disk found, or
              'unknown' on any API error or empty/ambiguous result
    """
    # Note(cknight): Only get 1 disk, since apart from hybrid
    # aggregates all disks must be the same type.
    api_args = {
        'max-records': 1,
        'query': {
            'storage-disk-info': {
                'disk-raid-info': {
                    'disk-aggregate-info': {
                        'aggregate-name': aggregate_name,
                    },
                },
            },
        },
        'desired-attributes': {
            'storage-disk-info': {
                'disk-raid-info': {
                    'effective-disk-type': None,
                },
            },
        },
    }
    try:
        result = self.send_request('storage-disk-get-iter', api_args,
                                   enable_tunneling=False)
    except netapp_api.NaApiError:
        # Lazy %-args: let the logging layer do the interpolation.
        LOG.exception(_('Failed to get disk info for aggregate %s.'),
                      aggregate_name)
        return 'unknown'

    if self._get_record_count(result) != 1:
        return 'unknown'

    attributes_list = result.get_child_by_name(
        'attributes-list') or netapp_api.NaElement('none')

    # Return the type of the first (and only) disk record.
    for storage_disk_info in attributes_list.get_children():
        disk_raid_info = storage_disk_info.get_child_by_name(
            'disk-raid-info') or netapp_api.NaElement('none')
        disk_type = disk_raid_info.get_child_content(
            'effective-disk-type') or 'unknown'
        return disk_type

    return 'unknown'
def get_performance_instance_uuids(self, object_name, node_name):
"""Get UUIDs of performance instances for a cluster node."""

View File

@ -82,7 +82,6 @@ class NetAppNfsDriver(driver.ManageableVD,
self._context = context
na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
self.zapi_client = None
self.ssc_enabled = False
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
@ -127,9 +126,6 @@ class NetAppNfsDriver(driver.ManageableVD,
# We need to set this for the model update in order for the
# manager to behave correctly.
volume['provider_location'] = None
finally:
if self.ssc_enabled:
self._update_stale_vols(self._get_vol_for_share(pool_name))
msg = _("Volume %(vol)s could not be created in pool %(pool)s.")
raise exception.VolumeBackendAPIException(data=msg % {
@ -986,12 +982,3 @@ class NetAppNfsDriver(driver.ManageableVD,
vol_path = os.path.join(volume['provider_location'], vol_str)
LOG.info(_LI("Cinder NFS volume with current path \"%(cr)s\" is "
"no longer being managed."), {'cr': vol_path})
@utils.synchronized('update_stale')
def _update_stale_vols(self, volume=None, reset=False):
"""Populates stale vols with vol and returns set copy."""
raise NotImplementedError
def _get_vol_for_share(self, nfs_share):
"""Gets the ssc vol with given share."""
raise NotImplementedError

View File

@ -37,7 +37,6 @@ from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
from cinder.volume.drivers.netapp.dataontap.utils import capabilities
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
@ -46,6 +45,7 @@ from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
QOS_CLEANUP_INTERVAL_SECONDS = 60
SSC_UPDATE_INTERVAL_SECONDS = 3600 # hourly
@interface.volumedriver
@ -74,12 +74,10 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
port=self.configuration.netapp_server_port,
vserver=self.vserver)
self.ssc_enabled = True
self.ssc_vols = None
self.stale_vols = set()
self.perf_library = perf_cmode.PerformanceCmodeLibrary(
self.zapi_client)
self.ssc_library = capabilities.CapabilitiesLibrary(self.zapi_client)
self.ssc_library = capabilities.CapabilitiesLibrary(
'nfs', self.vserver, self.zapi_client, self.configuration)
def check_for_setup_error(self):
"""Check that the driver is working and can communicate."""
@ -87,6 +85,26 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
self.ssc_library.check_api_permissions()
self._start_periodic_tasks()
def _start_periodic_tasks(self):
    """Kick off this driver's recurring housekeeping tasks."""
    # Note(cknight): Run the task once in the current thread to prevent a
    # race with the first invocation of _update_volume_stats.
    self._update_ssc()

    # Start the task that updates the slow-changing storage service catalog
    ssc_task = loopingcall.FixedIntervalLoopingCall(self._update_ssc)
    ssc_task.start(interval=SSC_UPDATE_INTERVAL_SECONDS,
                   initial_delay=SSC_UPDATE_INTERVAL_SECONDS)

    # Start the task that harvests soft-deleted QoS policy groups.
    qos_task = loopingcall.FixedIntervalLoopingCall(
        self.zapi_client.remove_unused_qos_policy_groups)
    qos_task.start(interval=QOS_CLEANUP_INTERVAL_SECONDS,
                   initial_delay=QOS_CLEANUP_INTERVAL_SECONDS)
def _do_qos_for_volume(self, volume, extra_specs, cleanup=True):
try:
qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
@ -100,14 +118,6 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
LOG.debug("Cleaning volume %s", volume['id'])
self._cleanup_volume_on_failure(volume)
def _start_periodic_tasks(self):
# Start the task that harvests soft-deleted QoS policy groups.
harvest_qos_periodic_task = loopingcall.FixedIntervalLoopingCall(
self.zapi_client.remove_unused_qos_policy_groups)
harvest_qos_periodic_task.start(
interval=QOS_CLEANUP_INTERVAL_SECONDS,
initial_delay=QOS_CLEANUP_INTERVAL_SECONDS)
def _set_qos_policy_group_on_volume(self, volume, qos_policy_group_info):
if qos_policy_group_info is None:
return
@ -130,8 +140,6 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
(vserver, exp_volume) = self._get_vserver_and_exp_vol(volume_id, share)
self.zapi_client.clone_file(exp_volume, volume_name, clone_name,
vserver)
share = share if share else self._get_provider_location(volume_id)
self._post_prov_deprov_in_ssc(share)
def _get_vserver_and_exp_vol(self, volume_id=None, share=None):
"""Gets the vserver and export volume for share."""
@ -145,11 +153,6 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
def _update_volume_stats(self):
"""Retrieve stats info from vserver."""
self._ensure_shares_mounted()
sync = True if self.ssc_vols is None else False
ssc_cmode.refresh_cluster_ssc(self, self.zapi_client.connection,
self.vserver, synchronous=sync)
LOG.debug('Updating volume stats')
data = {}
netapp_backend = 'NetApp_NFS_Cluster_direct'
@ -168,103 +171,84 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
self._stats = data
def _get_pool_stats(self, filter_function=None, goodness_function=None):
"""Retrieve pool (i.e. NFS share) stats info from SSC volumes."""
"""Retrieve pool (Data ONTAP flexvol) stats.
self.perf_library.update_performance_cache(
self.ssc_vols.get('all', []))
Pool statistics are assembled from static driver capabilities, the
Storage Service Catalog of flexvol attributes, and real-time capacity
and controller utilization metrics. The pool name is the NFS share
path.
"""
pools = []
for nfs_share in self._mounted_shares:
ssc = self.ssc_library.get_ssc()
if not ssc:
return pools
capacity = self._get_share_capacity_info(nfs_share)
self.perf_library.update_performance_cache(ssc)
for ssc_vol_name, ssc_vol_info in ssc.items():
pool = dict()
pool['pool_name'] = nfs_share
# Add storage service catalog data
pool.update(ssc_vol_info)
# Add driver capabilities and config info
pool['QoS_support'] = True
# Add up-to-date capacity info
nfs_share = ssc_vol_info['pool_name']
capacity = self._get_share_capacity_info(nfs_share)
pool.update(capacity)
# add SSC content if available
vol = self._get_vol_for_share(nfs_share)
if vol and self.ssc_vols:
pool['netapp_raid_type'] = vol.aggr['raid_type']
pool['netapp_disk_type'] = vol.aggr['disk_type']
mirrored = vol in self.ssc_vols['mirrored']
pool['netapp_mirrored'] = six.text_type(mirrored).lower()
pool['netapp_unmirrored'] = six.text_type(not mirrored).lower()
dedup = vol in self.ssc_vols['dedup']
pool['netapp_dedup'] = six.text_type(dedup).lower()
pool['netapp_nodedup'] = six.text_type(not dedup).lower()
compression = vol in self.ssc_vols['compression']
pool['netapp_compression'] = six.text_type(compression).lower()
pool['netapp_nocompression'] = six.text_type(
not compression).lower()
flexvol_thin = vol in self.ssc_vols['thin']
pool['netapp_thin_provisioned'] = six.text_type(
flexvol_thin).lower()
pool['netapp_thick_provisioned'] = six.text_type(
not flexvol_thin).lower()
thick = (not flexvol_thin and
not self.configuration.nfs_sparsed_volumes)
pool['thick_provisioning_support'] = thick
pool['thin_provisioning_support'] = not thick
utilization = self.perf_library.get_node_utilization_for_pool(
vol.id['name'])
pool['utilization'] = na_utils.round_down(utilization, '0.01')
pool['filter_function'] = filter_function
pool['goodness_function'] = goodness_function
# Add utilization data
utilization = self.perf_library.get_node_utilization_for_pool(
ssc_vol_name)
pool['utilization'] = na_utils.round_down(utilization)
pool['filter_function'] = filter_function
pool['goodness_function'] = goodness_function
pools.append(pool)
return pools
@utils.synchronized('update_stale')
def _update_stale_vols(self, volume=None, reset=False):
"""Populates stale vols with vol and returns set copy."""
if volume:
self.stale_vols.add(volume)
set_copy = self.stale_vols.copy()
if reset:
self.stale_vols.clear()
return set_copy
def _update_ssc(self):
"""Refresh the storage service catalog with the latest set of pools."""
@utils.synchronized("refresh_ssc_vols")
def refresh_ssc_vols(self, vols):
"""Refreshes ssc_vols with latest entries."""
if not self._mounted_shares:
LOG.warning(_LW("No shares found hence skipping ssc refresh."))
return
mnt_share_vols = set()
vs_ifs = self.zapi_client.get_vserver_ips(self.vserver)
for vol in vols['all']:
for sh in self._mounted_shares:
host = sh.split(':')[0]
junction = sh.split(':')[1]
ip = na_utils.resolve_hostname(host)
if (self._ip_in_ifs(ip, vs_ifs) and
junction == vol.id['junction_path']):
mnt_share_vols.add(vol)
vol.export['path'] = sh
break
for key in vols.keys():
vols[key] = vols[key] & mnt_share_vols
self.ssc_vols = vols
self._ensure_shares_mounted()
self.ssc_library.update_ssc(self._get_flexvol_to_pool_map())
def _ip_in_ifs(self, ip, api_ifs):
"""Checks if ip is listed for ifs in API format."""
if api_ifs is None:
return False
for ifc in api_ifs:
ifc_ip = ifc.get_child_content("address")
if ifc_ip == ip:
return True
return False
def _get_flexvol_to_pool_map(self):
    """Get the flexvols that back all mounted shares.

    The map is of the format suitable for seeding the storage service
    catalog: {<flexvol_name> : {'pool_name': <share_path>}}
    """
    pools = {}
    vserver_addresses = self.zapi_client.get_operational_lif_addresses()

    for share in self._mounted_shares:
        # Shares look like <host>:<junction_path>; split once.
        share_parts = share.split(':')
        host = share_parts[0]
        junction_path = share_parts[1]
        address = na_utils.resolve_hostname(host)

        # Skip shares not served by one of this vserver's LIFs.
        if address not in vserver_addresses:
            msg = _LW('Address not found for NFS share %s.')
            LOG.warning(msg, share)
            continue

        try:
            flexvol = self.zapi_client.get_flexvol(
                flexvol_path=junction_path)
        except exception.VolumeBackendAPIException:
            msg = _LE('Flexvol not found for NFS share %s.')
            LOG.exception(msg, share)
        else:
            pools[flexvol['name']] = {'pool_name': share}

    return pools
def _shortlist_del_eligible_files(self, share, old_files):
"""Prepares list of eligible files to be deleted from cache."""
@ -305,44 +289,39 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
except Exception:
return None
def _get_vol_for_share(self, nfs_share):
"""Gets the ssc vol with given share."""
if self.ssc_vols:
for vol in self.ssc_vols['all']:
if vol.export['path'] == nfs_share:
return vol
return None
def _is_share_clone_compatible(self, volume, share):
"""Checks if share is compatible with volume to host its clone."""
thin = self._is_volume_thin_provisioned(volume)
compatible = self._share_has_space_for_clone(share,
volume['size'],
thin)
if compatible and self.ssc_enabled:
matched = self._is_share_vol_type_match(volume, share)
compatible = compatible and matched
return compatible
flexvol_name = self._get_flexvol_name_for_share(share)
thin = self._is_volume_thin_provisioned(flexvol_name)
return (
self._share_has_space_for_clone(share, volume['size'], thin) and
self._is_share_vol_type_match(volume, share, flexvol_name)
)
def _is_volume_thin_provisioned(self, volume):
if self.configuration.nfs_sparsed_volumes:
return True
if self.ssc_enabled and volume in self.ssc_vols['thin']:
return True
return False
def _is_volume_thin_provisioned(self, flexvol_name):
"""Checks if a flexvol is thin (sparse file or thin provisioned)."""
ssc_info = self.ssc_library.get_ssc_for_flexvol(flexvol_name)
return ssc_info.get('thin_provisioning_support') or False
def _is_share_vol_type_match(self, volume, share):
def _is_share_vol_type_match(self, volume, share, flexvol_name):
"""Checks if share matches volume type."""
netapp_vol = self._get_vol_for_share(share)
LOG.debug("Found volume %(vol)s for share %(share)s.",
{'vol': netapp_vol, 'share': share})
{'vol': flexvol_name, 'share': share})
extra_specs = na_utils.get_volume_extra_specs(volume)
vols = ssc_cmode.get_volumes_for_specs(self.ssc_vols, extra_specs)
return netapp_vol in vols
flexvol_names = self.ssc_library.get_matching_flexvols_for_extra_specs(
extra_specs)
return flexvol_name in flexvol_names
def _get_flexvol_name_for_share(self, nfs_share):
"""Queries the SSC for the flexvol containing an NFS share."""
ssc = self.ssc_library.get_ssc()
for ssc_vol_name, ssc_vol_info in ssc.items():
if nfs_share == ssc_vol_info.get('pool_name'):
return ssc_vol_name
return None
def delete_volume(self, volume):
"""Deletes a logical volume."""
share = volume['provider_location']
self._delete_backing_file_for_volume(volume)
try:
qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
@ -353,7 +332,6 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
# Don't blow up here if something went wrong de-provisioning the
# QoS policy for the volume.
pass
self._post_prov_deprov_in_ssc(share)
def _delete_backing_file_for_volume(self, volume):
"""Deletes file on nfs share that backs a cinder volume."""
@ -380,9 +358,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
@utils.trace_method
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
share = self._get_provider_location(snapshot.volume_id)
self._delete_backing_file_for_snapshot(snapshot)
self._post_prov_deprov_in_ssc(share)
@utils.trace_method
def _delete_backing_file_for_snapshot(self, snapshot):
@ -409,12 +385,6 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
'on filer.', path_on_filer, snapshot['id'])
self.zapi_client.delete_file(path_on_filer)
def _post_prov_deprov_in_ssc(self, share):
if self.ssc_enabled and share:
netapp_vol = self._get_vol_for_share(share)
if netapp_vol:
self._update_stale_vols(volume=netapp_vol)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
copy_success = False
@ -446,9 +416,6 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
if not copy_success:
super(NetAppCmodeNfsDriver, self).copy_image_to_volume(
context, volume, image_service, image_id)
if self.ssc_enabled:
sh = self._get_provider_location(volume['id'])
self._update_stale_vols(self._get_vol_for_share(sh))
def _get_ip_verify_on_cluster(self, host):
"""Verifies if host on same cluster and returns ip."""

View File

@ -96,9 +96,8 @@ class PerformanceCmodeLibrary(perf_base.PerformanceLibrary):
# Update pool utilization map atomically
pool_utilization = {}
for pool in ssc_pools:
pool_name = pool.id['name']
aggr_name = pool.aggr['name']
for pool_name, pool_info in ssc_pools.items():
aggr_name = pool_info.get('aggregate', 'unknown')
node_name = aggr_node_map.get(aggr_name)
if node_name:
pool_utilization[pool_name] = node_utilization.get(
@ -118,8 +117,8 @@ class PerformanceCmodeLibrary(perf_base.PerformanceLibrary):
"""Get the set of aggregates that contain the specified pools."""
aggr_names = set()
for pool in ssc_pools:
aggr_names.add(pool.aggr['name'])
for pool_name, pool_info in ssc_pools.items():
aggr_names.add(pool_info.get('aggregate'))
return aggr_names
def _get_nodes_for_aggregates(self, aggr_names):

View File

@ -1,594 +0,0 @@
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2015 Alex Meade. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Storage service catalog utility functions and classes for NetApp systems.
"""
import copy
import threading
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from cinder import exception
from cinder.i18n import _, _LI, _LW
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
class NetAppVolume(object):
    """Represents a NetApp volume.

    Present attributes
    id - name, vserver, junction_path, type
    aggr - name, raid_type, ha_policy, disk_type
    sis - dedup, compression
    state - status, vserver_root, cluster_volume,
    inconsistent, invalid, junction_active
    qos - qos_policy_group
    space - space-guarantee-enabled, space-guarantee, thin_provisioned
    mirror - mirrored i.e. dp mirror
    export - path
    """
    def __init__(self, name, vserver=None):
        # Attribute groups are dicts so SSC code can attach feature data
        # incrementally as it is discovered from the various ZAPI calls.
        self.id = {}
        self.aggr = {}
        self.sis = {}
        self.state = {}
        self.qos = {}
        self.space = {}
        self.mirror = {}
        self.export = {}
        self.id['name'] = name
        self.id['vserver'] = vserver

    def __eq__(self, other):
        """Checks for equality by volume name and owning vserver."""
        # Fix: previously returned None (not False) for unequal volumes and
        # raised AttributeError when compared against non-NetAppVolume
        # objects. Return NotImplemented so Python falls back correctly.
        if not isinstance(other, NetAppVolume):
            return NotImplemented
        return (self.id['name'] == other.id['name'] and
                self.id['vserver'] == other.id['vserver'])

    def __ne__(self, other):
        """Inverse of __eq__ (explicit for Python 2 compatibility)."""
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        """Computes hash for the object.

        Only the name contributes, so volumes differing just by vserver
        share a hash bucket; __eq__ still distinguishes them, which is
        legal (equal objects must hash equal, not vice versa).
        """
        return hash(self.id['name'])

    def __str__(self):
        """Returns human readable form for object."""
        vol_str = "NetApp Volume id: %s, aggr: %s,"\
            " space: %s, sis: %s, state: %s, qos: %s"\
            % (self.id, self.aggr, self.space, self.sis, self.state, self.qos)
        return vol_str
@utils.trace_method
def get_cluster_vols_with_ssc(na_server, vserver, volume=None):
    """Gets ssc vols for cluster vserver.

    Queries the cluster for the vserver's volumes (optionally one named
    volume), then decorates each NetAppVolume with aggregate, dedupe/
    compression, thin-provisioning and SnapMirror attributes.

    :param na_server: NaServer connection to the cluster
    :param vserver: name of the owning vserver
    :param volume: optional single volume name to restrict the query
    :returns: set of NetAppVolume objects with SSC attributes populated
    """
    volumes = query_cluster_vols_for_ssc(na_server, vserver, volume)
    sis_vols = get_sis_vol_dict(na_server, vserver, volume)
    mirrored_vols = get_snapmirror_vol_dict(na_server, vserver, volume)
    # Cache aggregate attribute lookups; many volumes share an aggregate.
    aggrs = {}
    for vol in volumes:
        aggr_name = vol.aggr['name']
        if aggr_name:
            if aggr_name in aggrs:
                # Aggr already queried. Use cached data.
                aggr_attrs = aggrs[aggr_name]
            else:
                aggr_attrs = query_aggr_options(na_server, aggr_name)
                if aggr_attrs:
                    eff_disk_type = query_aggr_storage_disk(na_server,
                                                            aggr_name)
                    aggr_attrs['disk_type'] = eff_disk_type
                aggrs[aggr_name] = aggr_attrs
            vol.aggr['raid_type'] = aggr_attrs.get('raid_type')
            vol.aggr['ha_policy'] = aggr_attrs.get('ha_policy')
            vol.aggr['disk_type'] = aggr_attrs.get('disk_type')
        if sis_vols:
            if vol.id['name'] in sis_vols:
                vol.sis['dedup'] = sis_vols[vol.id['name']]['dedup']
                vol.sis['compression'] =\
                    sis_vols[vol.id['name']]['compression']
            else:
                vol.sis['dedup'] = False
                vol.sis['compression'] = False
        # A 'file' or 'volume' space guarantee means thick provisioning.
        if (vol.space['space-guarantee-enabled'] and
                (vol.space['space-guarantee'] == 'file' or
                 vol.space['space-guarantee'] == 'volume')):
            vol.space['thin_provisioned'] = False
        else:
            vol.space['thin_provisioned'] = True
        if mirrored_vols:
            vol.mirror['mirrored'] = False
            if vol.id['name'] in mirrored_vols:
                # Only an active data-protection snapmirror counts.
                for mirr_attrs in mirrored_vols[vol.id['name']]:
                    if (mirr_attrs['rel_type'] == 'data_protection' and
                            mirr_attrs['mirr_state'] == 'snapmirrored'):
                        vol.mirror['mirrored'] = True
                        break
    return volumes
@utils.trace_method
def query_cluster_vols_for_ssc(na_server, vserver, volume=None):
    """Queries cluster volumes for ssc.

    Issues a volume-get-iter ZAPI call restricted to read-write flexible
    volumes owned by the given vserver (optionally one named volume) and
    converts the results to NetAppVolume objects.

    :returns: set of NetAppVolume objects
    """
    query = {'volume-attributes': None}
    volume_id = {
        'volume-id-attributes': {
            'owning-vserver-name': vserver,
            'type': 'rw',
            'style': 'flex',
        },
    }
    if volume:
        volume_id['volume-id-attributes']['name'] = volume
    query['volume-attributes'] = volume_id
    # Only request the attribute groups the SSC actually consumes.
    des_attr = {'volume-attributes':
                ['volume-id-attributes',
                 'volume-space-attributes',
                 'volume-state-attributes',
                 'volume-qos-attributes']}
    result = netapp_api.invoke_api(na_server, api_name='volume-get-iter',
                                   api_family='cm', query=query,
                                   des_result=des_attr,
                                   additional_elems=None,
                                   is_iter=True)
    vols = set()
    for res in result:
        records = res.get_child_content('num-records')
        if int(records) > 0:
            attr_list = res.get_child_by_name('attributes-list')
            if attr_list:
                vol_attrs = attr_list.get_children()
                vols_found = create_vol_list(vol_attrs)
                vols.update(vols_found)
    return vols
@utils.trace_method
def create_vol_list(vol_attrs):
    """Creates vol list with features from attr list.

    Parses volume-attributes API elements into NetAppVolume objects.
    Volumes that are temporary, vserver-root, offline, inconsistent or
    invalid are filtered out; volumes missing mandatory attributes are
    skipped via the KeyError handler below.

    :param vol_attrs: list of volume-attributes API elements
    :returns: set of usable NetAppVolume objects
    """
    vols = set()
    for v in vol_attrs:
        try:
            # name and vserver are mandatory
            # Absence will skip by giving KeyError.
            name = v['volume-id-attributes']['name']
            vserver = v['volume-id-attributes']['owning-vserver-name']
            vol = NetAppVolume(name, vserver)
            vol.id['type'] =\
                v['volume-id-attributes'].get_child_content('type')
            if vol.id['type'] == "tmp":
                continue
            vol.id['junction_path'] =\
                v['volume-id-attributes'].get_child_content('junction-path')
            # state attributes mandatory.
            vol.state['vserver_root'] =\
                na_utils.to_bool(
                    v['volume-state-attributes'].get_child_content(
                        'is-vserver-root'))
            if vol.state['vserver_root']:
                continue
            vol.state['status'] =\
                v['volume-state-attributes'].get_child_content('state')
            vol.state['inconsistent'] =\
                na_utils.to_bool(
                    v['volume-state-attributes'].get_child_content(
                        'is-inconsistent'))
            vol.state['invalid'] =\
                na_utils.to_bool(
                    v['volume-state-attributes'].get_child_content(
                        'is-invalid'))
            vol.state['junction_active'] =\
                na_utils.to_bool(
                    v['volume-state-attributes'].get_child_content(
                        'is-junction-active'))
            vol.state['cluster_volume'] =\
                na_utils.to_bool(
                    v['volume-state-attributes'].get_child_content(
                        'is-cluster-volume'))
            if (vol.state['status'] != 'online' or
                    vol.state['inconsistent'] or vol.state['invalid']):
                # offline, invalid and inconsistent volumes are not usable
                continue
            # aggr attributes mandatory.
            vol.aggr['name'] =\
                v['volume-id-attributes']['containing-aggregate-name']
            vol.space['space-guarantee-enabled'] =\
                na_utils.to_bool(
                    v['volume-space-attributes'].get_child_content(
                        'is-space-guarantee-enabled'))
            vol.space['space-guarantee'] =\
                v['volume-space-attributes'].get_child_content(
                    'space-guarantee')
            # qos attributes optional.
            if v.get_child_by_name('volume-qos-attributes'):
                vol.qos['qos_policy_group'] =\
                    v['volume-qos-attributes'].get_child_content(
                        'policy-group-name')
            else:
                vol.qos['qos_policy_group'] = None
            vols.add(vol)
        except KeyError as e:
            # A volume lacking mandatory attributes is skipped, not fatal.
            LOG.debug('Unexpected error while creating'
                      ' ssc vol list. Message - %s', e)
            continue
    return vols
@utils.trace_method
def query_aggr_options(na_server, aggr_name):
    """Queries cluster aggr for attributes.

    Currently queries for raid and ha-policy.

    :returns: dict possibly containing 'ha_policy' and 'raid_type';
        empty on any API failure (best effort).
    """
    add_elems = {'aggregate': aggr_name}
    attrs = {}
    try:
        result = netapp_api.invoke_api(na_server,
                                       api_name='aggr-options-list-info',
                                       api_family='cm', query=None,
                                       des_result=None,
                                       additional_elems=add_elems,
                                       is_iter=False)
        for res in result:
            options = res.get_child_by_name('options')
            if options:
                op_list = options.get_children()
                for op in op_list:
                    if op.get_child_content('name') == 'ha_policy':
                        attrs['ha_policy'] = op.get_child_content('value')
                    if op.get_child_content('name') == 'raidtype':
                        attrs['raid_type'] = op.get_child_content('value')
    except Exception as e:
        # Missing aggregate options must not break the SSC job.
        LOG.debug("Exception querying aggr options. %s", e)
    return attrs
@utils.trace_method
def get_sis_vol_dict(na_server, vserver, volume=None):
    """Queries sis for volumes.

    If volume is present sis is queried for it.
    Records dedup and compression enabled.

    :returns: dict mapping volume name to
        {'dedup': bool, 'compression': bool}; empty on API failure.
    """
    sis_vols = {}
    query_attr = {'vserver': vserver}
    if volume:
        vol_path = '/vol/%s' % (volume)
        query_attr['path'] = vol_path
    query = {'sis-status-info': query_attr}
    try:
        result = netapp_api.invoke_api(na_server,
                                       api_name='sis-get-iter',
                                       api_family='cm',
                                       query=query,
                                       is_iter=True)
        for res in result:
            attr_list = res.get_child_by_name('attributes-list')
            if attr_list:
                sis_status = attr_list.get_children()
                for sis in sis_status:
                    path = sis.get_child_content('path')
                    if not path:
                        continue
                    # Volume name is the last component of '/vol/<name>'.
                    (___, __, vol) = path.rpartition('/')
                    if not vol:
                        continue
                    v_sis = {}
                    v_sis['compression'] = na_utils.to_bool(
                        sis.get_child_content('is-compression-enabled'))
                    v_sis['dedup'] = na_utils.to_bool(
                        sis.get_child_content('state'))
                    sis_vols[vol] = v_sis
    except Exception as e:
        # Best effort: missing SIS info must not break the SSC job.
        LOG.debug("Exception querying sis information. %s", e)
    return sis_vols
@utils.trace_method
def get_snapmirror_vol_dict(na_server, vserver, volume=None):
    """Queries snapmirror volumes.

    :returns: dict mapping source volume name to a list of
        {'dest_loc', 'rel_type', 'mirr_state'} dicts, one per
        snapmirror relationship; empty on API failure.
    """
    mirrored_vols = {}
    query_attr = {'source-vserver': vserver}
    if volume:
        query_attr['source-volume'] = volume
    query = {'snapmirror-info': query_attr}
    try:
        result = netapp_api.invoke_api(na_server,
                                       api_name='snapmirror-get-iter',
                                       api_family='cm', query=query,
                                       is_iter=True)
        for res in result:
            attr_list = res.get_child_by_name('attributes-list')
            if attr_list:
                snap_info = attr_list.get_children()
                for snap in snap_info:
                    src_volume = snap.get_child_content('source-volume')
                    v_snap = {}
                    v_snap['dest_loc'] =\
                        snap.get_child_content('destination-location')
                    v_snap['rel_type'] =\
                        snap.get_child_content('relationship-type')
                    v_snap['mirr_state'] =\
                        snap.get_child_content('mirror-state')
                    # A source volume may have multiple relationships.
                    if mirrored_vols.get(src_volume):
                        mirrored_vols.get(src_volume).append(v_snap)
                    else:
                        mirrored_vols[src_volume] = [v_snap]
    except Exception as e:
        # Best effort: missing mirror info must not break the SSC job.
        LOG.debug("Exception querying mirror information. %s", e)
    return mirrored_vols
@utils.trace_method
def query_aggr_storage_disk(na_server, aggr):
    """Queries for storage disks associated to an aggregate.

    :returns: the effective-disk-type of the first disk reported for the
        aggregate, or 'unknown' if none is found or the API call fails.
    """
    query = {'storage-disk-info': {'disk-raid-info':
                                   {'disk-aggregate-info':
                                    {'aggregate-name': aggr}}}}
    des_attr = {'storage-disk-info':
                {'disk-raid-info': ['effective-disk-type']}}
    try:
        result = netapp_api.invoke_api(na_server,
                                       api_name='storage-disk-get-iter',
                                       api_family='cm', query=query,
                                       des_result=des_attr,
                                       additional_elems=None,
                                       is_iter=True)
        for res in result:
            attr_list = res.get_child_by_name('attributes-list')
            if attr_list:
                storage_disks = attr_list.get_children()
                for disk in storage_disks:
                    raid_info = disk.get_child_by_name('disk-raid-info')
                    if raid_info:
                        eff_disk_type =\
                            raid_info.get_child_content('effective-disk-type')
                        if eff_disk_type:
                            # First disk with a type stands for the aggr.
                            return eff_disk_type
                        else:
                            continue
    except Exception as e:
        # Best effort: fall through to the 'unknown' default.
        LOG.debug("Exception querying storage disk. %s", e)
    return 'unknown'
@utils.trace_method
def get_cluster_ssc(na_server, vserver):
    """Provides cluster volumes with ssc.

    Buckets the vserver's volumes by SSC feature so callers can do fast
    set algebra against extra specs.
    """
    netapp_volumes = get_cluster_vols_with_ssc(na_server, vserver)
    ssc_map = {
        'mirrored': set(),
        'dedup': set(),
        'compression': set(),
        'thin': set(),
        'all': netapp_volumes,
    }
    for volume in netapp_volumes:
        if volume.sis.get('dedup'):
            ssc_map['dedup'].add(volume)
        if volume.sis.get('compression'):
            ssc_map['compression'].add(volume)
        if volume.mirror.get('mirrored'):
            ssc_map['mirrored'].add(volume)
        if volume.space.get('thin_provisioned'):
            ssc_map['thin'].add(volume)
    return ssc_map
@utils.trace_method
def refresh_cluster_stale_ssc(*args, **kwargs):
    """Refreshes stale ssc volumes with latest.

    Expects positional args (backend, na_server, vserver).  Re-queries
    only the volumes the backend marked stale and folds the results back
    into the backend's ssc_vols map.
    """
    backend = args[0]
    na_server = args[1]
    vserver = args[2]
    identity = six.text_type(id(backend))
    lock_pr = '%s_%s' % ('refresh_ssc', identity)
    try:
        # The flag prevents stale-refresh jobs from piling up; set_safe_attr
        # only succeeds for the first concurrent caller.
        job_set = na_utils.set_safe_attr(
            backend, 'refresh_stale_running', True)
        if not job_set:
            return

        @utils.synchronized(lock_pr)
        def refresh_stale_ssc():
            stale_vols = backend._update_stale_vols(reset=True)
            LOG.info(_LI('Running stale ssc refresh job for %(server)s'
                         ' and vserver %(vs)s'),
                     {'server': na_server, 'vs': vserver})
            # refreshing single volumes can create inconsistency
            # hence doing manipulations on copy
            ssc_vols_copy = copy.deepcopy(backend.ssc_vols)
            refresh_vols = set()
            expired_vols = set()
            for vol in stale_vols:
                name = vol.id['name']
                res = get_cluster_vols_with_ssc(na_server, vserver, name)
                if res:
                    refresh_vols.add(res.pop())
                else:
                    # Volume no longer exists on the cluster.
                    expired_vols.add(vol)
            for vol in refresh_vols:
                # Re-bucket the refreshed volume into each feature set.
                for k in ssc_vols_copy:
                    vol_set = ssc_vols_copy[k]
                    vol_set.discard(vol)
                    if k == "mirrored" and vol.mirror.get('mirrored'):
                        vol_set.add(vol)
                    if k == "dedup" and vol.sis.get('dedup'):
                        vol_set.add(vol)
                    if k == "compression" and vol.sis.get('compression'):
                        vol_set.add(vol)
                    if k == "thin" and vol.space.get('thin_provisioned'):
                        vol_set.add(vol)
                    if k == "all":
                        vol_set.add(vol)
            for vol in expired_vols:
                # Drop deleted volumes from every feature set.
                for k in ssc_vols_copy:
                    vol_set = ssc_vols_copy[k]
                    vol_set.discard(vol)
            backend.refresh_ssc_vols(ssc_vols_copy)
            LOG.info(_LI('Successfully completed stale refresh job for'
                         ' %(server)s and vserver %(vs)s'),
                     {'server': na_server, 'vs': vserver})

        refresh_stale_ssc()
    finally:
        # Always clear the in-progress flag, even on failure.
        na_utils.set_safe_attr(backend, 'refresh_stale_running', False)
@utils.trace_method
def get_cluster_latest_ssc(*args, **kwargs):
    """Updates volumes including ssc.

    Expects positional args (backend, na_server, vserver).  Performs a
    full SSC rebuild and installs it on the backend.
    """
    backend = args[0]
    na_server = args[1]
    vserver = args[2]
    identity = six.text_type(id(backend))
    lock_pr = '%s_%s' % ('refresh_ssc', identity)

    # As this depends on stale job running state
    # set flag as soon as job starts to avoid
    # job accumulation.
    try:
        job_set = na_utils.set_safe_attr(backend, 'ssc_job_running', True)
        if not job_set:
            return

        @utils.synchronized(lock_pr)
        def get_latest_ssc():
            LOG.info(_LI('Running cluster latest ssc job for %(server)s'
                         ' and vserver %(vs)s'),
                     {'server': na_server, 'vs': vserver})
            ssc_vols = get_cluster_ssc(na_server, vserver)
            backend.refresh_ssc_vols(ssc_vols)
            # Record completion time so refresh_cluster_ssc can throttle.
            backend.ssc_run_time = timeutils.utcnow()
            LOG.info(_LI('Successfully completed ssc job for %(server)s'
                         ' and vserver %(vs)s'),
                     {'server': na_server, 'vs': vserver})

        get_latest_ssc()
    finally:
        # Always clear the in-progress flag, even on failure.
        na_utils.set_safe_attr(backend, 'ssc_job_running', False)
@utils.trace_method
def refresh_cluster_ssc(backend, na_server, vserver, synchronous=False):
    """Refresh cluster ssc for backend.

    Runs a full refresh if the last one is older than the backend's
    ssc_run_delta_secs (default 1800s); otherwise refreshes only stale
    volumes.  Work is dispatched on a Timer thread unless synchronous.

    :raises InvalidInput: if na_server is not an NaServer instance
    """
    if not isinstance(na_server, netapp_api.NaServer):
        raise exception.InvalidInput(reason=_("Backend server not NaServer."))
    delta_secs = getattr(backend, 'ssc_run_delta_secs', 1800)
    if getattr(backend, 'ssc_job_running', None):
        LOG.warning(_LW('ssc job in progress. Returning... '))
        return
    elif (getattr(backend, 'ssc_run_time', None) is None or
            (backend.ssc_run_time and
             timeutils.is_older_than(backend.ssc_run_time, delta_secs))):
        # Never run, or data older than the refresh interval: full rebuild.
        if synchronous:
            get_cluster_latest_ssc(backend, na_server, vserver)
        else:
            t = threading.Timer(0, get_cluster_latest_ssc,
                                args=[backend, na_server, vserver])
            t.start()
    elif getattr(backend, 'refresh_stale_running', None):
        LOG.warning(_LW('refresh stale ssc job in progress. Returning... '))
        return
    else:
        # Recent full data: only refresh volumes flagged stale, if any.
        if backend.stale_vols:
            if synchronous:
                refresh_cluster_stale_ssc(backend, na_server, vserver)
            else:
                t = threading.Timer(0, refresh_cluster_stale_ssc,
                                    args=[backend, na_server, vserver])
                t.start()
@utils.trace_method
def get_volumes_for_specs(ssc_vols, specs):
    """Shortlists volumes for extra specs provided.

    Starts from the full volume set and narrows it by each effective
    boolean spec pair and by raid/disk type.  A boolean spec pair
    (e.g. netapp_mirrored/netapp_unmirrored) is ignored when both sides
    are unset or both resolve to the same value.

    :param ssc_vols: SSC map of feature name to set of NetAppVolume
    :param specs: volume-type extra specs dict (may be None/empty)
    :returns: set of NetAppVolume objects matching the specs
    """
    if specs is None or specs == {} or not isinstance(specs, dict):
        return ssc_vols['all']
    result = copy.deepcopy(ssc_vols['all'])
    raid_type = specs.get('netapp:raid_type')
    disk_type = specs.get('netapp:disk_type')
    bool_specs_list = ['netapp_mirrored', 'netapp_unmirrored',
                       'netapp_dedup', 'netapp_nodedup',
                       'netapp_compression', 'netapp_nocompression',
                       'netapp_thin_provisioned', 'netapp_thick_provisioned']
    b_specs = {}
    # Normalize each boolean spec to True/False/None (None == unset).
    for spec in bool_specs_list:
        b_specs[spec] = na_utils.to_bool(specs.get(spec))\
            if specs.get(spec) else None

    def _spec_ineffect(b_specs, spec, opp_spec):
        """If the spec with opposite spec is ineffective."""
        if ((b_specs[spec] is None and b_specs[opp_spec] is None)
                or (b_specs[spec] == b_specs[opp_spec])):
            return True
        else:
            return False

    if _spec_ineffect(b_specs, 'netapp_mirrored', 'netapp_unmirrored'):
        pass
    else:
        if b_specs['netapp_mirrored'] or b_specs['netapp_unmirrored'] is False:
            result = result & ssc_vols['mirrored']
        else:
            result = result - ssc_vols['mirrored']
    if _spec_ineffect(b_specs, 'netapp_dedup', 'netapp_nodedup'):
        pass
    else:
        if b_specs['netapp_dedup'] or b_specs['netapp_nodedup'] is False:
            result = result & ssc_vols['dedup']
        else:
            result = result - ssc_vols['dedup']
    if _spec_ineffect(b_specs, 'netapp_compression', 'netapp_nocompression'):
        pass
    else:
        if (b_specs['netapp_compression'] or
                b_specs['netapp_nocompression'] is False):
            result = result & ssc_vols['compression']
        else:
            result = result - ssc_vols['compression']
    if _spec_ineffect(b_specs, 'netapp_thin_provisioned',
                      'netapp_thick_provisioned'):
        pass
    else:
        if (b_specs['netapp_thin_provisioned'] or
                b_specs['netapp_thick_provisioned'] is False):
            result = result & ssc_vols['thin']
        else:
            result = result - ssc_vols['thin']
    if raid_type or disk_type:
        # Iterate a copy so discards don't disturb iteration.
        tmp = copy.deepcopy(result)
        for vol in tmp:
            if raid_type:
                vol_raid = vol.aggr['raid_type']
                vol_raid = vol_raid.lower() if vol_raid else None
                if raid_type.lower() != vol_raid:
                    result.discard(vol)
            if disk_type:
                vol_dtype = vol.aggr['disk_type']
                vol_dtype = vol_dtype.lower() if vol_dtype else None
                if disk_type.lower() != vol_dtype:
                    result.discard(vol)
    return result

View File

@ -15,10 +15,14 @@
Storage service catalog (SSC) functions and classes for NetApp cDOT systems.
"""
import copy
import re
from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _, _LW
from cinder.i18n import _, _LI, _LW
LOG = logging.getLogger(__name__)
@ -46,9 +50,14 @@ SSC_API_MAP = {
class CapabilitiesLibrary(object):
def __init__(self, zapi_client):
def __init__(self, protocol, vserver_name, zapi_client, configuration):
self.protocol = protocol.lower()
self.vserver_name = vserver_name
self.zapi_client = zapi_client
self.configuration = configuration
self.backend_name = self.configuration.safe_get('volume_backend_name')
self.ssc = {}
def check_api_permissions(self):
"""Check which APIs that support SSC functionality are available."""
@ -73,3 +82,144 @@ class CapabilitiesLibrary(object):
'sufficient privileges to use all needed '
'APIs. The following extra specs will fail '
'or be ignored: %s.'), invalid_extra_specs)
def get_ssc(self):
    """Get a copy of the Storage Service Catalog."""
    # Deep copy so callers cannot mutate the library's cached state.
    ssc_copy = copy.deepcopy(self.ssc)
    return ssc_copy
def get_ssc_for_flexvol(self, flexvol_name):
    """Get map of Storage Service Catalog entries for a single flexvol."""
    # Unknown flexvols yield an empty dict; deep copy protects the cache.
    flexvol_ssc = self.ssc.get(flexvol_name, {})
    return copy.deepcopy(flexvol_ssc)
def update_ssc(self, flexvol_map):
    """Periodically runs to update Storage Service Catalog data.

    The self.ssc attribute is updated with the following format.
    {<flexvol_name> : {<ssc_key>: <ssc_value>}}

    :param flexvol_map: {<flexvol_name>: {'pool_name': <share_or_pool>}}
        seed data supplied by the driver
    """
    LOG.info(_LI("Updating storage service catalog information for "
                 "backend '%s'"), self.backend_name)

    # Build the new catalog in a local dict and rebind self.ssc in a
    # single assignment at the end, so readers see either the old or the
    # complete new catalog, never a partially built one.
    ssc = {}

    for flexvol_name, flexvol_info in flexvol_map.items():

        ssc_volume = {}

        # Add metadata passed from the driver, including pool name
        ssc_volume.update(flexvol_info)

        # Get volume info
        ssc_volume.update(self._get_ssc_flexvol_info(flexvol_name))
        ssc_volume.update(self._get_ssc_dedupe_info(flexvol_name))
        ssc_volume.update(self._get_ssc_mirror_info(flexvol_name))

        # Get aggregate info
        aggregate_name = ssc_volume.get('aggregate')
        ssc_volume.update(self._get_ssc_aggregate_info(aggregate_name))

        ssc[flexvol_name] = ssc_volume

    self.ssc = ssc
def _get_ssc_flexvol_info(self, flexvol_name):
    """Gather flexvol info and recast into SSC-style volume stats."""
    volume_info = self.zapi_client.get_flexvol(flexvol_name=flexvol_name)

    # A 'file' or 'volume' space guarantee means the flexvol is thick.
    netapp_thick = (volume_info.get('space-guarantee-enabled') and
                    volume_info.get('space-guarantee') in ('file', 'volume'))
    thick = self._get_thick_provisioning_support(netapp_thick)

    return {
        'netapp_thin_provisioned': six.text_type(not netapp_thick).lower(),
        'thick_provisioning_support': thick,
        'thin_provisioning_support': not thick,
        'aggregate': volume_info.get('aggregate'),
    }
def _get_thick_provisioning_support(self, netapp_thick):
"""Get standard thick/thin values for a flexvol.
The values reported for the standard thick_provisioning_support and
thin_provisioning_support flags depend on both the flexvol state as
well as protocol-specific configuration values.
"""
if self.protocol == 'nfs':
return (netapp_thick and
not self.configuration.nfs_sparsed_volumes)
else:
return (netapp_thick and
(self.configuration.netapp_lun_space_reservation ==
'enabled'))
def _get_ssc_dedupe_info(self, flexvol_name):
    """Gather dedupe info and recast into SSC-style volume stats."""
    dedupe_info = self.zapi_client.get_flexvol_dedupe_info(flexvol_name)
    # SSC stores booleans as lowercase strings ('true'/'false').
    return {
        'netapp_dedup': six.text_type(dedupe_info.get('dedupe')).lower(),
        'netapp_compression': six.text_type(
            dedupe_info.get('compression')).lower(),
    }
def _get_ssc_mirror_info(self, flexvol_name):
    """Gather SnapMirror info and recast into SSC-style volume stats."""
    is_mirrored = self.zapi_client.is_flexvol_mirrored(
        flexvol_name, self.vserver_name)
    # SSC stores booleans as lowercase strings ('true'/'false').
    return {'netapp_mirrored': six.text_type(is_mirrored).lower()}
def _get_ssc_aggregate_info(self, aggregate_name):
"""Gather aggregate info and recast into SSC-style volume stats."""
disk_type = self.zapi_client.get_aggregate_disk_type(aggregate_name)
aggr_info = self.zapi_client.get_aggregate(aggregate_name)
raid_type = aggr_info.get('raid-type')
return {
'netapp_disk_type': disk_type,
'netapp_raid_type': raid_type,
}
def get_matching_flexvols_for_extra_specs(self, extra_specs):
    """Return a list of flexvol names that match a set of extra specs."""
    # A flexvol matches when every (key, value) pair of the normalized
    # extra specs also appears in that flexvol's SSC entry.
    spec_items = set(
        self._modify_extra_specs_for_comparison(extra_specs).items())
    return [flexvol_name
            for flexvol_name, flexvol_info in self.get_ssc().items()
            if spec_items.issubset(set(flexvol_info.items()))]
def _modify_extra_specs_for_comparison(self, extra_specs):
"""Adjust extra spec values for simple comparison to SSC values.
Most extra-spec key-value tuples may be directly compared. But the
boolean values that take the form '<is> True' or '<is> False' must be
modified to allow comparison with the values we keep in the SSC and
report the the scheduler.
"""
modified_extra_specs = copy.deepcopy(extra_specs)
for key, value in extra_specs.items():
if re.match('<is>\s+True', value, re.I):
modified_extra_specs[key] = True
elif re.match('<is>\s+False', value, re.I):
modified_extra_specs[key] = False
return modified_extra_specs