NetApp storage service feature support

This change introduces storage service feature for
NetApp drivers. It reports back supported service
features like mirroring, deduplication, compression
etc. to the scheduler which can be used to shortlist
backends. It accepts extra_specs defined
in the volume type for these service features and provisions
volumes based on the requested specs.

blueprint cinder-nfs-driver-qos

Change-Id: I0706af2fe39b2db111a6d98b016307fc63f74dc6
This commit is contained in:
Navneet Singh 2013-07-19 03:37:15 +05:30
parent 3e69b7ce2e
commit 5581cb3492
12 changed files with 1661 additions and 150 deletions

View File

@ -21,12 +21,10 @@ Tests for NetApp volume driver
import BaseHTTPServer
import httplib
from lxml import etree
import StringIO
from lxml import etree
from cinder.exception import InvalidInput
from cinder.exception import VolumeBackendAPIException
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
@ -37,6 +35,8 @@ from cinder.volume.drivers.netapp.options import netapp_cluster_opts
from cinder.volume.drivers.netapp.options import netapp_connection_opts
from cinder.volume.drivers.netapp.options import netapp_provisioning_opts
from cinder.volume.drivers.netapp.options import netapp_transport_opts
from cinder.volume.drivers.netapp import ssc_utils
from cinder.volume.drivers.netapp import utils
LOG = logging.getLogger("cinder.volume.driver")
@ -489,12 +489,35 @@ class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None}
vol1 = ssc_utils.NetAppVolume('lun1', 'openstack')
vol1.state['vserver_root'] = False
vol1.state['status'] = 'online'
vol1.state['junction_active'] = True
vol1.space['size_avl_bytes'] = '4000000000'
vol1.space['size_total_bytes'] = '5000000000'
vol1.space['space-guarantee-enabled'] = False
vol1.space['space-guarantee'] = 'file'
vol1.space['thin_provisioned'] = True
vol1.mirror['mirrored'] = True
vol1.qos['qos_policy_group'] = None
vol1.aggr['name'] = 'aggr1'
vol1.aggr['junction'] = '/vola'
vol1.sis['dedup'] = True
vol1.sis['compression'] = True
vol1.aggr['raid_type'] = 'raiddp'
vol1.aggr['ha_policy'] = 'cfo'
vol1.aggr['disk_type'] = 'SSD'
ssc_map = {'mirrored': set([vol1]), 'dedup': set([vol1]),
'compression': set([vol1]),
'thin': set([vol1]), 'all': set([vol1])}
def setUp(self):
super(NetAppDirectCmodeISCSIDriverTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
self.stubs.Set(
ssc_utils, 'refresh_cluster_ssc', lambda a, b, c: None)
configuration = self._set_config(create_configuration())
driver = common.NetAppDriver(configuration=configuration)
self.stubs.Set(httplib, 'HTTPConnection',
@ -503,6 +526,7 @@ class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
client = driver.client
client.set_api_version(1, 15)
self.driver = driver
self.driver.ssc_vols = self.ssc_map
def _set_config(self, configuration):
configuration.netapp_storage_protocol = 'iscsi'
@ -549,7 +573,7 @@ class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
self.driver.create_volume_from_snapshot(self.volume,
self.snapshot_fail)
raise AssertionError()
except VolumeBackendAPIException:
except exception.VolumeBackendAPIException:
pass
finally:
self.driver.delete_volume(self.volume)
@ -566,7 +590,7 @@ class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
self.driver.create_cloned_volume(self.volume_clone_fail,
self.volume)
raise AssertionError()
except VolumeBackendAPIException:
except exception.VolumeBackendAPIException:
pass
finally:
self.driver.delete_volume(self.volume)
@ -585,7 +609,7 @@ class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase):
raise AssertionError('Target portal is none')
def test_fail_create_vol(self):
self.assertRaises(VolumeBackendAPIException,
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, self.vol_fail)
def test_vol_stats(self):
@ -604,7 +628,7 @@ class NetAppDriverNegativeTestCase(test.TestCase):
try:
driver = common.NetAppDriver(configuration=configuration)
raise AssertionError('Wrong storage family is getting accepted.')
except InvalidInput:
except exception.InvalidInput:
pass
def test_incorrect_protocol(self):
@ -614,7 +638,7 @@ class NetAppDriverNegativeTestCase(test.TestCase):
try:
driver = common.NetAppDriver(configuration=configuration)
raise AssertionError('Wrong storage protocol is getting accepted.')
except InvalidInput:
except exception.InvalidInput:
pass
def test_non_netapp_driver(self):
@ -626,7 +650,7 @@ class NetAppDriverNegativeTestCase(test.TestCase):
try:
driver = common.NetAppDriver(configuration=configuration)
raise AssertionError('Non NetApp driver is getting instantiated.')
except InvalidInput:
except exception.InvalidInput:
pass
finally:
common.netapp_unified_plugin_registry.pop('test_family')
@ -1066,7 +1090,7 @@ class NetAppDirect7modeISCSIDriverTestCase_NV(
success = False
try:
self.driver.create_volume(self.volume)
except VolumeBackendAPIException:
except exception.VolumeBackendAPIException:
success = True
pass
finally:

View File

@ -0,0 +1,530 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NetApp-specific ssc module."""
import BaseHTTPServer
import copy
import httplib
from lxml import etree
import mox
from mox import IgnoreArg
from mox import IsA
from mox import MockObject
import StringIO
from cinder import context
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import api
from cinder.volume.drivers.netapp import ssc_utils
class FakeHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP handler that doesn't spam the log."""

    def log_message(self, format, *args):
        # Intentionally a no-op: suppresses BaseHTTPRequestHandler's default
        # per-request stderr logging so test output stays clean.
        pass
class FakeHttplibSocket(object):
    """Minimal socket stand-in that feeds httplib.HTTPResponse.

    The read buffer is pre-loaded with *value*; whatever gets written is
    captured into ``self.result`` when the write buffer is closed.
    """

    def __init__(self, value):
        # Read side: canned data the "server" will appear to send.
        self._rbuffer = StringIO.StringIO(value)
        # Write side: collects everything the handler writes back.
        self._wbuffer = StringIO.StringIO('')
        original_close = self._wbuffer.close

        def capturing_close():
            # Snapshot written bytes before the buffer is torn down.
            self.result = self._wbuffer.getvalue()
            original_close()

        self._wbuffer.close = capturing_close

    def makefile(self, mode, _other):
        """Return the internal buffer matching the requested *mode*."""
        if mode in ('r', 'rb'):
            return self._rbuffer
        if mode in ('w', 'wb'):
            return self._wbuffer
# XML prologue emitted before each fake cmode response, mirroring what a
# real clustered ONTAP filer sends (including the netapp_gx DTD reference).
RESPONSE_PREFIX_DIRECT_CMODE = """<?xml version='1.0' encoding='UTF-8' ?>
<!DOCTYPE netapp SYSTEM 'file:/etc/netapp_gx.dtd'>"""

# Opening tag of the <netapp> envelope wrapped around every response body.
RESPONSE_PREFIX_DIRECT = """
<netapp version='1.15' xmlns='http://www.netapp.com/filer/admin'>"""

# Closing tag of the <netapp> envelope.
RESPONSE_SUFFIX_DIRECT = """</netapp>"""
class FakeDirectCMODEServerHandler(FakeHTTPRequestHandler):
    """HTTP handler that fakes enough stuff to allow the driver to run.

    Understands a handful of ONTAP cluster-mode API calls (volume-get-iter,
    aggr-options-list-info, sis-get-iter, storage-disk-get-iter) and replies
    with canned XML; everything else gets an error response.
    """

    def do_GET(s):
        """Respond to a GET request (GET is not a supported method type)."""
        if '/servlets/netapp.servlets.admin.XMLrequest_filer' != s.path:
            s.send_response(404)
            # BUG FIX: was `s.end_headers` (attribute access, no call), so
            # the header section was never terminated on this path.
            s.end_headers()
            return
        s.send_response(200)
        s.send_header("Content-Type", "text/xml; charset=utf-8")
        s.end_headers()
        out = s.wfile
        out.write('<netapp version="1.15">'
                  '<results reason="Not supported method type"'
                  ' status="failed" errno="Not_Allowed"/></netapp>')

    def do_POST(s):
        """Respond to a POST request carrying a NetApp API XML body."""
        if '/servlets/netapp.servlets.admin.XMLrequest_filer' != s.path:
            s.send_response(404)
            # BUG FIX: was `s.end_headers` without parentheses.
            s.end_headers()
            return
        request_xml = s.rfile.read(int(s.headers['Content-Length']))
        root = etree.fromstring(request_xml)
        children = [x for x in root.iterchildren()]
        request = children[0]
        tag = request.tag
        # Strip the XML namespace to get the bare API name.
        api_name = etree.QName(tag).localname or tag
        if 'volume-get-iter' == api_name:
            body = """<results status="passed"><attributes-list>
                <volume-attributes>
                    <volume-id-attributes>
                        <name>iscsi</name>
                        <owning-vserver-name>Openstack</owning-vserver-name>
                        <containing-aggregate-name>aggr0
                        </containing-aggregate-name>
                        <junction-path>/iscsi</junction-path>
                        <type>rw</type>
                    </volume-id-attributes>
                    <volume-space-attributes>
                        <size-available>214748364</size-available>
                        <size-total>224748364</size-total>
                        <space-guarantee-enabled>enabled
                        </space-guarantee-enabled>
                        <space-guarantee>file</space-guarantee>
                    </volume-space-attributes>
                    <volume-state-attributes>
                        <is-cluster-volume>true
                        </is-cluster-volume>
                        <is-vserver-root>false</is-vserver-root>
                        <state>online</state>
                        <is-inconsistent>false</is-inconsistent>
                        <is-invalid>false</is-invalid>
                        <is-junction-active>true</is-junction-active>
                    </volume-state-attributes>
                </volume-attributes>
                <volume-attributes>
                    <volume-id-attributes>
                        <name>nfsvol</name>
                        <owning-vserver-name>Openstack
                        </owning-vserver-name>
                        <containing-aggregate-name>aggr0
                        </containing-aggregate-name>
                        <junction-path>/nfs</junction-path>
                        <type>rw</type>
                    </volume-id-attributes>
                    <volume-space-attributes>
                        <size-available>14748364</size-available>
                        <size-total>24748364</size-total>
                        <space-guarantee-enabled>enabled
                        </space-guarantee-enabled>
                        <space-guarantee>volume</space-guarantee>
                    </volume-space-attributes>
                    <volume-state-attributes>
                        <is-cluster-volume>true
                        </is-cluster-volume>
                        <is-vserver-root>false</is-vserver-root>
                        <state>online</state>
                        <is-inconsistent>false</is-inconsistent>
                        <is-invalid>false</is-invalid>
                        <is-junction-active>true</is-junction-active>
                    </volume-state-attributes>
                </volume-attributes>
                <volume-attributes>
                    <volume-id-attributes>
                        <name>nfsvol2</name>
                        <owning-vserver-name>Openstack
                        </owning-vserver-name>
                        <containing-aggregate-name>aggr0
                        </containing-aggregate-name>
                        <junction-path>/nfs2</junction-path>
                        <type>rw</type>
                    </volume-id-attributes>
                    <volume-space-attributes>
                        <size-available>14748364</size-available>
                        <size-total>24748364</size-total>
                        <space-guarantee-enabled>enabled
                        </space-guarantee-enabled>
                        <space-guarantee>volume</space-guarantee>
                    </volume-space-attributes>
                    <volume-state-attributes>
                        <is-cluster-volume>true
                        </is-cluster-volume>
                        <is-vserver-root>false</is-vserver-root>
                        <state>online</state>
                        <is-inconsistent>true</is-inconsistent>
                        <is-invalid>true</is-invalid>
                        <is-junction-active>true</is-junction-active>
                    </volume-state-attributes>
                </volume-attributes>
                <volume-attributes>
                    <volume-id-attributes>
                        <name>nfsvol3</name>
                        <owning-vserver-name>Openstack
                        </owning-vserver-name>
                        <containing-aggregate-name>aggr0
                        </containing-aggregate-name>
                        <junction-path>/nfs3</junction-path>
                        <type>rw</type>
                    </volume-id-attributes>
                    <volume-space-attributes>
                        <space-guarantee-enabled>enabled
                        </space-guarantee-enabled>
                        <space-guarantee>volume
                        </space-guarantee>
                    </volume-space-attributes>
                    <volume-state-attributes>
                        <is-cluster-volume>true
                        </is-cluster-volume>
                        <is-vserver-root>false</is-vserver-root>
                        <state>online</state>
                        <is-inconsistent>false</is-inconsistent>
                        <is-invalid>false</is-invalid>
                        <is-junction-active>true</is-junction-active>
                    </volume-state-attributes>
                </volume-attributes>
                </attributes-list>
                <num-records>4</num-records></results>"""
        elif 'aggr-options-list-info' == api_name:
            body = """<results status="passed">
                <options>
                    <aggr-option-info>
                        <name>ha_policy</name>
                        <value>cfo</value>
                    </aggr-option-info>
                    <aggr-option-info>
                        <name>raidtype</name>
                        <value>raid_dp</value>
                    </aggr-option-info>
                </options>
                </results>"""
        elif 'sis-get-iter' == api_name:
            body = """<results status="passed">
                <attributes-list>
                    <sis-status-info>
                        <path>/vol/iscsi</path>
                        <is-compression-enabled>
                        true
                        </is-compression-enabled>
                        <state>enabled</state>
                    </sis-status-info>
                </attributes-list>
                </results>"""
        elif 'storage-disk-get-iter' == api_name:
            body = """<results status="passed">
                <attributes-list>
                    <storage-disk-info>
                        <disk-raid-info>
                            <effective-disk-type>SATA</effective-disk-type>
                        </disk-raid-info>
                    </storage-disk-info>
                </attributes-list>
                </results>"""
        else:
            # Unknown API
            s.send_response(500)
            # BUG FIX: was `s.end_headers` without parentheses.
            s.end_headers()
            return
        s.send_response(200)
        s.send_header("Content-Type", "text/xml; charset=utf-8")
        s.end_headers()
        s.wfile.write(RESPONSE_PREFIX_DIRECT_CMODE)
        s.wfile.write(RESPONSE_PREFIX_DIRECT)
        s.wfile.write(body)
        s.wfile.write(RESPONSE_SUFFIX_DIRECT)
class FakeDirectCmodeHTTPConnection(object):
    """A fake httplib.HTTPConnection for netapp tests.

    Requests made via this connection actually get translated and routed into
    the fake direct handler above, we then turn the response into
    the httplib.HTTPResponse that the caller expects.
    """

    def __init__(self, host, timeout=None):
        # timeout is accepted for signature compatibility but unused.
        self.host = host

    def request(self, method, path, data=None, headers=None):
        """Serialize the request, run it through the fake handler, and stage
        the captured output as an httplib.HTTPResponse.
        """
        if not headers:
            headers = {}
        req_str = '%s %s HTTP/1.1\r\n' % (method, path)
        for key, value in headers.iteritems():
            req_str += "%s: %s\r\n" % (key, value)
        if data:
            req_str += '\r\n%s' % data

        # NOTE(vish): normally the http transport normalizes from unicode
        sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8"))
        # NOTE(vish): stop the server from trying to look up address from
        # the fake socket
        FakeDirectCMODEServerHandler.address_string = lambda x: '127.0.0.1'
        self.app = FakeDirectCMODEServerHandler(sock, '127.0.0.1:80', None)

        # sock.result now holds the raw response written by the handler.
        self.sock = FakeHttplibSocket(sock.result)
        self.http_response = httplib.HTTPResponse(self.sock)

    def set_debuglevel(self, level):
        # Debug output is irrelevant for the fake transport.
        pass

    def getresponse(self):
        self.http_response.begin()
        return self.http_response

    def getresponsebody(self):
        # Raw (unparsed) response text captured from the fake socket.
        return self.sock.result
def createNetAppVolume(**kwargs):
    """Build an ssc_utils.NetAppVolume populated from keyword arguments.

    ``name`` and ``vs`` are required; every other field defaults to None
    when its keyword is omitted (dict.get semantics).
    """
    vol = ssc_utils.NetAppVolume(kwargs['name'], kwargs['vs'])
    # (volume sub-dict attribute, key within it, source keyword argument)
    field_map = (
        ('state', 'vserver_root', 'vs_root'),
        ('state', 'status', 'status'),
        ('state', 'junction_active', 'junc_active'),
        ('space', 'size_avl_bytes', 'avl_byt'),
        ('space', 'size_total_bytes', 'total_byt'),
        ('space', 'space-guarantee-enabled', 'sg_enabled'),
        ('space', 'space-guarantee', 'sg'),
        ('space', 'thin_provisioned', 'thin'),
        ('mirror', 'mirrored', 'mirrored'),
        ('qos', 'qos_policy_group', 'qos'),
        ('aggr', 'name', 'aggr_name'),
        ('aggr', 'junction', 'junction'),
        ('sis', 'dedup', 'dedup'),
        ('sis', 'compression', 'compression'),
        ('aggr', 'raid_type', 'raid'),
        ('aggr', 'ha_policy', 'ha'),
        ('aggr', 'disk_type', 'disk'),
    )
    for attr, key, kw in field_map:
        getattr(vol, attr)[key] = kwargs.get(kw)
    return vol
class SscUtilsTestCase(test.TestCase):
    """Test ssc utils."""

    # Five canned volumes spanning the feature matrix: mirroring, dedup,
    # compression, thin provisioning, raid type, ha policy and disk type.
    vol1 = createNetAppVolume(name='vola', vs='openstack',
                              vs_root=False, status='online',
                              junc_active=True,
                              avl_byt='1000', total_byt='1500',
                              sg_enabled=False,
                              sg='file', thin=False, mirrored=False,
                              qos=None, aggr_name='aggr1', junction='/vola',
                              dedup=False, compression=False,
                              raid='raiddp', ha='cfo', disk='SSD')

    vol2 = createNetAppVolume(name='volb', vs='openstack',
                              vs_root=False, status='online',
                              junc_active=True,
                              avl_byt='2000', total_byt='2500',
                              sg_enabled=True,
                              sg='file', thin=True, mirrored=False,
                              qos=None, aggr_name='aggr2', junction='/volb',
                              dedup=True, compression=False,
                              raid='raid4', ha='cfo', disk='SSD')

    vol3 = createNetAppVolume(name='volc', vs='openstack',
                              vs_root=False, status='online',
                              junc_active=True,
                              avl_byt='3000', total_byt='3500',
                              sg_enabled=True,
                              sg='volume', thin=True, mirrored=False,
                              qos=None, aggr_name='aggr1', junction='/volc',
                              dedup=True, compression=True,
                              raid='raiddp', ha='cfo', disk='SAS')

    vol4 = createNetAppVolume(name='vold', vs='openstack',
                              vs_root=False, status='online',
                              junc_active=True,
                              avl_byt='4000', total_byt='4500',
                              sg_enabled=False,
                              sg='none', thin=False, mirrored=False,
                              qos=None, aggr_name='aggr1', junction='/vold',
                              dedup=False, compression=False,
                              raid='raiddp', ha='cfo', disk='SSD')

    vol5 = createNetAppVolume(name='vole', vs='openstack',
                              vs_root=False, status='online',
                              junc_active=True,
                              avl_byt='5000', total_byt='5500',
                              sg_enabled=True,
                              sg='none', thin=False, mirrored=True,
                              qos=None, aggr_name='aggr2', junction='/vole',
                              dedup=True, compression=False,
                              raid='raid4', ha='cfo', disk='SAS')

    def setUp(self):
        super(SscUtilsTestCase, self).setUp()
        # Route all HTTP traffic through the fake cmode server above.
        self.stubs.Set(httplib, 'HTTPConnection',
                       FakeDirectCmodeHTTPConnection)

    def test_cl_vols_ssc_all(self):
        """Test cluster ssc for all vols."""
        mocker = self.mox  # local alias; avoids shadowing the mox module
        na_server = api.NaServer('127.0.0.1')
        vserver = 'openstack'
        test_vols = set([copy.deepcopy(self.vol1),
                         copy.deepcopy(self.vol2), copy.deepcopy(self.vol3)])
        # volc is deliberately absent from the sis dict, so its dedup and
        # compression flags are expected to come back False.
        sis = {'vola': {'dedup': False, 'compression': False},
               'volb': {'dedup': True, 'compression': False}}

        mocker.StubOutWithMock(ssc_utils, 'query_cluster_vols_for_ssc')
        mocker.StubOutWithMock(ssc_utils, 'get_sis_vol_dict')
        mocker.StubOutWithMock(ssc_utils, 'query_aggr_options')
        mocker.StubOutWithMock(ssc_utils, 'query_aggr_storage_disk')
        ssc_utils.query_cluster_vols_for_ssc(
            na_server, vserver, None).AndReturn(test_vols)
        ssc_utils.get_sis_vol_dict(na_server, vserver, None).AndReturn(sis)
        raiddp = {'ha_policy': 'cfo', 'raid_type': 'raiddp'}
        ssc_utils.query_aggr_options(
            na_server, IgnoreArg()).AndReturn(raiddp)
        ssc_utils.query_aggr_storage_disk(
            na_server, IgnoreArg()).AndReturn('SSD')
        raid4 = {'ha_policy': 'cfo', 'raid_type': 'raid4'}
        ssc_utils.query_aggr_options(
            na_server, IgnoreArg()).AndReturn(raid4)
        ssc_utils.query_aggr_storage_disk(
            na_server, IgnoreArg()).AndReturn('SAS')
        mocker.ReplayAll()

        res_vols = ssc_utils.get_cluster_vols_with_ssc(
            na_server, vserver, volume=None)

        mocker.VerifyAll()
        for vol in res_vols:
            if vol.id['name'] == 'volc':
                self.assertEqual(vol.sis['compression'], False)
                self.assertEqual(vol.sis['dedup'], False)

    def test_cl_vols_ssc_single(self):
        """Test cluster ssc for single vol."""
        mocker = self.mox  # local alias; avoids shadowing the mox module
        na_server = api.NaServer('127.0.0.1')
        vserver = 'openstack'
        test_vols = set([copy.deepcopy(self.vol1)])
        sis = {'vola': {'dedup': False, 'compression': False}}

        mocker.StubOutWithMock(ssc_utils, 'query_cluster_vols_for_ssc')
        mocker.StubOutWithMock(ssc_utils, 'get_sis_vol_dict')
        mocker.StubOutWithMock(ssc_utils, 'query_aggr_options')
        mocker.StubOutWithMock(ssc_utils, 'query_aggr_storage_disk')
        ssc_utils.query_cluster_vols_for_ssc(
            na_server, vserver, 'vola').AndReturn(test_vols)
        ssc_utils.get_sis_vol_dict(
            na_server, vserver, 'vola').AndReturn(sis)
        raiddp = {'ha_policy': 'cfo', 'raid_type': 'raiddp'}
        ssc_utils.query_aggr_options(
            na_server, 'aggr1').AndReturn(raiddp)
        ssc_utils.query_aggr_storage_disk(na_server, 'aggr1').AndReturn('SSD')
        mocker.ReplayAll()

        res_vols = ssc_utils.get_cluster_vols_with_ssc(
            na_server, vserver, volume='vola')

        mocker.VerifyAll()
        self.assertEqual(len(res_vols), 1)

    def test_get_cluster_ssc(self):
        """Test get cluster ssc map."""
        mocker = self.mox  # local alias; avoids shadowing the mox module
        na_server = api.NaServer('127.0.0.1')
        vserver = 'openstack'
        test_vols = set(
            [self.vol1, self.vol2, self.vol3, self.vol4, self.vol5])

        mocker.StubOutWithMock(ssc_utils, 'get_cluster_vols_with_ssc')
        ssc_utils.get_cluster_vols_with_ssc(
            na_server, vserver).AndReturn(test_vols)
        mocker.ReplayAll()

        res_map = ssc_utils.get_cluster_ssc(na_server, vserver)

        mocker.VerifyAll()
        # Expected counts follow from the flags given to vol1..vol5 above.
        self.assertEqual(len(res_map['mirrored']), 1)
        self.assertEqual(len(res_map['dedup']), 3)
        self.assertEqual(len(res_map['compression']), 1)
        self.assertEqual(len(res_map['thin']), 2)
        self.assertEqual(len(res_map['all']), 5)

    def test_vols_for_boolean_specs(self):
        """Test ssc for boolean specs."""
        test_vols = set(
            [self.vol1, self.vol2, self.vol3, self.vol4, self.vol5])
        ssc_map = {'mirrored': set([self.vol1]),
                   'dedup': set([self.vol1, self.vol2, self.vol3]),
                   'compression': set([self.vol3, self.vol4]),
                   'thin': set([self.vol5, self.vol2]), 'all': test_vols}
        test_map = {'mirrored': ('netapp_mirrored', 'netapp_unmirrored'),
                    'dedup': ('netapp_dedup', 'netapp_nodedup'),
                    'compression': ('netapp_compression',
                                    'netapp_nocompression'),
                    'thin': ('netapp_thin_provisioned',
                             'netapp_thick_provisioned')}
        # Loop variable renamed from `type` to avoid shadowing the builtin.
        for feature in test_map.keys():
            # positive spec selects exactly the feature's volumes
            extra_specs = {test_map[feature][0]: 'true'}
            res = ssc_utils.get_volumes_for_specs(ssc_map, extra_specs)
            self.assertEqual(len(res), len(ssc_map[feature]))
            # negative spec selects the complement
            extra_specs = {test_map[feature][1]: 'true'}
            res = ssc_utils.get_volumes_for_specs(ssc_map, extra_specs)
            self.assertEqual(len(res), len(ssc_map['all'] - ssc_map[feature]))
            # both specs together cancel out and select everything
            extra_specs =\
                {test_map[feature][0]: 'true', test_map[feature][1]: 'true'}
            res = ssc_utils.get_volumes_for_specs(ssc_map, extra_specs)
            self.assertEqual(len(res), len(ssc_map['all']))

    def test_vols_for_optional_specs(self):
        """Test ssc for optional specs."""
        test_vols =\
            set([self.vol1, self.vol2, self.vol3, self.vol4, self.vol5])
        ssc_map = {'mirrored': set([self.vol1]),
                   'dedup': set([self.vol1, self.vol2, self.vol3]),
                   'compression': set([self.vol3, self.vol4]),
                   'thin': set([self.vol5, self.vol2]), 'all': test_vols}
        # Only vol2 is deduped with raid4 on SSD.
        extra_specs =\
            {'netapp_dedup': 'true',
             'netapp:raid_type': 'raid4', 'netapp:disk_type': 'SSD'}
        res = ssc_utils.get_volumes_for_specs(ssc_map, extra_specs)
        self.assertEqual(len(res), 1)

    def test_query_cl_vols_for_ssc(self):
        na_server = api.NaServer('127.0.0.1')
        na_server.set_api_version(1, 15)
        vols = ssc_utils.query_cluster_vols_for_ssc(na_server, 'Openstack')
        self.assertEqual(len(vols), 2)
        for vol in vols:
            # BUG FIX: the original check was
            #     if vol.id['name'] != 'iscsi' or vol.id['name'] != 'nfsvol':
            # which is a tautology (always true), so the error branch below
            # could never fire. Fail whenever a volume other than the two
            # expected ones is returned.
            if vol.id['name'] not in ('iscsi', 'nfsvol'):
                raise exception.InvalidVolume('Invalid volume returned.')

    def test_query_aggr_options(self):
        na_server = api.NaServer('127.0.0.1')
        aggr_attribs = ssc_utils.query_aggr_options(na_server, 'aggr0')
        if aggr_attribs:
            self.assertEqual(aggr_attribs['ha_policy'], 'cfo')
            self.assertEqual(aggr_attribs['raid_type'], 'raid_dp')
        else:
            raise exception.InvalidParameterValue("Incorrect aggr options")

    def test_query_aggr_storage_disk(self):
        na_server = api.NaServer('127.0.0.1')
        eff_disk_type = ssc_utils.query_aggr_storage_disk(na_server, 'aggr0')
        self.assertEqual(eff_disk_type, 'SATA')

View File

@ -412,6 +412,12 @@ class NfsDriverTestCase(test.TestCase):
drv._get_capacity_info(self.TEST_NFS_EXPORT1).\
AndReturn((5 * units.GiB, 2 * units.GiB,
2 * units.GiB))
drv._get_capacity_info(self.TEST_NFS_EXPORT1).\
AndReturn((5 * units.GiB, 2 * units.GiB,
2 * units.GiB))
drv._get_capacity_info(self.TEST_NFS_EXPORT2).\
AndReturn((10 * units.GiB, 3 * units.GiB,
1 * units.GiB))
drv._get_capacity_info(self.TEST_NFS_EXPORT2).\
AndReturn((10 * units.GiB, 3 * units.GiB,
1 * units.GiB))

View File

@ -291,6 +291,9 @@ class NaServer(object):
def _create_certificate_auth_handler(self):
raise NotImplementedError()
    def __str__(self):
        # Human-readable identification of the filer this client talks to.
        return "server: %s" % (self._host)
class NaElement(object):
"""Class wraps basic building block for NetApp api request."""
@ -307,7 +310,7 @@ class NaElement(object):
return self._element.tag
def set_content(self, text):
"""Set the text for the element."""
"""Set the text string for the element."""
self._element.text = text
def get_content(self):
@ -399,6 +402,73 @@ class NaElement(object):
return etree.tostring(self._element, method=method, encoding=encoding,
pretty_print=pretty)
def __getitem__(self, key):
"""Dict getter method for NaElement.
Returns NaElement list if present,
text value in case no NaElement node
children or attribute value if present.
"""
child = self.get_child_by_name(key)
if child:
if child.get_children():
return child
else:
return child.get_content()
elif self.has_attr(key):
return self.get_attr(key)
raise KeyError(_('No element by given name %s.') % (key))
def __setitem__(self, key, value):
"""Dict setter method for NaElement."""
if key:
if value:
if isinstance(value, NaElement):
child = NaElement(key)
child.add_child_elem(value)
self.add_child_elem(child)
elif isinstance(value, str):
child = self.get_child_by_name(key)
if child:
child.set_content(value)
else:
self.add_new_child(key, value)
elif isinstance(value, dict):
child = NaElement(key)
child.translate_struct(value)
self.add_child_elem(child)
else:
raise TypeError(_('Not a valid value for NaElement.'))
else:
self.add_child_elem(NaElement(key))
else:
raise KeyError(_('NaElement name cannot be null.'))
def translate_struct(self, data_struct):
"""Convert list, tuple, dict to NaElement and appends.
Useful for NaElement queries which have unique
query parameters.
"""
if isinstance(data_struct, list) or isinstance(data_struct, tuple):
for el in data_struct:
self.add_child_elem(NaElement(el))
elif isinstance(data_struct, dict):
for k in data_struct.keys():
child = NaElement(k)
if (isinstance(data_struct[k], dict) or
isinstance(data_struct[k], list) or
isinstance(data_struct[k], tuple)):
child.translate_struct(data_struct[k])
else:
if data_struct[k]:
child.set_content(str(data_struct[k]))
self.add_child_elem(child)
else:
raise ValueError(_('Type cannot be converted into NaElement.'))
class NaApiError(Exception):
"""Base exception class for NetApp api errors."""

View File

@ -112,27 +112,25 @@ class NetAppDriverFactory(object):
'%(storage_protocol)s') % fmt)
storage_family = storage_family.lower()
family_meta = netapp_unified_plugin_registry.get(storage_family)
if not family_meta:
if family_meta is None:
raise exception.InvalidInput(
reason=_('Storage family %s is not supported')
% storage_family)
if not storage_protocol:
if storage_protocol is None:
storage_protocol = netapp_family_default.get(storage_family)
if not storage_protocol:
msg_fmt = {'storage_family': storage_family}
fmt['storage_protocol'] = storage_protocol
if storage_protocol is None:
raise exception.InvalidInput(
reason=_('No default storage protocol found'
' for storage family %(storage_family)s')
% msg_fmt)
% fmt)
storage_protocol = storage_protocol.lower()
driver_loc = family_meta.get(storage_protocol)
if not driver_loc:
msg_fmt = {'storage_protocol': storage_protocol,
'storage_family': storage_family}
if driver_loc is None:
raise exception.InvalidInput(
reason=_('Protocol %(storage_protocol)s is not supported'
' for storage family %(storage_family)s')
% msg_fmt)
% fmt)
NetAppDriverFactory.check_netapp_driver(driver_loc)
kwargs = kwargs or {}
kwargs['netapp_mode'] = 'proxy'

View File

@ -22,12 +22,15 @@ This driver requires NetApp Clustered Data ONTAP or 7-mode
storage systems with installed iSCSI licenses.
"""
import copy
import sys
import time
import uuid
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import units
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.netapp.api import NaApiError
from cinder.volume.drivers.netapp.api import NaElement
@ -38,6 +41,8 @@ from cinder.volume.drivers.netapp.options import netapp_cluster_opts
from cinder.volume.drivers.netapp.options import netapp_connection_opts
from cinder.volume.drivers.netapp.options import netapp_provisioning_opts
from cinder.volume.drivers.netapp.options import netapp_transport_opts
from cinder.volume.drivers.netapp import ssc_utils
from cinder.volume.drivers.netapp.utils import get_volume_extra_specs
from cinder.volume.drivers.netapp.utils import provide_ems
from cinder.volume.drivers.netapp.utils import validate_instantiation
from cinder.volume import volume_types
@ -104,6 +109,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
This method creates NetApp server client for api communication.
"""
host_filer = kwargs['hostname']
LOG.debug(_('Using NetApp filer: %s') % host_filer)
self.client = NaServer(host=host_filer,
@ -132,6 +138,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
Validate the flags we care about and setup NetApp
client.
"""
self._check_flags()
self._create_client(
transport_type=self.configuration.netapp_transport_type,
@ -146,6 +153,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
Discovers the LUNs on the NetApp server.
"""
self.lun_table = {}
self._get_lun_list()
LOG.debug(_("Success getting LUN list from server"))
@ -162,7 +170,8 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
metadata = {}
metadata['OsType'] = 'linux'
metadata['SpaceReserved'] = 'true'
self._create_lun_on_eligible_vol(name, size, metadata)
extra_specs = get_volume_extra_specs(volume)
self._create_lun_on_eligible_vol(name, size, metadata, extra_specs)
LOG.debug(_("Created LUN with name %s") % name)
handle = self._create_lun_handle(metadata)
self._add_lun_to_table(NetAppLun(handle, name, size, metadata))
@ -178,8 +187,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
return
lun_destroy = NaElement.create_node_with_children(
'lun-destroy',
**{'path': metadata['Path'],
'force': 'true'})
**{'path': metadata['Path'], 'force': 'true'})
self.client.invoke_successfully(lun_destroy, True)
LOG.debug(_("Destroyed LUN %s") % name)
self.lun_table.pop(name)
@ -200,6 +208,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
Since exporting is idempotent in this driver, we have nothing
to do for unexporting.
"""
pass
def initialize_connection(self, volume, connector):
@ -213,6 +222,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
be during this method call so we construct the properties dictionary
ourselves.
"""
initiator_name = connector['initiator']
name = volume['name']
lun_id = self._map_lun(name, initiator_name, 'iscsi', None)
@ -270,6 +280,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
This driver implements snapshots by using efficient single-file
(LUN) cloning.
"""
vol_name = snapshot['volume_name']
snapshot_name = snapshot['name']
lun = self.lun_table[vol_name]
@ -286,6 +297,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
Many would call this "cloning" and in fact we use cloning to implement
this feature.
"""
vol_size = volume['size']
snap_size = snapshot['volume_size']
if vol_size != snap_size:
@ -303,6 +315,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
Unmask the LUN on the storage system so the given intiator can no
longer access it.
"""
initiator_name = connector['initiator']
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata')
@ -321,28 +334,9 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
minor = res.get_child_content('minor-version')
return (major, minor)
def _create_lun_on_eligible_vol(self, name, size, metadata):
def _create_lun_on_eligible_vol(self, name, size, metadata,
extra_specs=None):
"""Creates an actual lun on filer."""
req_size = float(size) *\
float(self.configuration.netapp_size_multiplier)
volume = self._get_avl_volume_by_size(req_size)
if not volume:
msg = _('Failed to get vol with required size for volume: %s')
raise exception.VolumeBackendAPIException(data=msg % name)
path = '/vol/%s/%s' % (volume['name'], name)
lun_create = NaElement.create_node_with_children(
'lun-create-by-size',
**{'path': path, 'size': size,
'ostype': metadata['OsType'],
'space-reservation-enabled':
metadata['SpaceReserved']})
self.client.invoke_successfully(lun_create, True)
metadata['Path'] = '/vol/%s/%s' % (volume['name'], name)
metadata['Volume'] = volume['name']
metadata['Qtree'] = None
def _get_avl_volume_by_size(self, size):
"""Get the available volume by size."""
raise NotImplementedError()
def _get_iscsi_service_details(self):
@ -366,6 +360,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
Populates in the lun table.
"""
for lun in api_luns:
meta_dict = self._create_lun_meta(lun)
path = lun.get_child_content('path')
@ -394,7 +389,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
initiator_type, os)
lun_map = NaElement.create_node_with_children(
'lun-map', **{'path': path,
'initiator-group': igroup_name})
'initiator-group': igroup_name})
if lun_id:
lun_map.add_new_child('lun-id', lun_id)
try:
@ -418,8 +413,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
(igroup_name, lun_id) = self._find_mapped_lun_igroup(path, initiator)
lun_unmap = NaElement.create_node_with_children(
'lun-unmap',
**{'path': path,
'initiator-group': igroup_name})
**{'path': path, 'initiator-group': igroup_name})
try:
self.client.invoke_successfully(lun_unmap, True)
except NaApiError as e:
@ -444,6 +438,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
Creates igroup if not found.
"""
igroups = self._get_igroup_by_initiator(initiator=initiator)
igroup_name = None
for igroup in igroups:
@ -477,8 +472,8 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
igroup_create = NaElement.create_node_with_children(
'igroup-create',
**{'initiator-group-name': igroup,
'initiator-group-type': igroup_type,
'os-type': os_type})
'initiator-group-type': igroup_type,
'os-type': os_type})
self.client.invoke_successfully(igroup_create, True)
def _add_igroup_initiator(self, igroup, initiator):
@ -486,7 +481,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
igroup_add = NaElement.create_node_with_children(
'igroup-add',
**{'initiator-group-name': igroup,
'initiator': initiator})
'initiator': initiator})
self.client.invoke_successfully(igroup_add, True)
def _get_qos_type(self, volume):
@ -543,6 +538,7 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
If 'refresh' is True, run update the stats first.
"""
if refresh:
self._update_volume_stats()
@ -556,6 +552,8 @@ class NetAppDirectISCSIDriver(driver.ISCSIDriver):
class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
"""NetApp C-mode iSCSI volume driver."""
DEFAULT_VS = 'openstack'
def __init__(self, *args, **kwargs):
super(NetAppDirectCmodeISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(netapp_cluster_opts)
@ -563,6 +561,7 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
def _do_custom_setup(self):
"""Does custom setup for ontap cluster."""
self.vserver = self.configuration.netapp_vserver
self.vserver = self.vserver if self.vserver else self.DEFAULT_VS
# We set vserver in client permanently.
# To use tunneling enable_tunneling while invoking api
self.client.set_vserver(self.vserver)
@ -570,62 +569,49 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
self.client.set_api_version(1, 15)
(major, minor) = self._get_ontapi_version()
self.client.set_api_version(major, minor)
self.ssc_vols = None
self.stale_vols = set()
ssc_utils.refresh_cluster_ssc(self, self.client, self.vserver)
def _get_avl_volume_by_size(self, size):
"""Get the available volume by size."""
tag = None
while True:
vol_request = self._create_avl_vol_request(self.vserver, tag)
res = self.client.invoke_successfully(vol_request)
tag = res.get_child_content('next-tag')
attr_list = res.get_child_by_name('attributes-list')
vols = attr_list.get_children()
for vol in vols:
vol_space = vol.get_child_by_name('volume-space-attributes')
avl_size = vol_space.get_child_content('size-available')
if float(avl_size) >= float(size):
avl_vol = dict()
vol_id = vol.get_child_by_name('volume-id-attributes')
avl_vol['name'] = vol_id.get_child_content('name')
avl_vol['vserver'] = vol_id.get_child_content(
'owning-vserver-name')
avl_vol['size-available'] = avl_size
return avl_vol
if tag is None:
break
return None
def _create_lun_on_eligible_vol(self, name, size, metadata,
                                extra_specs=None):
    """Creates an actual lun on filer.

    Tries each candidate volume (largest free space first) until the
    lun create succeeds.  Every volume touched is marked stale so the
    ssc job refreshes its stats.

    :param name: lun name.
    :param size: requested lun size; scaled by netapp_size_multiplier
                 only for the free-space check.
    :param metadata: dict with 'OsType'; updated in place with the
                     resulting 'Path', 'Volume' and 'Qtree'.
    :param extra_specs: volume-type extra specs used to shortlist
                        eligible ssc volumes.
    :raises VolumeBackendAPIException: if no volume matches, or if
        provisioning fails on every candidate volume.
    """
    req_size = float(size) *\
        float(self.configuration.netapp_size_multiplier)
    volumes = self._get_avl_volumes(req_size, extra_specs)
    if not volumes:
        msg = _('Failed to get vol with required'
                ' size and extra specs for volume: %s')
        raise exception.VolumeBackendAPIException(data=msg % name)
    for volume in volumes:
        try:
            path = '/vol/%s/%s' % (volume.id['name'], name)
            lun_create = NaElement.create_node_with_children(
                'lun-create-by-size',
                **{'path': path, 'size': size,
                   'ostype': metadata['OsType']})
            self.client.invoke_successfully(lun_create, True)
            metadata['Path'] = '/vol/%s/%s' % (volume.id['name'], name)
            metadata['Volume'] = volume.id['name']
            metadata['Qtree'] = None
            return
        except NaApiError:
            LOG.warn(_("Error provisioning vol %(name)s on %(volume)s")
                     % {'name': name, 'volume': volume.id['name']})
        finally:
            self._update_stale_vols(volume=volume)
    # Bug fix: previously fell through and returned None silently when
    # every candidate volume failed, leaving metadata without a Path
    # and making the caller believe the lun was created.
    msg = _('Failed to create lun %s on any eligible volume.')
    raise exception.VolumeBackendAPIException(data=msg % name)
def _create_avl_vol_request(self, vserver, tag=None):
vol_get_iter = NaElement('volume-get-iter')
vol_get_iter.add_new_child('max-records', '100')
if tag:
vol_get_iter.add_new_child('tag', tag, True)
query = NaElement('query')
vol_get_iter.add_child_elem(query)
vol_attrs = NaElement('volume-attributes')
query.add_child_elem(vol_attrs)
if vserver:
vol_attrs.add_node_with_children(
'volume-id-attributes',
**{"owning-vserver-name": vserver})
vol_attrs.add_node_with_children(
'volume-state-attributes',
**{"is-vserver-root": "false", "state": "online"})
desired_attrs = NaElement('desired-attributes')
vol_get_iter.add_child_elem(desired_attrs)
des_vol_attrs = NaElement('volume-attributes')
desired_attrs.add_child_elem(des_vol_attrs)
des_vol_attrs.add_node_with_children(
'volume-id-attributes',
**{"name": None, "owning-vserver-name": None})
des_vol_attrs.add_node_with_children(
'volume-space-attributes',
**{"size-available": None})
des_vol_attrs.add_node_with_children('volume-state-attributes',
**{"is-cluster-volume": None,
"is-vserver-root": None,
"state": None})
return vol_get_iter
def _get_avl_volumes(self, size, extra_specs=None):
    """Get the available volume by size, extra_specs."""
    candidates = ssc_utils.get_volumes_for_specs(
        self.ssc_vols, extra_specs)
    if not candidates:
        return []
    # Largest free space first; keep only volumes that can hold `size`.
    return [vol for vol in sorted(candidates, reverse=True)
            if int(vol.space['size_avl_bytes']) >= int(size)]
def _get_target_details(self):
"""Gets the target portal details."""
@ -667,6 +653,7 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
Gets the luns from cluster with vserver.
"""
tag = None
while True:
api = NaElement('lun-get-iter')
@ -783,8 +770,7 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
clone_create = NaElement.create_node_with_children(
'clone-create',
**{'volume': volume, 'source-path': name,
'destination-path': new_name,
'space-reserve': space_reserved})
'destination-path': new_name, 'space-reserve': space_reserved})
self.client.invoke_successfully(clone_create, True)
LOG.debug(_("Cloned LUN with new name %s") % new_name)
lun = self._get_lun_by_args(vserver=self.vserver, path='/vol/%s/%s'
@ -798,6 +784,8 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
new_name,
lun[0].get_child_content('size'),
clone_meta))
self._update_stale_vols(
volume=ssc_utils.NetAppVolume(volume, self.vserver))
def _get_lun_by_args(self, **args):
"""Retrives lun with specified args."""
@ -844,13 +832,62 @@ class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
data["driver_version"] = '1.0'
data["storage_protocol"] = 'iSCSI'
data['total_capacity_gb'] = 'infinite'
data['free_capacity_gb'] = 'infinite'
data['total_capacity_gb'] = 0
data['free_capacity_gb'] = 0
data['reserved_percentage'] = 0
data['QoS_support'] = False
self._update_cluster_vol_stats(data)
provide_ems(self, self.client, data, netapp_backend)
self._stats = data
def _update_cluster_vol_stats(self, data):
    """Updates vol stats with cluster config.

    Publishes storage-service capabilities (mirroring, dedup,
    compression, thin provisioning) derived from the ssc volume sets,
    plus the capacity of the single largest volume (a lun must fit
    entirely within one volume), then schedules an ssc refresh.

    :param data: stats dict, updated in place.
    """
    if self.ssc_vols:
        all_vols = self.ssc_vols['all']
        mirrored = self.ssc_vols['mirrored']
        dedup = self.ssc_vols['dedup']
        compression = self.ssc_vols['compression']
        thin = self.ssc_vols['thin']
        data['netapp_mirrored'] = 'true' if mirrored else 'false'
        data['netapp_unmirrored'] = ('true'
                                     if len(all_vols) > len(mirrored)
                                     else 'false')
        data['netapp_dedup'] = 'true' if dedup else 'false'
        data['netapp_nodedupe'] = ('true' if len(all_vols) > len(dedup)
                                   else 'false')
        # Bug fix: previously reported the boolean False instead of
        # the string 'false' when no compression-enabled volume
        # exists, unlike every other capability flag.
        data['netapp_compression'] = ('true' if compression
                                      else 'false')
        data['netapp_nocompression'] = ('true'
                                        if len(all_vols) >
                                        len(compression)
                                        else 'false')
        data['netapp_thin_provisioned'] = 'true' if thin else 'false'
        data['netapp_thick_provisioned'] = ('true'
                                            if len(all_vols) >
                                            len(thin) else 'false')
        vol_max = max(all_vols)
        data['total_capacity_gb'] = (
            int(vol_max.space['size_total_bytes']) / units.GiB)
        data['free_capacity_gb'] = (
            int(vol_max.space['size_avl_bytes']) / units.GiB)
    else:
        LOG.warn(_("Cluster ssc is not updated. No volume stats found."))
    ssc_utils.refresh_cluster_ssc(self, self.client, self.vserver)
@utils.synchronized('update_stale')
def _update_stale_vols(self, volume=None, reset=False):
    """Populates stale vols with vol and returns set copy if reset.

    :param volume: ssc volume to flag as stale; skipped when falsy.
    :param reset: when True, return a deep copy of the stale set and
                  clear the tracked set; otherwise returns None.
    """
    if volume:
        self.stale_vols.add(volume)
    if reset:
        # Deep copy under the lock so callers can process the entries
        # after the tracked set has been cleared.
        set_copy = copy.deepcopy(self.stale_vols)
        self.stale_vols.clear()
        return set_copy
@utils.synchronized("refresh_ssc_vols")
def refresh_ssc_vols(self, vols):
"""Refreshes ssc_vols with latest entries."""
self.ssc_vols = vols
class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
"""NetApp 7-mode iSCSI volume driver."""
@ -871,6 +908,25 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
self.client.set_api_version(major, minor)
self.client.set_vfiler(self.vfiler)
def _create_lun_on_eligible_vol(self, name, size, metadata,
                                extra_specs=None):
    """Creates an actual lun on filer.

    :param name: lun name.
    :param size: lun size; scaled by netapp_size_multiplier only for
                 the free-space check.
    :param metadata: dict with 'OsType' and 'SpaceReserved'; updated
                     in place with 'Path', 'Volume' and 'Qtree'.
    :param extra_specs: accepted for interface parity with the C-mode
                        driver; not used by this 7-mode implementation.
    :raises VolumeBackendAPIException: if no volume has enough space.
    """
    req_size = float(size) *\
        float(self.configuration.netapp_size_multiplier)
    volume = self._get_avl_volume_by_size(req_size)
    if not volume:
        msg = _('Failed to get vol with required size for volume: %s')
        raise exception.VolumeBackendAPIException(data=msg % name)
    path = '/vol/%s/%s' % (volume['name'], name)
    lun_create = NaElement.create_node_with_children(
        'lun-create-by-size',
        **{'path': path, 'size': size, 'ostype': metadata['OsType'],
           'space-reservation-enabled': metadata['SpaceReserved']})
    self.client.invoke_successfully(lun_create, True)
    metadata['Path'] = '/vol/%s/%s' % (volume['name'], name)
    metadata['Volume'] = volume['name']
    metadata['Qtree'] = None
def _get_avl_volume_by_size(self, size):
"""Get the available volume by size."""
vol_request = NaElement('volume-list-info')
@ -1028,7 +1084,7 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
clone_start = NaElement.create_node_with_children(
'clone-start',
**{'source-path': path, 'destination-path': clone_path,
'no-snap': 'true'})
'no-snap': 'true'})
result = self.client.invoke_successfully(clone_start, True)
clone_id_el = result.get_child_by_name('clone-id')
cl_id_info = clone_id_el.get_child_by_name('clone-id-info')
@ -1116,7 +1172,6 @@ class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
def _update_volume_stats(self):
"""Retrieve status info from volume group."""
LOG.debug(_("Updating volume stats"))
data = {}
netapp_backend = 'NetApp_iSCSI_7mode_direct'

View File

@ -27,12 +27,17 @@ from oslo.config import cfg
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import units
from cinder import utils
from cinder.volume.drivers.netapp.api import NaApiError
from cinder.volume.drivers.netapp.api import NaElement
from cinder.volume.drivers.netapp.api import NaServer
from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
from cinder.volume.drivers.netapp.options import netapp_cluster_opts
from cinder.volume.drivers.netapp.options import netapp_connection_opts
from cinder.volume.drivers.netapp.options import netapp_transport_opts
from cinder.volume.drivers.netapp import ssc_utils
from cinder.volume.drivers.netapp.utils import get_volume_extra_specs
from cinder.volume.drivers.netapp.utils import provide_ems
from cinder.volume.drivers.netapp.utils import validate_instantiation
from cinder.volume.drivers import nfs
@ -168,6 +173,7 @@ class NetAppNFSDriver(nfs.NfsDriver):
@param volume_name string,
example volume-91ee65ec-c473-4391-8c09-162b00c68a8c
"""
return os.path.join(self._get_mount_point_for_share(nfs_share),
volume_name)
@ -259,6 +265,7 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
def __init__(self, *args, **kwargs):
super(NetAppDirectCmodeNfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(netapp_cluster_opts)
def _do_custom_setup(self, client):
"""Do the customized set up on client for cluster mode."""
@ -266,6 +273,17 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
client.set_api_version(1, 15)
(major, minor) = self._get_ontapi_version()
client.set_api_version(major, minor)
self.vserver = self.configuration.netapp_vserver
self.ssc_vols = None
self.stale_vols = set()
if self.vserver:
self.ssc_enabled = True
LOG.warn(_("Shares on vserver %s will only"
" be used for provisioning.") % (self.vserver))
ssc_utils.refresh_cluster_ssc(self, self._client, self.vserver)
else:
self.ssc_enabled = False
LOG.warn(_("No vserver set in config. SSC will be disabled."))
def _invoke_successfully(self, na_element, vserver=None):
"""Invoke the api for successful result.
@ -274,6 +292,7 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
else Cluster api.
:param vserver: vserver name.
"""
self._is_naelement(na_element)
server = copy.copy(self._client)
if vserver:
@ -283,6 +302,59 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
result = server.invoke_successfully(na_element, True)
return result
def create_volume(self, volume):
    """Creates a volume.

    Tries each eligible container in order until creation succeeds.

    :param volume: volume reference
    :raises NfsNoSuitableShareFound: no container can hold the size.
    :raises VolumeBackendAPIException: creation failed on every
                                       eligible container.
    """
    self._ensure_shares_mounted()
    extra_specs = get_volume_extra_specs(volume)
    eligible = self._find_containers(volume['size'], extra_specs)
    if not eligible:
        raise exception.NfsNoSuitableShareFound(
            volume_size=volume['size'])
    for sh in eligible:
        try:
            # With ssc enabled, containers are NetAppVolume objects
            # carrying the share path; otherwise plain share strings.
            if self.ssc_enabled:
                volume['provider_location'] = sh.export['path']
            else:
                volume['provider_location'] = sh
            LOG.info(_('casted to %s') % volume['provider_location'])
            self._do_create_volume(volume)
            return {'provider_location': volume['provider_location']}
        except Exception:
            LOG.warn(_("Exception creating vol %(name)s"
                       " on share %(share)s")
                     % {'name': volume['name'],
                        'share': volume['provider_location']})
            volume['provider_location'] = None
        finally:
            # Mark the container stale whether or not creation
            # succeeded, so its free space is re-read by the ssc job.
            if self.ssc_enabled:
                self._update_stale_vols(volume=sh)
    msg = _("Volume %s could not be created on shares.")
    raise exception.VolumeBackendAPIException(data=msg % (volume['name']))
def _find_containers(self, size, extra_specs):
    """Finds suitable containers for given params.

    Returns candidates ordered largest free space first: NetAppVolume
    objects when ssc is enabled, otherwise share path strings.
    """
    containers = []
    if self.ssc_enabled:
        vols =\
            ssc_utils.get_volumes_for_specs(self.ssc_vols, extra_specs)
        # NetAppVolume compares by available space, so reverse sort
        # yields largest-first.
        sort_vols = sorted(vols, reverse=True)
        for vol in sort_vols:
            if self._is_share_eligible(vol.export['path'], size):
                containers.append(vol)
    else:
        for sh in self._mounted_shares:
            if self._is_share_eligible(sh, size):
                total_size, total_available, total_allocated = \
                    self._get_capacity_info(sh)
                containers.append((sh, total_available))
        # Keep only the share path, sorted by available capacity.
        containers = [a for a, b in
                      sorted(containers, key=lambda x: x[1], reverse=True)]
    return containers
def _clone_volume(self, volume_name, clone_name, volume_id):
"""Clones mounted volume on NetApp Cluster."""
host_ip = self._get_host_ip(volume_id)
@ -319,7 +391,7 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
vol_attrs.add_node_with_children(
'volume-id-attributes',
**{'junction-path': junction,
'owning-vserver-name': vserver})
'owning-vserver-name': vserver})
des_attrs = NaElement('desired-attributes')
des_attrs.add_node_with_children('volume-attributes',
**{'volume-id-attributes': None})
@ -346,7 +418,7 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
clone_create = NaElement.create_node_with_children(
'clone-create',
**{'volume': volume, 'source-path': src_path,
'destination-path': dest_path})
'destination-path': dest_path})
self._invoke_successfully(clone_create, vserver)
def _update_volume_stats(self):
@ -358,8 +430,73 @@ class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
netapp_backend)
self._stats["vendor_name"] = 'NetApp'
self._stats["driver_version"] = '1.0'
self._update_cluster_vol_stats(self._stats)
provide_ems(self, self._client, self._stats, netapp_backend)
def _update_cluster_vol_stats(self, data):
    """Updates vol stats with cluster config.

    Publishes storage-service capabilities (mirroring, dedup,
    compression, thin provisioning) from the ssc volume sets, plus the
    capacity of the single largest volume, then schedules an ssc
    refresh when ssc is enabled.

    :param data: stats dict, updated in place.
    """
    if self.ssc_vols:
        all_vols = self.ssc_vols['all']
        mirrored = self.ssc_vols['mirrored']
        dedup = self.ssc_vols['dedup']
        compression = self.ssc_vols['compression']
        thin = self.ssc_vols['thin']
        data['netapp_mirrored'] = 'true' if mirrored else 'false'
        data['netapp_unmirrored'] = ('true'
                                     if len(all_vols) > len(mirrored)
                                     else 'false')
        data['netapp_dedup'] = 'true' if dedup else 'false'
        data['netapp_nodedupe'] = ('true' if len(all_vols) > len(dedup)
                                   else 'false')
        # Bug fix: previously reported the boolean False instead of
        # the string 'false' when no compression-enabled volume
        # exists, unlike every other capability flag.
        data['netapp_compression'] = ('true' if compression
                                      else 'false')
        data['netapp_nocompression'] = ('true'
                                        if len(all_vols) >
                                        len(compression)
                                        else 'false')
        data['netapp_thin_provisioned'] = 'true' if thin else 'false'
        data['netapp_thick_provisioned'] = ('true'
                                            if len(all_vols) >
                                            len(thin) else 'false')
        vol_max = max(all_vols)
        data['total_capacity_gb'] = (
            int(vol_max.space['size_total_bytes']) / units.GiB)
        data['free_capacity_gb'] = (
            int(vol_max.space['size_avl_bytes']) / units.GiB)
    elif self.ssc_enabled:
        LOG.warn(_("No cluster ssc stats found."
                   " Wait for next volume stats update."))
    if self.ssc_enabled:
        ssc_utils.refresh_cluster_ssc(self, self._client, self.vserver)
    else:
        LOG.warn(_("No vserver set in config. SSC will be disabled."))
@utils.synchronized('update_stale')
def _update_stale_vols(self, volume=None, reset=False):
    """Populates stale vols with vol and returns set copy."""
    if volume:
        self.stale_vols.add(volume)
    # Snapshot first so the returned set is stable even after a reset.
    snapshot = set(self.stale_vols)
    if reset:
        self.stale_vols.clear()
    return snapshot
@utils.synchronized("refresh_ssc_vols")
def refresh_ssc_vols(self, vols):
"""Refreshes ssc_vols with latest entries."""
if not self._mounted_shares:
LOG.warn(_("No shares found hence skipping ssc refresh."))
return
mnt_share_vols = set()
for vol in vols['all']:
for sh in self._mounted_shares:
junction = sh.split(':')[1]
if junction == vol.id['junction_path']:
mnt_share_vols.add(vol)
vol.export['path'] = sh
break
for key in vols.keys():
vols[key] = vols[key] & mnt_share_vols
self.ssc_vols = vols
class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver):
"""Executes commands related to volumes on 7 mode."""
@ -379,6 +516,7 @@ class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver):
else filer api.
:param vfiler: vfiler name.
"""
self._is_naelement(na_element)
server = copy.copy(self._client)
if vfiler:
@ -419,14 +557,15 @@ class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver):
:returns: clone-id
"""
msg_fmt = {'src_path': src_path, 'dest_path': dest_path}
LOG.debug(_("""Cloning with src %(src_path)s, dest %(dest_path)s""")
% msg_fmt)
clone_start = NaElement.create_node_with_children(
'clone-start',
**{'source-path': src_path,
'destination-path': dest_path,
'no-snap': 'true'})
'destination-path': dest_path,
'no-snap': 'true'})
result = self._invoke_successfully(clone_start, None)
clone_id_el = result.get_child_by_name('clone-id')
cl_id_info = clone_id_el.get_child_by_name('clone-id-info')
@ -441,7 +580,7 @@ class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver):
clone_ls_st.add_child_elem(clone_id)
clone_id.add_node_with_children('clone-id-info',
**{'clone-op-id': clone_op_id,
'volume-uuid': vol_uuid})
'volume-uuid': vol_uuid})
task_running = True
while task_running:
result = self._invoke_successfully(clone_ls_st, None)
@ -468,6 +607,7 @@ class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver):
Invoke this in case of failed clone.
"""
clone_clear = NaElement.create_node_with_children(
'clone-clear',
**{'clone-id': clone_id})

View File

@ -68,7 +68,7 @@ netapp_provisioning_opts = [
netapp_cluster_opts = [
cfg.StrOpt('netapp_vserver',
default='openstack',
default=None,
help='Cluster vserver to use for provisioning'), ]
netapp_7mode_opts = [

View File

@ -0,0 +1,545 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 NetApp, Inc.
# Copyright (c) 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Storage service catalog utility functions and classes for NetApp systems.
"""
import copy
from threading import Timer
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.netapp import api
from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
class NetAppVolume(object):
    """Represents a NetApp volume.

    Present attributes
    id - name, vserver, junction_path, type
    aggr - name, raid_type, ha_policy, disk_type
    sis - dedup, compression
    state - status, vserver_root, cluster_volume,
    inconsistent, invalid, junction_active
    qos - qos_policy_group
    space - space-guarantee-enabled, space-guarantee,
    thin_provisioned, size_avl_bytes, size_total_bytes
    mirror - mirrored i.e. dp mirror
    export - path
    """
    def __init__(self, name, vserver=None):
        self.id = {}
        self.aggr = {}
        self.sis = {}
        self.state = {}
        self.qos = {}
        self.space = {}
        self.mirror = {}
        self.export = {}
        self.id['name'] = name
        self.id['vserver'] = vserver

    def __eq__(self, other):
        """Checks for equality.

        Identity is the (name, vserver) pair.
        """
        # Bug fix: previously returned None implicitly for unequal
        # volumes instead of an explicit boolean False.
        return (self.id['name'] == other.id['name'] and
                self.id['vserver'] == other.id['vserver'])

    def __hash__(self):
        """Computes hash for the object."""
        # Hash on name only so that equal volumes hash identically.
        return hash(self.id['name'])

    def __cmp__(self, other):
        """Implements comparison logic for volumes.

        Orders by available space; volumes with unknown free space
        sort lowest.  (Python 2 comparison hook; sorting relies on
        this, not on rich-comparison operators.)
        """
        self_size_avl = self.space.get('size_avl_bytes')
        other_size_avl = other.space.get('size_avl_bytes')
        if self_size_avl is None and other_size_avl is not None:
            return -1
        elif self_size_avl is not None and other_size_avl is None:
            return 1
        elif self_size_avl is None and other_size_avl is None:
            return 0
        elif int(self_size_avl) < int(other_size_avl):
            return -1
        elif int(self_size_avl) > int(other_size_avl):
            return 1
        else:
            return 0

    def __str__(self):
        """Returns human readable form for object."""
        vol_str = "NetApp Volume id: %s, aggr: %s,"\
            " space: %s, sis: %s, state: %s, qos: %s"\
            % (self.id, self.aggr, self.space, self.sis, self.state,
               self.qos)
        return vol_str
def get_cluster_vols_with_ssc(na_server, vserver, volume=None):
    """Gets ssc vols for cluster vserver.

    Queries the volumes (optionally a single named volume), then
    annotates each with aggregate attributes (raid/ha-policy/disk
    type, cached per aggregate), sis (dedup/compression) status and a
    derived thin_provisioned flag.
    """
    volumes = query_cluster_vols_for_ssc(na_server, vserver, volume)
    sis_vols = get_sis_vol_dict(na_server, vserver, volume)
    aggrs = {}
    for vol in volumes:
        aggr_name = vol.aggr['name']
        if aggr_name:
            # Aggregate attributes are queried once per aggregate and
            # cached for the remaining volumes.
            if aggr_name in aggrs:
                aggr_attrs = aggrs[aggr_name]
            else:
                aggr_attrs = query_aggr_options(na_server, aggr_name)
                eff_disk_type = query_aggr_storage_disk(na_server,
                                                        aggr_name)
                aggr_attrs['disk_type'] = eff_disk_type
                aggrs[aggr_name] = aggr_attrs
            vol.aggr['raid_type'] = aggr_attrs.get('raid_type')
            vol.aggr['ha_policy'] = aggr_attrs.get('ha_policy')
            vol.aggr['disk_type'] = aggr_attrs.get('disk_type')
        if vol.id['name'] in sis_vols:
            vol.sis['dedup'] = sis_vols[vol.id['name']]['dedup']
            vol.sis['compression'] =\
                sis_vols[vol.id['name']]['compression']
        else:
            vol.sis['dedup'] = False
            vol.sis['compression'] = False
        # A volume with a file/volume space guarantee in effect is
        # thick provisioned; anything else counts as thin.
        if (vol.space['space-guarantee-enabled'] and
                (vol.space['space-guarantee'] == 'file' or
                 vol.space['space-guarantee'] == 'volume')):
            vol.space['thin_provisioned'] = False
        else:
            vol.space['thin_provisioned'] = True
    return volumes
def query_cluster_vols_for_ssc(na_server, vserver, volume=None):
    """Queries cluster volumes for ssc.

    Issues volume-get-iter restricted to the vserver (and optionally
    one volume name) and returns the parsed set of NetAppVolume
    objects.
    """
    query = {'volume-attributes': None}
    volume_id = {'volume-id-attributes': {'owning-vserver-name': vserver}}
    if volume:
        volume_id['volume-id-attributes']['name'] = volume
    query['volume-attributes'] = volume_id
    des_attr = {'volume-attributes':
                ['volume-id-attributes',
                 'volume-mirror-attributes',
                 'volume-space-attributes',
                 'volume-state-attributes',
                 'volume-qos-attributes']}
    result = na_utils.invoke_api(na_server, api_name='volume-get-iter',
                                 api_family='cm', query=query,
                                 des_result=des_attr,
                                 additional_elems=None,
                                 is_iter=True)
    vols = set()
    for res in result:
        records = res.get_child_content('num-records')
        # Bug fix: num-records comes back as a string (or None); the
        # old comparison `records > 0` compared str with int and was
        # effectively always true in Python 2, touching
        # 'attributes-list' even on empty result pages.
        if records and int(records) > 0:
            attr_list = res['attributes-list']
            vol_attrs = attr_list.get_children()
            vols_found = create_vol_list(vol_attrs)
            vols.update(vols_found)
    return vols
def create_vol_list(vol_attrs):
    """Creates vol list with features from attr list.

    Builds NetAppVolume objects from volume-attributes elements,
    skipping temporary volumes, vserver root volumes, and volumes
    that are offline, invalid or inconsistent.  Entries missing a
    mandatory attribute are skipped via KeyError.
    """
    vols = set()
    for v in vol_attrs:
        try:
            # name and vserver are mandatory
            # Absence will skip by giving KeyError.
            name = v['volume-id-attributes']['name']
            vserver = v['volume-id-attributes']['owning-vserver-name']
            vol = NetAppVolume(name, vserver)
            vol.id['type'] =\
                v['volume-id-attributes'].get_child_content('type')
            if vol.id['type'] == "tmp":
                continue
            vol.id['junction_path'] =\
                v['volume-id-attributes'].get_child_content('junction-path')
            # state attributes mandatory.
            vol.state['vserver_root'] =\
                na_utils.to_bool(
                    v['volume-state-attributes'].get_child_content(
                        'is-vserver-root'))
            if vol.state['vserver_root']:
                continue
            vol.state['status'] =\
                v['volume-state-attributes'].get_child_content('state')
            vol.state['inconsistent'] =\
                na_utils.to_bool(
                    v['volume-state-attributes'].get_child_content(
                        'is-inconsistent'))
            vol.state['invalid'] =\
                na_utils.to_bool(
                    v['volume-state-attributes'].get_child_content(
                        'is-invalid'))
            vol.state['junction_active'] =\
                na_utils.to_bool(
                    v['volume-state-attributes'].get_child_content(
                        'is-junction-active'))
            vol.state['cluster_volume'] =\
                na_utils.to_bool(
                    v['volume-state-attributes'].get_child_content(
                        'is-cluster-volume'))
            if (vol.state['status'] != 'online' or
                    vol.state['inconsistent'] or vol.state['invalid']):
                # offline, invalid and inconsistent volumes are not usable
                continue
            # aggr attributes mandatory.
            vol.aggr['name'] =\
                v['volume-id-attributes']['containing-aggregate-name']
            # space attributes mandatory.
            vol.space['size_avl_bytes'] =\
                v['volume-space-attributes']['size-available']
            vol.space['size_total_bytes'] =\
                v['volume-space-attributes']['size-total']
            vol.space['space-guarantee-enabled'] =\
                na_utils.to_bool(
                    v['volume-space-attributes'].get_child_content(
                        'is-space-guarantee-enabled'))
            vol.space['space-guarantee'] =\
                v['volume-space-attributes'].get_child_content(
                    'space-guarantee')
            # mirror attributes optional.
            if v.get_child_by_name('volume-mirror-attributes'):
                vol.mirror['mirrored'] =\
                    na_utils.to_bool(
                        v['volume-mirror-attributes'].get_child_content(
                            'is-data-protection-mirror'))
            else:
                vol.mirror['mirrored'] = False
            # qos attributes optional.
            if v.get_child_by_name('volume-qos-attributes'):
                vol.qos['qos_policy_group'] =\
                    v['volume-qos-attributes'].get_child_content(
                        'policy-group-name')
            else:
                vol.qos['qos_policy_group'] = None
            vols.add(vol)
        except KeyError as e:
            LOG.debug(_('Unexpected error while creating'
                        ' ssc vol list. Message - %s') % (e.message))
            continue
    return vols
def query_aggr_options(na_server, aggr_name):
    """Queries cluster aggr for attributes.

    Currently queries for raid and ha-policy.

    :returns: dict possibly containing 'ha_policy' and 'raid_type';
              keys are absent if the options are not reported.
    """
    add_elems = {'aggregate': aggr_name}
    result = na_utils.invoke_api(na_server,
                                 api_name='aggr-options-list-info',
                                 api_family='cm', query=None,
                                 des_result=None,
                                 additional_elems=add_elems,
                                 is_iter=False)
    attrs = {}
    for res in result:
        options = res.get_child_by_name('options')
        if options:
            op_list = options.get_children()
            # Scan the name/value option pairs for the two we record.
            for op in op_list:
                if op.get_child_content('name') == 'ha_policy':
                    attrs['ha_policy'] = op.get_child_content('value')
                if op.get_child_content('name') == 'raidtype':
                    attrs['raid_type'] = op.get_child_content('value')
    return attrs
def get_sis_vol_dict(na_server, vserver, volume=None):
    """Queries sis for volumes.

    If volume is present sis is queried for it.
    Records dedup and compression enabled.

    :returns: dict mapping volume name to
              {'dedup': bool, 'compression': bool}.
    """
    sis_vols = {}
    query_attr = {'vserver': vserver}
    if volume:
        vol_path = '/vol/%s' % (volume)
        query_attr['path'] = vol_path
    query = {'sis-status-info': query_attr}
    result = na_utils.invoke_api(na_server,
                                 api_name='sis-get-iter',
                                 api_family='cm',
                                 query=query,
                                 is_iter=True)
    for res in result:
        attr_list = res.get_child_by_name('attributes-list')
        if attr_list:
            sis_status = attr_list.get_children()
            for sis in sis_status:
                path = sis.get_child_content('path')
                if not path:
                    continue
                # Bug fix: rpartition returns (head, sep, tail); the
                # old code bound the *head* ('/vol') as the volume
                # name, so every entry keyed the same wrong value and
                # lookups by volume name could never match.
                (___, __, vol) = path.rpartition('/')
                if not vol:
                    continue
                v_sis = {}
                v_sis['compression'] = na_utils.to_bool(
                    sis.get_child_content('is-compression-enabled'))
                # NOTE(review): dedup is derived from the sis 'state'
                # field via to_bool — confirm it maps enabled/disabled.
                v_sis['dedup'] = na_utils.to_bool(
                    sis.get_child_content('state'))
                sis_vols[vol] = v_sis
    return sis_vols
def query_aggr_storage_disk(na_server, aggr):
    """Queries for storage disks associated to an aggregate.

    :returns: the effective disk type of the first disk that reports
              one, or 'unknown' if none is found.
    """
    query = {'storage-disk-info': {'disk-raid-info':
                                   {'disk-aggregate-info':
                                    {'aggregate-name': aggr}}}}
    des_attr = {'storage-disk-info':
                {'disk-raid-info': ['effective-disk-type']}}
    result = na_utils.invoke_api(na_server,
                                 api_name='storage-disk-get-iter',
                                 api_family='cm', query=query,
                                 des_result=des_attr,
                                 additional_elems=None,
                                 is_iter=True)
    for res in result:
        attr_list = res.get_child_by_name('attributes-list')
        if attr_list:
            storage_disks = attr_list.get_children()
            for disk in storage_disks:
                raid_info = disk.get_child_by_name('disk-raid-info')
                if raid_info:
                    eff_disk_type =\
                        raid_info.get_child_content('effective-disk-type')
                    # First reported disk type is taken as the
                    # aggregate's type.
                    if eff_disk_type:
                        return eff_disk_type
                    else:
                        continue
    return 'unknown'
def get_cluster_ssc(na_server, vserver):
    """Provides cluster volumes with ssc."""
    netapp_volumes = get_cluster_vols_with_ssc(na_server, vserver)
    # Bucket every volume into the feature sets it supports.
    ssc_map = {'mirrored': set(), 'dedup': set(),
               'compression': set(), 'thin': set(),
               'all': netapp_volumes}
    for vol in netapp_volumes:
        if vol.sis['dedup']:
            ssc_map['dedup'].add(vol)
        if vol.sis['compression']:
            ssc_map['compression'].add(vol)
        if vol.mirror['mirrored']:
            ssc_map['mirrored'].add(vol)
        if vol.space['thin_provisioned']:
            ssc_map['thin'].add(vol)
    return ssc_map
def refresh_cluster_stale_ssc(*args, **kwargs):
    """Refreshes stale ssc volumes with latest.

    args: (backend, na_server, vserver).  Re-queries each stale volume
    and rebuilds the backend's ssc map on a copy; volumes no longer
    found are dropped.  Guarded by the 'refresh_stale_running' flag so
    only one stale-refresh job runs per backend.
    """
    backend = args[0]
    na_server = args[1]
    vserver = args[2]
    # Lock name is unique per backend instance.
    identity = str(id(backend))
    lock_pr = '%s_%s' % ('refresh_ssc', identity)
    try:
        job_set = na_utils.set_safe_attr(
            backend, 'refresh_stale_running', True)
        if not job_set:
            # Another stale-refresh job already claimed the flag.
            return

        @utils.synchronized(lock_pr)
        def refresh_stale_ssc():
            stale_vols = backend._update_stale_vols(reset=True)
            LOG.info(_('Running stale ssc refresh job for %(server)s'
                       ' and vserver %(vs)s')
                     % {'server': na_server, 'vs': vserver})
            # refreshing single volumes can create inconsistency
            # hence doing manipulations on copy
            ssc_vols_copy = copy.deepcopy(backend.ssc_vols)
            refresh_vols = set()
            expired_vols = set()
            for vol in stale_vols:
                name = vol.id['name']
                res = get_cluster_vols_with_ssc(na_server, vserver, name)
                if res:
                    refresh_vols.add(res.pop())
                else:
                    # Volume vanished from the backend; drop it.
                    expired_vols.add(vol)
            for vol in refresh_vols:
                for k in ssc_vols_copy:
                    # Re-bucket: remove, then re-add to each feature
                    # set the refreshed volume still belongs to.
                    vol_set = ssc_vols_copy[k]
                    vol_set.discard(vol)
                    if k == "mirrored" and vol.mirror['mirrored']:
                        vol_set.add(vol)
                    if k == "dedup" and vol.sis['dedup']:
                        vol_set.add(vol)
                    if k == "compression" and vol.sis['compression']:
                        vol_set.add(vol)
                    if k == "thin" and vol.space['thin_provisioned']:
                        vol_set.add(vol)
                    if k == "all":
                        vol_set.add(vol)
            for vol in expired_vols:
                for k in ssc_vols_copy:
                    vol_set = ssc_vols_copy[k]
                    vol_set.discard(vol)
            backend.refresh_ssc_vols(ssc_vols_copy)

        refresh_stale_ssc()
    finally:
        na_utils.set_safe_attr(backend, 'refresh_stale_running', False)
def get_cluster_latest_ssc(*args, **kwargs):
    """Updates volumes including ssc.

    args: (backend, na_server, vserver).  Performs a full ssc rebuild
    and records the run time; guarded by the 'ssc_job_running' flag so
    only one full-refresh job runs per backend.
    """
    backend = args[0]
    na_server = args[1]
    vserver = args[2]
    # Lock name is unique per backend instance.
    identity = str(id(backend))
    lock_pr = '%s_%s' % ('refresh_ssc', identity)
    # As this depends on stale job running state
    # set flag as soon as job starts to avoid
    # job accumulation.
    try:
        job_set = na_utils.set_safe_attr(backend, 'ssc_job_running', True)
        if not job_set:
            return

        @utils.synchronized(lock_pr)
        def get_latest_ssc():
            LOG.info(_('Running cluster latest ssc job for %(server)s'
                       ' and vserver %(vs)s')
                     % {'server': na_server, 'vs': vserver})
            ssc_vols = get_cluster_ssc(na_server, vserver)
            backend.refresh_ssc_vols(ssc_vols)
            backend.ssc_run_time = timeutils.utcnow()

        get_latest_ssc()
    finally:
        na_utils.set_safe_attr(backend, 'ssc_job_running', False)
def refresh_cluster_ssc(backend, na_server, vserver):
    """Refresh cluster ssc for backend."""
    if not isinstance(backend, driver.VolumeDriver):
        raise exception.InvalidInput(reason=_("Backend not a VolumeDriver."))
    if not isinstance(na_server, api.NaServer):
        raise exception.InvalidInput(reason=_("Backend server not NaServer."))
    # Guard clauses: never start a new job while one is in flight.
    if getattr(backend, 'ssc_job_running', None):
        LOG.warn(_('ssc job in progress. Returning... '))
        return
    delta_secs = getattr(backend, 'ssc_run_delta_secs', 1800)
    last_run = getattr(backend, 'ssc_run_time', None)
    if last_run is None or timeutils.is_newer_than(last_run, delta_secs):
        # Full refresh: either never run, or cached data has aged out.
        # Timer(0, ...) just runs the job on a background thread.
        job = Timer(0, get_cluster_latest_ssc,
                    args=[backend, na_server, vserver])
        job.start()
        return
    if getattr(backend, 'refresh_stale_running', None):
        LOG.warn(_('refresh stale ssc job in progress. Returning... '))
        return
    if backend.stale_vols:
        # Cheap incremental refresh limited to volumes flagged stale.
        job = Timer(0, refresh_cluster_stale_ssc,
                    args=[backend, na_server, vserver])
        job.start()
def get_volumes_for_specs(ssc_vols, specs):
    """Shortlists volumes for extra specs provided.

    :param ssc_vols: dict mapping ssc feature name ('all', 'mirrored',
                     'dedup', 'compression', 'thin') to a set of volumes
    :param specs: volume type extra specs dict; may be None
    :returns: set of volumes satisfying all requested specs
    """
    if specs is None or not isinstance(specs, dict):
        return ssc_vols['all']
    result = ssc_vols['all']
    raid_type = specs.get('netapp:raid_type')
    disk_type = specs.get('netapp:disk_type')
    qos_policy_group = specs.get('netapp:qos_policy_group')
    bool_specs_list = ['netapp_mirrored', 'netapp_unmirrored',
                       'netapp_dedup', 'netapp_nodedup',
                       'netapp_compression', 'netapp_nocompression',
                       'netapp_thin_provisioned', 'netapp_thick_provisioned']
    b_specs = {}
    for spec in bool_specs_list:
        value = specs.get(spec)
        b_specs[spec] = na_utils.to_bool(value) if value else None

    def _spec_ineffect(b_specs, spec, opp_spec):
        """If the spec with opposite spec is ineffective."""
        return ((b_specs[spec] is None and b_specs[opp_spec] is None)
                or (b_specs[spec] == b_specs[opp_spec]))

    # For each feature pair, intersect with the feature's volume set when
    # the positive spec is requested (or the negative spec is explicitly
    # False), otherwise subtract it. `&` and `-` create new sets, so the
    # cached sets inside ssc_vols are never modified.
    feature_pairs = [
        ('netapp_mirrored', 'netapp_unmirrored', 'mirrored'),
        ('netapp_dedup', 'netapp_nodedup', 'dedup'),
        ('netapp_compression', 'netapp_nocompression', 'compression'),
        ('netapp_thin_provisioned', 'netapp_thick_provisioned', 'thin'),
    ]
    for spec, opp_spec, feature in feature_pairs:
        if _spec_ineffect(b_specs, spec, opp_spec):
            continue
        if b_specs[spec] or b_specs[opp_spec] is False:
            result = result & ssc_vols[feature]
        else:
            result = result - ssc_vols[feature]
    if raid_type or disk_type or qos_policy_group:
        # Build a fresh set instead of discarding from `result` in place:
        # if no boolean spec applied above, `result` still aliases the
        # shared ssc_vols['all'] set and in-place discards would corrupt
        # the cached ssc data used by other requests. This also removes
        # the old deepcopy of every volume object.
        matched = set()
        for vol in result:
            if raid_type:
                vol_raid = vol.aggr['raid_type']
                vol_raid = vol_raid.lower() if vol_raid else None
                if raid_type.lower() != vol_raid:
                    continue
            if disk_type:
                vol_dtype = vol.aggr['disk_type']
                vol_dtype = vol_dtype.lower() if vol_dtype else None
                if disk_type.lower() != vol_dtype:
                    continue
            if qos_policy_group:
                vol_qos = vol.qos['qos_policy_group']
                vol_qos = vol_qos.lower() if vol_qos else None
                if qos_policy_group.lower() != vol_qos:
                    continue
            matched.add(vol)
        result = matched
    return result

View File

@ -25,10 +25,16 @@ NetApp drivers to achieve the desired functionality.
import copy
import socket
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import utils
from cinder.volume.drivers.netapp.api import NaApiError
from cinder.volume.drivers.netapp.api import NaElement
from cinder.volume.drivers.netapp.api import NaServer
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
@ -118,3 +124,128 @@ def validate_instantiation(**kwargs):
return
LOG.warn(_("It is not the recommended way to use drivers by NetApp. "
"Please use NetAppDriver to achieve the functionality."))
def invoke_api(na_server, api_name, api_family='cm', query=None,
               des_result=None, additional_elems=None,
               is_iter=False, records=0, tag=None,
               timeout=0, tunnel=None):
    """Invokes any given api call to a NetApp server.

    Generator: yields one result element per invocation. Iterator apis
    are paged record_step records at a time until the server stops
    returning a next-tag or the requested record limit is reached.

    NOTE: since this is a generator, argument validation only happens
    when the first result is requested, not at call time.

    :param na_server: na_server instance
    :param api_name: api name string
    :param api_family: cm or 7m
    :param query: api query as dict
    :param des_result: desired result as dict
    :param additional_elems: dict other than query and des_result
    :param is_iter: is iterator api
    :param records: limit for records, 0 for infinite
    :param timeout: timeout seconds
    :param tunnel: tunnel entity, vserver or vfiler name
    """
    record_step = 50
    # Was `na_server or isinstance(...)`, which skipped the type check
    # for any truthy argument; the docstring's contract is an NaServer.
    if not (na_server and isinstance(na_server, NaServer)):
        msg = _("Requires an NaServer instance.")
        # InvalidInput formats its message from `reason` (see the other
        # call sites in this change); `data=` left it unformatted.
        raise exception.InvalidInput(reason=msg)
    # Work on a copy so the tunnel/timeout tweaks below do not leak into
    # the caller's server object.
    server = copy.copy(na_server)
    if api_family == 'cm':
        server.set_vserver(tunnel)
    else:
        server.set_vfiler(tunnel)
    if timeout > 0:
        server.set_timeout(timeout)
    iter_records = 0
    cond = True
    while cond:
        na_element = create_api_request(
            api_name, query, des_result, additional_elems,
            is_iter, record_step, tag)
        result = server.invoke_successfully(na_element, True)
        if is_iter:
            if records > 0:
                iter_records = iter_records + record_step
                if iter_records >= records:
                    cond = False
            # Continue paging only while the server hands back a tag.
            tag_el = result.get_child_by_name('next-tag')
            tag = tag_el.get_content() if tag_el else None
            if not tag:
                cond = False
        else:
            cond = False
        yield result
def create_api_request(api_name, query=None, des_result=None,
                       additional_elems=None, is_iter=False,
                       record_step=50, tag=None):
    """Creates a NetApp api request.

    :param api_name: api name string
    :param query: api query as dict
    :param des_result: desired result as dict
    :param additional_elems: dict other than query and des_result
    :param is_iter: is iterator api
    :param record_step: records at a time for iter api
    :param tag: next tag for iter api
    """
    request = NaElement(api_name)
    # Optional <query> and <desired-attributes> children are built from
    # plain dicts via translate_struct, in that order.
    for child_name, struct in (('query', query),
                               ('desired-attributes', des_result)):
        if struct:
            child = NaElement(child_name)
            child.translate_struct(struct)
            request.add_child_elem(child)
    if additional_elems:
        request.translate_struct(additional_elems)
    if is_iter:
        # Iterator apis page through results record_step at a time.
        request.add_new_child('max-records', str(record_step))
    if tag:
        request.add_new_child('tag', tag, True)
    return request
def to_bool(val):
    """Converts 'true', 'y', 'yes', 'enabled', '1' to True.

    Matching is case-insensitive on the string form of ``val``; every
    other value (including None, 0 and '') maps to False.
    """
    if not val:
        return False
    return str(val).lower() in ('true', 'y', 'yes', 'enabled', '1')
@utils.synchronized("safe_set_attr")
def set_safe_attr(instance, attr, val):
    """Sets the attribute in a thread safe manner.

    Returns True only if the attribute's value actually changed; False
    when instance/attr are missing or the value was already set.
    """
    if not instance or not attr:
        return False
    old_val = getattr(instance, attr, None)
    # No change: both already None, or new value equals current value.
    if (val is None and old_val is None) or val == old_val:
        return False
    setattr(instance, attr, val)
    return True
def get_volume_extra_specs(volume):
    """Provides extra specs associated with volume."""
    ctxt = context.get_admin_context()
    type_id = volume.get('volume_type_id')
    if type_id is None:
        # Volume has no volume type, hence no extra specs.
        return None
    vol_type = volume_types.get_volume_type(ctxt, type_id)
    return vol_type.get('extra_specs')

View File

@ -414,14 +414,6 @@ class NfsDriver(RemoteFsDriver):
def _find_share(self, volume_size_in_gib):
"""Choose NFS share among available ones for given volume size.
First validation step: ratio of actual space (used_space / total_space)
is less than 'nfs_used_ratio'.
Second validation step: apparent space allocated (differs from actual
space used when using sparse files) and compares the apparent available
space (total_available * nfs_oversub_ratio) to ensure enough space is
available for the new volume.
For instances with more than one share that meets the criteria, the
share with the least "allocated" space will be selected.
@ -434,33 +426,11 @@ class NfsDriver(RemoteFsDriver):
target_share = None
target_share_reserved = 0
used_ratio = self.configuration.nfs_used_ratio
oversub_ratio = self.configuration.nfs_oversub_ratio
requested_volume_size = volume_size_in_gib * units.GiB
for nfs_share in self._mounted_shares:
if not self._is_share_eligible(nfs_share, volume_size_in_gib):
continue
total_size, total_available, total_allocated = \
self._get_capacity_info(nfs_share)
apparent_size = max(0, total_size * oversub_ratio)
apparent_available = max(0, apparent_size - total_allocated)
used = (total_size - total_available) / total_size
if used > used_ratio:
# NOTE(morganfainberg): We check the used_ratio first since
# with oversubscription it is possible to not have the actual
# available space but be within our oversubscription limit
# therefore allowing this share to still be selected as a valid
# target.
LOG.debug(_('%s is above nfs_used_ratio'), nfs_share)
continue
if apparent_available <= requested_volume_size:
LOG.debug(_('%s is above nfs_oversub_ratio'), nfs_share)
continue
if total_allocated / total_size >= oversub_ratio:
LOG.debug(_('%s reserved space is above nfs_oversub_ratio'),
nfs_share)
continue
if target_share is not None:
if target_share_reserved > total_allocated:
target_share = nfs_share
@ -477,14 +447,56 @@ class NfsDriver(RemoteFsDriver):
return target_share
def _is_share_eligible(self, nfs_share, volume_size_in_gib):
    """Verifies NFS share is eligible to host volume with given size.

    First validation step: ratio of actual space (used_space / total_space)
    is less than 'nfs_used_ratio'. Second validation step: apparent space
    allocated (differs from actual space used when using sparse files)
    and compares the apparent available
    space (total_available * nfs_oversub_ratio) to ensure enough space is
    available for the new volume.

    :param nfs_share: nfs share
    :param volume_size_in_gib: int size in GB
    :returns: True when the share passes all capacity checks
    """
    used_ratio = self.configuration.nfs_used_ratio
    oversub_ratio = self.configuration.nfs_oversub_ratio
    # Requested size in bytes; capacity figures below are bytes too.
    requested_volume_size = volume_size_in_gib * units.GiB
    total_size, total_available, total_allocated = \
        self._get_capacity_info(nfs_share)
    # Apparent capacity models oversubscription with sparse files.
    apparent_size = max(0, total_size * oversub_ratio)
    apparent_available = max(0, apparent_size - total_allocated)
    # NOTE(review): assumes _get_capacity_info returns floats; integer
    # values would make this division truncate -- confirm upstream.
    used = (total_size - total_available) / total_size
    if used > used_ratio:
        # NOTE(morganfainberg): We check the used_ratio first since
        # with oversubscription it is possible to not have the actual
        # available space but be within our oversubscription limit
        # therefore allowing this share to still be selected as a valid
        # target.
        LOG.debug(_('%s is above nfs_used_ratio'), nfs_share)
        return False
    if apparent_available <= requested_volume_size:
        LOG.debug(_('%s is above nfs_oversub_ratio'), nfs_share)
        return False
    if total_allocated / total_size >= oversub_ratio:
        # Already-reserved (allocated) space alone exceeds the allowed
        # oversubscription ratio.
        LOG.debug(_('%s reserved space is above nfs_oversub_ratio'),
                  nfs_share)
        return False
    return True
def _get_mount_point_for_share(self, nfs_share):
"""Needed by parent class."""
return self._remotefsclient.get_mount_point(nfs_share)
def _get_capacity_info(self, nfs_share):
"""Calculate available space on the NFS share.
:param nfs_share: example 172.18.194.100:/var/nfs
"""
mount_point = self._get_mount_point_for_share(nfs_share)
df, _ = self._execute('stat', '-f', '-c', '%S %b %a', mount_point,

View File

@ -1218,7 +1218,7 @@
#netapp_password=<None>
# Cluster vserver to use for provisioning (string value)
#netapp_vserver=openstack
#netapp_vserver=<None>
# Host name for the storage controller (string value)
#netapp_server_hostname=<None>