HNAS: Add list manageable volumes/snapshots

This patch adds the ability to list manageable volumes and
snapshots to the HNAS NFS driver.
It also adds some missing docstrings.

DocImpact

Change-Id: Ide48b84f22efe409f9493918b27a05cf5782b893
Adriano Rosso 2016-10-11 11:02:51 -03:00
parent 0ef29f11fd
commit fb87dc52bf
5 changed files with 468 additions and 52 deletions
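
For context, the sketch below shows how the new listing entry points are called and the shape of the entries they return. It is illustrative only: a real HNASNFSDriver instance needs HNAS configuration, so a Mock stands in for the driver here, and the return value is copied from the unit tests in this patch.

# Illustrative only: shows the call signature and return shape of the new
# listing API. A Mock stands in for a configured HNASNFSDriver; the return
# value mirrors the fixtures used in the unit tests below.
from unittest import mock

driver = mock.Mock()
driver.get_manageable_volumes.return_value = [{
    'reference': {'source-name': '172.24.49.21:/fs-cinder/'
                                 'volume-1e5177e7-95e5-4a0f-b170-e45f4b469f6a'},
    'size': 128,
    'safe_to_manage': False,
    'reason_not_safe': 'already managed',
    'cinder_id': '1e5177e7-95e5-4a0f-b170-e45f4b469f6a',
    'extra_info': None}]

# Arguments mirror the unit tests: cinder_volumes, marker, limit, offset,
# sort_keys, sort_dirs.
entries = driver.get_manageable_volumes([], None, 1000, 0,
                                        ['reference'], ['desc'])
for entry in entries:
    print(entry['reference']['source-name'], entry['safe_to_manage'])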

@@ -249,6 +249,13 @@ References: \n\
not_a_clone = "\
file-clone-stat: failed to get predecessor snapshot-files: File is not a clone"
file_relatives =\
[' /nfs_cinder/snapshot-lu-1 ',
' /nfs_cinder/snapshot-lu-2 ',
' /nfs_cinder/volume-not-used ',
' /nfs_cinder/snapshot-1 ',
' /nfs_cinder/snapshot-2 ']
class HDSHNASBackendTest(test.TestCase):
@@ -811,6 +818,42 @@ class HDSHNASBackendTest(test.TestCase):
self.hnas_backend.create_target('cinder-default', 'fs-cinder',
'pxr6U37LZZJBoMc')
def test_get_cloned_file_relatives(self):
self.mock_object(self.hnas_backend, '_run_cmd', mock.Mock(
side_effect=[(evsfs_list, ''), (file_clone_stat, ''),
(file_clone_stat_snap_file1, ''),
(file_clone_stat_snap_file2, '')]))
out = self.hnas_backend.get_cloned_file_relatives('cinder-lu',
'fs-cinder')
self.assertEqual(file_relatives, out)
self.hnas_backend._run_cmd.assert_called_with('console-context',
'--evs', '2',
'file-clone-stat-'
'snapshot-file',
'-f', 'fs-cinder',
'00000000004029000d81'
'f26826ffffffffffffff]')
def test_get_cloned_file_relatives_not_clone_except(self):
self.mock_object(self.hnas_backend, '_run_cmd', mock.Mock(
side_effect=[(evsfs_list, ''),
putils.ProcessExecutionError(
stderr='File is not a clone')]))
self.assertRaises(exception.ManageExistingInvalidReference,
self.hnas_backend.get_cloned_file_relatives,
'cinder-lu', 'fs-cinder', True)
def test_get_cloned_file_relatives_not_clone_no_except(self):
self.mock_object(self.hnas_backend, '_run_cmd', mock.Mock(
side_effect=[(evsfs_list, ''), putils.ProcessExecutionError(
stderr='File is not a clone')]))
out = self.hnas_backend.get_cloned_file_relatives('cinder-lu',
'fs-cinder')
self.assertEqual([], out)
def test_check_snapshot_parent_true(self):
self.mock_object(self.hnas_backend, '_run_cmd',
mock.Mock(
@@ -823,13 +866,6 @@ class HDSHNASBackendTest(test.TestCase):
'fs-cinder')
self.assertTrue(out)
self.hnas_backend._run_cmd.assert_called_with('console-context',
'--evs', '2',
'file-clone-stat'
'-snapshot-file', '-f',
'fs-cinder',
'00000000004010000d2011'
'6826ffffffffffffff]')
def test_check_snapshot_parent_false(self):
self.mock_object(self.hnas_backend, '_run_cmd',
@@ -843,23 +879,6 @@ class HDSHNASBackendTest(test.TestCase):
'fs-cinder')
self.assertFalse(out)
self.hnas_backend._run_cmd.assert_called_with('console-context',
'--evs', '2',
'file-clone-stat'
'-snapshot-file', '-f',
'fs-cinder',
'00000000004029000d81f26'
'826ffffffffffffff]')
def test_check_a_not_cloned_file(self):
self.mock_object(self.hnas_backend, '_run_cmd',
mock.Mock(
side_effect=[(evsfs_list, ''),
(not_a_clone, '')]))
self.assertRaises(exception.ManageExistingInvalidReference,
self.hnas_backend.check_snapshot_parent,
'cinder-lu', 'snapshot-name', 'fs-cinder')
def test_get_export_path(self):
export_out = '/export01-husvm'

@@ -33,6 +33,7 @@ from cinder.volume.drivers.hitachi import hnas_backend as backend
from cinder.volume.drivers.hitachi import hnas_nfs as nfs
from cinder.volume.drivers.hitachi import hnas_utils
from cinder.volume.drivers import nfs as base_nfs
from cinder.volume import utils as vutils
_VOLUME = {'name': 'cinder-volume',
'id': fake.VOLUME_ID,
@@ -320,7 +321,7 @@ class HNASNFSDriverTest(test.TestCase):
out = self.driver.get_volume_stats()
self.assertEqual('5.0.0', out['driver_version'])
self.assertEqual('6.0.0', out['driver_version'])
self.assertEqual('Hitachi', out['vendor_name'])
self.assertEqual('NFS', out['storage_protocol'])
@@ -432,6 +433,25 @@ class HNASNFSDriverTest(test.TestCase):
self.driver.manage_existing, self.volume,
existing_vol_ref)
def test_manage_existing_already_managed(self):
self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']
existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'}
expected_size = 1
self.mock_object(self.driver, '_ensure_shares_mounted')
self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share',
mock.Mock(return_value='/mnt/silver'))
self.mock_object(os.path, 'isfile', mock.Mock(return_value=True))
self.mock_object(utils, 'get_file_size',
mock.Mock(return_value=expected_size))
self.mock_object(vutils, 'check_already_managed_volume',
mock.Mock(return_value=True))
self.assertRaises(exception.ManageExistingAlreadyManaged,
self.driver.manage_existing, self.volume,
existing_vol_ref)
def test_manage_existing_missing_volume_in_backend(self):
self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']
existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'}
@@ -610,3 +630,145 @@ class HNASNFSDriverTest(test.TestCase):
check_exit_code=True)
self.driver._get_mount_point_for_share.assert_called_with(
self.snapshot.provider_location)
def test_get_manageable_volumes_not_safe(self):
manageable_vol = [{'cinder_id': '1e5177e7-95e5-4a0f-b170-e45f4b469f6a',
'extra_info': None,
'reason_not_safe': 'already managed',
'reference': {
'source-name':
'172.24.49.21:/fs-cinder/volume-1e5177e7-'
'95e5-4a0f-b170-e45f4b469f6a'},
'safe_to_manage': False,
'size': 128}]
rsrc = [self.volume]
path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e'
self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share',
mock.Mock(return_value=path))
vols_exp = [self.volume.name]
self.mock_object(self.driver, '_get_volumes_from_export',
mock.Mock(return_value=vols_exp))
self.mock_object(self.driver, '_get_file_size',
mock.Mock(return_value=self.volume.size))
out = self.driver._get_manageable_resource_info(
rsrc, "volume", None, 1000, 0, ['reference'], ['desc'])
self.driver._get_volumes_from_export.assert_called_with(
'172.24.49.21:/fs-cinder')
self.driver._get_file_size.assert_called_with('%s/%s' % (
path, self.volume.name))
self.driver._get_mount_point_for_share(self.volume.provider_location)
self.assertEqual(out, manageable_vol)
def test_get_manageable_volumes(self):
manageable_vol = [{
'cinder_id': '1e5177e7-95e5-4a0f-b170-e45f4b469f6a',
'extra_info': None,
'reason_not_safe': 'already managed',
'reference': {
'source-name': '172.24.49.21:/fs-cinder/'
'volume-1e5177e7-95e5-4a0f-b170-e45f4b469f6a'},
'safe_to_manage': False,
'size': 128}]
rsrc = [self.volume]
path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e'
self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share',
mock.Mock(return_value=path))
vols_exp = [fake.VOLUME_NAME]
self.mock_object(self.driver, '_get_volumes_from_export',
mock.Mock(return_value=vols_exp))
self.mock_object(self.driver, '_get_file_size',
mock.Mock(return_value=self.volume.size))
out = self.driver._get_manageable_resource_info(rsrc, "volume", None,
1000, 0, ['reference'],
['desc'])
self.driver._get_volumes_from_export.assert_called_with(
'172.24.49.21:/fs-cinder')
self.driver._get_file_size.assert_called_with(
'%s/%s' % (path, self.volume.name))
self.driver._get_mount_point_for_share(self.volume.provider_location)
self.assertEqual(out, manageable_vol)
def test_get_manageable_snapshots(self):
manageable_snap = [{
'cinder_id': '253b2878-ec60-4793-ad19-e65496ec7aab',
'extra_info': None,
'reason_not_safe': 'already managed',
'reference': {
'source-name': '172.24.49.21:/fs-cinder/'
'snapshot-253b2878-ec60-4793-'
'ad19-e65496ec7aab'},
'safe_to_manage': False,
'size': 128,
'source_reference': {'id': '1'}}]
rsrc = [self.snapshot]
path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e'
self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share',
mock.Mock(return_value=path))
vols_exp = [fake.SNAPSHOT_NAME]
self.mock_object(self.driver, '_get_volumes_from_export',
mock.Mock(return_value=vols_exp))
self.mock_object(self.driver, '_get_file_size',
mock.Mock(return_value=self.volume.size))
self.mock_object(backend.HNASSSHBackend, 'get_cloned_file_relatives',
mock.Mock(return_value=[' /nfs_cinder/volume-1',
'/nfs_cinder/snapshot2']))
out = self.driver._get_manageable_resource_info(rsrc, "snapshot", None,
1000, 0, ['reference'],
['desc'])
self.driver._get_volumes_from_export.assert_called_with(
'172.24.49.21:/fs-cinder')
self.driver._get_file_size.assert_called_with(
'%s/%s' % (path, self.snapshot.name))
self.driver._get_mount_point_for_share(self.snapshot.provider_location)
self.assertEqual(out, manageable_snap)
def test_get_manageable_snapshots_unknown_origin(self):
manageable_snap = [{
'cinder_id': '253b2878-ec60-4793-ad19-e65496ec7aab',
'extra_info': 'Could not determine the volume that owns '
'the snapshot',
'reason_not_safe': 'already managed',
'reference': {
'source-name': '172.24.49.21:/fs-cinder/'
'snapshot-253b2878-ec60-4793-'
'ad19-e65496ec7aab'},
'safe_to_manage': False,
'size': 128,
'source_reference': {'id': 'unknown'}}]
rsrc = [self.snapshot]
path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e'
self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share',
mock.Mock(return_value=path))
vols_exp = [fake.SNAPSHOT_NAME]
self.mock_object(self.driver, '_get_volumes_from_export',
mock.Mock(return_value=vols_exp))
self.mock_object(self.driver, '_get_file_size',
mock.Mock(return_value=self.volume.size))
self.mock_object(backend.HNASSSHBackend, 'get_cloned_file_relatives',
mock.Mock(return_value=[' /nfs_cinder/volume-1',
' /nfs_cinder/volume-2',
'/nfs_cinder/snapshot2']))
out = self.driver._get_manageable_resource_info(rsrc, "snapshot", None,
1000, 0, ['reference'],
['desc'])
self.driver._get_volumes_from_export.assert_called_with(
'172.24.49.21:/fs-cinder')
self.driver._get_mount_point_for_share(self.snapshot.provider_location)
self.driver._get_file_size.assert_called_with('%s/%s' % (
path, self.snapshot.name))
self.assertEqual(out, manageable_snap)

@@ -835,15 +835,20 @@ class HNASSSHBackend(object):
LOG.debug("create_target: alias: %(alias)s fs_label: %(fs_label)s",
{'alias': tgt_alias, 'fs_label': fs_label})
def _get_file_handler(self, volume_path, _evs_id, fs_label):
out, err = self._run_cmd("console-context", "--evs", _evs_id,
'file-clone-stat', '-f', fs_label,
volume_path)
def _get_file_handler(self, volume_path, _evs_id, fs_label,
raise_except):
if "File is not a clone" in out:
msg = (_("%s is not a clone!"), volume_path)
raise exception.ManageExistingInvalidReference(
existing_ref=volume_path, reason=msg)
try:
out, err = self._run_cmd("console-context", "--evs", _evs_id,
'file-clone-stat', '-f', fs_label,
volume_path)
except putils.ProcessExecutionError as e:
if 'File is not a clone' in e.stderr and raise_except:
msg = (_("%s is not a clone!") % volume_path)
raise exception.ManageExistingInvalidReference(
existing_ref=volume_path, reason=msg)
else:
return
lines = out.split('\n')
filehandle_list = []
@@ -858,25 +863,57 @@ class HNASSSHBackend(object):
return filehandle_list
def check_snapshot_parent(self, volume_path, snap_name, fs_label):
def get_cloned_file_relatives(self, file_path, fs_label,
raise_except=False):
"""Gets the files related to a clone
:param file_path: path of the cloned file
:param fs_label: filesystem of the cloned file
:param raise_except: if True, an exception is raised for files that
are not clones; if False, only a debug message is logged.
:returns: list with names of the related files
"""
relatives = []
_evs_id = self.get_evs(fs_label)
file_handler_list = self._get_file_handler(volume_path, _evs_id,
fs_label)
file_handler_list = self._get_file_handler(file_path, _evs_id,
fs_label, raise_except)
for file_handler in file_handler_list:
out, err = self._run_cmd("console-context", "--evs", _evs_id,
'file-clone-stat-snapshot-file',
'-f', fs_label, file_handler)
if file_handler_list:
for file_handler in file_handler_list:
out, err = self._run_cmd('console-context', '--evs', _evs_id,
'file-clone-stat-snapshot-file', '-f',
fs_label, file_handler)
lines = out.split('\n')
results = out.split('\n')
for line in lines:
if snap_name in line:
LOG.debug("Snapshot %(snap)s found in children list from "
"%(vol)s!", {'snap': snap_name,
'vol': volume_path})
return True
for value in results:
if 'Clone:' in value and file_path not in value:
relative = value.split(':')[1]
relatives.append(relative)
else:
LOG.debug("File %(path)s is not a clone.", {
'path': file_path})
return relatives
def check_snapshot_parent(self, volume_path, snap_name, fs_label):
"""Check if a volume is the snapshot source
:param volume_path: path of the volume
:param snap_name: name of the snapshot
:param fs_label: filesystem label
:return: True if the volume is the snapshot's source or False otherwise
"""
lines = self.get_cloned_file_relatives(volume_path, fs_label, True)
for line in lines:
if snap_name in line:
LOG.debug("Snapshot %(snap)s found in children list from "
"%(vol)s!", {'snap': snap_name,
'vol': volume_path})
return True
LOG.debug("Snapshot %(snap)s was not found in children list from "
"%(vol)s, probably it is not the parent!",
@@ -884,6 +921,12 @@ class HNASSSHBackend(object):
return False
def get_export_path(self, export, fs_label):
"""Gets the path of an export on HNAS
:param export: the export's name
:param fs_label: the filesystem name
:returns: string of the export's path
"""
evs_id = self.get_evs(fs_label)
out, err = self._run_cmd("console-context", "--evs", evs_id,
'nfs-export', 'list', export)

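To make the parsing in get_cloned_file_relatives() above more concrete, here is a small self-contained sketch of the same logic applied to illustrative command output. The sample lines are an assumption made for demonstration; real 'file-clone-stat-snapshot-file' output on HNAS may be formatted differently.

# Minimal sketch of the parsing performed by get_cloned_file_relatives()
# above. The sample output is an assumption for illustration only.
sample_output = ("  Clone: /nfs_cinder/snapshot-lu-1\n"
                 "  Clone: /nfs_cinder/snapshot-lu-2\n"
                 "  Clone: /nfs_cinder/cinder-lu\n")

file_path = '/nfs_cinder/cinder-lu'   # the file whose relatives we want
relatives = []
for value in sample_output.split('\n'):
    # Keep every 'Clone:' entry except the file we started from, exactly
    # as the backend method does.
    if 'Clone:' in value and file_path not in value:
        relatives.append(value.split(':')[1])

print(relatives)
# [' /nfs_cinder/snapshot-lu-1', ' /nfs_cinder/snapshot-lu-2']

# check_snapshot_parent() then simply looks for the snapshot name in this
# list to decide whether the volume is the snapshot's source.
snap_name = 'snapshot-lu-1'
print(any(snap_name in line for line in relatives))   # True
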
@@ -28,7 +28,7 @@ from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder import interface
from cinder import utils as cutils
@@ -38,7 +38,7 @@ from cinder.volume.drivers import nfs
from cinder.volume import utils
HNAS_NFS_VERSION = '5.0.0'
HNAS_NFS_VERSION = '6.0.0'
LOG = logging.getLogger(__name__)
@@ -81,6 +81,8 @@ class HNASNFSDriver(nfs.NfsDriver):
Deprecated XML config file
Added support to manage/unmanage snapshots features
Fixed driver stats reporting
Version 6.0.0: Deprecated hnas_svcX_vol_type configuration
Added list-manageable volumes/snapshots support
"""
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Hitachi_HNAS_CI"
@@ -531,6 +533,10 @@ class HNASNFSDriver(nfs.NfsDriver):
{'vol': volume.id,
'ref': existing_vol_ref['source-name']})
vol_id = utils.extract_id_from_volume_name(vol_name)
if utils.check_already_managed_volume(vol_id):
raise exception.ManageExistingAlreadyManaged(volume_ref=vol_name)
self._check_pool_and_share(volume, nfs_share)
if vol_name == volume.name:
@@ -644,6 +650,11 @@ class HNASNFSDriver(nfs.NfsDriver):
LOG.exception(_LE("The NFS Volume %(cr)s does not exist."),
{'cr': new_path})
def _get_file_size(self, file_path):
file_size = float(cutils.get_file_size(file_path)) / units.Gi
# Round up to next Gb
return int(math.ceil(file_size))
def _manage_existing_get_size(self, existing_ref):
# Attempt to find NFS share, NFS mount, and path from vol_ref.
(nfs_share, nfs_mount, path
@@ -654,9 +665,7 @@ class HNASNFSDriver(nfs.NfsDriver):
{'ref': existing_ref['source-name']})
file_path = os.path.join(nfs_mount, path)
file_size = float(cutils.get_file_size(file_path)) / units.Gi
# Round up to next Gb
size = int(math.ceil(file_size))
size = self._get_file_size(file_path)
except (OSError, ValueError):
exception_message = (_("Failed to manage existing volume/snapshot "
"%(name)s, because of error in getting "
@@ -683,7 +692,15 @@ class HNASNFSDriver(nfs.NfsDriver):
return self.backend.check_snapshot_parent(volume_path, old_snap_name,
fs_label)
@cutils.trace
def manage_existing_snapshot(self, snapshot, existing_ref):
"""Brings an existing backend storage object under Cinder management.
:param snapshot: Cinder volume snapshot to manage
:param existing_ref: Driver-specific information used to identify a
volume snapshot
"""
# Attempt to find NFS share, NFS mount, and volume path from ref.
(nfs_share, nfs_mount, src_snapshot_name
) = self._get_share_mount_and_vol_from_vol_ref(existing_ref)
@@ -729,10 +746,19 @@ class HNASNFSDriver(nfs.NfsDriver):
raise exception.VolumeBackendAPIException(data=msg)
return {'provider_location': nfs_share}
@cutils.trace
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
return self._manage_existing_get_size(existing_ref)
@cutils.trace
def unmanage_snapshot(self, snapshot):
"""Removes the specified snapshot from Cinder management.
Does not delete the underlying backend storage object.
:param snapshot: Cinder volume snapshot to unmanage
"""
path = self._get_mount_point_for_share(snapshot.provider_location)
new_name = "unmanage-" + snapshot.name
@@ -751,3 +777,165 @@ class HNASNFSDriver(nfs.NfsDriver):
except (OSError, ValueError):
LOG.exception(_LE("The NFS snapshot %(old)s does not exist."),
{'old': old_path})
def _get_volumes_from_export(self, export_path):
mnt_point = self._get_mount_point_for_share(export_path)
vols = self._execute("ls", mnt_point, run_as_root=False,
check_exit_code=True)
vols = vols[0].split('\n')
if '' in vols:
vols.remove('')
return list(vols)
def _get_snapshot_origin(self, snap_path, fs_label):
relatives = self.backend.get_cloned_file_relatives(snap_path, fs_label)
origin = []
if not relatives:
return
elif len(relatives) > 1:
for relative in relatives:
if 'snapshot' not in relative:
origin.append(relative)
else:
origin.append(relatives[0])
return origin
def _get_manageable_resource_info(self, cinder_resources, resource_type,
marker, limit, offset, sort_keys,
sort_dirs):
"""Gets the resources on the backend available for management by Cinder.
Receives the parameters from "get_manageable_volumes" and
"get_manageable_snapshots" and returns the available resources.
:param cinder_resources: A list of resources in this host that Cinder
currently manages
:param resource_type: 'volume' or 'snapshot'
:param marker: The last item of the previous page; we return the
next results after this value (after sorting)
:param limit: Maximum number of items to return
:param offset: Number of items to skip after marker
:param sort_keys: List of keys to sort results by (valid keys
are 'identifier' and 'size')
:param sort_dirs: List of directions to sort by, corresponding to
sort_keys (valid directions are 'asc' and 'desc')
:returns: list of dictionaries, each specifying a volume or snapshot
(resource) in the host, with the following keys:
- reference (dictionary): The reference for a resource,
which can be passed to "manage_existing" or
"manage_existing_snapshot".
- size (int): The size of the resource according to the storage
backend, rounded up to the nearest GB.
- safe_to_manage (boolean): Whether or not this resource is
safe to manage according to the storage backend.
- reason_not_safe (string): If safe_to_manage is False,
the reason why.
- cinder_id (string): If already managed, provide the Cinder ID.
- extra_info (string): Any extra information to return to the
user
- source_reference (string): Similar to "reference", but for the
snapshot's source volume.
"""
entries = []
exports = {}
bend_rsrc = {}
cinder_ids = [resource.id for resource in cinder_resources]
for service in self.config['services']:
exp_path = self.config['services'][service]['hdp']
exports[exp_path] = (
self.config['services'][service]['export']['fs'])
for exp in exports.keys():
# bend_rsrc has all the resources in the specified exports
# volumes {u'172.24.54.39:/Export-Cinder':
# ['volume-325e7cdc-8f65-40a8-be9a-6172c12c9394',
# ' snapshot-1bfb6f0d-9497-4c12-a052-5426a76cacdc','']}
bend_rsrc[exp] = self._get_volumes_from_export(exp)
mnt_point = self._get_mount_point_for_share(exp)
for resource in bend_rsrc[exp]:
# Ignoring resources of unwanted types
if ((resource_type == 'volume' and 'snapshot' in resource) or
(resource_type == 'snapshot' and
'volume' in resource)):
continue
path = '%s/%s' % (exp, resource)
mnt_path = '%s/%s' % (mnt_point, resource)
size = self._get_file_size(mnt_path)
rsrc_inf = {'reference': {'source-name': path},
'size': size, 'cinder_id': None,
'extra_info': None}
if resource_type == 'volume':
potential_id = utils.extract_id_from_volume_name(resource)
else:
potential_id = utils.extract_id_from_snapshot_name(
resource)
# A resource that is already managed by Cinder should not be
# managed again, so we set safe_to_manage = False for it.
# Otherwise, safe_to_manage is set to True.
if potential_id in cinder_ids:
rsrc_inf['safe_to_manage'] = False
rsrc_inf['reason_not_safe'] = 'already managed'
rsrc_inf['cinder_id'] = potential_id
else:
rsrc_inf['safe_to_manage'] = True
rsrc_inf['reason_not_safe'] = None
# If it's a snapshot, we try to find its source volume. However,
# this search is not reliable in some cases. When a precise result
# cannot be determined, we return 'unknown' as the
# source-reference, log a warning and fill in the extra-info.
if resource_type == 'snapshot':
path = path.split(':')[1]
origin = self._get_snapshot_origin(path, exports[exp])
if not origin:
# if origin is empty, the file is not a clone
continue
elif len(origin) == 1:
origin = origin[0].split('/')[2]
origin = utils.extract_id_from_volume_name(origin)
rsrc_inf['source_reference'] = {'id': origin}
else:
LOG.warning(_LW("Could not determine the volume that "
"owns the snapshot %(snap)s"),
{'snap': resource})
rsrc_inf['source_reference'] = {'id': 'unknown'}
rsrc_inf['extra_info'] = ('Could not determine the '
'volume that owns the '
'snapshot')
entries.append(rsrc_inf)
return utils.paginate_entries_list(entries, marker, limit, offset,
sort_keys, sort_dirs)
@cutils.trace
def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
sort_keys, sort_dirs):
"""List volumes on the backend available for management by Cinder."""
return self._get_manageable_resource_info(cinder_volumes, 'volume',
marker, limit, offset,
sort_keys, sort_dirs)
@cutils.trace
def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,
sort_keys, sort_dirs):
"""List snapshots on the backend available for management by Cinder."""
return self._get_manageable_resource_info(cinder_snapshots, 'snapshot',
marker, limit, offset,
sort_keys, sort_dirs)
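
For quick reference, this is the dictionary shape that _get_manageable_resource_info() builds for each backend resource, reproduced from the test fixtures earlier in this patch; the collected entries are then sorted and paged by cinder.volume.utils.paginate_entries_list() before being returned through get_manageable_volumes() / get_manageable_snapshots().

# One entry as built by _get_manageable_resource_info() for a snapshot,
# reproduced from the unit test fixtures in this patch. Volume entries
# look the same but carry no 'source_reference' key.
manageable_snapshot = {
    'reference': {
        'source-name': '172.24.49.21:/fs-cinder/'
                       'snapshot-253b2878-ec60-4793-ad19-e65496ec7aab'},
    'size': 128,                          # size in GB, rounded up
    'safe_to_manage': False,              # already managed by Cinder
    'reason_not_safe': 'already managed',
    'cinder_id': '253b2878-ec60-4793-ad19-e65496ec7aab',
    'extra_info': None,
    'source_reference': {'id': '1'},      # owning volume id, or 'unknown'
}

print(manageable_snapshot['reference']['source-name'])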

@@ -0,0 +1,4 @@
---
features:
- Added the ability to list manageable volumes and snapshots to the
HNAS NFS driver.