SMBFS: manageable volumes

This change allows the SMBFS driver to claim (manage) VHD/VHDX images
present on pre-configured shares.

Note that images that have backing (parent) files will be rejected,
and managing snapshots will not be supported either.

The images may reside in subdirectories (and are moved to the expected
location once managed), which can be useful when a folder is dedicated
to images that are to be imported into Cinder.

All of the logic is contained in the new "RemoteFSManageableVolumesMixin"
class, so any RemoteFS-based driver may inherit it (a short usage sketch
follows the commit message below). For the record, it works out of the
box for the NFS driver (special care has been taken to ensure this).

Implements: blueprint remotefs-manage-unmanage-volume
Change-Id: I54695655e563d84e4fb1b76c42f0127c5fb909f7
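For illustration only, a minimal sketch of how a RemoteFS-based driver could opt into the new mixin. The mixin, the RemoteFSSnapDriverDistributed base class, and the class attribute names come from the diff below; "MyRemoteFSDriver", the filename regex, and the ".qcow2" extension choice are made-up examples rather than part of this change.

import os
import re

from cinder.volume.drivers import remotefs as remotefs_drv


class MyRemoteFSDriver(remotefs_drv.RemoteFSManageableVolumesMixin,
                       remotefs_drv.RemoteFSSnapDriverDistributed):
    """Hypothetical driver that inherits manage/unmanage from the mixin."""

    # Only these image formats are reported as safe to manage.
    _SUPPORTED_IMAGE_FORMATS = ['raw', 'qcow2']
    # Optional filename filter applied when listing manageable volumes.
    _MANAGEABLE_IMAGE_RE = re.compile(r'.*\.(?:img|qcow2)$', re.IGNORECASE)

    def _get_managed_vol_expected_path(self, volume, volume_location):
        # Optional hook: drivers that rely on file extensions (as the
        # Windows SMBFS driver does in this change) can override where a
        # newly managed image is expected to end up.
        return os.path.join(volume_location['mountpoint'],
                            volume.name + '.qcow2')

The driver then exposes manage_existing(), manage_existing_get_size(), unmanage() and get_manageable_volumes() directly from the mixin.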
Lucian Petrut 2017-10-06 14:31:21 +03:00
parent 5d0816b0d8
commit ed945da6bf
6 changed files with 568 additions and 10 deletions


@@ -1001,6 +1001,10 @@ class RemoteFSNoSuitableShareFound(RemoteFSException):
    message = _("There is no share which can host %(volume_size)sG")


+class RemoteFSInvalidBackingFile(VolumeDriverException):
+    message = _("File %(path)s has invalid backing file %(backing_file)s.")


# NFS driver
class NfsException(RemoteFSException):
    message = _("Unknown NFS exception")


@@ -15,6 +15,7 @@
import collections
import copy
import os
+import re

import ddt
import mock
@@ -27,6 +28,7 @@ from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder import utils
from cinder.volume.drivers import remotefs
+from cinder.volume import utils as volume_utils


@ddt.ddt
@@ -425,7 +427,7 @@ class RemoteFsSnapDriverTestCase(test.TestCase):
                expected_basename_calls.append(mock.call(backing_file))
            mock_basename.assert_has_calls(expected_basename_calls)
        else:
-            self.assertRaises(exception.RemoteFSException,
+            self.assertRaises(exception.RemoteFSInvalidBackingFile,
                              self._driver._qemu_img_info_base,
                              mock.sentinel.image_path,
                              fake_vol_name, basedir)
@@ -797,3 +799,332 @@ class RevertToSnapshotMixinTestCase(test.TestCase):
            self._fake_snapshot.volume)
        mock_read_info_file.assert_called_once_with(
            mock_local_path_vol_info.return_value)


@ddt.ddt
class RemoteFSManageableVolumesTestCase(test.TestCase):
    def setUp(self):
        super(RemoteFSManageableVolumesTestCase, self).setUp()
        # We'll instantiate this directly for now.
        self._driver = remotefs.RemoteFSManageableVolumesMixin()

    @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
                       '_get_mount_point_for_share', create=True)
    @mock.patch.object(os.path, 'isfile')
    def test_get_manageable_vol_location_invalid(self, mock_is_file,
                                                 mock_get_mount_point):
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self._driver._get_manageable_vol_location,
                          {})

        self._driver._mounted_shares = []
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self._driver._get_manageable_vol_location,
                          {'source-name': '//hots/share/img'})

        self._driver._mounted_shares = ['//host/share']
        mock_get_mount_point.return_value = '/fake_mountpoint'
        mock_is_file.return_value = False

        self.assertRaises(exception.ManageExistingInvalidReference,
                          self._driver._get_manageable_vol_location,
                          {'source-name': '//host/share/subdir/img'})

        mock_is_file.assert_any_call(
            os.path.normpath('/fake_mountpoint/subdir/img'))

    @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
                       '_get_mount_point_for_share', create=True)
    @mock.patch.object(os.path, 'isfile')
    def test_get_manageable_vol_location(self, mock_is_file,
                                         mock_get_mount_point):
        self._driver._mounted_shares = [
            '//host/share2/subdir',
            '//host/share/subdir',
            'host:/dir/subdir'
        ]

        mock_get_mount_point.return_value = '/fake_mountpoint'
        mock_is_file.return_value = True

        location_info = self._driver._get_manageable_vol_location(
            {'source-name': 'host:/dir/subdir/import/img'})

        exp_location_info = {
            'share': 'host:/dir/subdir',
            'mountpoint': mock_get_mount_point.return_value,
            'vol_local_path': '/fake_mountpoint/import/img',
            'vol_remote_path': 'host:/dir/subdir/import/img'
        }
        self.assertEqual(exp_location_info, location_info)

    @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
                       '_get_mount_point_for_share', create=True)
    @mock.patch.object(os.path, 'isfile')
    @mock.patch.object(os.path, 'normpath', lambda x: x.replace('/', '\\'))
    @mock.patch.object(os.path, 'normcase', lambda x: x.lower())
    @mock.patch.object(os.path, 'join', lambda *args: '\\'.join(args))
    @mock.patch.object(os.path, 'sep', '\\')
    def test_get_manageable_vol_location_win32(self, mock_is_file,
                                               mock_get_mount_point):
        self._driver._mounted_shares = [
            '//host/share2/subdir',
            '//host/share/subdir',
            'host:/dir/subdir'
        ]

        mock_get_mount_point.return_value = r'c:\fake_mountpoint'
        mock_is_file.return_value = True

        location_info = self._driver._get_manageable_vol_location(
            {'source-name': '//Host/share/Subdir/import/img'})

        exp_location_info = {
            'share': '//host/share/subdir',
            'mountpoint': mock_get_mount_point.return_value,
            'vol_local_path': r'c:\fake_mountpoint\import\img',
            'vol_remote_path': r'\\host\share\subdir\import\img'
        }
        self.assertEqual(exp_location_info, location_info)

    def test_get_managed_vol_exp_path(self):
        fake_vol = fake_volume.fake_volume_obj(mock.sentinel.context)
        vol_location = dict(mountpoint='fake-mountpoint')

        exp_path = os.path.join(vol_location['mountpoint'],
                                fake_vol.name)
        ret_val = self._driver._get_managed_vol_expected_path(
            fake_vol, vol_location)
        self.assertEqual(exp_path, ret_val)

    @ddt.data(
        {'already_managed': True},
        {'qemu_side_eff': exception.RemoteFSInvalidBackingFile},
        {'qemu_side_eff': Exception},
        {'qemu_side_eff': [mock.Mock(backing_file=None,
                                     file_format='fakefmt')]},
        {'qemu_side_eff': [mock.Mock(backing_file='backing_file',
                                     file_format='raw')]}
    )
    @ddt.unpack
    @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
                       '_qemu_img_info', create=True)
    def test_check_unmanageable_volume(self, mock_qemu_info,
                                       qemu_side_eff=None,
                                       already_managed=False):
        mock_qemu_info.side_effect = qemu_side_eff

        manageable = self._driver._is_volume_manageable(
            mock.sentinel.volume_path,
            already_managed=already_managed)[0]
        self.assertFalse(manageable)

    @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
                       '_qemu_img_info', create=True)
    def test_check_manageable_volume(self, mock_qemu_info,
                                     qemu_side_eff=None,
                                     already_managed=False):
        mock_qemu_info.return_value = mock.Mock(
            backing_file=None,
            file_format='raw')

        manageable = self._driver._is_volume_manageable(
            mock.sentinel.volume_path)[0]
        self.assertTrue(manageable)

    @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
                       '_get_manageable_vol_location')
    @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
                       '_is_volume_manageable')
    def test_manage_existing_unmanageable(self, mock_check_manageable,
                                          mock_get_location):
        fake_vol = fake_volume.fake_volume_obj(mock.sentinel.context)

        mock_get_location.return_value = dict(
            vol_local_path=mock.sentinel.local_path)
        mock_check_manageable.return_value = False, mock.sentinel.resason

        self.assertRaises(exception.ManageExistingInvalidReference,
                          self._driver.manage_existing,
                          fake_vol,
                          mock.sentinel.existing_ref)
        mock_get_location.assert_called_once_with(mock.sentinel.existing_ref)
        mock_check_manageable.assert_called_once_with(
            mock.sentinel.local_path)

    @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
                       '_get_manageable_vol_location')
    @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
                       '_is_volume_manageable')
    @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
                       '_set_rw_permissions', create=True)
    @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
                       '_get_managed_vol_expected_path')
    @mock.patch.object(os, 'rename')
    def test_manage_existing_manageable(self, mock_rename,
                                        mock_get_exp_path,
                                        mock_set_perm,
                                        mock_check_manageable,
                                        mock_get_location):
        fake_vol = fake_volume.fake_volume_obj(mock.sentinel.context)

        mock_get_location.return_value = dict(
            vol_local_path=mock.sentinel.local_path,
            share=mock.sentinel.share)
        mock_check_manageable.return_value = True, None

        exp_ret_val = {'provider_location': mock.sentinel.share}
        ret_val = self._driver.manage_existing(fake_vol,
                                               mock.sentinel.existing_ref)
        self.assertEqual(exp_ret_val, ret_val)

        mock_get_exp_path.assert_called_once_with(
            fake_vol, mock_get_location.return_value)
        mock_set_perm.assert_called_once_with(mock.sentinel.local_path)
        mock_rename.assert_called_once_with(mock.sentinel.local_path,
                                            mock_get_exp_path.return_value)

    @mock.patch.object(image_utils, 'qemu_img_info')
    def _get_rounded_manageable_image_size(self, mock_qemu_info):
        mock_qemu_info.return_value.virtual_size = 1 << 30 + 1

        exp_rounded_size_gb = 2
        size = self._driver._get_rounded_manageable_image_size(
            mock.sentinel.image_path)
        self.assertEqual(exp_rounded_size_gb, size)

        mock_qemu_info.assert_called_once_with(mock.sentinel.image_path)

    @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
                       '_get_manageable_vol_location')
    @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
                       '_get_rounded_manageable_image_size')
    def test_manage_existing_get_size(self, mock_get_size,
                                      mock_get_location):
        mock_get_location.return_value = dict(
            vol_local_path=mock.sentinel.image_path)

        size = self._driver.manage_existing_get_size(
            mock.sentinel.volume,
            mock.sentinel.existing_ref)

        self.assertEqual(mock_get_size.return_value, size)
        mock_get_location.assert_called_once_with(mock.sentinel.existing_ref)
        mock_get_size.assert_called_once_with(mock.sentinel.image_path)

    @ddt.data(
        {},
        {'managed_volume': mock.Mock(size=mock.sentinel.sz),
         'exp_size': mock.sentinel.sz,
         'manageable_check_ret_val': False,
         'exp_manageable': False},
        {'exp_size': None,
         'get_size_side_effect': Exception,
         'exp_manageable': False})
    @ddt.unpack
    @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
                       '_is_volume_manageable')
    @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
                       '_get_rounded_manageable_image_size')
    @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
                       '_get_mount_point_for_share', create=True)
    def test_get_manageable_volume(
            self, mock_get_mount_point,
            mock_get_size, mock_check_manageable,
            managed_volume=None,
            get_size_side_effect=(mock.sentinel.size_gb, ),
            manageable_check_ret_val=True,
            exp_size=mock.sentinel.size_gb,
            exp_manageable=True):
        share = '//host/share'
        mountpoint = '/fake-mountpoint'
        volume_path = '/fake-mountpoint/subdir/vol'
        exp_ret_val = {
            'reference': {'source-name': '//host/share/subdir/vol'},
            'size': exp_size,
            'safe_to_manage': exp_manageable,
            'reason_not_safe': mock.ANY,
            'cinder_id': managed_volume.id if managed_volume else None,
            'extra_info': None,
        }

        mock_get_size.side_effect = get_size_side_effect
        mock_check_manageable.return_value = (manageable_check_ret_val,
                                              mock.sentinel.reason)
        mock_get_mount_point.return_value = mountpoint

        ret_val = self._driver._get_manageable_volume(
            share, volume_path, managed_volume)
        self.assertEqual(exp_ret_val, ret_val)

        mock_check_manageable.assert_called_once_with(
            volume_path, already_managed=managed_volume is not None)
        mock_get_mount_point.assert_called_once_with(share)

        if managed_volume:
            mock_get_size.assert_not_called()
        else:
            mock_get_size.assert_called_once_with(volume_path)

    @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
                       '_get_mount_point_for_share', create=True)
    @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
                       '_get_manageable_volume')
    @mock.patch.object(os, 'walk')
    @mock.patch.object(os.path, 'join', lambda *args: '/'.join(args))
    def test_get_share_manageable_volumes(
            self, mock_walk, mock_get_manageable_volume,
            mock_get_mount_point):
        mount_path = '/fake-mountpoint'

        mock_walk.return_value = [
            [mount_path, ['subdir'], ['volume-1.vhdx']],
            ['/fake-mountpoint/subdir', [], ['volume-0', 'volume-3.vhdx']]]
        mock_get_manageable_volume.side_effect = [
            Exception,
            mock.sentinel.managed_volume]
        self._driver._MANAGEABLE_IMAGE_RE = re.compile('.*\.(?:vhdx)$')

        managed_volumes = {'volume-1': mock.sentinel.vol1}

        exp_manageable = [mock.sentinel.managed_volume]
        manageable_volumes = self._driver._get_share_manageable_volumes(
            mock.sentinel.share,
            managed_volumes)

        self.assertEqual(exp_manageable, manageable_volumes)

        mock_get_manageable_volume.assert_has_calls(
            [mock.call(mock.sentinel.share,
                       '/fake-mountpoint/volume-1.vhdx',
                       mock.sentinel.vol1),
             mock.call(mock.sentinel.share,
                       '/fake-mountpoint/subdir/volume-3.vhdx',
                       None)])

    @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
                       '_get_share_manageable_volumes')
    @mock.patch.object(volume_utils, 'paginate_entries_list')
    def test_get_manageable_volumes(self, mock_paginate, mock_get_share_vols):
        fake_vol = fake_volume.fake_volume_obj(mock.sentinel.context)
        self._driver._mounted_shares = [mock.sentinel.share0,
                                        mock.sentinel.share1]
        mock_get_share_vols.side_effect = [
            Exception, [mock.sentinel.manageable_vol]]

        pagination_args = [
            mock.sentinel.marker, mock.sentinel.limit,
            mock.sentinel.offset, mock.sentinel.sort_keys,
            mock.sentinel.sort_dirs]
        ret_val = self._driver.get_manageable_volumes(
            [fake_vol], *pagination_args)

        self.assertEqual(mock_paginate.return_value, ret_val)
        mock_paginate.assert_called_once_with(
            [mock.sentinel.manageable_vol], *pagination_args)

        exp_managed_vols_dict = {fake_vol.name: fake_vol}
        mock_get_share_vols.assert_has_calls(
            [mock.call(share, exp_managed_vols_dict)
             for share in self._driver._mounted_shares])


@@ -285,7 +285,7 @@ class WindowsSmbFsTestCase(test.TestCase):
        extensions = [
            ".%s" % ext
-            for ext in self._smbfs_driver._SUPPORTED_IMAGE_FORMATS]
+            for ext in self._smbfs_driver._VALID_IMAGE_EXTENSIONS]
        possible_paths = [self._FAKE_VOLUME_PATH + ext
                          for ext in extensions]
        mock_exists.assert_has_calls(
@@ -816,3 +816,15 @@ class WindowsSmbFsTestCase(test.TestCase):
        mock_type = drv._get_vhd_type(qemu_subformat=False)
        self.assertEqual(mock_type, 3)

+    def test_get_managed_vol_expected_path(self):
+        self._vhdutils.get_vhd_format.return_value = 'vhdx'
+        vol_location = dict(vol_local_path=mock.sentinel.image_path,
+                            mountpoint=self._FAKE_MNT_POINT)
+
+        path = self._smbfs_driver._get_managed_vol_expected_path(
+            self.volume, vol_location)
+        self.assertEqual(self._FAKE_VOLUME_PATH, path)
+
+        self._vhdutils.get_vhd_format.assert_called_once_with(
+            mock.sentinel.image_path)


@@ -18,6 +18,7 @@ import collections
import hashlib
import inspect
import json
+import math
import os
import re
import shutil
@@ -764,10 +765,8 @@ class RemoteFSSnapDriverBase(RemoteFSDriver):
            }
        if not re.match(backing_file_template, info.backing_file,
                        re.IGNORECASE):
-            msg = _("File %(path)s has invalid backing file "
-                    "%(bfile)s, aborting.") % {'path': path,
-                                               'bfile': info.backing_file}
-            raise exception.RemoteFSException(msg)
+            raise exception.RemoteFSInvalidBackingFile(
+                path=path, backing_file=info.backing_file)

        info.backing_file = os.path.basename(info.backing_file)
@@ -1780,3 +1779,193 @@ class RevertToSnapshotMixin(object):
        # this class.
        self._delete(snapshot_path)
        self._do_create_snapshot(snapshot, backing_filename, snapshot_path)


class RemoteFSManageableVolumesMixin(object):
    _SUPPORTED_IMAGE_FORMATS = ['raw', 'qcow2']
    _MANAGEABLE_IMAGE_RE = None

    def _get_manageable_vol_location(self, existing_ref):
        if 'source-name' not in existing_ref:
            reason = _('The existing volume reference '
                       'must contain "source-name".')
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref, reason=reason)

        vol_remote_path = os.path.normcase(
            os.path.normpath(existing_ref['source-name']))

        for mounted_share in self._mounted_shares:
            # We don't currently attempt to resolve hostnames. This could
            # be troublesome for some distributed shares, which may have
            # hostnames resolving to multiple addresses.
            norm_share = os.path.normcase(os.path.normpath(mounted_share))
            head, match, share_rel_path = vol_remote_path.partition(norm_share)
            if not (match and share_rel_path.startswith(os.path.sep)):
                continue

            mountpoint = self._get_mount_point_for_share(mounted_share)
            vol_local_path = os.path.join(mountpoint,
                                          share_rel_path.lstrip(os.path.sep))

            LOG.debug("Found mounted share referenced by %s.",
                      vol_remote_path)

            if os.path.isfile(vol_local_path):
                LOG.debug("Found volume %(path)s on share %(share)s.",
                          dict(path=vol_local_path, share=mounted_share))
                return dict(share=mounted_share,
                            mountpoint=mountpoint,
                            vol_local_path=vol_local_path,
                            vol_remote_path=vol_remote_path)
            else:
                LOG.error("Could not find volume %s on the "
                          "specified share.", vol_remote_path)
                break

        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref, reason=_('Volume not found.'))

    def _get_managed_vol_expected_path(self, volume, volume_location):
        # This may be overridden by the drivers.
        return os.path.join(volume_location['mountpoint'],
                            volume.name)

    def _is_volume_manageable(self, volume_path, already_managed=False):
        unmanageable_reason = None

        if already_managed:
            return False, _('Volume already managed.')

        try:
            img_info = self._qemu_img_info(volume_path, volume_name=None)
        except exception.RemoteFSInvalidBackingFile:
            return False, _("Backing file present.")
        except Exception:
            return False, _("Failed to open image.")

        # We're double checking as some drivers do not validate backing
        # files through '_qemu_img_info'.
        if img_info.backing_file:
            return False, _("Backing file present.")

        if img_info.file_format not in self._SUPPORTED_IMAGE_FORMATS:
            unmanageable_reason = _(
                "Unsupported image format: '%s'.") % img_info.file_format
            return False, unmanageable_reason

        return True, None

    def manage_existing(self, volume, existing_ref):
        LOG.info('Managing volume %(volume_id)s with ref %(ref)s',
                 {'volume_id': volume.id, 'ref': existing_ref})

        vol_location = self._get_manageable_vol_location(existing_ref)
        vol_local_path = vol_location['vol_local_path']

        manageable, unmanageable_reason = self._is_volume_manageable(
            vol_local_path)

        if not manageable:
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref, reason=unmanageable_reason)

        expected_vol_path = self._get_managed_vol_expected_path(
            volume, vol_location)

        self._set_rw_permissions(vol_local_path)

        # This should be the last thing we do.
        if expected_vol_path != vol_local_path:
            LOG.info("Renaming imported volume image %(src)s to %(dest)s",
                     dict(src=vol_location['vol_local_path'],
                          dest=expected_vol_path))
            os.rename(vol_location['vol_local_path'],
                      expected_vol_path)

        return {'provider_location': vol_location['share']}

    def _get_rounded_manageable_image_size(self, image_path):
        image_size = image_utils.qemu_img_info(
            image_path, run_as_root=self._execute_as_root).virtual_size
        return int(math.ceil(float(image_size) / units.Gi))

    def manage_existing_get_size(self, volume, existing_ref):
        vol_location = self._get_manageable_vol_location(existing_ref)
        volume_path = vol_location['vol_local_path']
        return self._get_rounded_manageable_image_size(volume_path)

    def unmanage(self, volume):
        pass

    def _get_manageable_volume(self, share, volume_path, managed_volume=None):
        manageable, unmanageable_reason = self._is_volume_manageable(
            volume_path, already_managed=managed_volume is not None)
        size_gb = None
        if managed_volume:
            # We may not be able to query in-use images.
            size_gb = managed_volume.size
        else:
            try:
                size_gb = self._get_rounded_manageable_image_size(volume_path)
            except Exception:
                manageable = False
                unmanageable_reason = (unmanageable_reason or
                                       _("Failed to get size."))

        mountpoint = self._get_mount_point_for_share(share)
        norm_mountpoint = os.path.normcase(os.path.normpath(mountpoint))
        norm_vol_path = os.path.normcase(os.path.normpath(volume_path))

        ref = norm_vol_path.replace(norm_mountpoint, share).replace('\\', '/')

        manageable_volume = {
            'reference': {'source-name': ref},
            'size': size_gb,
            'safe_to_manage': manageable,
            'reason_not_safe': unmanageable_reason,
            'cinder_id': managed_volume.id if managed_volume else None,
            'extra_info': None,
        }
        return manageable_volume

    def _get_share_manageable_volumes(self, share, managed_volumes):
        manageable_volumes = []
        mount_path = self._get_mount_point_for_share(share)

        for dir_path, dir_names, file_names in os.walk(mount_path):
            for file_name in file_names:
                file_name = os.path.normcase(file_name)
                img_path = os.path.join(dir_path, file_name)
                # In the future, we may have the regex filtering images
                # as a config option.
                if (not self._MANAGEABLE_IMAGE_RE or
                        self._MANAGEABLE_IMAGE_RE.match(file_name)):
                    managed_volume = managed_volumes.get(
                        os.path.splitext(file_name)[0])
                    try:
                        manageable_volume = self._get_manageable_volume(
                            share, img_path, managed_volume)
                        manageable_volumes.append(manageable_volume)
                    except Exception as exc:
                        LOG.error(
                            "Failed to get manageable volume info: "
                            "'%(image_path)s'. Exception: %(exc)s.",
                            dict(image_path=img_path, exc=exc))
        return manageable_volumes

    def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
                               sort_keys, sort_dirs):
        manageable_volumes = []
        managed_volumes = {vol.name: vol for vol in cinder_volumes}

        for share in self._mounted_shares:
            try:
                manageable_volumes += self._get_share_manageable_volumes(
                    share, managed_volumes)
            except Exception as exc:
                LOG.error("Failed to get manageable volumes for "
                          "share %(share)s. Exception: %(exc)s.",
                          dict(share=share, exc=exc))

        return volume_utils.paginate_entries_list(
            manageable_volumes, marker, limit, offset, sort_keys, sort_dirs)


@@ -14,6 +14,7 @@
# under the License.

import os
+import re
import sys

from os_brick.remotefs import windows_remotefs as remotefs_brick
@@ -95,6 +96,7 @@ CONF.set_default('reserved_percentage', 5)
@interface.volumedriver
class WindowsSmbfsDriver(remotefs_drv.RevertToSnapshotMixin,
                         remotefs_drv.RemoteFSPoolMixin,
+                        remotefs_drv.RemoteFSManageableVolumesMixin,
                         remotefs_drv.RemoteFSSnapDriverDistributed):
    VERSION = VERSION
@@ -113,8 +115,13 @@ class WindowsSmbfsDriver(remotefs_drv.RevertToSnapshotMixin,
    _MINIMUM_QEMU_IMG_VERSION = '1.6'

-    _SUPPORTED_IMAGE_FORMATS = [_DISK_FORMAT_VHD, _DISK_FORMAT_VHDX]
-    _VALID_IMAGE_EXTENSIONS = _SUPPORTED_IMAGE_FORMATS
+    _SUPPORTED_IMAGE_FORMATS = [_DISK_FORMAT_VHD,
+                                _DISK_FORMAT_VHD_LEGACY,
+                                _DISK_FORMAT_VHDX]
+    _VALID_IMAGE_EXTENSIONS = [_DISK_FORMAT_VHD, _DISK_FORMAT_VHDX]
+    _MANAGEABLE_IMAGE_RE = re.compile(
+        '.*\.(?:%s)$' % '|'.join(_VALID_IMAGE_EXTENSIONS),
+        re.IGNORECASE)

    _always_use_temp_snap_when_cloning = False
    _thin_provisioning_support = True
@@ -277,7 +284,7 @@ class WindowsSmbfsDriver(remotefs_drv.RevertToSnapshotMixin,
        return local_path_template

    def _lookup_local_volume_path(self, volume_path_template):
-        for ext in self._SUPPORTED_IMAGE_FORMATS:
+        for ext in self._VALID_IMAGE_EXTENSIONS:
            volume_path = (volume_path_template + '.' + ext
                           if ext else volume_path_template)
            if os.path.exists(volume_path):
@@ -295,7 +302,7 @@ class WindowsSmbfsDriver(remotefs_drv.RevertToSnapshotMixin,
        if volume_path:
            ext = os.path.splitext(volume_path)[1].strip('.').lower()
-            if ext in self._SUPPORTED_IMAGE_FORMATS:
+            if ext in self._VALID_IMAGE_EXTENSIONS:
                volume_format = ext
            else:
                # Hyper-V relies on file extensions so we're enforcing them.
@@ -611,3 +618,13 @@ class WindowsSmbfsDriver(remotefs_drv.RevertToSnapshotMixin,
        vhd_type = self._vhd_type_mapping[prov_type]
        return vhd_type

+    def _get_managed_vol_expected_path(self, volume, volume_location):
+        fmt = self._vhdutils.get_vhd_format(volume_location['vol_local_path'])
+        return os.path.join(volume_location['mountpoint'],
+                            volume.name + ".%s" % fmt).lower()
+
+    def _set_rw_permissions(self, path):
+        # The SMBFS driver does not manage file permissions. We chose
+        # to leave this up to the deployer.
+        pass


@@ -0,0 +1,5 @@
---
features:
  - |
    The SMBFS driver now supports the volume manage/unmanage feature. Images
    residing on preconfigured shares may be listed and managed by Cinder.
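
For context (not part of the change itself), a sketch of the entries the new get_manageable_volumes() path builds for images found on a share. The field names and the "Volume already managed." reason follow _get_manageable_volume() and _is_volume_manageable() in the diff above; the share, file names, sizes and IDs are invented examples.

# Illustrative only: structure mirrors _get_manageable_volume() above,
# while the concrete values are made up.
manageable_volume_entry = {
    # The reference an operator would pass as "source-name" when managing.
    'reference': {'source-name': '//host/share/subdir/image-to-import.vhdx'},
    'size': 2,                 # virtual size, rounded up to whole GiB
    'safe_to_manage': True,
    'reason_not_safe': None,
    'cinder_id': None,         # set only if the file already backs a volume
    'extra_info': None,
}

# An image already backing a Cinder volume would instead be reported as:
already_managed_entry = {
    'reference': {'source-name': '//host/share/volume-example-id.vhdx'},
    'size': 10,
    'safe_to_manage': False,
    'reason_not_safe': 'Volume already managed.',
    'cinder_id': 'example-volume-id',
    'extra_info': None,
}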