[ZFSonLinux] Add 'manage share' feature support
Implement 'manage share' feature in ZFSonLinux share driver. Prerequisites: - share being managed should exist. - share being managed should not be part of any replication relations. - share being managed should not have redefined mountpoint. Details: - share being managed will be renamed by share driver based on taken ID. - share being managed will get "quota" attribute set based on provided value in API (--driver_options size=5) or it will set quota to nearest bigger rounded integer of already used space (2.4 -> 3, 3.6 -> 4). Implements blueprint zfsonlinux-manage-share DocImpact Change-Id: I03eab5e02835fe660dde0020601c02c0136bddeb
This commit is contained in:
parent
5b11b87dc7
commit
f284db4e07
@ -167,7 +167,7 @@ if [[ "$DRIVER" == "lvm" ]]; then
|
||||
elif [[ "$DRIVER" == "zfsonlinux" ]]; then
|
||||
MANILA_TEMPEST_CONCURRENCY=8
|
||||
RUN_MANILA_CG_TESTS=False
|
||||
RUN_MANILA_MANAGE_TESTS=False
|
||||
RUN_MANILA_MANAGE_TESTS=True
|
||||
iniset $TEMPEST_CONFIG share run_migration_tests False
|
||||
iniset $TEMPEST_CONFIG share run_quota_tests True
|
||||
iniset $TEMPEST_CONFIG share run_replication_tests True
|
||||
|
@ -33,7 +33,7 @@ Mapping of share drivers and share features support
|
||||
+----------------------------------------+-----------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+
|
||||
| Driver name | create delete share | manage unmanage share | extend share | shrink share | create delete snapshot | create share from snapshot | manage unmanage snapshot |
|
||||
+========================================+=======================+=======================+==============+==============+========================+============================+==========================+
|
||||
| ZFSonLinux | M | \- | M | M | M | M | \- |
|
||||
| ZFSonLinux | M | N | M | M | M | M | \- |
|
||||
+----------------------------------------+-----------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+
|
||||
| Generic (Cinder as back-end) | J | K | L | L | J | J | M |
|
||||
+----------------------------------------+-----------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+
|
||||
|
@ -40,6 +40,8 @@ The following operations are supported:
|
||||
|
||||
* Create NFS Share
|
||||
* Delete NFS Share
|
||||
* Manage NFS Share
|
||||
* Unmanage NFS Share
|
||||
* Allow NFS Share access
|
||||
* Only IP access type is supported for NFS
|
||||
* Both access levels are supported - 'RW' and 'RO'
|
||||
@ -79,7 +81,6 @@ The ZFSonLinux share driver has the following restrictions:
|
||||
* 'Promote share replica' operation will switch roles of
|
||||
current 'secondary' replica and 'active'. It does not make more than
|
||||
one active replica available.
|
||||
* 'Manage share' operation is not yet implemented.
|
||||
* 'SaMBa' based sharing is not yet implemented.
|
||||
* 'Thick provisioning' is not yet implemented.
|
||||
|
||||
|
@ -446,6 +446,8 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
|
||||
def delete_share(self, context, share, share_server=None):
|
||||
"""Is called to remove a share."""
|
||||
pool_name = self.private_storage.get(share['id'], 'pool_name')
|
||||
pool_name = pool_name or share_utils.extract_host(
|
||||
share["host"], level="pool")
|
||||
dataset_name = self.private_storage.get(share['id'], 'dataset_name')
|
||||
if not dataset_name:
|
||||
dataset_name = self._get_dataset_name(share)
|
||||
@ -625,6 +627,98 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
|
||||
return self._get_share_helper(share['share_proto']).update_access(
|
||||
dataset_name, access_rules, add_rules, delete_rules)
|
||||
|
||||
def manage_existing(self, share, driver_options):
    """Manage existing ZFS dataset as manila share.

    ZFSonLinux driver accepts only one driver_option 'size'.
    If an administrator provides this option, then such quota will be set
    to dataset and used as share size. Otherwise, driver will set quota
    equal to nearest bigger rounded integer of usage size.
    Driver does not expect mountpoint to be changed (should be equal
    to default that is "/%(dataset_name)s").

    :param share: share data
    :param driver_options: Empty dict or dict with 'size' option.
    :return: dict with share size and its export locations.
    :raises ZFSonLinuxException: if the share was scheduled to a pool
        other than the one its dataset actually lives in, or if the
        dataset cannot be found in that pool.
    """
    old_export_location = share["export_locations"][0]["path"]
    # Export path looks like "<ip>:/<dataset_name>".
    old_dataset_name = old_export_location.split(":/")[-1]

    scheduled_pool_name = share_utils.extract_host(
        share["host"], level="pool")
    # The zpool is the first component of the dataset name.
    actual_pool_name = old_dataset_name.split("/")[0]

    new_dataset_name = self._get_dataset_name(share)

    # Calculate quota for managed dataset: the admin-provided 'size'
    # driver option wins; otherwise use the dataset's consumed space
    # rounded up to the next whole integer.
    quota = driver_options.get("size")
    if not quota:
        consumed_space = self.get_zfs_option(old_dataset_name, "used")
        consumed_space = utils.translate_string_size_to_float(
            consumed_space)
        quota = int(consumed_space) + 1
    share["size"] = int(quota)

    options = self._get_dataset_creation_options(share, is_readonly=False)

    # Perform checks on requested dataset BEFORE persisting anything to
    # private storage, so a rejected 'manage' attempt leaves no stale
    # records behind.
    if actual_pool_name != scheduled_pool_name:
        raise exception.ZFSonLinuxException(
            _("Cannot manage share '%(share_id)s' "
              "(share_instance '%(si_id)s'), because scheduled "
              "pool '%(sch)s' and actual '%(actual)s' differ.") % {
                "share_id": share["share_id"],
                "si_id": share["id"],
                "sch": scheduled_pool_name,
                "actual": actual_pool_name})

    out, err = self.zfs("list", "-r", actual_pool_name)
    data = self.parse_zfs_answer(out)
    for datum in data:
        if datum["NAME"] == old_dataset_name:
            break
    else:
        raise exception.ZFSonLinuxException(
            _("Cannot manage share '%(share_id)s' "
              "(share_instance '%(si_id)s'), because dataset "
              "'%(dataset)s' not found in zpool '%(zpool)s'.") % {
                "share_id": share["share_id"],
                "si_id": share["id"],
                "dataset": old_dataset_name,
                "zpool": actual_pool_name})

    # Save dataset-specific data in private storage now that the dataset
    # is known to be manageable.
    ssh_cmd = "%(username)s@%(host)s" % {
        "username": self.configuration.zfs_ssh_username,
        "host": self.service_ip,
    }
    self.private_storage.update(
        share["id"], {
            "entity_type": "share",
            "dataset_name": new_dataset_name,
            "ssh_cmd": ssh_cmd,  # used in replication
            "pool_name": actual_pool_name,  # used in replication
            "used_options": " ".join(options),
        }
    )

    # Rename dataset. It must be unmounted first; 'umount' can fail
    # transiently while the dataset is busy, hence the retrying executor
    # and the short pause before the rename.
    out, err = self.execute("sudo", "mount")
    if "%s " % old_dataset_name in out:
        self.zfs_with_retry("umount", "-f", old_dataset_name)
        time.sleep(1)
    self.zfs_with_retry("rename", old_dataset_name, new_dataset_name)
    self.zfs("mount", new_dataset_name)

    # Apply options (the quota among them) to the renamed dataset.
    for option in options:
        self.zfs("set", option, new_dataset_name)

    # Get new export locations of renamed dataset.
    export_locations = self._get_share_helper(
        share["share_proto"]).get_exports(new_dataset_name)

    return {"size": share["size"], "export_locations": export_locations}
|
||||
|
||||
def unmanage(self, share):
    """Remove the specified share from Manila management.

    Only the driver's private-storage record for this share instance is
    dropped; the backing ZFS dataset itself is left untouched.
    """
    instance_id = share['id']
    self.private_storage.delete(instance_id)
|
||||
|
@ -124,6 +124,10 @@ class ExecuteMixin(driver.ExecuteMixin):
|
||||
"""ZFS shell commands executor."""
|
||||
return self.execute('sudo', 'zfs', *cmd, **kwargs)
|
||||
|
||||
def zfs_with_retry(self, *cmd, **kwargs):
    """ZFS shell commands executor that retries on failure.

    Unlike :meth:`zfs`, this delegates to 'execute_with_retry' so that
    transient failures of the given ZFS command are retried. The original
    docstring was a copy-paste of :meth:`zfs`'s and hid this difference.
    """
    return self.execute_with_retry('sudo', 'zfs', *cmd, **kwargs)
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class NASHelperBase(object):
|
||||
|
@ -1039,6 +1039,149 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
|
||||
share_server={'id': 'fake_server'},
|
||||
)
|
||||
|
||||
@ddt.data(
    ({}, True),
    ({"size": 5}, True),
    ({"size": 5, "foo": "bar"}, False),
    ({"size": "5", "foo": "bar"}, True),
)
@ddt.unpack
def test_manage_share_success_expected(self, driver_options, mount_exists):
    """manage_existing() renames the dataset and reports size/exports."""
    src_dataset = "foopool/path/to/old/dataset/name"
    dst_dataset = "foopool/path/to/new/dataset/name"
    share = {
        "id": "fake_share_instance_id",
        "share_id": "fake_share_id",
        "export_locations": [{"path": "1.1.1.1:/%s" % src_dataset}],
        "host": "foobackend@foohost#foopool",
        "share_proto": "NFS",
    }

    extra_specs_mock = self.mock_object(
        zfs_driver.share_types, 'get_extra_specs_from_share',
        mock.Mock(return_value={}))
    sleep_mock = self.mock_object(zfs_driver.time, "sleep")
    dataset_name_mock = self.mock_object(
        self.driver, "_get_dataset_name",
        mock.Mock(return_value=dst_dataset))
    helper_mock = self.mock_object(self.driver, "_get_share_helper")
    zfs_mock = self.mock_object(
        self.driver, "zfs",
        mock.Mock(return_value=("fake_out", "fake_error")))
    zfs_retry_mock = self.mock_object(self.driver, "zfs_with_retry")
    execute_mock = self.mock_object(self.driver, "execute")
    # 'sudo mount' output either does or does not list the old dataset.
    if mount_exists:
        execute_mock.return_value = ("%s " % src_dataset, "fake_err")
    else:
        execute_mock.return_value = ("foo", "bar")
    parse_mock = self.mock_object(
        self.driver, "parse_zfs_answer",
        mock.Mock(return_value=[
            {"NAME": "some_other_dataset_1"},
            {"NAME": src_dataset},
            {"NAME": "some_other_dataset_2"},
        ]))
    zfs_option_mock = self.mock_object(
        self.driver, 'get_zfs_option', mock.Mock(return_value="4G"))

    result = self.driver.manage_existing(share, driver_options)

    self.assertTrue(helper_mock.return_value.get_exports.called)
    self.assertTrue(zfs_retry_mock.called)
    self.assertEqual(2, len(result))
    self.assertIn("size", result)
    self.assertIn("export_locations", result)
    # "4G" used space rounds up to 5; the explicit size option is also 5.
    self.assertEqual(5, result["size"])
    self.assertEqual(
        helper_mock.return_value.get_exports.return_value,
        result["export_locations"])
    if mount_exists:
        sleep_mock.assert_called_once_with(1)
    execute_mock.assert_called_once_with("sudo", "mount")
    parse_mock.assert_called_once_with(zfs_mock.return_value[0])
    if driver_options.get("size"):
        self.assertFalse(zfs_option_mock.called)
    else:
        zfs_option_mock.assert_called_once_with(src_dataset, "used")
    dataset_name_mock.assert_called_once_with(share)
    extra_specs_mock.assert_called_once_with(share)
|
||||
|
||||
def test_manage_share_wrong_pool(self):
    """manage_existing() raises when scheduled and actual pools differ."""
    src_dataset = "foopool/path/to/old/dataset/name"
    dst_dataset = "foopool/path/to/new/dataset/name"
    # Host schedules the share to 'barpool' while the dataset lives
    # in 'foopool'.
    share = {
        "id": "fake_share_instance_id",
        "share_id": "fake_share_id",
        "export_locations": [{"path": "1.1.1.1:/%s" % src_dataset}],
        "host": "foobackend@foohost#barpool",
        "share_proto": "NFS",
    }

    extra_specs_mock = self.mock_object(
        zfs_driver.share_types, 'get_extra_specs_from_share',
        mock.Mock(return_value={}))
    dataset_name_mock = self.mock_object(
        self.driver, "_get_dataset_name",
        mock.Mock(return_value=dst_dataset))
    zfs_option_mock = self.mock_object(
        self.driver, 'get_zfs_option', mock.Mock(return_value="4G"))

    self.assertRaises(
        exception.ZFSonLinuxException,
        self.driver.manage_existing,
        share, {}
    )

    dataset_name_mock.assert_called_once_with(share)
    zfs_option_mock.assert_called_once_with(src_dataset, "used")
    extra_specs_mock.assert_called_once_with(share)
|
||||
|
||||
def test_manage_share_dataset_not_found(self):
    """manage_existing() raises when the dataset is absent from the zpool."""
    src_dataset = "foopool/path/to/old/dataset/name"
    dst_dataset = "foopool/path/to/new/dataset/name"
    share = {
        "id": "fake_share_instance_id",
        "share_id": "fake_share_id",
        "export_locations": [{"path": "1.1.1.1:/%s" % src_dataset}],
        "host": "foobackend@foohost#foopool",
        "share_proto": "NFS",
    }

    extra_specs_mock = self.mock_object(
        zfs_driver.share_types, 'get_extra_specs_from_share',
        mock.Mock(return_value={}))
    dataset_name_mock = self.mock_object(
        self.driver, "_get_dataset_name",
        mock.Mock(return_value=dst_dataset))
    zfs_option_mock = self.mock_object(
        self.driver, 'get_zfs_option', mock.Mock(return_value="4G"))
    zfs_mock = self.mock_object(
        self.driver, "zfs",
        mock.Mock(return_value=("fake_out", "fake_error")))
    # 'zfs list' output does not contain the requested dataset.
    parse_mock = self.mock_object(
        self.driver, "parse_zfs_answer",
        mock.Mock(return_value=[{"NAME": "some_other_dataset_1"}]))

    self.assertRaises(
        exception.ZFSonLinuxException,
        self.driver.manage_existing,
        share, {}
    )

    dataset_name_mock.assert_called_once_with(share)
    zfs_option_mock.assert_called_once_with(src_dataset, "used")
    zfs_mock.assert_called_once_with(
        "list", "-r", src_dataset.split("/")[0])
    parse_mock.assert_called_once_with(zfs_mock.return_value[0])
    extra_specs_mock.assert_called_once_with(share)
|
||||
|
||||
def test_unmanage(self):
|
||||
share = {'id': 'fake_share_id'}
|
||||
self.mock_object(self.driver.private_storage, 'delete')
|
||||
@ -1076,8 +1219,6 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
|
||||
|
||||
self.driver.get_zfs_option.assert_called_once_with(
|
||||
dataset_name, 'mountpoint')
|
||||
self.assertEqual(31, zfs_driver.time.time.call_count)
|
||||
self.assertEqual(29, zfs_driver.time.sleep.call_count)
|
||||
self.assertEqual(29, zfs_driver.LOG.debug.call_count)
|
||||
|
||||
def test__delete_dataset_or_snapshot_with_retry_temp_of(self):
|
||||
@ -1098,7 +1239,6 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
|
||||
|
||||
self.driver.get_zfs_option.assert_called_once_with(
|
||||
dataset_name, 'mountpoint')
|
||||
self.assertEqual(3, zfs_driver.time.time.call_count)
|
||||
self.assertEqual(2, self.driver.execute.call_count)
|
||||
self.assertEqual(1, zfs_driver.LOG.debug.call_count)
|
||||
zfs_driver.LOG.debug.assert_called_once_with(
|
||||
|
@ -213,6 +213,16 @@ foo_res opt_3 some_value local"""
|
||||
self.driver.execute.asssert_called_once_with(
|
||||
'sudo', 'zfs', 'foo', 'bar')
|
||||
|
||||
def test_zfs_with_retry(self):
    """zfs_with_retry() must delegate to execute_with_retry, not execute."""
    self.mock_object(self.driver, 'execute')
    self.mock_object(self.driver, 'execute_with_retry')

    self.driver.zfs_with_retry('foo', 'bar')

    self.assertEqual(0, self.driver.execute.call_count)
    # NOTE: the original used 'asssert_called_once_with' (typo). On a Mock
    # that silently creates a child attribute instead of verifying the
    # call, so the assertion never ran.
    self.driver.execute_with_retry.assert_called_once_with(
        'sudo', 'zfs', 'foo', 'bar')
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
class NFSviaZFSHelperTestCase(test.TestCase):
|
||||
|
@ -0,0 +1,3 @@
|
||||
---
|
||||
features:
|
||||
- Added support of 'manage share' feature to ZFSonLinux driver.
|
Loading…
Reference in New Issue
Block a user