Merge "[ZFSonLinux] Create share from snapshot in different backends"

Zuul authored 2020-04-10 09:03:38 +00:00; committed by Gerrit Code Review
commit c23797fe0f
5 changed files with 96 additions and 33 deletions

View File

@@ -243,6 +243,7 @@ elif [[ "$DRIVER" == "zfsonlinux" ]]; then
     iniset $TEMPEST_CONFIG share image_with_share_tools 'manila-service-image-master'
     iniset $TEMPEST_CONFIG auth use_dynamic_credentials True
     iniset $TEMPEST_CONFIG share capability_snapshot_support True
+    iniset $TEMPEST_CONFIG share run_create_share_from_snapshot_in_another_pool_or_az_tests True
 elif [[ "$DRIVER" == "dummy" ]]; then
     MANILA_TEMPEST_CONCURRENCY=24
     MANILA_CONFIGURE_DEFAULT_TYPES=False

View File

@@ -158,6 +158,8 @@ elif [[ "$DRIVER" == "zfsonlinux" ]]; then
     MANILA_SERVICE_IMAGE_ENABLED=True
     echo "SHARE_DRIVER=manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver" >> $localconf
     echo "RUN_MANILA_REPLICATION_TESTS=True" >> $localconf
+    # Enable using the scheduler when creating a share from snapshot
+    echo "MANILA_USE_SCHEDULER_CREATING_SHARE_FROM_SNAPSHOT=True" >> $localconf
     # Set the replica_state_update_interval to 60 seconds to make
     # replication tests run faster. The default is 300, which is greater than
     # the build timeout for ZFS on the gate.

View File

@@ -582,31 +582,48 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
     def create_share_from_snapshot(self, context, share, snapshot,
                                    share_server=None, parent_share=None):
         """Is called to create a share from snapshot."""
+        src_backend_name = share_utils.extract_host(
+            snapshot.share_instance['host'], level='backend_name'
+        )
+        src_snapshot_name = self._get_saved_snapshot_name(snapshot)
         dataset_name = self._get_dataset_name(share)
-        ssh_cmd = '%(username)s@%(host)s' % {
+        dst_backend_ssh_cmd = '%(username)s@%(host)s' % {
             'username': self.configuration.zfs_ssh_username,
             'host': self.service_ip,
         }
-        pool_name = share_utils.extract_host(share['host'], level='pool')
+        dst_backend_pool_name = share_utils.extract_host(share['host'],
+                                                         level='pool')
         options = self._get_dataset_creation_options(share, is_readonly=False)
         self.private_storage.update(
             share['id'], {
                 'entity_type': 'share',
                 'dataset_name': dataset_name,
-                'ssh_cmd': ssh_cmd,  # used in replication
-                'pool_name': pool_name,  # used in replication
+                'ssh_cmd': dst_backend_ssh_cmd,  # used in replication
+                'pool_name': dst_backend_pool_name,  # used in replication
                 'used_options': options,
             }
         )
-        snapshot_name = self._get_saved_snapshot_name(snapshot)
+        # NOTE(andrebeltrami): Implementing the support for create share
+        # from snapshot in different backends in different hosts
+        src_config = get_backend_configuration(src_backend_name)
+        src_backend_ssh_cmd = '%(username)s@%(host)s' % {
+            'username': src_config.zfs_ssh_username,
+            'host': src_config.zfs_service_ip,
+        }
         self.execute(
             # NOTE(vponomaryov): SSH is used as workaround for 'execute'
-            # implementation restriction that does not support usage of '|'.
-            'ssh', ssh_cmd,
-            'sudo', 'zfs', 'send', '-vD', snapshot_name, '|',
+            # implementation restriction that does not support usage
+            # of '|'.
+            'ssh', src_backend_ssh_cmd,
+            'sudo', 'zfs', 'send', '-vD', src_snapshot_name, '|',
+            'ssh', dst_backend_ssh_cmd,
             'sudo', 'zfs', 'receive', '-v', dataset_name,
         )
         # Apply options based on used share type that may differ from
         # one used for original share.
         for option in options:
@@ -615,7 +632,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
         # Delete with retry as right after creation it may be temporary busy.
         self.execute_with_retry(
             'sudo', 'zfs', 'destroy',
-            dataset_name + '@' + snapshot_name.split('@')[-1])
+            dataset_name + '@' + src_snapshot_name.split('@')[-1])
         return self._get_share_helper(
             share['share_proto']).create_exports(dataset_name)
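
For readers skimming the diff, the net effect of the new send/receive call can be shown with a short, standalone sketch. The host names, user names, and dataset names below are made up for illustration; the point is that everything after the first 'ssh' is handed to the source backend's shell as a single command line, so it is that remote shell, not manila's execute() helper, that performs the pipe from 'zfs send' into an 'ssh ... zfs receive' towards the destination backend.

# Hedged illustration only; the values are hypothetical, not taken from the patch.
src_backend_ssh_cmd = 'zfsuser@203.0.113.10'   # backend holding the snapshot
dst_backend_ssh_cmd = 'zfsuser@203.0.113.20'   # backend that will hold the new share
src_snapshot_name = 'srcpool/some_prefix_parent_share@prefix_snapshot_id'
dataset_name = 'dstpool/some_prefix_new_share'

cmd = (
    'ssh', src_backend_ssh_cmd,
    'sudo', 'zfs', 'send', '-vD', src_snapshot_name, '|',
    'ssh', dst_backend_ssh_cmd,
    'sudo', 'zfs', 'receive', '-v', dataset_name,
)

# The driver would pass these arguments to self.execute(*cmd);
# here we only print the resulting command line.
print(' '.join(cmd))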

View File

@@ -23,6 +23,7 @@ from manila import exception
 from manila.share.drivers.ganesha import utils as ganesha_utils
 from manila.share.drivers.zfsonlinux import driver as zfs_driver
 from manila import test
+from manila.tests import db_utils
 CONF = cfg.CONF
@@ -792,7 +793,13 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
             share_server={'id': 'fake_server'},
         )
-    def test_create_share_from_snapshot(self):
+    @ddt.data({'src_backend_name': 'backend_a', 'src_user': 'someuser',
+               'src_ip': '2.2.2.2'},
+              {'src_backend_name': 'backend_b', 'src_user': 'someuser2',
+               'src_ip': '3.3.3.3'})
+    @ddt.unpack
+    def test_create_share_from_snapshot(self, src_backend_name, src_user,
+                                        src_ip):
         mock_get_helper = self.mock_object(self.driver, '_get_share_helper')
         self.mock_object(self.driver, 'zfs')
         self.mock_object(self.driver, 'execute')
@@ -801,19 +808,31 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
             'get_extra_specs_from_share',
             mock.Mock(return_value={}))
         context = 'fake_context'
-        share = {
-            'id': 'fake_share_id',
-            'host': 'hostname@backend_name#bar',
-            'share_proto': 'NFS',
-            'size': 4,
-        }
-        snapshot = {
-            'id': 'fake_snapshot_instance_id',
-            'snapshot_id': 'fake_snapshot_id',
-            'host': 'hostname@backend_name#bar',
-            'size': 4,
-            'share_instance_id': share['id'],
-        }
+        dst_backend_name = 'backend_a'
+        parent_share = db_utils.create_share_without_instance(
+            id='fake_share_id_1',
+            size=4
+        )
+        parent_instance = db_utils.create_share_instance(
+            id='fake_parent_instance',
+            share_id=parent_share['id'],
+            host='hostname@%s#bar' % src_backend_name
+        )
+        share = db_utils.create_share(
+            id='fake_share_id_2',
+            host='hostname@%s#bar' % dst_backend_name,
+            size=4
+        )
+        snapshot = db_utils.create_snapshot(
+            id='fake_snap_id_1',
+            share_id='fake_share_id_1'
+        )
+        snap_instance = db_utils.create_snapshot_instance(
+            id='fake_snap_instance',
+            snapshot_id=snapshot['id'],
+            share_instance_id=parent_instance['id']
+        )
         dataset_name = 'bar/subbar/some_prefix_%s' % share['id']
         snap_tag = 'prefix_%s' % snapshot['id']
         snap_name = '%(dataset)s@%(tag)s' % {
@@ -823,43 +842,60 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
         self.driver.share_export_ip = '1.1.1.1'
         self.driver.service_ip = '2.2.2.2'
         self.driver.private_storage.update(
-            snapshot['id'], {'snapshot_name': snap_name})
+            snap_instance['id'], {'snapshot_name': snap_name})
         self.driver.private_storage.update(
-            snapshot['snapshot_id'], {'snapshot_tag': snap_tag})
+            snap_instance['snapshot_id'], {'snapshot_tag': snap_tag})
         self.driver.private_storage.update(
-            snapshot['share_instance_id'], {'dataset_name': dataset_name})
+            snap_instance['share_instance_id'],
+            {'dataset_name': dataset_name})
+        self.mock_object(
+            zfs_driver, 'get_backend_configuration',
+            mock.Mock(return_value=type(
+                'FakeConfig', (object,), {
+                    'zfs_ssh_username': src_user,
+                    'zfs_service_ip': src_ip
+                })))
         result = self.driver.create_share_from_snapshot(
-            context, share, snapshot, share_server=None)
+            context, share, snap_instance, share_server=None)
         self.assertEqual(
             mock_get_helper.return_value.create_exports.return_value,
             result,
         )
+        dst_ssh_host = (self.configuration.zfs_ssh_username +
+                        '@' + self.driver.service_ip)
+        src_ssh_host = src_user + '@' + src_ip
         self.assertEqual(
             'share',
             self.driver.private_storage.get(share['id'], 'entity_type'))
         self.assertEqual(
             dataset_name,
-            self.driver.private_storage.get(share['id'], 'dataset_name'))
+            self.driver.private_storage.get(
+                snap_instance['share_instance_id'], 'dataset_name'))
         self.assertEqual(
-            'someuser@2.2.2.2',
+            dst_ssh_host,
             self.driver.private_storage.get(share['id'], 'ssh_cmd'))
         self.assertEqual(
             'bar',
             self.driver.private_storage.get(share['id'], 'pool_name'))
         self.driver.execute.assert_has_calls([
             mock.call(
-                'ssh', 'someuser@2.2.2.2',
+                'ssh', src_ssh_host,
                 'sudo', 'zfs', 'send', '-vD', snap_name, '|',
+                'ssh', dst_ssh_host,
                 'sudo', 'zfs', 'receive', '-v',
-                'bar/subbar/some_prefix_fake_share_id'),
+                '%s' % dataset_name),
             mock.call(
                 'sudo', 'zfs', 'destroy',
-                'bar/subbar/some_prefix_fake_share_id@%s' % snap_tag),
+                '%s@%s' % (dataset_name, snap_tag)),
         ])
         self.driver.zfs.assert_has_calls([
-            mock.call('set', opt, 'bar/subbar/some_prefix_fake_share_id')
+            mock.call('set', opt, '%s' % dataset_name)
             for opt in ('quota=4G', 'bark=barv', 'readonly=off', 'fook=foov')
         ], any_order=True)
         mock_get_helper.assert_has_calls([
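
The decorators added above come from the ddt library that this test module already uses. The following is a minimal, self-contained sketch, with made-up class and data (not from the patch), of how @ddt.data plus @ddt.unpack expand one test method into one test case per supplied dict, passing the dict values as keyword arguments.

# Hedged ddt illustration; class name and values here are hypothetical.
import unittest

import ddt


@ddt.ddt
class SshHostFormattingTestCase(unittest.TestCase):

    @ddt.data({'src_user': 'someuser', 'src_ip': '2.2.2.2'},
              {'src_user': 'someuser2', 'src_ip': '3.3.3.3'})
    @ddt.unpack
    def test_build_src_ssh_host(self, src_user, src_ip):
        # Mirrors how the driver formats 'user@host' for the source backend.
        ssh_host = '%(username)s@%(host)s' % {
            'username': src_user, 'host': src_ip}
        self.assertEqual(src_user + '@' + src_ip, ssh_host)


if __name__ == '__main__':
    unittest.main()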

View File

@@ -0,0 +1,7 @@
+---
+upgrade:
+  - |
+    The create share from snapshot operation was improved in the ZFSonLinux
+    driver. Operators can now create a share from a snapshot in a different
+    pool or backend by enabling the Manila API configuration option
+    ``[DEFAULT]/use_scheduler_creating_share_from_snapshot``.
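
Outside of the devstack hooks shown earlier, enabling the behavior described in this note would look roughly like the manila.conf excerpt below. The section and option name are taken from the note itself; the surrounding deployment details are omitted and the excerpt is illustrative rather than a complete configuration.

[DEFAULT]
# Let the scheduler choose a pool/backend when creating a share from a
# snapshot, instead of pinning the new share to the parent share's host.
use_scheduler_creating_share_from_snapshot = True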