Merge "[ZFSonLinux] Fix share migration support"
commit 29c05e72f5

@@ -141,7 +141,7 @@ elif [[ "$DRIVER" == "zfsonlinux" ]]; then
     # replication tests run faster. The default is 300, which is greater than
     # the build timeout for ZFS on the gate.
     echo "MANILA_REPLICA_STATE_UPDATE_INTERVAL=60" >> $localrc_path
-    echo "MANILA_ZFSONLINUX_USE_SSH=False" >> $localrc_path
+    echo "MANILA_ZFSONLINUX_USE_SSH=True" >> $localrc_path
 elif [[ "$DRIVER" == "container" ]]; then
     echo "SHARE_DRIVER=manila.share.drivers.container.driver.ContainerShareDriver" >> $localrc_path
     echo "SHARE_BACKING_FILE_SIZE=32000M" >> $localrc_path
@@ -19,6 +19,7 @@ and exports them as shares.
 """
 
 import math
+import os
 import time
 
 from oslo_config import cfg
@@ -1377,9 +1378,12 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
         src_dataset_name = self.private_storage.get(
             source_share['id'], 'dataset_name')
         dst_dataset_name = self._get_dataset_name(destination_share)
-        ssh_cmd = '%(username)s@%(host)s' % {
-            'username': self.configuration.zfs_ssh_username,
-            'host': self.service_ip,
+        backend_name = share_utils.extract_host(
+            destination_share['host'], level='backend_name')
+        config = get_backend_configuration(backend_name)
+        remote_ssh_cmd = '%(username)s@%(host)s' % {
+            'username': config.zfs_ssh_username,
+            'host': config.zfs_service_ip,
         }
         snapshot_tag = self._get_migration_snapshot_tag(destination_share)
         src_snapshot_name = (
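
What this hunk changes: the ssh target used for the migration is now built from the destination backend's own configuration, looked up via get_backend_configuration(), instead of the local backend's zfs_ssh_username and service_ip. A minimal sketch of the string that results, with made-up option values standing in for the destination backend's real configuration:

    # Hypothetical stand-in for the object returned by
    # get_backend_configuration(backend_name) for the destination backend.
    class DestinationConfig(object):
        zfs_ssh_username = 'fake_username'
        zfs_service_ip = '203.0.113.10'

    config = DestinationConfig()
    remote_ssh_cmd = '%(username)s@%(host)s' % {
        'username': config.zfs_ssh_username,
        'host': config.zfs_service_ip,
    }
    assert remote_ssh_cmd == 'fake_username@203.0.113.10'
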
@@ -1396,7 +1400,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
         self.private_storage.update(destination_share['id'], {
             'entity_type': 'share',
             'dataset_name': dst_dataset_name,
-            'ssh_cmd': ssh_cmd,
+            'ssh_cmd': remote_ssh_cmd,
             'pool_name': share_utils.extract_host(
                 destination_share['host'], level='pool'),
             'migr_snapshot_tag': snapshot_tag,
@@ -1407,17 +1411,17 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
 
         # Send/receive temporary snapshot
         cmd = (
-            'nohup '
             'sudo zfs send -vDR ' + src_snapshot_name + ' '
-            '| ssh ' + ssh_cmd + ' '
-            'sudo zfs receive -v ' + dst_dataset_name + ' '
-            '& exit 0'
+            '| ssh ' + remote_ssh_cmd + ' '
+            'sudo zfs receive -v ' + dst_dataset_name
         )
-        # TODO(vponomaryov): following works only
-        # when config option zfs_use_ssh is set to "False". Because
-        # SSH coneector does not support that "shell=True" feature that is
-        # required in current situation.
-        self.execute(cmd, shell=True)
+        filename = dst_dataset_name.replace('/', '_')
+        with utils.tempdir() as tmpdir:
+            tmpfilename = os.path.join(tmpdir, '%s.sh' % filename)
+            with open(tmpfilename, "w") as migr_script:
+                migr_script.write(cmd)
+            self.execute('sudo', 'chmod', '755', tmpfilename)
+            self.execute('nohup', tmpfilename, '&')
 
     @ensure_share_server_not_provided
     def migration_continue(
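
The removed TODO spells out the old limitation: execute(cmd, shell=True) only worked while zfs_use_ssh was False, because the SSH connector offers no shell=True equivalent. The replacement writes the 'zfs send | ssh ... zfs receive' pipeline into a temporary shell script and detaches it with nohup, so only plain argument-vector execs are needed. A standalone sketch of that pattern, using subprocess and tempfile in place of the driver's execute() and utils.tempdir() helpers, with an explicit shebang added for the sketch's own portability:

    import os
    import subprocess
    import tempfile


    def run_detached(pipeline_cmd):
        """Write a shell pipeline to a temporary script and launch it detached."""
        tmpdir = tempfile.mkdtemp()
        script_path = os.path.join(tmpdir, 'zfs_migration.sh')
        with open(script_path, 'w') as migr_script:
            migr_script.write('#!/bin/sh\n' + pipeline_cmd + '\n')
        # From here on only plain argument-vector execs are used -- no shell=True.
        subprocess.check_call(['chmod', '755', script_path])
        # Stands in for the driver's execute('nohup', tmpfilename, '&') call.
        subprocess.Popen(['nohup', script_path])
        return script_path

    # Placeholder pipeline mirroring the cmd built in the hunk above:
    # run_detached('sudo zfs send -vDR pool/src@tag '
    #              '| ssh user@203.0.113.10 sudo zfs receive -v pool/dst')
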
@@ -15,6 +15,7 @@
 
 import ddt
 import mock
+
 from oslo_config import cfg
 
 from manila import context
@@ -29,6 +30,7 @@ CONF = cfg.CONF
 class FakeConfig(object):
     def __init__(self, *args, **kwargs):
         self.driver_handles_share_servers = False
+        self.share_driver = 'fake_share_driver_name'
         self.share_backend_name = 'FAKE_BACKEND_NAME'
         self.zfs_share_export_ip = kwargs.get(
             "zfs_share_export_ip", "1.1.1.1")
@@ -2044,3 +2046,63 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
         self.assertIn('id', result)
         self.assertEqual(expected_status, result['status'])
         self.assertEqual(snapshot_instance['id'], result['id'])
+
+    def test_migration_start(self):
+        src_share = {
+            'id': 'fake_src_share_id',
+            'host': 'foohost@foobackend#foopool',
+        }
+        src_dataset_name = 'foo_dataset_name'
+        dst_share = {
+            'id': 'fake_dst_share_id',
+            'host': 'barhost@barbackend#barpool',
+        }
+        dst_dataset_name = 'bar_dataset_name'
+        snapshot_tag = 'fake_migration_snapshot_tag'
+        self.mock_object(
+            self.driver,
+            '_get_dataset_name',
+            mock.Mock(return_value=dst_dataset_name))
+        self.mock_object(
+            self.driver,
+            '_get_migration_snapshot_tag',
+            mock.Mock(return_value=snapshot_tag))
+        self.mock_object(
+            zfs_driver,
+            'get_backend_configuration',
+            mock.Mock(return_value=type(
+                'FakeConfig', (object,), {
+                    'zfs_ssh_username': (
+                        self.driver.configuration.zfs_ssh_username),
+                    'zfs_service_ip': self.driver.configuration.zfs_service_ip,
+                })))
+        self.mock_object(self.driver, 'execute')
+        self.driver.private_storage.update(
+            src_share['id'], {'dataset_name': src_dataset_name})
+
+        self.driver.migration_start(
+            self._context, src_share, dst_share)
+
+        src_snapshot_name = (
+            '%(dataset_name)s@%(snapshot_tag)s' % {
+                'snapshot_tag': snapshot_tag,
+                'dataset_name': src_dataset_name,
+            }
+        )
+        self.driver.execute.assert_has_calls([
+            mock.call('sudo', 'zfs', 'snapshot', src_snapshot_name),
+            mock.call('sudo', 'chmod', '755', mock.ANY),
+            mock.call('nohup', mock.ANY, '&'),
+        ])
+        self.driver._get_migration_snapshot_tag.assert_called_once_with(
+            dst_share)
+        self.driver._get_dataset_name.assert_called_once_with(
+            dst_share)
+        for k, v in (('dataset_name', dst_dataset_name),
+                     ('migr_snapshot_tag', snapshot_tag),
+                     ('pool_name', 'barpool'),
+                     ('ssh_cmd',
+                      self.driver.configuration.zfs_ssh_username + '@' +
+                      self.driver.configuration.zfs_service_ip)):
+            self.assertEqual(
+                v, self.driver.private_storage.get(dst_share['id'], k))
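
A note on the new test: the temporary script path is generated at run time, so the chmod and nohup arguments are matched with mock.ANY rather than an exact filename. A minimal, driver-independent illustration of that assert_has_calls pattern:

    import mock  # unittest.mock behaves the same for this example

    executor = mock.Mock()
    executor('sudo', 'chmod', '755', '/tmp/tmpXYZ/bar_dataset_name.sh')
    executor('nohup', '/tmp/tmpXYZ/bar_dataset_name.sh', '&')

    executor.assert_has_calls([
        mock.call('sudo', 'chmod', '755', mock.ANY),
        mock.call('nohup', mock.ANY, '&'),
    ])
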