[ZFSonLinux] Add share migration support

Add support for the share migration feature to the ZFSonLinux driver.

Implements BP zfsonlinux-share-migration
Change-Id: If9e1fec9a6d4f96d2bb5c176c6249fafc7e3e891
commit 976e2d58cb (parent 6dad1666de)
@@ -174,6 +174,7 @@ elif [[ "$DRIVER" == "zfsonlinux" ]]; then
     RUN_MANILA_MANAGE_TESTS=True
     RUN_MANILA_MANAGE_SNAPSHOT_TESTS=True
     iniset $TEMPEST_CONFIG share run_host_assisted_migration_tests False
+    iniset $TEMPEST_CONFIG share run_driver_assisted_migration_tests True
     iniset $TEMPEST_CONFIG share run_quota_tests True
     iniset $TEMPEST_CONFIG share run_replication_tests True
     iniset $TEMPEST_CONFIG share run_shrink_tests True
@@ -141,7 +141,7 @@ elif [[ "$DRIVER" == "zfsonlinux" ]]; then
     # replication tests run faster. The default is 300, which is greater than
     # the build timeout for ZFS on the gate.
     echo "MANILA_REPLICA_STATE_UPDATE_INTERVAL=60" >> $localrc_path
-    echo "MANILA_ZFSONLINUX_USE_SSH=True" >> $localrc_path
+    echo "MANILA_ZFSONLINUX_USE_SSH=False" >> $localrc_path
 elif [[ "$DRIVER" == "container" ]]; then
     echo "SHARE_DRIVER=manila.share.drivers.container.driver.ContainerShareDriver" >> $localrc_path
     echo "SHARE_BACKING_FILE_SIZE=32000M" >> $localrc_path
@@ -144,6 +144,9 @@ zpool: CommandFilter, zpool, root
 # manila/share/drivers/zfsonlinux/utils.py
 zfs: CommandFilter, zfs, root
 
+# manila/share/drivers/zfsonlinux/driver.py
+kill: CommandFilter, kill, root
+
 # manila/data/utils.py: 'ls', '-pA1', '--group-directories-first', '%s'
 ls: CommandFilter, ls, root
 
@@ -30,8 +30,10 @@ from oslo_utils import timeutils
 from manila.common import constants
 from manila import exception
 from manila.i18n import _, _LI, _LW
+from manila.share import configuration
 from manila.share import driver
 from manila.share.drivers.zfsonlinux import utils as zfs_utils
+from manila.share.manager import share_manager_opts  # noqa
 from manila.share import share_types
 from manila.share import utils as share_utils
 from manila import utils
@@ -109,6 +111,11 @@ zfsonlinux_opts = [
         required=True,
         default="tmp_snapshot_for_replication_",
         help="Set snapshot prefix for usage in ZFS replication. Required."),
+    cfg.StrOpt(
+        "zfs_migration_snapshot_prefix",
+        required=True,
+        default="tmp_snapshot_for_share_migration_",
+        help="Set snapshot prefix for usage in ZFS migration. Required."),
 ]
 
 CONF = cfg.CONF
@@ -119,7 +126,8 @@ LOG = log.getLogger(__name__)
 def ensure_share_server_not_provided(f):
 
     def wrap(self, context, *args, **kwargs):
-        server = kwargs.get('share_server')
+        server = kwargs.get(
+            "share_server", kwargs.get("destination_share_server"))
         if server:
             raise exception.InvalidInput(
                 reason=_("Share server handling is not available. "
@@ -131,6 +139,27 @@ def ensure_share_server_not_provided(f):
     return wrap
 
 
+def get_backend_configuration(backend_name):
+    config_stanzas = CONF.list_all_sections()
+    if backend_name not in config_stanzas:
+        msg = _("Could not find backend stanza %(backend_name)s in "
+                "configuration which is required for share replication and "
+                "migration. Available stanzas are %(stanzas)s")
+        params = {
+            "stanzas": config_stanzas,
+            "backend_name": backend_name,
+        }
+        raise exception.BadConfigurationException(reason=msg % params)
+
+    config = configuration.Configuration(
+        driver.share_opts, config_group=backend_name)
+    config.append_config_values(zfsonlinux_opts)
+    config.append_config_values(share_manager_opts)
+    config.append_config_values(driver.ssh_opts)
+
+    return config
+
+
 class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
 
     def __init__(self, *args, **kwargs):
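
The lookup above keys off config stanza names, so every replication or migration peer must be described in the local configuration file even when it runs on another host. A minimal sketch of the expected behavior, using hypothetical stanza names (`backend_a` exists as a `[backend_a]` stanza, `missing_backend` does not):

    # Hypothetical usage of the helper added above.
    config = get_backend_configuration('backend_a')
    print(config.zfs_service_ip)  # option read from the [backend_a] stanza

    # A stanza that is absent raises BadConfigurationException:
    get_backend_configuration('missing_backend')
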
@@ -138,6 +167,8 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
             [False], *args, config_opts=[zfsonlinux_opts], **kwargs)
         self.replica_snapshot_prefix = (
             self.configuration.zfs_replica_snapshot_prefix)
+        self.migration_snapshot_prefix = (
+            self.configuration.zfs_migration_snapshot_prefix)
         self.backend_name = self.configuration.safe_get(
             'share_backend_name') or 'ZFSonLinux'
         self.zpool_list = self._get_zpool_list()
@@ -151,6 +182,29 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
         # Set config based capabilities
         self._init_common_capabilities()
 
+        self._shell_executors = {}
+
+    def _get_shell_executor_by_host(self, host):
+        backend_name = share_utils.extract_host(host, level='backend_name')
+        if backend_name in CONF.enabled_share_backends:
+            # Return executor of this host
+            return self.execute
+        elif backend_name not in self._shell_executors:
+            config = get_backend_configuration(backend_name)
+            self._shell_executors[backend_name] = (
+                zfs_utils.get_remote_shell_executor(
+                    ip=config.zfs_service_ip,
+                    port=22,
+                    conn_timeout=config.ssh_conn_timeout,
+                    login=config.zfs_ssh_username,
+                    password=config.zfs_ssh_user_password,
+                    privatekey=config.zfs_ssh_private_key_path,
+                    max_size=10,
+                )
+            )
+        # Return executor of remote host
+        return self._shell_executors[backend_name]
+
     def _init_common_capabilities(self):
         self.common_capabilities = {}
         if 'dedup=on' in self.dataset_creation_options:
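
A manila host string has the form `host@backend#pool`, and `extract_host(..., level='backend_name')` is used above to pull out just the backend segment, which is then matched against `enabled_share_backends`. A sketch of the intended dispatch, assuming `alpha` is enabled locally while `beta` exists only as a stanza describing a remote ZFSonLinux backend:

    # Illustrative only; 'driver' stands for a configured
    # ZFSonLinuxShareDriver instance.
    driver._get_shell_executor_by_host('node1@alpha#pool1')
    # -> driver.execute, i.e. the local shell path

    driver._get_shell_executor_by_host('node2@beta#pool2')
    # -> an SSHExecutor built once from the [beta] stanza, then cached
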
@@ -432,7 +486,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
             share['id'], {
                 'entity_type': 'share',
                 'dataset_name': dataset_name,
-                'ssh_cmd': ssh_cmd,  # used in replication
+                'ssh_cmd': ssh_cmd,  # used with replication and migration
                 'pool_name': pool_name,  # used in replication
                 'used_options': ' '.join(options),
             }
@@ -463,7 +517,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
         out, err = self.zfs('list', '-r', '-t', 'snapshot', pool_name)
         snapshots = self.parse_zfs_answer(out)
         full_snapshot_prefix = (
-            dataset_name + '@' + self.replica_snapshot_prefix)
+            dataset_name + '@')
         for snap in snapshots:
             if full_snapshot_prefix in snap['NAME']:
                 self._delete_dataset_or_snapshot_with_retry(snap['NAME'])
@@ -626,8 +680,10 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
                       delete_rules, share_server=None):
         """Updates access rules for given share."""
         dataset_name = self._get_dataset_name(share)
+        executor = self._get_shell_executor_by_host(share['host'])
         return self._get_share_helper(share['share_proto']).update_access(
-            dataset_name, access_rules, add_rules, delete_rules)
+            dataset_name, access_rules, add_rules, delete_rules,
+            executor=executor)
 
     def manage_existing(self, share, driver_options):
         """Manage existing ZFS dataset as manila share.
@@ -793,6 +849,22 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
             msg = _("Active replica not found.")
             raise exception.ReplicationException(reason=msg)
 
+    def _get_migration_snapshot_prefix(self, share_instance):
+        """Returns migration-based snapshot prefix."""
+        migration_snapshot_prefix = "%s_%s" % (
+            self.migration_snapshot_prefix,
+            share_instance['id'].replace('-', '_'))
+        return migration_snapshot_prefix
+
+    def _get_migration_snapshot_tag(self, share_instance):
+        """Returns migration- and time-based snapshot tag."""
+        current_time = timeutils.utcnow().isoformat()
+        snapshot_tag = "%s_time_%s" % (
+            self._get_migration_snapshot_prefix(share_instance), current_time)
+        snapshot_tag = (
+            snapshot_tag.replace('-', '_').replace('.', '_').replace(':', '_'))
+        return snapshot_tag
+
     @ensure_share_server_not_provided
     def create_replica(self, context, replica_list, new_replica,
                        access_rules, replica_snapshots, share_server=None):
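
The two helpers above produce ZFS-safe snapshot suffixes: dashes from the share instance UUID, plus the dashes, dots, and colons from the ISO timestamp, are all mapped to underscores. A standalone sketch of the same transformation (names here are illustrative, not driver API):

    from oslo_utils import timeutils

    def sample_migration_tag(prefix, instance_id):
        # Mirrors _get_migration_snapshot_prefix/_get_migration_snapshot_tag.
        tag = "%s_%s_time_%s" % (prefix, instance_id.replace('-', '_'),
                                 timeutils.utcnow().isoformat())
        return tag.replace('-', '_').replace('.', '_').replace(':', '_')

    # e.g. tmp_snapshot_for_share_migration__5c387459_..._time_2016_05_12T16_15_04_271123
    print(sample_migration_tag('tmp_snapshot_for_share_migration_',
                               '5c387459-ea93-48bd-b433-63e7478b34d5'))
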
@@ -1279,3 +1351,185 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
             return_dict.update({'status': constants.STATUS_ERROR})
 
         return return_dict
+
+    @ensure_share_server_not_provided
+    def migration_check_compatibility(
+            self, context, source_share, destination_share,
+            share_server=None, destination_share_server=None):
+        """Is called to test compatibility with destination backend."""
+        backend_name = share_utils.extract_host(
+            destination_share['host'], level='backend_name')
+        config = get_backend_configuration(backend_name)
+        compatible = self.configuration.share_driver == config.share_driver
+        return {
+            'compatible': compatible,
+            'writable': False,
+            'preserve_metadata': True,
+            'nondisruptive': True,
+        }
+
+    @ensure_share_server_not_provided
+    def migration_start(
+            self, context, source_share, destination_share,
+            share_server=None, destination_share_server=None):
+        """Is called to start share migration."""
+
+        src_dataset_name = self.private_storage.get(
+            source_share['id'], 'dataset_name')
+        dst_dataset_name = self._get_dataset_name(destination_share)
+        ssh_cmd = '%(username)s@%(host)s' % {
+            'username': self.configuration.zfs_ssh_username,
+            'host': self.service_ip,
+        }
+        snapshot_tag = self._get_migration_snapshot_tag(destination_share)
+        src_snapshot_name = (
+            '%(dataset_name)s@%(snapshot_tag)s' % {
+                'snapshot_tag': snapshot_tag,
+                'dataset_name': src_dataset_name,
+            }
+        )
+
+        # Save valuable data to DB
+        self.private_storage.update(source_share['id'], {
+            'migr_snapshot_tag': snapshot_tag,
+        })
+        self.private_storage.update(destination_share['id'], {
+            'entity_type': 'share',
+            'dataset_name': dst_dataset_name,
+            'ssh_cmd': ssh_cmd,
+            'pool_name': share_utils.extract_host(
+                destination_share['host'], level='pool'),
+            'migr_snapshot_tag': snapshot_tag,
+        })
+
+        # Create temporary snapshot on src host.
+        self.execute('sudo', 'zfs', 'snapshot', src_snapshot_name)
+
+        # Send/receive temporary snapshot
+        cmd = (
+            'nohup '
+            'sudo zfs send -vDR ' + src_snapshot_name + ' '
+            '| ssh ' + ssh_cmd + ' '
+            'sudo zfs receive -v ' + dst_dataset_name + ' '
+            '& exit 0'
+        )
+        # TODO(vponomaryov): following works only
+        # when config option zfs_use_ssh is set to "False". Because
+        # SSH connector does not support that "shell=True" feature that is
+        # required in current situation.
+        self.execute(cmd, shell=True)
+
+    @ensure_share_server_not_provided
+    def migration_continue(
+            self, context, source_share, destination_share,
+            share_server=None, destination_share_server=None):
+        """Is called in source share's backend to continue migration."""
+
+        snapshot_tag = self.private_storage.get(
+            destination_share['id'], 'migr_snapshot_tag')
+
+        out, err = self.execute('ps', 'aux')
+        if not '@%s' % snapshot_tag in out:
+            dst_dataset_name = self.private_storage.get(
+                destination_share['id'], 'dataset_name')
+            try:
+                self.execute(
+                    'sudo', 'zfs', 'get', 'quota', dst_dataset_name,
+                    executor=self._get_shell_executor_by_host(
+                        destination_share['host']),
+                )
+                return True
+            except exception.ProcessExecutionError as e:
+                raise exception.ZFSonLinuxException(msg=_(
+                    'Migration process is absent and dst dataset '
+                    'returned following error: %s') % e)
+
+    @ensure_share_server_not_provided
+    def migration_complete(
+            self, context, source_share, destination_share,
+            share_server=None, destination_share_server=None):
+        """Is called to perform 2nd phase of driver migration of a given share.
+
+        """
+        dst_dataset_name = self.private_storage.get(
+            destination_share['id'], 'dataset_name')
+        snapshot_tag = self.private_storage.get(
+            destination_share['id'], 'migr_snapshot_tag')
+        dst_snapshot_name = (
+            '%(dataset_name)s@%(snapshot_tag)s' % {
+                'snapshot_tag': snapshot_tag,
+                'dataset_name': dst_dataset_name,
+            }
+        )
+
+        dst_executor = self._get_shell_executor_by_host(
+            destination_share['host'])
+
+        # Destroy temporary migration snapshot on dst host
+        self.execute(
+            'sudo', 'zfs', 'destroy', dst_snapshot_name,
+            executor=dst_executor,
+        )
+
+        # Get export locations of new share instance
+        export_locations = self._get_share_helper(
+            destination_share['share_proto']).create_exports(
+                dst_dataset_name,
+                executor=dst_executor)
+
+        # Destroy src share and temporary migration snapshot on src (this) host
+        self.delete_share(context, source_share)
+
+        return export_locations
+
+    @ensure_share_server_not_provided
+    def migration_cancel(
+            self, context, source_share, destination_share,
+            share_server=None, destination_share_server=None):
+        """Is called to cancel driver migration."""
+
+        src_dataset_name = self.private_storage.get(
+            source_share['id'], 'dataset_name')
+        dst_dataset_name = self.private_storage.get(
+            destination_share['id'], 'dataset_name')
+        ssh_cmd = self.private_storage.get(
+            destination_share['id'], 'ssh_cmd')
+        snapshot_tag = self.private_storage.get(
+            destination_share['id'], 'migr_snapshot_tag')
+
+        # Kill migration process if exists
+        try:
+            out, err = self.execute('ps', 'aux')
+            lines = out.split('\n')
+            for line in lines:
+                if '@%s' % snapshot_tag in line:
+                    migr_pid = [
+                        x for x in line.strip().split(' ') if x != ''][1]
+                    self.execute('sudo', 'kill', '-9', migr_pid)
+        except exception.ProcessExecutionError as e:
+            LOG.warning(_LW(
+                "Caught following error trying to kill migration process: %s"),
+                e)
+
+        # Sleep couple of seconds before destroying updated objects
+        time.sleep(2)
+
+        # Destroy snapshot on source host
+        self._delete_dataset_or_snapshot_with_retry(
+            src_dataset_name + '@' + snapshot_tag)
+
+        # Destroy dataset and its migration snapshot on destination host
+        try:
+            self.execute(
+                'ssh', ssh_cmd,
+                'sudo', 'zfs', 'destroy', '-r', dst_dataset_name,
+            )
+        except exception.ProcessExecutionError as e:
+            LOG.warning(_LW(
+                "Failed to destroy destination dataset with following error: "
+                "%s"),
+                e)
+
+        LOG.debug(
+            "Migration of share with ID '%s' has been canceled." %
+            source_share["id"])
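
The data path is a single detached `zfs send | ssh | zfs receive` pipeline: `migration_start` backgrounds it with `nohup ... & exit 0`, and `migration_continue` then polls `ps aux` for the snapshot tag, treating the process's disappearance plus a reachable destination dataset as completion. A sketch of the command string `migration_start` composes, with placeholder dataset names and service IP:

    # Placeholder values; -D requests a deduplicated stream and -R a
    # replication stream that preserves the dataset's properties.
    cmd = (
        'nohup '
        'sudo zfs send -vDR pool_src/share_src@tmp_snapshot_for_share_migration__x '
        '| ssh manila@203.0.113.10 '
        'sudo zfs receive -v pool_dst/share_dst '
        '& exit 0'
    )
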
@@ -50,13 +50,27 @@ def zfs_dataset_synchronized(f):
     return wrapped_func
 
 
+def get_remote_shell_executor(
+        ip, port, conn_timeout, login=None, password=None, privatekey=None,
+        max_size=10):
+    return ganesha_utils.SSHExecutor(
+        ip=ip,
+        port=port,
+        conn_timeout=conn_timeout,
+        login=login,
+        password=password,
+        privatekey=privatekey,
+        max_size=max_size,
+    )
+
+
 class ExecuteMixin(driver.ExecuteMixin):
 
     def init_execute_mixin(self, *args, **kwargs):
         """Init method for mixin called in the end of driver's __init__()."""
         super(ExecuteMixin, self).init_execute_mixin(*args, **kwargs)
         if self.configuration.zfs_use_ssh:
-            self.ssh_executor = ganesha_utils.SSHExecutor(
+            self.ssh_executor = get_remote_shell_executor(
                 ip=self.configuration.zfs_service_ip,
                 port=22,
                 conn_timeout=self.configuration.ssh_conn_timeout,
@@ -70,9 +84,13 @@ class ExecuteMixin(driver.ExecuteMixin):
 
     def execute(self, *cmd, **kwargs):
         """Common interface for running shell commands."""
-        executor = self._execute
-        if self.ssh_executor:
+        if kwargs.get('executor'):
+            executor = kwargs.get('executor')
+        elif self.ssh_executor:
             executor = self.ssh_executor
+        else:
+            executor = self._execute
+        kwargs.pop('executor', None)
         if cmd[0] == 'sudo':
             kwargs['run_as_root'] = True
             cmd = cmd[1:]
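
The new precedence in `execute()` is: an explicit `executor` keyword wins, then the configured SSH executor, then the local `_execute`; the keyword is popped so it never reaches the underlying process utilities. A minimal standalone sketch of that selection order (illustrative names only):

    def pick_executor(kwargs, ssh_executor, local_execute):
        # Same branching as ExecuteMixin.execute() after this change.
        if kwargs.get('executor'):
            executor = kwargs.get('executor')
        elif ssh_executor:
            executor = ssh_executor
        else:
            executor = local_execute
        kwargs.pop('executor', None)  # keep kwargs clean for the real call
        return executor

    assert pick_executor({'executor': print}, None, len) is print
    assert pick_executor({}, 'ssh', len) == 'ssh'
    assert pick_executor({}, None, len) is len
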
@@ -88,11 +106,13 @@ class ExecuteMixin(driver.ExecuteMixin):
             LOG.warning(_LW("Failed to run command, got error: %s"), e)
             raise
 
-    def _get_option(self, resource_name, option_name, pool_level=False):
+    def _get_option(self, resource_name, option_name, pool_level=False,
+                    **kwargs):
         """Returns value of requested zpool or zfs dataset option."""
         app = 'zpool' if pool_level else 'zfs'
 
-        out, err = self.execute('sudo', app, 'get', option_name, resource_name)
+        out, err = self.execute(
+            'sudo', app, 'get', option_name, resource_name, **kwargs)
 
         data = self.parse_zfs_answer(out)
         option = data[0]['VALUE']
@@ -112,13 +132,13 @@ class ExecuteMixin(driver.ExecuteMixin):
             data.append(dict(zip(keys, values)))
         return data
 
-    def get_zpool_option(self, zpool_name, option_name):
+    def get_zpool_option(self, zpool_name, option_name, **kwargs):
         """Returns value of requested zpool option."""
-        return self._get_option(zpool_name, option_name, True)
+        return self._get_option(zpool_name, option_name, True, **kwargs)
 
-    def get_zfs_option(self, dataset_name, option_name):
+    def get_zfs_option(self, dataset_name, option_name, **kwargs):
         """Returns value of requested zfs dataset option."""
-        return self._get_option(dataset_name, option_name, False)
+        return self._get_option(dataset_name, option_name, False, **kwargs)
 
     def zfs(self, *cmd, **kwargs):
         """ZFS shell commands executor."""
@@ -148,20 +168,20 @@ class NASHelperBase(object):
         """Performs checks for required stuff."""
 
     @abc.abstractmethod
-    def create_exports(self, dataset_name):
+    def create_exports(self, dataset_name, executor):
         """Creates share exports."""
 
     @abc.abstractmethod
-    def get_exports(self, dataset_name, service):
+    def get_exports(self, dataset_name, service, executor):
         """Gets/reads share exports."""
 
     @abc.abstractmethod
-    def remove_exports(self, dataset_name):
+    def remove_exports(self, dataset_name, executor):
         """Removes share exports."""
 
     @abc.abstractmethod
     def update_access(self, dataset_name, access_rules, add_rules,
-                      delete_rules):
+                      delete_rules, executor):
         """Update access rules for specified ZFS dataset."""
 
 
@@ -199,13 +219,17 @@ class NFSviaZFSHelper(ExecuteMixin, NASHelperBase):
             LOG.exception(msg, e)
             raise
 
-    def create_exports(self, dataset_name):
+        # Init that class instance attribute on start of manila-share service
+        self.is_kernel_version
+
+    def create_exports(self, dataset_name, executor=None):
         """Creates NFS share exports for given ZFS dataset."""
-        return self.get_exports(dataset_name)
+        return self.get_exports(dataset_name, executor=executor)
 
-    def get_exports(self, dataset_name):
+    def get_exports(self, dataset_name, executor=None):
         """Gets/reads NFS share export for given ZFS dataset."""
-        mountpoint = self.get_zfs_option(dataset_name, 'mountpoint')
+        mountpoint = self.get_zfs_option(
+            dataset_name, 'mountpoint', executor=executor)
         return [
             {
                 "path": "%(ip)s:%(mp)s" % {"ip": ip, "mp": mountpoint},
@@ -218,12 +242,13 @@ class NFSviaZFSHelper(ExecuteMixin, NASHelperBase):
         ]
 
     @zfs_dataset_synchronized
-    def remove_exports(self, dataset_name):
+    def remove_exports(self, dataset_name, executor=None):
         """Removes NFS share exports for given ZFS dataset."""
-        sharenfs = self.get_zfs_option(dataset_name, 'sharenfs')
+        sharenfs = self.get_zfs_option(
+            dataset_name, 'sharenfs', executor=executor)
         if sharenfs == 'off':
             return
-        self.zfs("set", "sharenfs=off", dataset_name)
+        self.zfs("set", "sharenfs=off", dataset_name, executor=executor)
 
     def _get_parsed_access_to(self, access_to):
         netmask = utils.cidr_to_netmask(access_to)
@@ -233,7 +258,7 @@ class NFSviaZFSHelper(ExecuteMixin, NASHelperBase):
 
     @zfs_dataset_synchronized
     def update_access(self, dataset_name, access_rules, add_rules,
-                      delete_rules, make_all_ro=False):
+                      delete_rules, make_all_ro=False, executor=None):
         """Update access rules for given ZFS dataset exported as NFS share."""
         rw_rules = []
         ro_rules = []
@@ -267,7 +292,8 @@ class NFSviaZFSHelper(ExecuteMixin, NASHelperBase):
                 rules.append("%s:ro,no_root_squash" % rule)
         rules_str = "sharenfs=" + (' '.join(rules) or 'off')
 
-        out, err = self.zfs('list', '-r', dataset_name.split('/')[0])
+        out, err = self.zfs(
+            'list', '-r', dataset_name.split('/')[0], executor=executor)
         data = self.parse_zfs_answer(out)
         for datum in data:
             if datum['NAME'] == dataset_name:
@@ -287,4 +313,7 @@ class NFSviaZFSHelper(ExecuteMixin, NASHelperBase):
                 continue
             access_to = self._get_parsed_access_to(rule['access_to'])
             export_location = access_to + ':' + mountpoint
-            self.execute('sudo', 'exportfs', '-u', export_location)
+            self.execute(
+                'sudo', 'exportfs', '-u', export_location,
+                executor=executor,
+            )
|
@ -48,6 +48,8 @@ class FakeConfig(object):
|
|||||||
"zfs_ssh_private_key_path", '/fake/path')
|
"zfs_ssh_private_key_path", '/fake/path')
|
||||||
self.zfs_replica_snapshot_prefix = kwargs.get(
|
self.zfs_replica_snapshot_prefix = kwargs.get(
|
||||||
"zfs_replica_snapshot_prefix", "tmp_snapshot_for_replication_")
|
"zfs_replica_snapshot_prefix", "tmp_snapshot_for_replication_")
|
||||||
|
self.zfs_migration_snapshot_prefix = kwargs.get(
|
||||||
|
"zfs_migration_snapshot_prefix", "tmp_snapshot_for_migration_")
|
||||||
self.zfs_dataset_creation_options = kwargs.get(
|
self.zfs_dataset_creation_options = kwargs.get(
|
||||||
"zfs_dataset_creation_options", ["fook=foov", "bark=barv"])
|
"zfs_dataset_creation_options", ["fook=foov", "bark=barv"])
|
||||||
self.network_config_group = kwargs.get(
|
self.network_config_group = kwargs.get(
|
||||||
@@ -1034,12 +1036,18 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
     def test_update_access(self):
         self.mock_object(self.driver, '_get_dataset_name')
         mock_helper = self.mock_object(self.driver, '_get_share_helper')
-        share = {'share_proto': 'NFS'}
+        mock_shell_executor = self.mock_object(
+            self.driver, '_get_shell_executor_by_host')
+        share = {
+            'share_proto': 'NFS',
+            'host': 'foo_host@bar_backend@quuz_pool',
+        }
 
         result = self.driver.update_access(
             'fake_context', share, [1], [2], [3])
 
         self.driver._get_dataset_name.assert_called_once_with(share)
+        mock_shell_executor.assert_called_once_with(share['host'])
         self.assertEqual(
             mock_helper.return_value.update_access.return_value,
             result,
@@ -77,6 +77,16 @@ class ExecuteMixinTestCase(test.TestCase):
             max_size=10,
         )
 
+    def test_execute_with_provided_executor(self):
+        self.mock_object(self.driver, '_execute')
+        fake_executor = mock.Mock()
+
+        self.driver.execute('fake', '--foo', '--bar', executor=fake_executor)
+
+        self.assertFalse(self.driver._execute.called)
+        self.assertFalse(self.ssh_executor.called)
+        fake_executor.assert_called_once_with('fake', '--foo', '--bar')
+
     def test_local_shell_execute(self):
         self.mock_object(self.driver, '_execute')
 
@@ -264,6 +274,7 @@ class NFSviaZFSHelperTestCase(test.TestCase):
         ])
 
     def test_is_kernel_version_true(self):
+        delattr(self.helper, '_is_kernel_version')
         zfs_utils.utils.execute.reset_mock()
 
         self.assertTrue(self.helper.is_kernel_version)
@@ -273,6 +284,7 @@ class NFSviaZFSHelperTestCase(test.TestCase):
         ])
 
     def test_is_kernel_version_false(self):
+        delattr(self.helper, '_is_kernel_version')
        zfs_utils.utils.execute.reset_mock()
        zfs_utils.utils.execute.side_effect = (
            exception.ProcessExecutionError('Fake'))
@@ -284,6 +296,7 @@ class NFSviaZFSHelperTestCase(test.TestCase):
         ])
 
     def test_is_kernel_version_second_call(self):
+        delattr(self.helper, '_is_kernel_version')
         zfs_utils.utils.execute.reset_mock()
 
         self.assertTrue(self.helper.is_kernel_version)
@@ -317,7 +330,8 @@ class NFSviaZFSHelperTestCase(test.TestCase):
         result = self.helper.get_exports('foo')
 
         self.assertEqual(expected, result)
-        self.helper.get_zfs_option.assert_called_once_with('foo', 'mountpoint')
+        self.helper.get_zfs_option.assert_called_once_with(
+            'foo', 'mountpoint', executor=None)
 
     def test_remove_exports(self):
         zfs_utils.utils.execute.reset_mock()
@@ -326,7 +340,8 @@ class NFSviaZFSHelperTestCase(test.TestCase):
 
         self.helper.remove_exports('foo')
 
-        self.helper.get_zfs_option.assert_called_once_with('foo', 'sharenfs')
+        self.helper.get_zfs_option.assert_called_once_with(
+            'foo', 'sharenfs', executor=None)
         zfs_utils.utils.execute.assert_called_once_with(
             'zfs', 'set', 'sharenfs=off', 'foo', run_as_root=True)
 
@@ -337,7 +352,8 @@ class NFSviaZFSHelperTestCase(test.TestCase):
 
         self.helper.remove_exports('foo')
 
-        self.helper.get_zfs_option.assert_called_once_with('foo', 'sharenfs')
+        self.helper.get_zfs_option.assert_called_once_with(
+            'foo', 'sharenfs', executor=None)
         self.assertEqual(0, zfs_utils.utils.execute.call_count)
 
     @ddt.data(
@@ -357,6 +373,7 @@ class NFSviaZFSHelperTestCase(test.TestCase):
     @ddt.unpack
     def test_update_access_rw_and_ro(self, modinfo_response, access_str,
                                      make_all_ro):
+        delattr(self.helper, '_is_kernel_version')
         zfs_utils.utils.execute.reset_mock()
         dataset_name = 'zpoolz/foo_dataset_name/fake'
         zfs_utils.utils.execute.side_effect = [
@@ -444,7 +461,6 @@ class NFSviaZFSHelperTestCase(test.TestCase):
         self.helper.update_access(dataset_name, access_rules, [], [])
 
         zfs_utils.utils.execute.assert_has_calls([
-            mock.call('modinfo', 'zfs'),
             mock.call('zfs', 'list', '-r', 'zpoolz', run_as_root=True),
         ])
         zfs_utils.LOG.warning.assert_called_once_with(
@@ -455,7 +471,6 @@ class NFSviaZFSHelperTestCase(test.TestCase):
         zfs_utils.utils.execute.reset_mock()
         dataset_name = 'zpoolz/foo_dataset_name/fake'
         zfs_utils.utils.execute.side_effect = [
-            ('fake_modinfo_result', ''),
             ("""NAME USED AVAIL REFER MOUNTPOINT\n
             %s 2.58M 14.8G 27.5K /%s\n
             """ % (dataset_name, dataset_name), ''),
@@ -465,7 +480,6 @@ class NFSviaZFSHelperTestCase(test.TestCase):
         self.helper.update_access(dataset_name, [], [], [])
 
         zfs_utils.utils.execute.assert_has_calls([
-            mock.call('modinfo', 'zfs'),
             mock.call('zfs', 'list', '-r', 'zpoolz', run_as_root=True),
             mock.call('zfs', 'set', 'sharenfs=off', dataset_name,
                       run_as_root=True),