glusterfs: volume mapped share layout
The volume management done by gluster_native has been isolated and captured in a separate layout class. gluster_native implements only {allow,deny}_access; for the rest it uses the layout code. Semantics are preserved with one difference: the Manila host is now assumed to be set up so that it can mount the GlusterFS volumes without any complications. Earlier we assumed that we did not have cert-based access from the Manila host, and therefore turned SSL off and on on the GlusterFS side. This does not make sense for the separate, layout-agnostic logic. (Nb. we already wanted to adopt this assumption, regardless of the layout work.) Partially implements bp modular-glusterfs-share-layouts Change-Id: I3cbc55eed0f61fe4808873f78811b6c3fd1c66aa
This commit is contained in:
parent
f697cc0df4
commit
bf3c40e439
|
@ -55,7 +55,7 @@ import manila.share.drivers.emc.plugins.isilon.isilon
|
|||
import manila.share.drivers.generic
|
||||
import manila.share.drivers.glusterfs
|
||||
import manila.share.drivers.glusterfs.layout
|
||||
import manila.share.drivers.glusterfs_native
|
||||
import manila.share.drivers.glusterfs.layout_volume
|
||||
import manila.share.drivers.hdfs.hdfs_native
|
||||
import manila.share.drivers.hds.sop
|
||||
import manila.share.drivers.hitachi.hds_hnas
|
||||
|
@ -114,8 +114,8 @@ _global_opt_lists = [
|
|||
manila.share.drivers.emc.driver.EMC_NAS_OPTS,
|
||||
manila.share.drivers.generic.share_opts,
|
||||
manila.share.drivers.glusterfs.GlusterfsManilaShare_opts,
|
||||
manila.share.drivers.glusterfs_native.glusterfs_native_manila_share_opts,
|
||||
manila.share.drivers.glusterfs.layout.glusterfs_share_layout_opts,
|
||||
manila.share.drivers.glusterfs.layout_volume.glusterfs_volume_mapped_opts,
|
||||
manila.share.drivers.hdfs.hdfs_native.hdfs_native_share_opts,
|
||||
manila.share.drivers.hds.sop.hdssop_share_opts,
|
||||
manila.share.drivers.hitachi.hds_hnas.hds_hnas_opts,
|
||||
|
|
|
@ -0,0 +1,589 @@
|
|||
# Copyright (c) 2015 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""GlusterFS volume mapped share layout."""
|
||||
|
||||
import os
|
||||
import random
|
||||
import re
|
||||
import shutil
|
||||
import string
|
||||
import tempfile
|
||||
import xml.etree.cElementTree as etree
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
import six
|
||||
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.i18n import _LI
|
||||
from manila.i18n import _LW
|
||||
from manila.share.drivers.glusterfs import common
|
||||
from manila.share.drivers.glusterfs import layout
|
||||
from manila import utils
|
||||
|
||||
LOG = log.getLogger(__name__)


# Config options of the volume mapped layout (1 share = 1 GlusterFS volume).
# Registered both globally (below) and appended to the layout's own
# configuration in GlusterfsVolumeMappedLayout.__init__.
glusterfs_volume_mapped_opts = [
    cfg.ListOpt('glusterfs_servers',
                default=[],
                deprecated_name='glusterfs_targets',
                help='List of GlusterFS servers that can be used to create '
                     'shares. Each GlusterFS server should be of the form '
                     '[remoteuser@]<volserver>, and they are assumed to '
                     'belong to distinct Gluster clusters.'),
    cfg.StrOpt('glusterfs_native_server_password',
               default=None,
               secret=True,
               help='Remote GlusterFS server node\'s login password. '
                    'This is not required if '
                    '\'glusterfs_native_path_to_private_key\' is '
                    'configured.'),
    cfg.StrOpt('glusterfs_native_path_to_private_key',
               default=None,
               help='Path of Manila host\'s private SSH key file.'),
    cfg.StrOpt('glusterfs_volume_pattern',
               default=None,
               help='Regular expression template used to filter '
                    'GlusterFS volumes for share creation. '
                    'The regex template can optionally (ie. with support '
                    'of the GlusterFS backend) contain the #{size} '
                    'parameter which matches an integer (sequence of '
                    'digits) in which case the value shall be interpreted as '
                    'size of the volume in GB. Examples: '
                    '"manila-share-volume-\d+$", '
                    '"manila-share-volume-#{size}G-\d+$"; '
                    'with matching volume names, respectively: '
                    '"manila-share-volume-12", "manila-share-volume-3G-13". '
                    'In latter example, the number that matches "#{size}", '
                    'that is, 3, is an indication that the size of volume '
                    'is 3G.'),
]


CONF = cfg.CONF
CONF.register_opts(glusterfs_volume_mapped_opts)

# The dict specifying named parameters
# that can be used with glusterfs_volume_pattern
# in #{<param>} format.
# For each of them we give regex pattern it matches
# and a transformer function ('trans') for the matched
# string value.
# Currently we handle only #{size}.
PATTERN_DICT = {'size': {'pattern': '(?P<size>\d+)', 'trans': int}}
|
||||
|
||||
|
||||
class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
|
||||
|
||||
_snapshots_are_supported = True
|
||||
|
||||
def __init__(self, driver, *args, **kwargs):
    """Initialize the layout and sanity-check configured servers."""
    super(GlusterfsVolumeMappedLayout, self).__init__(
        driver, *args, **kwargs)
    # Gluster volumes currently bound to a share.
    self.gluster_used_vols = set()
    self.configuration.append_config_values(
        glusterfs_volume_mapped_opts)
    # Volumes known not to support snapshots, mapped to the backend's
    # error string (populated lazily in create_snapshot).
    self.gluster_nosnap_vols_dict = {}
    self.volume_pattern = self._compile_volume_pattern()
    self.volume_pattern_keys = self.volume_pattern.groupindex.keys()
    # Instantiating a manager performs a format check on each configured
    # server address; the object itself is discarded.
    for server_address in self.configuration.glusterfs_servers:
        self._glustermanager(server_address, False)
    self.glusterfs_versions = {}
||||
|
||||
def _compile_volume_pattern(self):
    """Compile a RegexObject from the config specified regex template.

    (cfg.glusterfs_volume_pattern)
    """
    # Map each supported template parameter to its regex fragment.
    subdict = {key: val['pattern']
               for key, val in six.iteritems(PATTERN_DICT)}

    # Templates use the #{<var>} placeholder syntax instead of the
    # default '$' of string.Template.
    class CustomTemplate(string.Template):
        delimiter = '#'

    template = CustomTemplate(self.configuration.glusterfs_volume_pattern)
    return re.compile(template.substitute(subdict))
||||
|
||||
def do_setup(self, context):
    """Setup the GlusterFS volumes.

    Probes every configured GlusterFS server for its version, enforces
    the driver's minimum version requirement, verifies that at least one
    volume matches the configured volume pattern, and checks that the
    GlusterFS mount helper is available on the Manila host.

    :raises exception.GlusterfsException: if any server cannot be
        queried, runs an unsupported GlusterFS version, or no matching
        volume exists.
    """
    # Query 'gluster version' on each server; collect per-server
    # failures so all of them can be reported at once.
    glusterfs_versions, exceptions = {}, {}
    for srvaddr in self.configuration.glusterfs_servers:
        try:
            glusterfs_versions[srvaddr] = self._glustermanager(
                srvaddr, False).get_gluster_version()
        except exception.GlusterfsException as exc:
            exceptions[srvaddr] = six.text_type(exc)
    if exceptions:
        for srvaddr, excmsg in six.iteritems(exceptions):
            LOG.error(_LE("'gluster version' failed on server "
                          "%(server)s with: %(message)s"),
                      {'server': srvaddr, 'message': excmsg})
        raise exception.GlusterfsException(_(
            "'gluster version' failed on servers %s") % (
            ','.join(exceptions.keys())))
    # Reject servers older than the driver's minimum supported version.
    notsupp_servers = []
    for srvaddr, vers in six.iteritems(glusterfs_versions):
        if common.GlusterManager.numreduct(
                vers) < self.driver.GLUSTERFS_VERSION_MIN:
            notsupp_servers.append(srvaddr)
    if notsupp_servers:
        gluster_version_min_str = '.'.join(
            six.text_type(c) for c in self.driver.GLUSTERFS_VERSION_MIN)
        for srvaddr in notsupp_servers:
            LOG.error(_LE("GlusterFS version %(version)s on server "
                          "%(server)s is not supported, "
                          "minimum requirement: %(minvers)s"),
                      {'server': srvaddr,
                       'version': '.'.join(glusterfs_versions[srvaddr]),
                       'minvers': gluster_version_min_str})
        raise exception.GlusterfsException(_(
            "Unsupported GlusterFS version on servers %(servers)s, "
            "minimum requirement: %(minvers)s") % {
            'servers': ','.join(notsupp_servers),
            'minvers': gluster_version_min_str})
    self.glusterfs_versions = glusterfs_versions

    gluster_volumes_initial = set(self._fetch_gluster_volumes())
    if not gluster_volumes_initial:
        # No suitable volumes are found on the Gluster end.
        # Raise exception.
        msg = (_("Gluster backend does not provide any volume "
                 "matching pattern %s"
                 ) % self.configuration.glusterfs_volume_pattern)
        LOG.error(msg)
        raise exception.GlusterfsException(msg)

    LOG.info(_LI("Found %d Gluster volumes allocated for Manila."
                 ), len(gluster_volumes_initial))

    # Verify the GlusterFS mount helper exists on this host.
    self._check_mount_glusterfs()
||||
|
||||
def _glustermanager(self, gluster_address, req_volume=True):
    """Create GlusterManager object for gluster_address.

    :param gluster_address: address string or component dict accepted
        by common.GlusterManager.
    :param req_volume: whether the address is required to carry a
        volume component.
    """
    conf = self.configuration
    return common.GlusterManager(
        gluster_address, self.driver._execute,
        conf.glusterfs_native_path_to_private_key,
        conf.glusterfs_native_server_password,
        requires={'volume': req_volume})
|
||||
|
||||
def _share_manager(self, share):
    """Return GlusterManager object representing share's backend."""
    export_location = share['export_location']
    # A share's export location is its backing volume's qualified address.
    return self._glustermanager(export_location)
|
||||
|
||||
def _fetch_gluster_volumes(self):
    """Do a 'gluster volume list | grep <volume pattern>'.

    Aggregate the results from all servers.
    Extract the named groups from the matching volume names
    using the specs given in PATTERN_DICT.
    Return a dict with keys of the form <server>:/<volname>
    and values being dicts that map names of named groups
    to their extracted value.

    :raises exception.GlusterfsException: if 'gluster volume list'
        fails on any server.
    """
    volumes_dict = {}
    for srvaddr in self.configuration.glusterfs_servers:
        gluster_mgr = self._glustermanager(srvaddr, False)
        try:
            out, err = gluster_mgr.gluster_call('volume', 'list')
        except exception.ProcessExecutionError as exc:
            # Mention the host only for remote (SSH) access.
            msgdict = {'err': exc.stderr, 'hostinfo': ''}
            if gluster_mgr.user:
                msgdict['hostinfo'] = ' on host %s' % gluster_mgr.host
            LOG.error(_LE("Error retrieving volume list%(hostinfo)s: "
                          "%(err)s") % msgdict)
            raise exception.GlusterfsException(
                _('gluster volume list failed'))
        for volname in out.split("\n"):
            # Keep only volumes matching the configured pattern.
            patmatch = self.volume_pattern.match(volname)
            if not patmatch:
                continue
            # Extract each named group and run it through its
            # transformer (e.g. 'size' -> int); identity by default.
            pattern_dict = {}
            for key in self.volume_pattern_keys:
                keymatch = patmatch.group(key)
                if keymatch is None:
                    pattern_dict[key] = None
                else:
                    trans = PATTERN_DICT[key].get('trans', lambda x: x)
                    pattern_dict[key] = trans(keymatch)
            # Derive the volume's qualified address from the server's
            # address components plus the volume name.
            comp_vol = gluster_mgr.components.copy()
            comp_vol.update({'volume': volname})
            gluster_mgr_vol = self._glustermanager(comp_vol)
            volumes_dict[gluster_mgr_vol.qualified] = pattern_dict
    return volumes_dict
|
||||
|
||||
@utils.synchronized("glusterfs_native", external=False)
def _pop_gluster_vol(self, size=None):
    """Pick an unbound volume.

    Do a _fetch_gluster_volumes() first to get the complete
    list of usable volumes.
    Keep only the unbound ones (ones that are not yet used to
    back a share).
    If size is given, try to pick one which has a size specification
    (according to the 'size' named group of the volume pattern),
    and its size is greater-than-or-equal to the given size.
    Return the volume chosen (in <host>:/<volname> format).

    Runs under the "glusterfs_native" lock together with
    _push_gluster_vol so that pick/release of volumes is serialized.

    :raises exception.GlusterfsException: if no suitable volume is free.
    """
    voldict = self._fetch_gluster_volumes()
    # calculate the set of unused volumes
    unused_vols = set(voldict) - self.gluster_used_vols

    if not unused_vols:
        # No volumes available for use as share. Warn user.
        LOG.warn(_LW("No unused gluster volumes available for use as "
                     "share! Create share won't be supported unless "
                     "existing shares are deleted or some gluster "
                     "volumes are created with names matching "
                     "'glusterfs_volume_pattern'."))
    else:
        LOG.info(_LI("Number of gluster volumes in use: "
                     "%(inuse-numvols)s. Number of gluster volumes "
                     "available for use as share: %(unused-numvols)s"),
                 {'inuse-numvols': len(self.gluster_used_vols),
                  'unused-numvols': len(unused_vols)})

    # volmap is the data structure used to categorize and sort
    # the unused volumes. It's a nested dictionary of structure
    # {<size>: <hostmap>}
    # where <size> is either an integer or None,
    # <hostmap> is a dictionary of structure {<host>: <vols>}
    # where <host> is a host name (IP address), <vols> is a list
    # of volumes (gluster addresses).
    volmap = {None: {}}
    # if both caller has specified size and 'size' occurs as
    # a parameter in the volume pattern...
    if size and 'size' in self.volume_pattern_keys:
        # then this function is used to extract the
        # size value for a given volume from the voldict...
        # NOTE(review): lambda assignment (pep8 E731); a local 'def'
        # would be the idiomatic form.
        get_volsize = lambda vol: voldict[vol]['size']
    else:
        # else just use a stub.
        get_volsize = lambda vol: None
    for vol in unused_vols:
        # For each unused volume, we extract the <size>
        # and <host> values with which it can be inserted
        # into the volmap, and conditionally perform
        # the insertion (with the condition being: once
        # caller specified size and a size indication was
        # found in the volume name, we require that the
        # indicated size adheres to caller's spec).
        volsize = get_volsize(vol)
        if not volsize or volsize >= size:
            hostmap = volmap.get(volsize)
            if not hostmap:
                hostmap = {}
                volmap[volsize] = hostmap
            host = self._glustermanager(vol).host
            hostvols = hostmap.get(host)
            if not hostvols:
                hostvols = []
                hostmap[host] = hostvols
            hostvols.append(vol)
    if len(volmap) > 1:
        # volmap has keys apart from the default None,
        # ie. volumes with sensible and adherent size
        # indication have been found. Then pick the smallest
        # of the size values.
        chosen_size = sorted(n for n in volmap.keys() if n)[0]
    else:
        chosen_size = None
    chosen_hostmap = volmap[chosen_size]
    if not chosen_hostmap:
        msg = (_("Couldn't find a free gluster volume to use."))
        LOG.error(msg)
        raise exception.GlusterfsException(msg)

    # From the hosts we choose randomly to tend towards
    # even distribution of share backing volumes among
    # Gluster clusters.
    chosen_host = random.choice(list(chosen_hostmap.keys()))
    # Within a host's volumes, choose alphabetically first,
    # to make it predictable.
    vol = sorted(chosen_hostmap[chosen_host])[0]
    self.driver._setup_via_manager(self._glustermanager(vol))
    self.gluster_used_vols.add(vol)
    return vol
|
||||
|
||||
@utils.synchronized("glusterfs_native", external=False)
def _push_gluster_vol(self, exp_locn):
    """Return a volume to the pool of unused volumes.

    Runs under the same lock as _pop_gluster_vol, so membership
    check plus removal is atomic.
    """
    if exp_locn not in self.gluster_used_vols:
        msg = (_("Couldn't find the share in used list."))
        LOG.error(msg)
        raise exception.GlusterfsException(msg)
    self.gluster_used_vols.remove(exp_locn)
|
||||
|
||||
def _wipe_gluster_vol(self, gluster_mgr):
    """Delete all (user-visible) contents of a GlusterFS volume.

    Temporarily mounts the volume, removes everything in it except
    GlusterFS-internal directories, then unmounts and cleans up the
    temporary mount point.

    :param gluster_mgr: GlusterManager of the volume to wipe.
    :raises exception.GlusterfsException: if mounting or the wipe
        command fails.
    """
    # Create a temporary mount.
    gluster_export = gluster_mgr.export
    tmpdir = tempfile.mkdtemp()
    try:
        common._mount_gluster_vol(self.driver._execute, gluster_export,
                                  tmpdir)
    except exception.GlusterfsException:
        shutil.rmtree(tmpdir, ignore_errors=True)
        raise

    # Delete the contents of a GlusterFS volume that is temporarily
    # mounted.
    # From GlusterFS version 3.7, two directories, '.trashcan' at the root
    # of the GlusterFS volume and 'internal_op' within the '.trashcan'
    # directory, are internally created when a GlusterFS volume is started.
    # GlusterFS does not allow unlink(2) of the two directories. So do not
    # delete the paths of the two directories, but delete their contents
    # along with the rest of the contents of the volume.
    srvaddr = gluster_mgr.host_access
    if common.GlusterManager.numreduct(self.glusterfs_versions[srvaddr]
                                       ) < (3, 7):
        cmd = ['find', tmpdir, '-mindepth', '1', '-delete']
    else:
        ignored_dirs = map(lambda x: os.path.join(tmpdir, *x),
                           [('.trashcan', ), ('.trashcan', 'internal_op')])
        ignored_dirs = list(ignored_dirs)
        cmd = ['find', tmpdir, '-mindepth', '1', '!', '-path',
               ignored_dirs[0], '!', '-path', ignored_dirs[1], '-delete']

    try:
        self.driver._execute(*cmd, run_as_root=True)
    except exception.ProcessExecutionError as exc:
        msg = (_("Error trying to wipe gluster volume. "
                 "gluster_export: %(export)s, Error: %(error)s") %
               {'export': gluster_export, 'error': exc.stderr})
        LOG.error(msg)
        raise exception.GlusterfsException(msg)
    finally:
        # Unmount.
        common._umount_gluster_vol(self.driver._execute, tmpdir)
        shutil.rmtree(tmpdir, ignore_errors=True)
|
||||
|
||||
def create_share(self, context, share, share_server=None):
    """Create a share using GlusterFS volume.

    1 Manila share = 1 GlusterFS volume. Pick an unused
    GlusterFS volume for use as a share.

    :returns: the export location (<server>:/<volname>).
    :raises exception.GlusterfsException: if no free volume is found.
    """
    try:
        export_location = self._pop_gluster_vol(share['size'])
    except exception.GlusterfsException:
        # Fix: the original built a (format-string, params) tuple named
        # 'msg' and passed that tuple to LOG.error as a single argument,
        # so the share id was never interpolated into the message. Pass
        # the format string and its parameters as separate arguments.
        LOG.error(_LE("Error creating share %(share_id)s"),
                  {'share_id': share['id']})
        raise

    # TODO(deepakcs): Enable quota and set it to the share size.

    # For native protocol, the export_location should be of the form:
    # server:/volname
    LOG.info(_LI("export_location sent back from create_share: %s"),
             (export_location,))
    return export_location
|
||||
|
||||
def delete_share(self, context, share, share_server=None):
    """Delete a share on the GlusterFS volume.

    1 Manila share = 1 GlusterFS volume. Put the gluster
    volume back in the available list.

    :raises exception.GlusterfsException: if wiping the volume or
        returning it to the pool fails.
    """
    gmgr = self._share_manager(share)
    try:
        self._wipe_gluster_vol(gmgr)
        self._push_gluster_vol(gmgr.qualified)
    except exception.GlusterfsException:
        # Fix: the original built a (format-string, params) tuple named
        # 'msg' and passed that tuple to LOG.error as a single argument,
        # so the share id was never interpolated into the message. Pass
        # the format string and its parameters as separate arguments.
        LOG.error(_LE("Error during delete_share request for "
                      "share %(share_id)s"),
                  {'share_id': share['id']})
        raise

    # TODO(deepakcs): Disable quota.
|
||||
|
||||
@staticmethod
def _find_actual_backend_snapshot_name(gluster_mgr, snapshot):
    """Return the GlusterFS snapshot name backing a Manila snapshot.

    The backend name embeds the Manila snapshot id; exactly one
    candidate must match.
    """
    args = ('snapshot', 'list', gluster_mgr.volume, '--mode=script')
    try:
        out, err = gluster_mgr.gluster_call(*args)
    except exception.ProcessExecutionError as exc:
        LOG.error(_LE("Error retrieving snapshot list: %s"), exc.stderr)
        raise exception.GlusterfsException(_("gluster %s failed") %
                                           ' '.join(args))
    candidates = [line for line in out.split("\n")
                  if snapshot['id'] in line]
    if len(candidates) != 1:
        msg = (_("Failed to identify backing GlusterFS object "
                 "for snapshot %(snap_id)s of share %(share_id)s: "
                 "a single candidate was expected, %(found)d was found.") %
               {'snap_id': snapshot['id'],
                'share_id': snapshot['share_id'],
                'found': len(candidates)})
        raise exception.GlusterfsException(msg)
    return candidates[0]
|
||||
|
||||
def create_share_from_snapshot(self, context, share, snapshot,
                               share_server=None):
    """Create a new share backed by a clone of a snapshot.

    :raises exception.GlusterfsException: if the backend is older than
        3.7 (snapshot clone unsupported) or the clone operation fails.
    """
    old_gmgr = self._share_manager(snapshot['share'])

    # Snapshot clone feature in GlusterFS server essential to support this
    # API is available in GlusterFS server versions 3.7 and higher. So do
    # a version check.
    vers = self.glusterfs_versions[old_gmgr.host_access]
    minvers = (3, 7)
    if common.GlusterManager.numreduct(vers) < minvers:
        minvers_str = '.'.join(six.text_type(c) for c in minvers)
        vers_str = '.'.join(vers)
        msg = (_("GlusterFS version %(version)s on server %(server)s does "
                 "not support creation of shares from snapshot. "
                 "minimum requirement: %(minversion)s") %
               {'version': vers_str, 'server': old_gmgr.host,
                'minversion': minvers_str})
        LOG.error(msg)
        raise exception.GlusterfsException(msg)

    # Clone the snapshot. The snapshot clone, a new GlusterFS volume
    # would serve as a share.
    backend_snapshot_name = self._find_actual_backend_snapshot_name(
        old_gmgr, snapshot)
    volume = ''.join(['manila-', share['id']])
    args_tuple = (('snapshot', 'activate', backend_snapshot_name,
                   'force', '--mode=script'),
                  ('snapshot', 'clone', volume, backend_snapshot_name))
    try:
        for args in args_tuple:
            out, err = old_gmgr.gluster_call(*args)
    except exception.ProcessExecutionError as exc:
        LOG.error(_LE("Error creating share from snapshot: %s"),
                  exc.stderr)
        # 'args' is the command that failed (the loop variable survives
        # the loop).
        raise exception.GlusterfsException(_("gluster %s failed") %
                                           ' '.join(args))

    # Get a manager for the new volume/share.
    comp_vol = old_gmgr.components.copy()
    comp_vol.update({'volume': volume})
    gmgr = self._glustermanager(comp_vol)
    # Configure the cloned volume like its origin and mark it in use.
    self.driver._setup_via_manager(gmgr, old_gmgr)
    self.gluster_used_vols.add(gmgr.qualified)
    return gmgr.qualified
|
||||
|
||||
def create_snapshot(self, context, snapshot, share_server=None):
    """Creates a snapshot.

    :raises exception.ShareSnapshotNotSupported: if the backing volume
        does not support snapshots.
    :raises exception.GlusterfsException: on any other backend failure.
    """
    gluster_mgr = self._share_manager(snapshot['share'])
    if gluster_mgr.qualified in self.gluster_nosnap_vols_dict:
        # This volume is already known not to support snapshots;
        # replay the cached error instead of calling the backend again.
        opret, operrno = -1, 0
        operrstr = self.gluster_nosnap_vols_dict[gluster_mgr.qualified]
    else:
        args = ('--xml', 'snapshot', 'create', 'manila-' + snapshot['id'],
                gluster_mgr.volume)
        try:
            out, err = gluster_mgr.gluster_call(*args)
        except exception.ProcessExecutionError as exc:
            # Fix: the original logged "Error retrieving volume info"
            # (copy-pasted from a volume-info call site) and raised an
            # untranslated message; this is a snapshot create.
            LOG.error(_LE("Error creating snapshot: %s"), exc.stderr)
            raise exception.GlusterfsException(_("gluster %s failed") %
                                               ' '.join(args))

        if not out:
            # Fix: message referred to 'gluster volume info' although
            # the failed command was a snapshot create; also marked for
            # translation for consistency with the rest of the module.
            raise exception.GlusterfsException(
                _('gluster snapshot create %s: no data received') %
                gluster_mgr.volume
            )

        # Parse the XML CLI output into return code / errno / error
        # string.
        outxml = etree.fromstring(out)
        opret = int(outxml.find('opRet').text)
        operrno = int(outxml.find('opErrno').text)
        operrstr = outxml.find('opErrstr').text

    if opret == -1:
        vers = self.glusterfs_versions[gluster_mgr.host_access]
        if common.GlusterManager.numreduct(vers) > (3, 6):
            # This logic has not yet been implemented in GlusterFS 3.6
            if operrno == 0:
                # Remember that this volume lacks snapshot support so
                # subsequent attempts fail fast.
                self.gluster_nosnap_vols_dict[
                    gluster_mgr.qualified] = operrstr
                msg = _("Share %(share_id)s does not support snapshots: "
                        "%(errstr)s.") % {'share_id': snapshot['share_id'],
                                          'errstr': operrstr}
                LOG.error(msg)
                raise exception.ShareSnapshotNotSupported(msg)
        raise exception.GlusterfsException(
            _("Creating snapshot for share %(share_id)s failed "
              "with %(errno)d: %(errstr)s") % {
                  'share_id': snapshot['share_id'],
                  'errno': operrno,
                  'errstr': operrstr})
|
||||
|
||||
def delete_snapshot(self, context, snapshot, share_server=None):
    """Deletes a snapshot.

    :raises exception.GlusterfsException: if the backend snapshot
        cannot be identified or the delete operation fails.
    """
    gluster_mgr = self._share_manager(snapshot['share'])
    backend_snapshot_name = self._find_actual_backend_snapshot_name(
        gluster_mgr, snapshot)
    args = ('--xml', 'snapshot', 'delete', backend_snapshot_name,
            '--mode=script')
    try:
        out, err = gluster_mgr.gluster_call(*args)
    except exception.ProcessExecutionError as exc:
        LOG.error(_LE("Error deleting snapshot: %s"), exc.stderr)
        raise exception.GlusterfsException(_("gluster %s failed") %
                                           ' '.join(args))

    if not out:
        # NOTE(review): the message interpolates the volume name even
        # though the failed command targeted backend_snapshot_name —
        # confirm which identifier is the more useful one here.
        raise exception.GlusterfsException(
            _('gluster snapshot delete %s: no data received') %
            gluster_mgr.volume
        )

    # Parse the XML CLI output into return code / errno / error string.
    outxml = etree.fromstring(out)
    opret = int(outxml.find('opRet').text)
    operrno = int(outxml.find('opErrno').text)
    operrstr = outxml.find('opErrstr').text

    if opret:
        raise exception.GlusterfsException(
            _("Deleting snapshot %(snap_id)s of share %(share_id)s failed "
              "with %(errno)d: %(errstr)s") % {
                  'snap_id': snapshot['id'],
                  'share_id': snapshot['share_id'],
                  'errno': operrno,
                  'errstr': operrstr})
|
||||
|
||||
def ensure_share(self, context, share, share_server=None):
    """Invoked to ensure that share is exported."""
    export_location = share['export_location']
    # Re-register the backing volume as in use (e.g. after a service
    # restart the in-memory used-volume set starts out empty).
    self.gluster_used_vols.add(export_location)
|
||||
|
||||
# Debt...
# Interface methods not (yet) supported by this layout; callers get an
# explicit NotImplementedError rather than silent misbehavior.

def manage_existing(self, share, driver_options):
    raise NotImplementedError()

def unmanage(self, share):
    raise NotImplementedError()

def extend_share(self, share, new_size, share_server=None):
    raise NotImplementedError()

def shrink_share(self, share, new_size, share_server=None):
    raise NotImplementedError()
|
|
@ -25,87 +25,30 @@ with the GlusterFS backend can mount and hence use the share.
|
|||
Supports working with multiple glusterfs volumes.
|
||||
"""
|
||||
|
||||
import errno
|
||||
import os
|
||||
import random
|
||||
import re
|
||||
import shutil
|
||||
import string
|
||||
import tempfile
|
||||
import xml.etree.cElementTree as etree
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
import six
|
||||
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.i18n import _LI
|
||||
from manila.i18n import _LW
|
||||
from manila.share import driver
|
||||
from manila.share.drivers.glusterfs import common
|
||||
from manila.share.drivers.glusterfs import layout
|
||||
from manila import utils
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
glusterfs_native_manila_share_opts = [
|
||||
cfg.ListOpt('glusterfs_servers',
|
||||
default=[],
|
||||
deprecated_name='glusterfs_targets',
|
||||
help='List of GlusterFS servers that can be used to create '
|
||||
'shares. Each GlusterFS server should be of the form '
|
||||
'[remoteuser@]<volserver>, and they are assumed to '
|
||||
'belong to distinct Gluster clusters.'),
|
||||
cfg.StrOpt('glusterfs_native_server_password',
|
||||
default=None,
|
||||
secret=True,
|
||||
help='Remote GlusterFS server node\'s login password. '
|
||||
'This is not required if '
|
||||
'\'glusterfs_native_path_to_private_key\' is '
|
||||
'configured.'),
|
||||
cfg.StrOpt('glusterfs_native_path_to_private_key',
|
||||
default=None,
|
||||
help='Path of Manila host\'s private SSH key file.'),
|
||||
cfg.StrOpt('glusterfs_volume_pattern',
|
||||
default=None,
|
||||
help='Regular expression template used to filter '
|
||||
'GlusterFS volumes for share creation. '
|
||||
'The regex template can optionally (ie. with support '
|
||||
'of the GlusterFS backend) contain the #{size} '
|
||||
'parameter which matches an integer (sequence of '
|
||||
'digits) in which case the value shall be interpreted as '
|
||||
'size of the volume in GB. Examples: '
|
||||
'"manila-share-volume-\d+$", '
|
||||
'"manila-share-volume-#{size}G-\d+$"; '
|
||||
'with matching volume names, respectively: '
|
||||
'"manila-share-volume-12", "manila-share-volume-3G-13". '
|
||||
'In latter example, the number that matches "#{size}", '
|
||||
'that is, 3, is an indication that the size of volume '
|
||||
'is 3G.'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(glusterfs_native_manila_share_opts)
|
||||
|
||||
ACCESS_TYPE_CERT = 'cert'
|
||||
AUTH_SSL_ALLOW = 'auth.ssl-allow'
|
||||
CLIENT_SSL = 'client.ssl'
|
||||
NFS_EXPORT_VOL = 'nfs.export-volumes'
|
||||
SERVER_SSL = 'server.ssl'
|
||||
# The dict specifying named parameters
|
||||
# that can be used with glusterfs_volume_pattern
|
||||
# in #{<param>} format.
|
||||
# For each of them we give regex pattern it matches
|
||||
# and a transformer function ('trans') for the matched
|
||||
# string value.
|
||||
# Currently we handle only #{size}.
|
||||
PATTERN_DICT = {'size': {'pattern': '(?P<size>\d+)', 'trans': int}}
|
||||
|
||||
GLUSTERFS_VERSION_MIN = (3, 6)
|
||||
|
||||
|
||||
class GlusterfsNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
class GlusterfsNativeShareDriver(driver.ExecuteMixin,
|
||||
layout.GlusterfsShareDriverBase):
|
||||
"""GlusterFS native protocol (glusterfs) share driver.
|
||||
|
||||
Executes commands relating to Shares.
|
||||
|
@ -117,171 +60,24 @@ class GlusterfsNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
|||
1.1 - Support for working with multiple gluster volumes.
|
||||
"""
|
||||
|
||||
GLUSTERFS_VERSION_MIN = (3, 6)
|
||||
|
||||
supported_layouts = ('layout_volume.GlusterfsVolumeMappedLayout',)
|
||||
supported_protocls = ('GLUSTERFS',)
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(GlusterfsNativeShareDriver, self).__init__(
|
||||
False, *args, **kwargs)
|
||||
self._helpers = None
|
||||
self.gluster_used_vols = set()
|
||||
self.configuration.append_config_values(
|
||||
glusterfs_native_manila_share_opts)
|
||||
self.gluster_nosnap_vols_dict = {}
|
||||
self.backend_name = self.configuration.safe_get(
|
||||
'share_backend_name') or 'GlusterFS-Native'
|
||||
self.volume_pattern = self._compile_volume_pattern()
|
||||
self.volume_pattern_keys = self.volume_pattern.groupindex.keys()
|
||||
for srvaddr in self.configuration.glusterfs_servers:
|
||||
# format check for srvaddr
|
||||
self._glustermanager(srvaddr, False)
|
||||
self.glusterfs_versions = {}
|
||||
|
||||
def _compile_volume_pattern(self):
|
||||
"""Compile a RegexObject from the config specified regex template.
|
||||
|
||||
(cfg.glusterfs_volume_pattern)
|
||||
"""
|
||||
|
||||
subdict = {}
|
||||
for key, val in six.iteritems(PATTERN_DICT):
|
||||
subdict[key] = val['pattern']
|
||||
|
||||
# Using templates with placeholder syntax #{<var>}
|
||||
class CustomTemplate(string.Template):
|
||||
delimiter = '#'
|
||||
|
||||
volume_pattern = CustomTemplate(
|
||||
self.configuration.glusterfs_volume_pattern).substitute(
|
||||
subdict)
|
||||
return re.compile(volume_pattern)
|
||||
|
||||
def do_setup(self, context):
|
||||
"""Setup the GlusterFS volumes."""
|
||||
super(GlusterfsNativeShareDriver, self).do_setup(context)
|
||||
|
||||
# We don't use a service mount as its not necessary for us.
|
||||
# Do some sanity checks.
|
||||
glusterfs_versions, exceptions = {}, {}
|
||||
for srvaddr in self.configuration.glusterfs_servers:
|
||||
try:
|
||||
glusterfs_versions[srvaddr] = self._glustermanager(
|
||||
srvaddr, False).get_gluster_version()
|
||||
except exception.GlusterfsException as exc:
|
||||
exceptions[srvaddr] = six.text_type(exc)
|
||||
if exceptions:
|
||||
for srvaddr, excmsg in six.iteritems(exceptions):
|
||||
LOG.error(_LE("'gluster version' failed on server "
|
||||
"%(server)s with: %(message)s"),
|
||||
{'server': srvaddr, 'message': excmsg})
|
||||
raise exception.GlusterfsException(_(
|
||||
"'gluster version' failed on servers %s") % (
|
||||
','.join(exceptions.keys())))
|
||||
notsupp_servers = []
|
||||
for srvaddr, vers in six.iteritems(glusterfs_versions):
|
||||
if common.GlusterManager.numreduct(
|
||||
vers) < GLUSTERFS_VERSION_MIN:
|
||||
notsupp_servers.append(srvaddr)
|
||||
if notsupp_servers:
|
||||
gluster_version_min_str = '.'.join(
|
||||
six.text_type(c) for c in GLUSTERFS_VERSION_MIN)
|
||||
for srvaddr in notsupp_servers:
|
||||
LOG.error(_LE("GlusterFS version %(version)s on server "
|
||||
"%(server)s is not supported, "
|
||||
"minimum requirement: %(minvers)s"),
|
||||
{'server': srvaddr,
|
||||
'version': '.'.join(glusterfs_versions[srvaddr]),
|
||||
'minvers': gluster_version_min_str})
|
||||
raise exception.GlusterfsException(_(
|
||||
"Unsupported GlusterFS version on servers %(servers)s, "
|
||||
"minimum requirement: %(minvers)s") % {
|
||||
'servers': ','.join(notsupp_servers),
|
||||
'minvers': gluster_version_min_str})
|
||||
self.glusterfs_versions = glusterfs_versions
|
||||
|
||||
gluster_volumes_initial = set(self._fetch_gluster_volumes())
|
||||
if not gluster_volumes_initial:
|
||||
# No suitable volumes are found on the Gluster end.
|
||||
# Raise exception.
|
||||
msg = (_("Gluster backend does not provide any volume "
|
||||
"matching pattern %s"
|
||||
) % self.configuration.glusterfs_volume_pattern)
|
||||
LOG.error(msg)
|
||||
raise exception.GlusterfsException(msg)
|
||||
|
||||
LOG.info(_LI("Found %d Gluster volumes allocated for Manila."
|
||||
), len(gluster_volumes_initial))
|
||||
|
||||
try:
|
||||
self._execute('mount.glusterfs', check_exit_code=False)
|
||||
except OSError as exc:
|
||||
if exc.errno == errno.ENOENT:
|
||||
msg = (_("mount.glusterfs is not installed."))
|
||||
LOG.error(msg)
|
||||
raise exception.GlusterfsException(msg)
|
||||
else:
|
||||
msg = (_("Error running mount.glusterfs."))
|
||||
LOG.error(msg)
|
||||
raise
|
||||
|
||||
def _glustermanager(self, gluster_address, req_volume=True):
|
||||
"""Create GlusterManager object for gluster_address."""
|
||||
|
||||
return common.GlusterManager(
|
||||
gluster_address, self._execute,
|
||||
self.configuration.glusterfs_native_path_to_private_key,
|
||||
self.configuration.glusterfs_native_server_password,
|
||||
requires={'volume': req_volume})
|
||||
|
||||
def _share_manager(self, share):
|
||||
"""Return GlusterManager object representing share's backend."""
|
||||
return self._glustermanager(share['export_location'])
|
||||
|
||||
def _fetch_gluster_volumes(self):
|
||||
"""Do a 'gluster volume list | grep <volume pattern>'.
|
||||
|
||||
Aggregate the results from all servers.
|
||||
Extract the named groups from the matching volume names
|
||||
using the specs given in PATTERN_DICT.
|
||||
Return a dict with keys of the form <server>:/<volname>
|
||||
and values being dicts that map names of named groups
|
||||
to their extracted value.
|
||||
"""
|
||||
|
||||
volumes_dict = {}
|
||||
for srvaddr in self.configuration.glusterfs_servers:
|
||||
gluster_mgr = self._glustermanager(srvaddr, False)
|
||||
try:
|
||||
out, err = gluster_mgr.gluster_call('volume', 'list')
|
||||
except exception.ProcessExecutionError as exc:
|
||||
msgdict = {'err': exc.stderr, 'hostinfo': ''}
|
||||
if gluster_mgr.user:
|
||||
msgdict['hostinfo'] = ' on host %s' % gluster_mgr.host
|
||||
LOG.error(_LE("Error retrieving volume list%(hostinfo)s: "
|
||||
"%(err)s") % msgdict)
|
||||
raise exception.GlusterfsException(
|
||||
_('gluster volume list failed'))
|
||||
for volname in out.split("\n"):
|
||||
patmatch = self.volume_pattern.match(volname)
|
||||
if not patmatch:
|
||||
continue
|
||||
pattern_dict = {}
|
||||
for key in self.volume_pattern_keys:
|
||||
keymatch = patmatch.group(key)
|
||||
if keymatch is None:
|
||||
pattern_dict[key] = None
|
||||
else:
|
||||
trans = PATTERN_DICT[key].get('trans', lambda x: x)
|
||||
pattern_dict[key] = trans(keymatch)
|
||||
comp_vol = gluster_mgr.components.copy()
|
||||
comp_vol.update({'volume': volname})
|
||||
gluster_mgr_vol = self._glustermanager(comp_vol)
|
||||
volumes_dict[gluster_mgr_vol.qualified] = pattern_dict
|
||||
return volumes_dict
|
||||
|
||||
def _setup_gluster_vol(self, vol):
|
||||
def _setup_via_manager(self, gluster_mgr, gluster_mgr_parent=None):
|
||||
# Enable gluster volumes for SSL access only.
|
||||
|
||||
gluster_mgr = self._glustermanager(vol)
|
||||
|
||||
ssl_allow_opt = gluster_mgr.get_gluster_vol_option(AUTH_SSL_ALLOW)
|
||||
ssl_allow_opt = (gluster_mgr_parent if gluster_mgr_parent else
|
||||
gluster_mgr).get_gluster_vol_option(
|
||||
AUTH_SSL_ALLOW)
|
||||
if not ssl_allow_opt:
|
||||
# Not having AUTH_SSL_ALLOW set is a problematic edge case.
|
||||
# - In GlusterFS 3.6, it implies that access is allowed to
|
||||
|
@ -297,13 +93,40 @@ class GlusterfsNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
|||
LOG.error(msg)
|
||||
raise exception.GlusterfsException(msg)
|
||||
|
||||
for option, value in six.iteritems(
|
||||
{NFS_EXPORT_VOL: 'off', CLIENT_SSL: 'on', SERVER_SSL: 'on'}
|
||||
gluster_actions = []
|
||||
if gluster_mgr_parent:
|
||||
# The clone of the snapshot, the new volume, retains the authorized
|
||||
# access list of the snapshotted volume/share, which includes
|
||||
# identities of the backend servers and Manila clients. So only
|
||||
# retain the identities of the GlusterFS servers volume in the
|
||||
# authorized access list of the new volume. The identities of
|
||||
# GlusterFS are easy to figure as they're pre-fixed by
|
||||
# "glusterfs-server".
|
||||
#
|
||||
# Wrt. GlusterFS' parsing of auth.ssl-allow, please see code from
|
||||
# https://github.com/gluster/glusterfs/blob/v3.6.2/
|
||||
# xlators/protocol/auth/login/src/login.c#L80
|
||||
# until end of gf_auth() function
|
||||
old_access_list = re.split('[ ,]', ssl_allow_opt)
|
||||
regex = re.compile('\Aglusterfs-server*')
|
||||
access_to = ','.join(filter(regex.match, old_access_list))
|
||||
gluster_actions.append(('set', AUTH_SSL_ALLOW, access_to))
|
||||
|
||||
for option, value in (
|
||||
(NFS_EXPORT_VOL, 'off'), (CLIENT_SSL, 'on'), (SERVER_SSL, 'on')
|
||||
):
|
||||
gluster_actions.append(('set', option, value))
|
||||
|
||||
if not gluster_mgr_parent:
|
||||
# TODO(deepakcs) Remove this once ssl options can be
|
||||
# set dynamically.
|
||||
gluster_actions.append(('stop', '--mode=script'))
|
||||
gluster_actions.append(('start',))
|
||||
|
||||
for action in gluster_actions:
|
||||
try:
|
||||
gluster_mgr.gluster_call(
|
||||
'volume', 'set', gluster_mgr.volume,
|
||||
option, value)
|
||||
'volume', action[0], gluster_mgr.volume, *action[1:])
|
||||
except exception.ProcessExecutionError as exc:
|
||||
msg = (_("Error in gluster volume set during volume setup. "
|
||||
"volume: %(volname)s, option: %(option)s, "
|
||||
|
@ -313,436 +136,9 @@ class GlusterfsNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
|||
LOG.error(msg)
|
||||
raise exception.GlusterfsException(msg)
|
||||
|
||||
# TODO(deepakcs) Remove this once ssl options can be
|
||||
# set dynamically.
|
||||
common._restart_gluster_vol(gluster_mgr)
|
||||
|
||||
@utils.synchronized("glusterfs_native", external=False)
|
||||
def _pop_gluster_vol(self, size=None):
|
||||
"""Pick an unbound volume.
|
||||
|
||||
Do a _fetch_gluster_volumes() first to get the complete
|
||||
list of usable volumes.
|
||||
Keep only the unbound ones (ones that are not yet used to
|
||||
back a share).
|
||||
If size is given, try to pick one which has a size specification
|
||||
(according to the 'size' named group of the volume pattern),
|
||||
and its size is greater-than-or-equal to the given size.
|
||||
Return the volume chosen (in <host>:/<volname> format).
|
||||
"""
|
||||
|
||||
voldict = self._fetch_gluster_volumes()
|
||||
# calculate the set of unused volumes
|
||||
unused_vols = set(voldict) - self.gluster_used_vols
|
||||
|
||||
if not unused_vols:
|
||||
# No volumes available for use as share. Warn user.
|
||||
msg = (_("No unused gluster volumes available for use as share! "
|
||||
"Create share won't be supported unless existing shares "
|
||||
"are deleted or some gluster volumes are created with "
|
||||
"names matching 'glusterfs_volume_pattern'."))
|
||||
LOG.warn(msg)
|
||||
else:
|
||||
LOG.info(_LI("Number of gluster volumes in use: "
|
||||
"%(inuse-numvols)s. Number of gluster volumes "
|
||||
"available for use as share: %(unused-numvols)s"),
|
||||
{'inuse-numvols': len(self.gluster_used_vols),
|
||||
'unused-numvols': len(unused_vols)})
|
||||
|
||||
# volmap is the data structure used to categorize and sort
|
||||
# the unused volumes. It's a nested dictionary of structure
|
||||
# {<size>: <hostmap>}
|
||||
# where <size> is either an integer or None,
|
||||
# <hostmap> is a dictionary of structure {<host>: <vols>}
|
||||
# where <host> is a host name (IP address), <vols> is a list
|
||||
# of volumes (gluster addresses).
|
||||
volmap = {None: {}}
|
||||
# if both caller has specified size and 'size' occurs as
|
||||
# a parameter in the volume pattern...
|
||||
if size and 'size' in self.volume_pattern_keys:
|
||||
# then this function is used to extract the
|
||||
# size value for a given volume from the voldict...
|
||||
get_volsize = lambda vol: voldict[vol]['size']
|
||||
else:
|
||||
# else just use a stub.
|
||||
get_volsize = lambda vol: None
|
||||
for vol in unused_vols:
|
||||
# For each unused volume, we extract the <size>
|
||||
# and <host> values with which it can be inserted
|
||||
# into the volmap, and conditionally perform
|
||||
# the insertion (with the condition being: once
|
||||
# caller specified size and a size indication was
|
||||
# found in the volume name, we require that the
|
||||
# indicated size adheres to caller's spec).
|
||||
volsize = get_volsize(vol)
|
||||
if not volsize or volsize >= size:
|
||||
hostmap = volmap.get(volsize)
|
||||
if not hostmap:
|
||||
hostmap = {}
|
||||
volmap[volsize] = hostmap
|
||||
host = self._glustermanager(vol).host
|
||||
hostvols = hostmap.get(host)
|
||||
if not hostvols:
|
||||
hostvols = []
|
||||
hostmap[host] = hostvols
|
||||
hostvols.append(vol)
|
||||
if len(volmap) > 1:
|
||||
# volmap has keys apart from the default None,
|
||||
# ie. volumes with sensible and adherent size
|
||||
# indication have been found. Then pick the smallest
|
||||
# of the size values.
|
||||
chosen_size = sorted(n for n in volmap.keys() if n)[0]
|
||||
else:
|
||||
chosen_size = None
|
||||
chosen_hostmap = volmap[chosen_size]
|
||||
if not chosen_hostmap:
|
||||
msg = (_("Couldn't find a free gluster volume to use."))
|
||||
LOG.error(msg)
|
||||
raise exception.GlusterfsException(msg)
|
||||
|
||||
# From the hosts we choose randomly to tend towards
|
||||
# even distribution of share backing volumes among
|
||||
# Gluster clusters.
|
||||
chosen_host = random.choice(list(chosen_hostmap.keys()))
|
||||
# Within a host's volumes, choose alphabetically first,
|
||||
# to make it predictable.
|
||||
vol = sorted(chosen_hostmap[chosen_host])[0]
|
||||
self._setup_gluster_vol(vol)
|
||||
self.gluster_used_vols.add(vol)
|
||||
return vol
|
||||
|
||||
@utils.synchronized("glusterfs_native", external=False)
|
||||
def _push_gluster_vol(self, exp_locn):
|
||||
try:
|
||||
self.gluster_used_vols.remove(exp_locn)
|
||||
except KeyError:
|
||||
msg = (_("Couldn't find the share in used list."))
|
||||
LOG.error(msg)
|
||||
raise exception.GlusterfsException(msg)
|
||||
|
||||
def _wipe_gluster_vol(self, gluster_mgr):
|
||||
|
||||
# Reset the SSL options.
|
||||
try:
|
||||
gluster_mgr.gluster_call(
|
||||
'volume', 'set', gluster_mgr.volume,
|
||||
CLIENT_SSL, 'off')
|
||||
except exception.ProcessExecutionError as exc:
|
||||
msg = (_("Error in gluster volume set during _wipe_gluster_vol. "
|
||||
"Volume: %(volname)s, Option: %(option)s, "
|
||||
"Error: %(error)s") %
|
||||
{'volname': gluster_mgr.volume,
|
||||
'option': CLIENT_SSL, 'error': exc.stderr})
|
||||
LOG.error(msg)
|
||||
raise exception.GlusterfsException(msg)
|
||||
|
||||
try:
|
||||
gluster_mgr.gluster_call(
|
||||
'volume', 'set', gluster_mgr.volume,
|
||||
SERVER_SSL, 'off')
|
||||
except exception.ProcessExecutionError as exc:
|
||||
msg = (_("Error in gluster volume set during _wipe_gluster_vol. "
|
||||
"Volume: %(volname)s, Option: %(option)s, "
|
||||
"Error: %(error)s") %
|
||||
{'volname': gluster_mgr.volume,
|
||||
'option': SERVER_SSL, 'error': exc.stderr})
|
||||
LOG.error(msg)
|
||||
raise exception.GlusterfsException(msg)
|
||||
|
||||
common._restart_gluster_vol(gluster_mgr)
|
||||
|
||||
# Create a temporary mount.
|
||||
gluster_export = gluster_mgr.export
|
||||
tmpdir = tempfile.mkdtemp()
|
||||
try:
|
||||
common._mount_gluster_vol(self._execute, gluster_export, tmpdir)
|
||||
except exception.GlusterfsException:
|
||||
shutil.rmtree(tmpdir, ignore_errors=True)
|
||||
raise
|
||||
|
||||
# Delete the contents of a GlusterFS volume that is temporarily
|
||||
# mounted.
|
||||
# From GlusterFS version 3.7, two directories, '.trashcan' at the root
|
||||
# of the GlusterFS volume and 'internal_op' within the '.trashcan'
|
||||
# directory, are internally created when a GlusterFS volume is started.
|
||||
# GlusterFS does not allow unlink(2) of the two directories. So do not
|
||||
# delete the paths of the two directories, but delete their contents
|
||||
# along with the rest of the contents of the volume.
|
||||
srvaddr = gluster_mgr.host_access
|
||||
if common.GlusterManager.numreduct(self.glusterfs_versions[srvaddr]
|
||||
) < (3, 7):
|
||||
cmd = ['find', tmpdir, '-mindepth', '1', '-delete']
|
||||
else:
|
||||
ignored_dirs = map(lambda x: os.path.join(tmpdir, *x),
|
||||
[('.trashcan', ), ('.trashcan', 'internal_op')])
|
||||
ignored_dirs = list(ignored_dirs)
|
||||
cmd = ['find', tmpdir, '-mindepth', '1', '!', '-path',
|
||||
ignored_dirs[0], '!', '-path', ignored_dirs[1], '-delete']
|
||||
|
||||
try:
|
||||
self._execute(*cmd, run_as_root=True)
|
||||
except exception.ProcessExecutionError as exc:
|
||||
msg = (_("Error trying to wipe gluster volume. "
|
||||
"gluster_export: %(export)s, Error: %(error)s") %
|
||||
{'export': gluster_export, 'error': exc.stderr})
|
||||
LOG.error(msg)
|
||||
raise exception.GlusterfsException(msg)
|
||||
finally:
|
||||
# Unmount.
|
||||
common._umount_gluster_vol(self._execute, tmpdir)
|
||||
shutil.rmtree(tmpdir, ignore_errors=True)
|
||||
|
||||
# Set the SSL options.
|
||||
try:
|
||||
gluster_mgr.gluster_call(
|
||||
'volume', 'set', gluster_mgr.volume,
|
||||
CLIENT_SSL, 'on')
|
||||
except exception.ProcessExecutionError as exc:
|
||||
msg = (_("Error in gluster volume set during _wipe_gluster_vol. "
|
||||
"Volume: %(volname)s, Option: %(option)s, "
|
||||
"Error: %(error)s") %
|
||||
{'volname': gluster_mgr.volume,
|
||||
'option': CLIENT_SSL, 'error': exc.stderr})
|
||||
LOG.error(msg)
|
||||
raise exception.GlusterfsException(msg)
|
||||
|
||||
try:
|
||||
gluster_mgr.gluster_call(
|
||||
'volume', 'set', gluster_mgr.volume,
|
||||
SERVER_SSL, 'on')
|
||||
except exception.ProcessExecutionError as exc:
|
||||
msg = (_("Error in gluster volume set during _wipe_gluster_vol. "
|
||||
"Volume: %(volname)s, Option: %(option)s, "
|
||||
"Error: %(error)s") %
|
||||
{'volname': gluster_mgr.volume,
|
||||
'option': SERVER_SSL, 'error': exc.stderr})
|
||||
LOG.error(msg)
|
||||
raise exception.GlusterfsException(msg)
|
||||
|
||||
common._restart_gluster_vol(gluster_mgr)
|
||||
|
||||
def get_network_allocations_number(self):
|
||||
return 0
|
||||
|
||||
def create_share(self, context, share, share_server=None):
|
||||
"""Create a share using GlusterFS volume.
|
||||
|
||||
1 Manila share = 1 GlusterFS volume. Pick an unused
|
||||
GlusterFS volume for use as a share.
|
||||
"""
|
||||
try:
|
||||
export_location = self._pop_gluster_vol(share['size'])
|
||||
except exception.GlusterfsException:
|
||||
msg = (_LE("Error creating share %(share_id)s"),
|
||||
{'share_id': share['id']})
|
||||
LOG.error(msg)
|
||||
raise
|
||||
|
||||
# TODO(deepakcs): Enable quota and set it to the share size.
|
||||
|
||||
# For native protocol, the export_location should be of the form:
|
||||
# server:/volname
|
||||
LOG.info(_LI("export_location sent back from create_share: %s"),
|
||||
(export_location,))
|
||||
return export_location
|
||||
|
||||
def delete_share(self, context, share, share_server=None):
|
||||
"""Delete a share on the GlusterFS volume.
|
||||
|
||||
1 Manila share = 1 GlusterFS volume. Put the gluster
|
||||
volume back in the available list.
|
||||
"""
|
||||
gmgr = self._share_manager(share)
|
||||
try:
|
||||
self._wipe_gluster_vol(gmgr)
|
||||
self._push_gluster_vol(gmgr.qualified)
|
||||
except exception.GlusterfsException:
|
||||
msg = (_LE("Error during delete_share request for "
|
||||
"share %(share_id)s"), {'share_id': share['id']})
|
||||
LOG.error(msg)
|
||||
raise
|
||||
|
||||
# TODO(deepakcs): Disable quota.
|
||||
|
||||
@staticmethod
|
||||
def _find_actual_backend_snapshot_name(gluster_mgr, snapshot):
|
||||
args = ('snapshot', 'list', gluster_mgr.volume, '--mode=script')
|
||||
try:
|
||||
out, err = gluster_mgr.gluster_call(*args)
|
||||
except exception.ProcessExecutionError as exc:
|
||||
LOG.error(_LE("Error retrieving snapshot list: %s"), exc.stderr)
|
||||
raise exception.GlusterfsException(_("gluster %s failed") %
|
||||
' '.join(args))
|
||||
snapgrep = list(filter(lambda x: snapshot['id'] in x, out.split("\n")))
|
||||
if len(snapgrep) != 1:
|
||||
msg = (_("Failed to identify backing GlusterFS object "
|
||||
"for snapshot %(snap_id)s of share %(share_id)s: "
|
||||
"a single candidate was expected, %(found)d was found.") %
|
||||
{'snap_id': snapshot['id'],
|
||||
'share_id': snapshot['share_id'],
|
||||
'found': len(snapgrep)})
|
||||
raise exception.GlusterfsException(msg)
|
||||
backend_snapshot_name = snapgrep[0]
|
||||
return backend_snapshot_name
|
||||
|
||||
def create_share_from_snapshot(self, context, share, snapshot,
|
||||
share_server=None):
|
||||
old_gmgr = self._share_manager(snapshot['share'])
|
||||
|
||||
# Snapshot clone feature in GlusterFS server essential to support this
|
||||
# API is available in GlusterFS server versions 3.7 and higher. So do
|
||||
# a version check.
|
||||
vers = self.glusterfs_versions[old_gmgr.host_access]
|
||||
minvers = (3, 7)
|
||||
if common.GlusterManager.numreduct(vers) < minvers:
|
||||
minvers_str = '.'.join(six.text_type(c) for c in minvers)
|
||||
vers_str = '.'.join(vers)
|
||||
msg = (_("GlusterFS version %(version)s on server %(server)s does "
|
||||
"not support creation of shares from snapshot. "
|
||||
"minimum requirement: %(minversion)s") %
|
||||
{'version': vers_str, 'server': old_gmgr.host,
|
||||
'minversion': minvers_str})
|
||||
LOG.error(msg)
|
||||
raise exception.GlusterfsException(msg)
|
||||
|
||||
# Clone the snapshot. The snapshot clone, a new GlusterFS volume
|
||||
# would serve as a share.
|
||||
backend_snapshot_name = self._find_actual_backend_snapshot_name(
|
||||
old_gmgr, snapshot)
|
||||
volume = ''.join(['manila-', share['id']])
|
||||
args_tuple = (('snapshot', 'activate', backend_snapshot_name,
|
||||
'force', '--mode=script'),
|
||||
('snapshot', 'clone', volume, backend_snapshot_name))
|
||||
try:
|
||||
for args in args_tuple:
|
||||
out, err = old_gmgr.gluster_call(*args)
|
||||
except exception.ProcessExecutionError as exc:
|
||||
LOG.error(_LE("Error creating share from snapshot: %s"),
|
||||
exc.stderr)
|
||||
raise exception.GlusterfsException(_("gluster %s failed") %
|
||||
' '.join(args))
|
||||
|
||||
# Get a manager for the the new volume/share.
|
||||
comp_vol = old_gmgr.components.copy()
|
||||
comp_vol.update({'volume': volume})
|
||||
gmgr = self._glustermanager(comp_vol)
|
||||
|
||||
# Configure the GlusterFS volume to be used as share.
|
||||
# 1. The clone of the snapshot, the new volume, retains the authorized
|
||||
# access list of the snapshotted volume/share, which includes
|
||||
# identities of the backend servers and Manila clients. So only
|
||||
# retain the identities of the GlusterFS servers volume in the
|
||||
# authorized access list of the new volume. The identities of
|
||||
# GlusterFS are easy to figure as they're pre-fixed by
|
||||
# "glusterfs-server".
|
||||
# 2. Start the new volume.
|
||||
old_access = gmgr.get_gluster_vol_option(AUTH_SSL_ALLOW)
|
||||
# wrt. GlusterFS' parsing of auth.ssl-allow, please see code from
|
||||
# https://github.com/gluster/glusterfs/blob/v3.6.2/
|
||||
# xlators/protocol/auth/login/src/login.c#L80
|
||||
# until end of gf_auth() function
|
||||
old_access_list = re.split('[ ,]', old_access)
|
||||
regex = re.compile('\Aglusterfs-server*')
|
||||
access_to = ','.join(filter(regex.match, old_access_list))
|
||||
args_tuple = (('volume', 'set', gmgr.volume, AUTH_SSL_ALLOW,
|
||||
access_to),
|
||||
('volume', 'start', gmgr.volume))
|
||||
try:
|
||||
for args in args_tuple:
|
||||
out, err = gmgr.gluster_call(*args)
|
||||
except exception.ProcessExecutionError as exc:
|
||||
LOG.error(_LE("Error creating share from snapshot: %s"),
|
||||
exc.stderr)
|
||||
raise exception.GlusterfsException(_("gluster %s failed") %
|
||||
' '.join(args))
|
||||
|
||||
self.gluster_used_vols.add(gmgr.qualified)
|
||||
return gmgr.qualified
|
||||
|
||||
def create_snapshot(self, context, snapshot, share_server=None):
|
||||
"""Creates a snapshot."""
|
||||
|
||||
gluster_mgr = self._share_manager(snapshot['share'])
|
||||
if gluster_mgr.qualified in self.gluster_nosnap_vols_dict:
|
||||
opret, operrno = -1, 0
|
||||
operrstr = self.gluster_nosnap_vols_dict[gluster_mgr.qualified]
|
||||
else:
|
||||
args = ('--xml', 'snapshot', 'create', 'manila-' + snapshot['id'],
|
||||
gluster_mgr.volume)
|
||||
try:
|
||||
out, err = gluster_mgr.gluster_call(*args)
|
||||
except exception.ProcessExecutionError as exc:
|
||||
LOG.error(_LE("Error retrieving volume info: %s"), exc.stderr)
|
||||
raise exception.GlusterfsException("gluster %s failed" %
|
||||
' '.join(args))
|
||||
|
||||
if not out:
|
||||
raise exception.GlusterfsException(
|
||||
'gluster volume info %s: no data received' %
|
||||
gluster_mgr.volume
|
||||
)
|
||||
|
||||
outxml = etree.fromstring(out)
|
||||
opret = int(outxml.find('opRet').text)
|
||||
operrno = int(outxml.find('opErrno').text)
|
||||
operrstr = outxml.find('opErrstr').text
|
||||
|
||||
if opret == -1:
|
||||
vers = self.glusterfs_versions[gluster_mgr.host_access]
|
||||
if common.GlusterManager.numreduct(vers) > (3, 6):
|
||||
# This logic has not yet been implemented in GlusterFS 3.6
|
||||
if operrno == 0:
|
||||
self.gluster_nosnap_vols_dict[
|
||||
gluster_mgr.qualified] = operrstr
|
||||
msg = _("Share %(share_id)s does not support snapshots: "
|
||||
"%(errstr)s.") % {'share_id': snapshot['share_id'],
|
||||
'errstr': operrstr}
|
||||
LOG.error(msg)
|
||||
raise exception.ShareSnapshotNotSupported(msg)
|
||||
raise exception.GlusterfsException(
|
||||
_("Creating snapshot for share %(share_id)s failed "
|
||||
"with %(errno)d: %(errstr)s") % {
|
||||
'share_id': snapshot['share_id'],
|
||||
'errno': operrno,
|
||||
'errstr': operrstr})
|
||||
|
||||
def delete_snapshot(self, context, snapshot, share_server=None):
|
||||
"""Deletes a snapshot."""
|
||||
|
||||
gluster_mgr = self._share_manager(snapshot['share'])
|
||||
backend_snapshot_name = self._find_actual_backend_snapshot_name(
|
||||
gluster_mgr, snapshot)
|
||||
args = ('--xml', 'snapshot', 'delete', backend_snapshot_name,
|
||||
'--mode=script')
|
||||
try:
|
||||
out, err = gluster_mgr.gluster_call(*args)
|
||||
except exception.ProcessExecutionError as exc:
|
||||
LOG.error(_LE("Error deleting snapshot: %s"), exc.stderr)
|
||||
raise exception.GlusterfsException(_("gluster %s failed") %
|
||||
' '.join(args))
|
||||
|
||||
if not out:
|
||||
raise exception.GlusterfsException(
|
||||
_('gluster snapshot delete %s: no data received') %
|
||||
gluster_mgr.volume
|
||||
)
|
||||
|
||||
outxml = etree.fromstring(out)
|
||||
opret = int(outxml.find('opRet').text)
|
||||
operrno = int(outxml.find('opErrno').text)
|
||||
operrstr = outxml.find('opErrstr').text
|
||||
|
||||
if opret:
|
||||
raise exception.GlusterfsException(
|
||||
_("Deleting snapshot %(snap_id)s of share %(share_id)s failed "
|
||||
"with %(errno)d: %(errstr)s") % {
|
||||
'snap_id': snapshot['id'],
|
||||
'share_id': snapshot['share_id'],
|
||||
'errno': operrno,
|
||||
'errstr': operrstr})
|
||||
|
||||
@utils.synchronized("glusterfs_native_access", external=False)
|
||||
def allow_access(self, context, share, access, share_server=None):
|
||||
def _allow_access_via_manager(self, gluster_mgr, context, share, access,
|
||||
share_server=None):
|
||||
"""Allow access to a share using certs.
|
||||
|
||||
Add the SSL CN (Common Name) that's allowed to access the server.
|
||||
|
@ -751,7 +147,6 @@ class GlusterfsNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
|||
if access['access_type'] != ACCESS_TYPE_CERT:
|
||||
raise exception.InvalidShareAccess(_("Only 'cert' access type "
|
||||
"allowed"))
|
||||
gluster_mgr = self._share_manager(share)
|
||||
|
||||
ssl_allow_opt = gluster_mgr.get_gluster_vol_option(AUTH_SSL_ALLOW)
|
||||
# wrt. GlusterFS' parsing of auth.ssl-allow, please see code from
|
||||
|
@ -789,7 +184,8 @@ class GlusterfsNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
|||
common._restart_gluster_vol(gluster_mgr)
|
||||
|
||||
@utils.synchronized("glusterfs_native_access", external=False)
|
||||
def deny_access(self, context, share, access, share_server=None):
|
||||
def _deny_access_via_manager(self, gluster_mgr, context, share, access,
|
||||
share_server=None):
|
||||
"""Deny access to a share that's using cert based auth.
|
||||
|
||||
Remove the SSL CN (Common Name) that's allowed to access the server.
|
||||
|
@ -799,7 +195,6 @@ class GlusterfsNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
|||
raise exception.InvalidShareAccess(_("Only 'cert' access type "
|
||||
"allowed for access "
|
||||
"removal."))
|
||||
gluster_mgr = self._share_manager(share)
|
||||
|
||||
ssl_allow_opt = gluster_mgr.get_gluster_vol_option(AUTH_SSL_ALLOW)
|
||||
ssl_allow = re.split('[ ,]', ssl_allow_opt)
|
||||
|
@ -853,6 +248,5 @@ class GlusterfsNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
|||
|
||||
super(GlusterfsNativeShareDriver, self)._update_share_stats(data)
|
||||
|
||||
def ensure_share(self, context, share, share_server=None):
|
||||
"""Invoked to ensure that share is exported."""
|
||||
self.gluster_used_vols.add(share['export_location'])
|
||||
def get_network_allocations_number(self):
|
||||
return 0
|
||||
|
|
|
@ -0,0 +1,899 @@
|
|||
# Copyright (c) 2014 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
""" GlusterFS volume mapped share layout testcases.
|
||||
"""
|
||||
|
||||
import re
|
||||
import shutil
|
||||
import tempfile
|
||||
|
||||
import ddt
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
|
||||
from manila.common import constants
|
||||
from manila import context
|
||||
from manila import exception
|
||||
from manila.share import configuration as config
|
||||
from manila.share.drivers.glusterfs import common
|
||||
from manila.share.drivers.glusterfs import layout_volume
|
||||
from manila import test
|
||||
from manila.tests import fake_utils
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
def new_share(**kwargs):
|
||||
share = {
|
||||
'id': 'fakeid',
|
||||
'name': 'fakename',
|
||||
'size': 1,
|
||||
'share_proto': 'glusterfs',
|
||||
}
|
||||
share.update(kwargs)
|
||||
return share
|
||||
|
||||
|
||||
def glusterXMLOut(**kwargs):
|
||||
|
||||
template = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
|
||||
<cliOutput>
|
||||
<opRet>%(ret)d</opRet>
|
||||
<opErrno>%(errno)d</opErrno>
|
||||
<opErrstr>fake error</opErrstr>
|
||||
</cliOutput>"""
|
||||
|
||||
return template % kwargs, ''
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
class GlusterfsVolumeMappedLayoutTestCase(test.TestCase):
|
||||
"""Tests GlusterfsVolumeMappedLayout."""
|
||||
|
||||
def setUp(self):
|
||||
super(GlusterfsVolumeMappedLayoutTestCase, self).setUp()
|
||||
fake_utils.stub_out_utils_execute(self)
|
||||
self._execute = fake_utils.fake_execute
|
||||
self._context = context.get_admin_context()
|
||||
|
||||
self.glusterfs_target1 = 'root@host1:/gv1'
|
||||
self.glusterfs_target2 = 'root@host2:/gv2'
|
||||
self.glusterfs_server1 = 'root@host1'
|
||||
self.glusterfs_server2 = 'root@host2'
|
||||
self.glusterfs_server1_volumes = 'manila-share-1-1G\nshare1'
|
||||
self.glusterfs_server2_volumes = 'manila-share-2-2G\nshare2'
|
||||
self.share1 = new_share(
|
||||
export_location=self.glusterfs_target1,
|
||||
status=constants.STATUS_AVAILABLE)
|
||||
self.share2 = new_share(
|
||||
export_location=self.glusterfs_target2,
|
||||
status=constants.STATUS_AVAILABLE)
|
||||
gmgr = common.GlusterManager
|
||||
self.gmgr1 = gmgr(self.glusterfs_server1, self._execute, None, None,
|
||||
requires={'volume': False})
|
||||
self.gmgr2 = gmgr(self.glusterfs_server2, self._execute, None, None,
|
||||
requires={'volume': False})
|
||||
self.glusterfs_volumes_dict = (
|
||||
{'root@host1:/manila-share-1-1G': {'size': 1},
|
||||
'root@host2:/manila-share-2-2G': {'size': 2}})
|
||||
self.glusterfs_used_vols = set([
|
||||
'root@host1:/manila-share-1-1G',
|
||||
'root@host2:/manila-share-2-2G'])
|
||||
|
||||
CONF.set_default('glusterfs_servers',
|
||||
[self.glusterfs_server1, self.glusterfs_server2])
|
||||
CONF.set_default('glusterfs_native_server_password',
|
||||
'fake_password')
|
||||
CONF.set_default('glusterfs_native_path_to_private_key',
|
||||
'/fakepath/to/privatekey')
|
||||
CONF.set_default('glusterfs_volume_pattern',
|
||||
'manila-share-\d+-#{size}G$')
|
||||
CONF.set_default('driver_handles_share_servers', False)
|
||||
|
||||
self.fake_driver = mock.Mock()
|
||||
self.mock_object(self.fake_driver, '_execute',
|
||||
self._execute)
|
||||
self.fake_driver.GLUSTERFS_VERSION_MIN = (3, 6)
|
||||
|
||||
self.fake_conf = config.Configuration(None)
|
||||
self.mock_object(tempfile, 'mkdtemp',
|
||||
mock.Mock(return_value='/tmp/tmpKGHKJ'))
|
||||
self.mock_object(common.GlusterManager, 'make_gluster_call')
|
||||
|
||||
with mock.patch.object(layout_volume.GlusterfsVolumeMappedLayout,
|
||||
'_glustermanager',
|
||||
side_effect=[self.gmgr1, self.gmgr2]):
|
||||
self._layout = layout_volume.GlusterfsVolumeMappedLayout(
|
||||
self.fake_driver, configuration=self.fake_conf)
|
||||
self._layout.glusterfs_versions = {self.glusterfs_server1: ('3', '6'),
|
||||
self.glusterfs_server2: ('3', '7')}
|
||||
self.addCleanup(fake_utils.fake_execute_set_repliers, [])
|
||||
self.addCleanup(fake_utils.fake_execute_clear_log)
|
||||
|
||||
@ddt.data({"test_kwargs": {}, "requires": {"volume": True}},
|
||||
{"test_kwargs": {'req_volume': False},
|
||||
"requires": {"volume": False}})
|
||||
@ddt.unpack
|
||||
def test_glustermanager(self, test_kwargs, requires):
|
||||
fake_obj = mock.Mock()
|
||||
self.mock_object(common, 'GlusterManager',
|
||||
mock.Mock(return_value=fake_obj))
|
||||
|
||||
ret = self._layout._glustermanager(self.glusterfs_target1,
|
||||
**test_kwargs)
|
||||
|
||||
common.GlusterManager.assert_called_once_with(
|
||||
self.glusterfs_target1, self._execute,
|
||||
self._layout.configuration.glusterfs_native_path_to_private_key,
|
||||
self._layout.configuration.glusterfs_native_server_password,
|
||||
requires=requires)
|
||||
self.assertEqual(fake_obj, ret)
|
||||
|
||||
def test_compile_volume_pattern(self):
    """The layout compiles the configured volume pattern template."""
    expected = re.compile('manila-share-\d+-(?P<size>\d+)G$')

    actual = self._layout._compile_volume_pattern()

    self.assertEqual(expected, actual)
def test_fetch_gluster_volumes(self):
    """_fetch_gluster_volumes lists volumes of all configured servers.

    Each server's 'gluster volume list' output is parsed against the
    volume pattern and collected into a {volume_address: {size}} dict.
    """
    self.mock_object(
        self.gmgr1, 'gluster_call',
        mock.Mock(return_value=(self.glusterfs_server1_volumes, '')))
    self.mock_object(
        self.gmgr2, 'gluster_call',
        mock.Mock(return_value=(self.glusterfs_server2_volumes, '')))
    # _glustermanager is called alternately for the server and then for
    # each matching volume found on it.
    _glustermanager_calls = (
        self.gmgr1,
        common.GlusterManager('root@host1:/manila-share-1-1G'),
        self.gmgr2,
        common.GlusterManager('root@host2:/manila-share-2-2G'))
    self.mock_object(self._layout, '_glustermanager',
                     mock.Mock(side_effect=_glustermanager_calls))
    expected_output = self.glusterfs_volumes_dict

    ret = self._layout._fetch_gluster_volumes()

    test_args = ('volume', 'list')
    self.gmgr1.gluster_call.assert_called_once_with(*test_args)
    self.gmgr2.gluster_call.assert_called_once_with(*test_args)
    self.assertEqual(expected_output, ret)
def test_fetch_gluster_volumes_no_keymatch(self):
    """Volumes matching the pattern without a size group get size None."""
    self._layout.configuration.glusterfs_servers = [self.glusterfs_server1]
    self.mock_object(
        self.gmgr1, 'gluster_call',
        mock.Mock(return_value=('manila-share-1', '')))
    _glustermanager_calls = (
        self.gmgr1,
        common.GlusterManager('root@host1:/manila-share-1'))
    self.mock_object(self._layout, '_glustermanager',
                     mock.Mock(side_effect=_glustermanager_calls))
    # Pattern with an optional size group, so 'manila-share-1' matches
    # but the size subgroup is absent.
    self.mock_object(self._layout, 'volume_pattern',
                     re.compile('manila-share-\d+(-(?P<size>\d+)G)?$'))
    expected_output = {'root@host1:/manila-share-1': {'size': None}}

    ret = self._layout._fetch_gluster_volumes()

    test_args = ('volume', 'list')
    self.gmgr1.gluster_call.assert_called_once_with(*test_args)
    self.assertEqual(expected_output, ret)
def test_fetch_gluster_volumes_error(self):
    """A failing 'volume list' is logged and re-raised as GlusterfsException."""
    test_args = ('volume', 'list')

    def raise_exception(*args, **kwargs):
        # Only fail for the exact 'volume list' invocation.
        if(args == test_args):
            raise exception.ProcessExecutionError()

    self._layout.configuration.glusterfs_servers = [self.glusterfs_server1]
    self.mock_object(self.gmgr1, 'gluster_call',
                     mock.Mock(side_effect=raise_exception))
    self.mock_object(self._layout, '_glustermanager',
                     mock.Mock(return_value=self.gmgr1))
    self.mock_object(layout_volume.LOG, 'error')

    self.assertRaises(exception.GlusterfsException,
                      self._layout._fetch_gluster_volumes)

    self.gmgr1.gluster_call.assert_called_once_with(*test_args)
    self.assertTrue(layout_volume.LOG.error.called)
def test_do_setup(self):
    """do_setup probes GlusterFS version, fetches volumes and checks mount."""
    self._layout.configuration.glusterfs_servers = [self.glusterfs_server1]
    self.mock_object(self.gmgr1, 'get_gluster_version',
                     mock.Mock(return_value=('3', '6')))
    self.mock_object(self._layout, '_glustermanager',
                     mock.Mock(return_value=self.gmgr1))
    self.mock_object(self._layout, '_fetch_gluster_volumes',
                     mock.Mock(return_value=self.glusterfs_volumes_dict))
    self.mock_object(self._layout, '_check_mount_glusterfs')
    self._layout.gluster_used_vols = self.glusterfs_used_vols
    self.mock_object(layout_volume.LOG, 'warn')

    self._layout.do_setup(self._context)

    self._layout._fetch_gluster_volumes.assert_called_once_with()
    self._layout._check_mount_glusterfs.assert_called_once_with()
    self.gmgr1.get_gluster_version.assert_called_once_with()
def test_do_setup_unsupported_glusterfs_version(self):
    """do_setup rejects a backend older than the driver's minimum (3.6)."""
    self._layout.configuration.glusterfs_servers = [self.glusterfs_server1]
    self.mock_object(self.gmgr1, 'get_gluster_version',
                     mock.Mock(return_value=('3', '5')))
    self.mock_object(self._layout, '_glustermanager',
                     mock.Mock(return_value=self.gmgr1))

    self.assertRaises(exception.GlusterfsException,
                      self._layout.do_setup, self._context)

    self.gmgr1.get_gluster_version.assert_called_once_with()
@ddt.data(exception.GlusterfsException, RuntimeError)
def test_do_setup_get_gluster_version_fails(self, exc):
    """Errors from get_gluster_version propagate unchanged out of do_setup."""
    def raise_exception(*args, **kwargs):
        raise exc

    self._layout.configuration.glusterfs_servers = [self.glusterfs_server1]
    self.mock_object(self.gmgr1, 'get_gluster_version',
                     mock.Mock(side_effect=raise_exception))
    self.mock_object(self._layout, '_glustermanager',
                     mock.Mock(return_value=self.gmgr1))

    self.assertRaises(exc, self._layout.do_setup, self._context)

    self.gmgr1.get_gluster_version.assert_called_once_with()
def test_do_setup_glusterfs_no_volumes_provided_by_backend(self):
    """do_setup fails when the backend exposes no pattern-matching volumes."""
    self._layout.configuration.glusterfs_servers = [self.glusterfs_server1]
    self.mock_object(self.gmgr1, 'get_gluster_version',
                     mock.Mock(return_value=('3', '6')))
    self.mock_object(self._layout, '_glustermanager',
                     mock.Mock(return_value=self.gmgr1))
    self.mock_object(self._layout, '_fetch_gluster_volumes',
                     mock.Mock(return_value={}))

    self.assertRaises(exception.GlusterfsException,
                      self._layout.do_setup, self._context)

    self._layout._fetch_gluster_volumes.assert_called_once_with()
def test_share_manager(self):
    """_share_manager resolves a share's export location to a manager."""
    self.mock_object(self._layout, '_glustermanager',
                     mock.Mock(return_value=self.gmgr1))

    manager = self._layout._share_manager(self.share1)

    self.assertEqual(self.gmgr1, manager)
    self._layout._glustermanager.assert_called_once_with(
        self.share1['export_location'])
def test_ensure_share(self):
    """ensure_share registers the share's volume among the used volumes."""
    self._layout.ensure_share(self._context, self.share1)

    self.assertIn(self.share1['export_location'],
                  self._layout.gluster_used_vols)
@ddt.data({"voldict": {"host:/share2G": {"size": 2}}, "used_vols": set(),
|
||||
"size": 1, "expected": "host:/share2G"},
|
||||
{"voldict": {"host:/share2G": {"size": 2}}, "used_vols": set(),
|
||||
"size": 2, "expected": "host:/share2G"},
|
||||
{"voldict": {"host:/share2G": {"size": 2}}, "used_vols": set(),
|
||||
"size": None, "expected": "host:/share2G"},
|
||||
{"voldict": {"host:/share2G": {"size": 2},
|
||||
"host:/share": {"size": None}},
|
||||
"used_vols": set(["host:/share2G"]), "size": 1,
|
||||
"expected": "host:/share"},
|
||||
{"voldict": {"host:/share2G": {"size": 2},
|
||||
"host:/share": {"size": None}},
|
||||
"used_vols": set(["host:/share2G"]), "size": 2,
|
||||
"expected": "host:/share"},
|
||||
{"voldict": {"host:/share2G": {"size": 2},
|
||||
"host:/share": {"size": None}},
|
||||
"used_vols": set(["host:/share2G"]), "size": 3,
|
||||
"expected": "host:/share"},
|
||||
{"voldict": {"host:/share2G": {"size": 2},
|
||||
"host:/share": {"size": None}},
|
||||
"used_vols": set(["host:/share2G"]), "size": None,
|
||||
"expected": "host:/share"},
|
||||
{"voldict": {"host:/share": {}}, "used_vols": set(), "size": 1,
|
||||
"expected": "host:/share"},
|
||||
{"voldict": {"host:/share": {}}, "used_vols": set(),
|
||||
"size": None, "expected": "host:/share"})
|
||||
@ddt.unpack
|
||||
def test_pop_gluster_vol(self, voldict, used_vols, size, expected):
|
||||
gmgr = common.GlusterManager
|
||||
gmgr1 = gmgr(expected, self._execute, None, None)
|
||||
self._layout._fetch_gluster_volumes = mock.Mock(return_value=voldict)
|
||||
self._layout.gluster_used_vols = used_vols
|
||||
self._layout._glustermanager = mock.Mock(return_value=gmgr1)
|
||||
self._layout.volume_pattern_keys = list(voldict.values())[0].keys()
|
||||
|
||||
result = self._layout._pop_gluster_vol(size=size)
|
||||
|
||||
self.assertEqual(expected, result)
|
||||
self.assertIn(result, used_vols)
|
||||
self._layout._fetch_gluster_volumes.assert_called_once_with()
|
||||
self.assertEqual(
|
||||
[mock.call(result), mock.call(result)],
|
||||
self._layout._glustermanager.call_args_list
|
||||
)
|
||||
self.fake_driver._setup_via_manager.assert_called_once_with(gmgr1)
|
||||
|
||||
@ddt.data({"voldict": {"share2G": {"size": 2}},
|
||||
"used_vols": set(), "size": 3},
|
||||
{"voldict": {"share2G": {"size": 2}},
|
||||
"used_vols": set(["share2G"]), "size": None})
|
||||
@ddt.unpack
|
||||
def test_pop_gluster_vol_excp(self, voldict, used_vols, size):
|
||||
self._layout._fetch_gluster_volumes = mock.Mock(return_value=voldict)
|
||||
self._layout.gluster_used_vols = used_vols
|
||||
self._layout.volume_pattern_keys = list(voldict.values())[0].keys()
|
||||
|
||||
self.assertRaises(exception.GlusterfsException,
|
||||
self._layout._pop_gluster_vol, size=size)
|
||||
|
||||
self._layout._fetch_gluster_volumes.assert_called_once_with()
|
||||
self.assertFalse(
|
||||
self.fake_driver._setup_via_manager.called)
|
||||
|
||||
def test_push_gluster_vol(self):
    """_push_gluster_vol removes the given volume from the used set."""
    self._layout.gluster_used_vols = set([
        self.glusterfs_target1, self.glusterfs_target2])

    self._layout._push_gluster_vol(self.glusterfs_target2)

    remaining = self._layout.gluster_used_vols
    self.assertEqual(1, len(remaining))
    self.assertFalse(self.glusterfs_target2 in remaining)
def test_push_gluster_vol_excp(self):
    """Pushing a volume that was never in use raises GlusterfsException."""
    self._layout.gluster_used_vols = set([self.glusterfs_target1])
    self._layout.gluster_unused_vols_dict = {}

    self.assertRaises(exception.GlusterfsException,
                      self._layout._push_gluster_vol,
                      self.glusterfs_target2)
@ddt.data({'vers_minor': '6',
           'cmd': ['find', '/tmp/tmpKGHKJ', '-mindepth', '1',
                   '-delete']},
          {'vers_minor': '7',
           'cmd': ['find', '/tmp/tmpKGHKJ', '-mindepth', '1', '!',
                   '-path', '/tmp/tmpKGHKJ/.trashcan', '!', '-path',
                   '/tmp/tmpKGHKJ/.trashcan/internal_op', '-delete']})
@ddt.unpack
def test_wipe_gluster_vol(self, vers_minor, cmd):
    """_wipe_gluster_vol mounts the volume, wipes it and cleans up.

    From GlusterFS 3.7 on the wipe command must spare the .trashcan
    directory, hence the version-dependent 'find' invocation.
    """
    tmpdir = '/tmp/tmpKGHKJ'
    gmgr = common.GlusterManager
    gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
    self._layout.glusterfs_versions = {
        self.glusterfs_server1: ('3', vers_minor)}

    self.mock_object(tempfile, 'mkdtemp',
                     mock.Mock(return_value=tmpdir))
    self.mock_object(self.fake_driver, '_execute', mock.Mock())
    self.mock_object(common, '_mount_gluster_vol', mock.Mock())
    self.mock_object(common, '_umount_gluster_vol', mock.Mock())
    self.mock_object(shutil, 'rmtree', mock.Mock())

    self._layout._wipe_gluster_vol(gmgr1)

    tempfile.mkdtemp.assert_called_once_with()
    common._mount_gluster_vol.assert_called_once_with(
        self.fake_driver._execute, gmgr1.export,
        tmpdir)
    kwargs = {'run_as_root': True}
    self.fake_driver._execute.assert_called_once_with(
        *cmd, **kwargs)
    common._umount_gluster_vol.assert_called_once_with(
        self.fake_driver._execute, tmpdir)
    # The temp mountpoint is removed even if leftovers remain.
    kwargs = {'ignore_errors': True}
    shutil.rmtree.assert_called_once_with(tmpdir,
                                          **kwargs)
def test_wipe_gluster_vol_mount_fail(self):
    """A mount failure aborts the wipe but still removes the temp dir."""
    tmpdir = '/tmp/tmpKGHKJ'
    gmgr = common.GlusterManager
    gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
    self._layout.glusterfs_versions = {
        self.glusterfs_server1: ('3', '6')}
    self.mock_object(tempfile, 'mkdtemp',
                     mock.Mock(return_value=tmpdir))
    self.mock_object(self.fake_driver, '_execute', mock.Mock())
    self.mock_object(common, '_mount_gluster_vol',
                     mock.Mock(side_effect=exception.GlusterfsException))
    self.mock_object(common, '_umount_gluster_vol', mock.Mock())
    self.mock_object(shutil, 'rmtree', mock.Mock())

    self.assertRaises(exception.GlusterfsException,
                      self._layout._wipe_gluster_vol,
                      gmgr1)

    tempfile.mkdtemp.assert_called_once_with()
    common._mount_gluster_vol.assert_called_once_with(
        self.fake_driver._execute, gmgr1.export,
        tmpdir)
    # Nothing was mounted, so neither wipe nor umount may run.
    self.assertFalse(self.fake_driver._execute.called)
    self.assertFalse(common._umount_gluster_vol.called)
    kwargs = {'ignore_errors': True}
    shutil.rmtree.assert_called_once_with(tmpdir,
                                          **kwargs)
def test_wipe_gluster_vol_error_wiping_gluster_vol(self):
    """A failing wipe command is re-raised; umount and cleanup still run."""
    tmpdir = '/tmp/tmpKGHKJ'
    gmgr = common.GlusterManager
    gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
    self._layout.glusterfs_versions = {
        self.glusterfs_server1: ('3', '6')}
    cmd = ['find', '/tmp/tmpKGHKJ', '-mindepth', '1', '-delete']
    self.mock_object(tempfile, 'mkdtemp',
                     mock.Mock(return_value=tmpdir))
    self.mock_object(
        self.fake_driver, '_execute',
        mock.Mock(side_effect=exception.ProcessExecutionError))
    self.mock_object(common, '_mount_gluster_vol', mock.Mock())
    self.mock_object(common, '_umount_gluster_vol', mock.Mock())
    self.mock_object(shutil, 'rmtree', mock.Mock())

    self.assertRaises(exception.GlusterfsException,
                      self._layout._wipe_gluster_vol,
                      gmgr1)

    tempfile.mkdtemp.assert_called_once_with()
    common._mount_gluster_vol.assert_called_once_with(
        self.fake_driver._execute, gmgr1.export,
        tmpdir)
    kwargs = {'run_as_root': True}
    self.fake_driver._execute.assert_called_once_with(
        *cmd, **kwargs)
    common._umount_gluster_vol.assert_called_once_with(
        self.fake_driver._execute, tmpdir)
    kwargs = {'ignore_errors': True}
    shutil.rmtree.assert_called_once_with(tmpdir,
                                          **kwargs)
def test_create_share(self):
    """create_share pops a free volume and returns it as export location."""
    share = new_share()
    self._layout._pop_gluster_vol = mock.Mock(
        return_value=self.glusterfs_target1)

    export_location = self._layout.create_share(self._context, share)

    self.assertEqual(export_location, self.glusterfs_target1)
    self._layout._pop_gluster_vol.assert_called_once_with(share['size'])
def test_create_share_error(self):
    """create_share propagates a failure to find a free volume."""
    share = new_share()
    self._layout._pop_gluster_vol = mock.Mock(
        side_effect=exception.GlusterfsException)

    self.assertRaises(exception.GlusterfsException,
                      self._layout.create_share, self._context, share)

    self._layout._pop_gluster_vol.assert_called_once_with(
        share['size'])
def test_delete_share(self):
    """delete_share wipes the backing volume and returns it to the pool."""
    self._layout._push_gluster_vol = mock.Mock()
    self._layout._wipe_gluster_vol = mock.Mock()
    gmgr = common.GlusterManager
    gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
    self.mock_object(self._layout, '_glustermanager',
                     mock.Mock(return_value=gmgr1))
    self._layout.gluster_used_vols = set([self.glusterfs_target1])

    self._layout.delete_share(self._context, self.share1)

    self._layout._wipe_gluster_vol.assert_called_once_with(gmgr1)
    self._layout._push_gluster_vol.assert_called_once_with(
        self.glusterfs_target1)
def test_delete_share_error(self):
    """If wiping fails, delete_share raises and the volume stays in use."""
    self._layout._wipe_gluster_vol = mock.Mock()
    self._layout._wipe_gluster_vol.side_effect = (
        exception.GlusterfsException)
    self._layout._push_gluster_vol = mock.Mock()
    gmgr = common.GlusterManager
    gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
    self.mock_object(self._layout, '_glustermanager',
                     mock.Mock(return_value=gmgr1))
    self._layout.gluster_used_vols = set([self.glusterfs_target1])

    self.assertRaises(exception.GlusterfsException,
                      self._layout.delete_share, self._context,
                      self.share1)

    self._layout._wipe_gluster_vol.assert_called_once_with(gmgr1)
    # The volume must not be recycled after a failed wipe.
    self.assertFalse(self._layout._push_gluster_vol.called)
def test_create_snapshot(self):
    """create_snapshot issues 'gluster snapshot create' for the volume."""
    self._layout.gluster_nosnap_vols_dict = {}
    self._layout.glusterfs_versions = {self.glusterfs_server1: ('3', '6')}
    gmgr = common.GlusterManager
    gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
    self._layout.gluster_used_vols = set([self.glusterfs_target1])
    self.mock_object(gmgr1, 'gluster_call',
                     mock.Mock(
                         side_effect=(glusterXMLOut(ret=0, errno=0),)))
    self.mock_object(self._layout, '_glustermanager',
                     mock.Mock(return_value=gmgr1))

    snapshot = {
        'id': 'fake_snap_id',
        'share_id': self.share1['id'],
        'share': self.share1
    }
    ret = self._layout.create_snapshot(self._context, snapshot)

    self.assertIsNone(ret)
    # The backend snapshot name is 'manila-' + snapshot id.
    args = ('--xml', 'snapshot', 'create', 'manila-fake_snap_id',
            gmgr1.volume)
    gmgr1.gluster_call.assert_called_once_with(*args)
@ddt.data({'side_effect': (glusterXMLOut(ret=-1, errno=2),),
           '_exception': exception.GlusterfsException},
          {'side_effect': exception.ProcessExecutionError,
           '_exception': exception.GlusterfsException},
          {'side_effect': RuntimeError, '_exception': RuntimeError},
          {'side_effect': (('', ''),),
           '_exception': exception.GlusterfsException})
@ddt.unpack
def test_create_snapshot_error(self, side_effect, _exception):
    """Backend snapshot-create failures map to the expected exceptions.

    Gluster errors and unparsable output become GlusterfsException;
    unrelated RuntimeErrors propagate as-is.
    """
    self._layout.gluster_nosnap_vols_dict = {}
    self._layout.glusterfs_versions = {self.glusterfs_server1: ('3', '6')}
    gmgr = common.GlusterManager
    gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
    self._layout.gluster_used_vols = set([self.glusterfs_target1])
    self.mock_object(gmgr1, 'gluster_call',
                     mock.Mock(side_effect=side_effect))
    self.mock_object(self._layout, '_glustermanager',
                     mock.Mock(return_value=gmgr1))

    snapshot = {
        'id': 'fake_snap_id',
        'share_id': self.share1['id'],
        'share': self.share1
    }
    self.assertRaises(_exception, self._layout.create_snapshot,
                      self._context, snapshot)

    args = ('--xml', 'snapshot', 'create', 'manila-fake_snap_id',
            gmgr1.volume)
    gmgr1.gluster_call.assert_called_once_with(*args)
@ddt.data({"vers_minor": '6', "exctype": exception.GlusterfsException},
|
||||
{"vers_minor": '7',
|
||||
"exctype": exception.ShareSnapshotNotSupported})
|
||||
@ddt.unpack
|
||||
def test_create_snapshot_no_snap(self, vers_minor, exctype):
|
||||
self._layout.gluster_nosnap_vols_dict = {}
|
||||
self._layout.glusterfs_versions = {
|
||||
self.glusterfs_server1: ('3', vers_minor)}
|
||||
gmgr = common.GlusterManager
|
||||
gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None)
|
||||
self._layout.gluster_used_vols = set([self.glusterfs_target1])
|
||||
self.mock_object(gmgr1, 'gluster_call',
|
||||
mock.Mock(
|
||||
side_effect=(glusterXMLOut(ret=-1, errno=0),)))
|
||||
self.mock_object(self._layout, '_glustermanager',
|
||||
mock.Mock(return_value=gmgr1))
|
||||
|
||||
snapshot = {
|
||||
'id': 'fake_snap_id',
|
||||
'share_id': self.share1['id'],
|
||||
'share': self.share1
|
||||
}
|
||||
self.assertRaises(exctype, self._layout.create_snapshot, self._context,
|
||||
snapshot)
|
||||
|
||||
args = ('--xml', 'snapshot', 'create', 'manila-fake_snap_id',
|
||||
gmgr1.volume)
|
||||
gmgr1.gluster_call.assert_called_once_with(*args)
|
||||
|
||||
@ddt.data({"vers_minor": '6', "exctype": exception.GlusterfsException},
|
||||
{"vers_minor": '7',
|
||||
"exctype": exception.ShareSnapshotNotSupported})
|
||||
@ddt.unpack
|
||||
def test_create_snapshot_no_snap_cached(self, vers_minor, exctype):
|
||||
self._layout.gluster_nosnap_vols_dict = {
|
||||
self.share1['export_location']: 'fake error'}
|
||||
self._layout.glusterfs_versions = {
|
||||
self.glusterfs_server1: ('3', vers_minor)}
|
||||
self._layout.gluster_used_vols = set([self.glusterfs_target1])
|
||||
|
||||
snapshot = {
|
||||
'id': 'fake_snap_id',
|
||||
'share_id': self.share1['id'],
|
||||
'share': self.share1
|
||||
}
|
||||
self.assertRaises(exctype, self._layout.create_snapshot, self._context,
|
||||
snapshot)
|
||||
|
||||
def test_find_actual_backend_snapshot_name(self):
    """The backend snapshot list is filtered down to the single match."""
    gmgr = common.GlusterManager
    gmgr1 = gmgr(self.share1['export_location'], self._execute, None, None)
    self.mock_object(gmgr1, 'gluster_call',
                     mock.Mock(return_value=('fake_snap_id_xyz', '')))

    snapshot = {
        'id': 'fake_snap_id',
        'share_id': self.share1['id'],
        'share': self.share1
    }
    ret = self._layout._find_actual_backend_snapshot_name(gmgr1, snapshot)

    args = ('snapshot', 'list', gmgr1.volume, '--mode=script')
    gmgr1.gluster_call.assert_called_once_with(*args)
    self.assertEqual('fake_snap_id_xyz', ret)
@ddt.data(exception.ProcessExecutionError, RuntimeError)
def test_find_actual_backend_snapshot_name_gluster_error(self, _exception):
    """Backend errors are wrapped; unrelated errors pass through."""
    gmgr = common.GlusterManager
    gmgr1 = gmgr(self.share1['export_location'], self._execute, None, None)
    self.mock_object(gmgr1, 'gluster_call',
                     mock.Mock(side_effect=_exception))

    # ProcessExecutionError is translated to GlusterfsException; any
    # other exception type is expected unchanged.
    self.assertRaises({exception.ProcessExecutionError:
                       exception.GlusterfsException}.get(_exception,
                                                         _exception),
                      self._layout._find_actual_backend_snapshot_name,
                      gmgr1, mock.Mock())

    args = ('snapshot', 'list', gmgr1.volume, '--mode=script')
    gmgr1.gluster_call.assert_called_once_with(*args)
@ddt.data('this is too bad', 'fake_snap_id_xyx\nfake_snap_id_pqr')
def test_find_actual_backend_snapshot_name_bad_snap_list(self, snaplist):
    """Zero or multiple matching snapshots is a GlusterfsException."""
    gmgr = common.GlusterManager
    gmgr1 = gmgr(self.share1['export_location'], self._execute, None, None)
    self.mock_object(gmgr1, 'gluster_call',
                     mock.Mock(return_value=(snaplist, '')))

    snapshot = {
        'id': 'fake_snap_id',
        'share_id': self.share1['id'],
        'share': self.share1
    }
    self.assertRaises(exception.GlusterfsException,
                      self._layout._find_actual_backend_snapshot_name,
                      gmgr1, snapshot)

    args = ('snapshot', 'list', gmgr1.volume, '--mode=script')
    gmgr1.gluster_call.assert_called_once_with(*args)
@ddt.data({'glusterfs_target': 'root@host1:/gv1',
           'glusterfs_server': 'root@host1'},
          {'glusterfs_target': 'host1:/gv1',
           'glusterfs_server': 'host1'})
@ddt.unpack
def test_create_share_from_snapshot(self, glusterfs_target,
                                    glusterfs_server):
    """Creating a share from a snapshot clones it into a new volume.

    The snapshot is activated and cloned on the source volume's server,
    then the clone is set up through the driver and registered as used.
    Covered for both user-qualified and plain host addresses.
    """
    share = new_share()
    snapshot = {
        'id': 'fake_snap_id',
        'share': new_share(export_location=glusterfs_target)
    }
    # Clone volume name is 'manila-' + new share id.
    volume = ''.join(['manila-', share['id']])
    new_export_location = ':/'.join([glusterfs_server, volume])
    gmgr = common.GlusterManager
    old_gmgr = gmgr(glusterfs_target, self._execute, None, None)
    new_gmgr = gmgr(new_export_location, self._execute, None, None)
    self._layout.gluster_used_vols = set([glusterfs_target])
    # Snapshot cloning needs GlusterFS >= 3.7.
    self._layout.glusterfs_versions = {glusterfs_server: ('3', '7')}
    self.mock_object(old_gmgr, 'gluster_call',
                     mock.Mock(side_effect=[('', ''), ('', '')]))
    self.mock_object(new_gmgr, 'gluster_call',
                     mock.Mock(side_effect=[('', ''), ('', '')]))
    self.mock_object(new_gmgr, 'get_gluster_vol_option',
                     mock.Mock())
    new_gmgr.get_gluster_vol_option.return_value = (
        'glusterfs-server-1,client')
    self.mock_object(self._layout, '_find_actual_backend_snapshot_name',
                     mock.Mock(return_value='fake_snap_id_xyz'))
    self.mock_object(self._layout, '_share_manager',
                     mock.Mock(return_value=old_gmgr))
    self.mock_object(self._layout, '_glustermanager',
                     mock.Mock(return_value=new_gmgr))

    ret = self._layout.create_share_from_snapshot(
        self._context, share, snapshot, None)

    (self._layout._find_actual_backend_snapshot_name.
     assert_called_once_with(old_gmgr, snapshot))
    args = (('snapshot', 'activate', 'fake_snap_id_xyz',
             'force', '--mode=script'),
            ('snapshot', 'clone', volume, 'fake_snap_id_xyz'))
    old_gmgr.gluster_call.assert_has_calls([mock.call(*a) for a in args])
    self._layout._share_manager.assert_called_once_with(
        snapshot['share'])
    self._layout._glustermanager.assert_called_once_with(
        gmgr.parse(new_export_location))
    self._layout.driver._setup_via_manager.assert_called_once_with(
        new_gmgr, old_gmgr)
    self.assertIn(
        new_export_location,
        self._layout.gluster_used_vols)
    self.assertEqual(new_export_location, ret)
def test_create_share_from_snapshot_error_old_gmr_gluster_calls(self):
    """A failing clone call on the source manager aborts the operation.

    The second gluster_call (snapshot clone) fails; no work may be done
    on the would-be new volume and nothing is registered.
    """
    glusterfs_target = 'root@host1:/gv1'
    glusterfs_server = 'root@host1'
    share = new_share()
    volume = ''.join(['manila-', share['id']])
    new_export_location = ':/'.join([glusterfs_server, volume])
    gmgr = common.GlusterManager
    old_gmgr = gmgr(glusterfs_target, self._execute, None, None)
    new_gmgr = gmgr(new_export_location, self._execute, None, None)
    self._layout.gluster_used_vols_dict = {glusterfs_target: old_gmgr}
    self._layout.glusterfs_versions = {glusterfs_server: ('3', '7')}
    self.mock_object(
        old_gmgr, 'gluster_call',
        mock.Mock(side_effect=[('', ''), exception.ProcessExecutionError]))
    self.mock_object(new_gmgr, 'gluster_call',
                     mock.Mock(side_effect=[('', ''), ('', '')]))
    self.mock_object(new_gmgr, 'get_gluster_vol_option',
                     mock.Mock())
    new_gmgr.get_gluster_vol_option.return_value = (
        'glusterfs-server-1,client')
    self.mock_object(self._layout, '_find_actual_backend_snapshot_name',
                     mock.Mock(return_value='fake_snap_id_xyz'))
    self.mock_object(self._layout, '_share_manager',
                     mock.Mock(return_value=old_gmgr))
    self.mock_object(self._layout, '_glustermanager',
                     mock.Mock(return_value=new_gmgr))

    snapshot = {
        'id': 'fake_snap_id',
        'share': new_share(export_location=glusterfs_target)
    }
    self.assertRaises(exception.GlusterfsException,
                      self._layout.create_share_from_snapshot,
                      self._context, share, snapshot)

    (self._layout._find_actual_backend_snapshot_name.
     assert_called_once_with(old_gmgr, snapshot))
    args = (('snapshot', 'activate', 'fake_snap_id_xyz',
             'force', '--mode=script'),
            ('snapshot', 'clone', volume, 'fake_snap_id_xyz'))
    old_gmgr.gluster_call.assert_has_calls([mock.call(*a) for a in args])
    self._layout._share_manager.assert_called_once_with(
        snapshot['share'])
    self.assertFalse(new_gmgr.get_gluster_vol_option.called)
    self.assertFalse(new_gmgr.gluster_call.called)
    self.assertNotIn(new_export_location,
                     self._layout.glusterfs_versions.keys())
def test_create_share_from_snapshot_error_unsupported_gluster_version(
        self):
    """Cloning from snapshot on GlusterFS < 3.7 fails before any backend call."""
    glusterfs_target = 'root@host1:/gv1'
    glusterfs_server = 'root@host1'
    share = new_share()
    volume = ''.join(['manila-', share['id']])
    new_export_location = ':/'.join([glusterfs_server, volume])
    gmgr = common.GlusterManager
    old_gmgr = gmgr(glusterfs_target, self._execute, None, None)
    new_gmgr = gmgr(new_export_location, self._execute, None, None)
    self._layout.gluster_used_vols_dict = {glusterfs_target: old_gmgr}
    # 3.6 does not support 'snapshot clone'.
    self._layout.glusterfs_versions = {glusterfs_server: ('3', '6')}
    self.mock_object(
        old_gmgr, 'gluster_call',
        mock.Mock(side_effect=[('', ''), ('', '')]))
    self.mock_object(
        new_gmgr, 'gluster_call',
        mock.Mock(side_effect=[('', ''), exception.ProcessExecutionError]))
    self.mock_object(new_gmgr, 'get_gluster_vol_option',
                     mock.Mock())
    new_gmgr.get_gluster_vol_option.return_value = (
        'glusterfs-server-1,client')
    self.mock_object(self._layout, '_find_actual_backend_snapshot_name',
                     mock.Mock(return_value='fake_snap_id_xyz'))
    self.mock_object(self._layout, '_share_manager',
                     mock.Mock(return_value=old_gmgr))
    self.mock_object(self._layout, '_glustermanager',
                     mock.Mock(return_value=new_gmgr))

    snapshot = {
        'id': 'fake_snap_id',
        'share': new_share(export_location=glusterfs_target)
    }
    self.assertRaises(exception.GlusterfsException,
                      self._layout.create_share_from_snapshot,
                      self._context, share, snapshot)

    # The version check fires first, so no snapshot/clone work happens.
    self.assertFalse(
        self._layout._find_actual_backend_snapshot_name.called)
    self.assertFalse(old_gmgr.gluster_call.called)
    self._layout._share_manager.assert_called_once_with(
        snapshot['share'])
    self.assertFalse(self._layout._glustermanager.called)
    self.assertFalse(new_gmgr.get_gluster_vol_option.called)
    self.assertFalse(new_gmgr.gluster_call.called)
    self.assertNotIn(new_export_location,
                     self._layout.glusterfs_versions.keys())
def test_delete_snapshot(self):
    """delete_snapshot resolves the backend name and deletes the snapshot."""
    self._layout.gluster_nosnap_vols_dict = {}
    gmgr = common.GlusterManager
    gmgr1 = gmgr(self.share1['export_location'], self._execute, None, None)
    self._layout.gluster_used_vols = set([self.glusterfs_target1])
    self.mock_object(self._layout, '_find_actual_backend_snapshot_name',
                     mock.Mock(return_value='fake_snap_id_xyz'))
    self.mock_object(
        gmgr1, 'gluster_call',
        mock.Mock(return_value=glusterXMLOut(ret=0, errno=0)))
    self.mock_object(self._layout, '_glustermanager',
                     mock.Mock(return_value=gmgr1))
    snapshot = {
        'id': 'fake_snap_id',
        'share_id': self.share1['id'],
        'share': self.share1
    }
    ret = self._layout.delete_snapshot(self._context, snapshot)

    self.assertIsNone(ret)
    args = ('--xml', 'snapshot', 'delete', 'fake_snap_id_xyz',
            '--mode=script')
    gmgr1.gluster_call.assert_called_once_with(*args)
    (self._layout._find_actual_backend_snapshot_name.
     assert_called_once_with(gmgr1, snapshot))
@ddt.data({'side_effect': (glusterXMLOut(ret=-1, errno=2),),
           '_exception': exception.GlusterfsException},
          {'side_effect': exception.ProcessExecutionError,
           '_exception': exception.GlusterfsException},
          {'side_effect': RuntimeError, '_exception': RuntimeError},
          {'side_effect': (('', ''),),
           '_exception': exception.GlusterfsException})
@ddt.unpack
def test_delete_snapshot_error(self, side_effect, _exception):
    """Backend snapshot-delete failures map to the expected exceptions."""
    self._layout.gluster_nosnap_vols_dict = {}
    gmgr = common.GlusterManager
    gmgr1 = gmgr(self.share1['export_location'], self._execute, None, None)
    self._layout.gluster_used_vols = set([self.glusterfs_target1])
    self.mock_object(self._layout, '_find_actual_backend_snapshot_name',
                     mock.Mock(return_value='fake_snap_id_xyz'))
    args = ('--xml', 'snapshot', 'delete', 'fake_snap_id_xyz',
            '--mode=script')
    self.mock_object(
        gmgr1, 'gluster_call',
        mock.Mock(side_effect=side_effect))
    self.mock_object(self._layout, '_glustermanager',
                     mock.Mock(return_value=gmgr1))

    snapshot = {
        'id': 'fake_snap_id',
        'share_id': self.share1['id'],
        'share': self.share1
    }
    self.assertRaises(_exception, self._layout.delete_snapshot,
                      self._context, snapshot)

    gmgr1.gluster_call.assert_called_once_with(*args)
    (self._layout._find_actual_backend_snapshot_name.
     assert_called_once_with(gmgr1, snapshot))
@ddt.data(
    ('manage_existing', ('share', 'driver_options'), {}),
    ('unmanage', ('share',), {}),
    ('extend_share', ('share', 'new_size'), {'share_server': None}),
    ('shrink_share', ('share', 'new_size'), {'share_server': None}))
def test_nonimplemented_methods(self, method_invocation):
    """Layout operations outside the volume-mapped scope raise NotImplementedError."""
    name, call_args, call_kwargs = method_invocation
    bound_method = getattr(self._layout, name)
    self.assertRaises(NotImplementedError, bound_method,
                      *call_args, **call_kwargs)
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue