Merge "Adding GPFS Manila driver"
This commit is contained in:
commit
2023930fd7
@ -10,7 +10,7 @@ filters_path=/etc/manila/rootwrap.d,/usr/share/manila/rootwrap
|
||||
# explicitly specify a full path (separated by ',')
|
||||
# If not specified, defaults to system PATH environment variable.
|
||||
# These directories MUST all be only writeable by root !
|
||||
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
|
||||
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/lpp/mmfs/bin
|
||||
|
||||
# Enable logging to syslog
|
||||
# Default value is False
|
||||
|
@ -23,3 +23,40 @@ find_del: RegExpFilter, /bin/find, root, find, .*, -mindepth, 1, -delete
|
||||
|
||||
# manila/share/drivers/glusterfs_native.py: 'umount', '%s'
|
||||
umount: CommandFilter, umount, root
|
||||
|
||||
# GPFS commands
|
||||
# manila/share/drivers/ibm/gpfs.py: 'mmgetstate', '-Y'
|
||||
mmgetstate: CommandFilter, /usr/lpp/mmfs/bin/mmgetstate, root
|
||||
# manila/share/drivers/ibm/gpfs.py: 'mmlsattr', '%s'
|
||||
mmlsattr: CommandFilter, /usr/lpp/mmfs/bin/mmlsattr, root
|
||||
# manila/share/drivers/ibm/gpfs.py: 'mmcrfileset', '%s', '%s', '--inode-space', 'new'
|
||||
mmcrfileset: CommandFilter, /usr/lpp/mmfs/bin/mmcrfileset, root
|
||||
# manila/share/drivers/ibm/gpfs.py: 'mmlinkfileset', '%s', '%s', '-J', '%s'
|
||||
mmlinkfileset: CommandFilter, /usr/lpp/mmfs/bin/mmlinkfileset, root
|
||||
# manila/share/drivers/ibm/gpfs.py: 'mmsetquota', '-j', '%s', '-h', '%s', '%s'
|
||||
mmsetquota: CommandFilter, /usr/lpp/mmfs/bin/mmsetquota, root
|
||||
# manila/share/drivers/ibm/gpfs.py: 'mmunlinkfileset', '%s', '%s', '-f'
|
||||
mmunlinkfileset: CommandFilter, /usr/lpp/mmfs/bin/mmunlinkfileset, root
|
||||
# manila/share/drivers/ibm/gpfs.py: 'mmdelfileset', '%s', '%s', '-f'
|
||||
mmdelfileset: CommandFilter, /usr/lpp/mmfs/bin/mmdelfileset, root
|
||||
# manila/share/drivers/ibm/gpfs.py: 'mmcrsnapshot', '%s', '%s', '-j', '%s'
|
||||
mmcrsnapshot: CommandFilter, /usr/lpp/mmfs/bin/mmcrsnapshot, root
|
||||
# manila/share/drivers/ibm/gpfs.py: 'mmdelsnapshot', '%s', '%s', '-j', '%s'
|
||||
mmdelsnapshot: CommandFilter, /usr/lpp/mmfs/bin/mmdelsnapshot, root
|
||||
# manila/share/drivers/ibm/gpfs.py: 'rsync', '-rp', '%s', '%s'
|
||||
rsync: CommandFilter, /usr/bin/rsync, root
|
||||
# manila/share/drivers/ibm/gpfs.py: 'exportfs'
|
||||
exportfs: CommandFilter, /usr/sbin/exportfs, root
|
||||
# Ganesha commands
|
||||
# manila/share/drivers/ibm/ganesha_utils.py: 'mv', '%s', '%s'
|
||||
mv: CommandFilter, /bin/mv, root
|
||||
# manila/share/drivers/ibm/ganesha_utils.py: 'cp', '%s', '%s'
|
||||
cp: CommandFilter, /bin/cp, root
|
||||
# manila/share/drivers/ibm/ganesha_utils.py: 'scp', '-i', '%s', '%s', '%s'
|
||||
scp: CommandFilter, /usr/bin/scp, root
|
||||
# manila/share/drivers/ibm/ganesha_utils.py: 'ssh', '%s', '%s'
|
||||
ssh: CommandFilter, /usr/bin/ssh, root
|
||||
# manila/share/drivers/ibm/ganesha_utils.py: 'chmod', '%s', '%s'
|
||||
chmod: CommandFilter, /bin/chmod, root
|
||||
# manila/share/drivers/ibm/ganesha_utils.py: 'service', '%s', 'restart'
|
||||
service: CommandFilter, /sbin/service, root
|
||||
|
@ -456,3 +456,11 @@ class VserverUnavailable(NetAppException):
|
||||
|
||||
# Wraps an error returned by the EMC VNX XML API; the message body is
# the raw error text supplied via the 'err' kwarg.
class EMCVnxXMLAPIError(Invalid):
    message = _("%(err)s")
|
||||
|
||||
|
||||
# Base error for failures raised by the IBM GPFS share driver.
class GPFSException(ManilaException):
    message = _("GPFS exception occurred.")
|
||||
|
||||
|
||||
# Raised for failures in the Ganesha NFS layer of the GPFS driver.
class GPFSGaneshaException(ManilaException):
    message = _("GPFS Ganesha exception occurred.")
|
||||
|
@ -96,10 +96,12 @@ _global_opt_lists = [
|
||||
manila.service.service_opts,
|
||||
manila.share.api.share_api_opts,
|
||||
manila.share.driver.share_opts,
|
||||
manila.share.driver.ssh_opts,
|
||||
manila.share.drivers.emc.driver.EMC_NAS_OPTS,
|
||||
manila.share.drivers.generic.share_opts,
|
||||
manila.share.drivers.glusterfs.GlusterfsManilaShare_opts,
|
||||
manila.share.drivers.glusterfs_native.glusterfs_native_manila_share_opts,
|
||||
manila.share.drivers.ibm.gpfs.gpfs_share_opts,
|
||||
manila.share.drivers.netapp.cluster_mode.NETAPP_NAS_OPTS,
|
||||
manila.share.drivers.service_instance.server_opts,
|
||||
manila.share.manager.share_manager_opts,
|
||||
|
@ -43,8 +43,21 @@ share_opts = [
|
||||
help='The backend name for a given driver implementation.'),
|
||||
]
|
||||
|
||||
# SSH tunables shared by drivers that manage a backend over SSH;
# consumed by utils.SSHPool via ExecuteMixin below.
ssh_opts = [
    cfg.IntOpt('ssh_conn_timeout',
               default=60,
               help='Backend server SSH connection timeout.'),
    cfg.IntOpt('ssh_min_pool_conn',
               default=1,
               help='Minimum number of connections in the SSH pool.'),
    cfg.IntOpt('ssh_max_pool_conn',
               default=10,
               help='Maximum number of connections in the SSH pool.'),
]

CONF = cfg.CONF
CONF.register_opts(share_opts)
CONF.register_opts(ssh_opts)
|
||||
|
||||
|
||||
class ExecuteMixin(object):
|
||||
@ -55,6 +68,7 @@ class ExecuteMixin(object):
|
||||
self.configuration = kwargs.get('configuration', None)
|
||||
if self.configuration:
|
||||
self.configuration.append_config_values(share_opts)
|
||||
self.configuration.append_config_values(ssh_opts)
|
||||
self.set_execute(kwargs.pop('execute', utils.execute))
|
||||
|
||||
def set_execute(self, execute):
|
||||
|
0
manila/share/drivers/ibm/__init__.py
Normal file
0
manila/share/drivers/ibm/__init__.py
Normal file
341
manila/share/drivers/ibm/ganesha_utils.py
Normal file
341
manila/share/drivers/ibm/ganesha_utils.py
Normal file
@ -0,0 +1,341 @@
|
||||
# Copyright 2014 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Ganesha Admin Utilities
|
||||
|
||||
Ganesha NFS does not provide many tools for automating the process of creating
|
||||
and managing export definitions. This module provides utilities to help parse
|
||||
a specified ganesha config file and return a map containing the export
|
||||
definitions and attributes. A method republishing updated export definitions
|
||||
is also provided. And there are methods for requesting the ganesha server
|
||||
to reload the export definitions.
|
||||
|
||||
Consider moving this to common location for use by other manila drivers.
|
||||
"""
|
||||
|
||||
import copy
|
||||
import re
|
||||
import socket
|
||||
import time
|
||||
|
||||
import netaddr
|
||||
import six
|
||||
|
||||
from manila import exception
|
||||
from manila.i18n import _, _LI
|
||||
from manila.openstack.common import log as logging
|
||||
from manila import utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
# more simple pattern for matching a single avpair per line,
# skips lines starting with # comment char
# (raw string: '\s' etc. in a plain literal is an invalid escape sequence
# that newer Pythons warn about and will eventually reject)
AVPATTERN = re.compile(r'^\s*(?!#)\s*(?P<attr>\S+)\s*=\s*(?P<val>\S+)\s*;')
|
||||
|
||||
# NFS Ganesha v1.5, v2.0 format used here.
# TODO(nileshb): Upgrade it to NFS Ganesha 2.1 format.
# Defaults for a new EXPORT block; values are raw strings written verbatim
# into the ganesha config file (hence the embedded double quotes).
DEFAULT_EXPORT_ATTRS = {
    'export_id': 'undefined',
    'path': 'undefined',
    'fsal': 'undefined',
    'root_access': '"*"',
    'rw_access': '"*"',
    'pseudo': 'undefined',
    'anonymous_root_uid': '-2',
    'nfs_protocols': '"3,4"',
    'transport_protocols': '"UDP,TCP"',
    'sectype': '"sys"',
    'maxread': '65536',
    'maxwrite': '65536',
    'prefread': '65536',
    'prefwrite': '65536',
    'filesystem_id': '192.168',
    'tag': 'undefined',
}

# first export_id handed out when no exports exist yet (see get_next_id)
STARTING_EXPORT_ID = 100
|
||||
|
||||
|
||||
def valid_flags():
    """Return the attribute names recognized in an EXPORT block."""
    return DEFAULT_EXPORT_ATTRS.keys()
|
||||
|
||||
|
||||
def parse_ganesha_config(configpath):
    """Parse the specified ganesha configuration.

    Parse a configuration file and return a list of lines that were found
    before the first EXPORT block, and a dictionary of exports and their
    attributes.

    The input configuration file should be a valid ganesha config file and
    the export blocks should be the last items in the file.

    :param configpath: path to the ganesha export configuration file.
    :returns: pre_lines -- list of lines, before the exports clause begins
              exports -- dict of exports, indexed with the 'export_id';
              each value maps lower-cased attribute names to raw values,
              e.g. {'100': {'export_id': '100', 'fsal': '"GPFS"',
                            'path': '"/gpfs0/share-..."', ...}}
    :raises GPFSGaneshaException: if the number of EXPORT clauses does not
        match the number of export_id attributes found (malformed config).
    """
    export_count = 0
    exports = dict()
    pre_lines = []
    with open(configpath) as f:
        for l in f.readlines():
            line = l.strip()
            if export_count == 0 and line != 'EXPORT':
                pre_lines.append(line)
            else:
                if line == 'EXPORT':
                    export_count += 1
                    expattrs = dict()
                try:
                    match_obj = AVPATTERN.match(line)
                    attr = match_obj.group('attr').lower()
                    val = match_obj.group('val')
                    expattrs[attr] = val
                    if attr == 'export_id':
                        exports[val] = expattrs
                except AttributeError:
                    # line is not an "attr = value ;" pair -- skip it
                    pass

    if export_count != len(exports):
        # was msg = (_('...'), {...}) -- a tuple, never interpolated;
        # use % so the placeholders are actually filled in
        msg = (_('Invalid export config file %(configpath)s: '
                 '%(exports)s export clauses found, but '
                 '%(export_ids)s export_ids.') %
               {"configpath": configpath,
                "exports": str(export_count),
                "export_ids": str(len(exports))})
        LOG.error(msg)
        raise exception.GPFSGaneshaException(msg)
    return pre_lines, exports
|
||||
|
||||
|
||||
def _get_export_by_path(exports, path):
|
||||
for index, export in exports.items():
|
||||
if export and 'path' in export and export['path'].strip('"\'') == path:
|
||||
return export
|
||||
return None
|
||||
|
||||
|
||||
def get_export_by_path(exports, path):
    """Return the export that matches the specified path.

    :param exports: dict of export_id -> attribute dict.
    :param path: unquoted filesystem path to look up.
    :returns: the matching attribute dict, or None.
    """
    return _get_export_by_path(exports, path)
|
||||
|
||||
|
||||
def export_exists(exports, path):
    """Return true if an export exists with the specified path.

    :param exports: dict of export_id -> attribute dict.
    :param path: unquoted filesystem path to look up.
    """
    return _get_export_by_path(exports, path) is not None
|
||||
|
||||
|
||||
def get_next_id(exports):
    """Return an export id that is one larger than largest existing id."""
    try:
        next_id = 1 + max(int(key) for key in exports)
    except ValueError:
        # no exports yet (or a non-numeric id): start from the base id
        next_id = STARTING_EXPORT_ID

    LOG.debug("Export id = %d", next_id)
    return next_id
|
||||
|
||||
|
||||
def get_export_template():
    """Return a fresh shallow copy of the default export attributes."""
    return copy.copy(DEFAULT_EXPORT_ATTRS)
|
||||
|
||||
|
||||
def _convert_ipstring_to_ipn(ipstring):
    """Transform a single ip string into a list of IPNetwork objects.

    Accepts a glob pattern (e.g. '10.0.0.*') or anything netaddr.IPNetwork
    can parse (a single IP or a CIDR).

    :raises GPFSGaneshaException: if *ipstring* is not a valid glob, IP,
        or CIDR.
    """
    if netaddr.valid_glob(ipstring):
        ipns = netaddr.glob_to_cidrs(ipstring)
    else:
        try:
            ipns = [netaddr.IPNetwork(ipstring)]
        except netaddr.AddrFormatError:
            # was a (format, arg) tuple; use % so the message is
            # actually interpolated
            msg = _('Invalid IP access string %s.') % ipstring
            LOG.error(msg)
            raise exception.GPFSGaneshaException(msg)
    return ipns
|
||||
|
||||
|
||||
def format_access_list(access_string, deny_access=None):
    """Transform access string into a format ganesha understands.

    Expands every comma-separated token of *access_string* (globs and
    CIDRs included) into individual IPs, subtracts any IPs expanded from
    *deny_access*, and returns them sorted, comma-joined.
    """
    deny_ipaddrs = set()
    if deny_access:
        for deny_token in deny_access.split(','):
            for deny_ipn in _convert_ipstring_to_ipn(deny_token):
                deny_ipaddrs = deny_ipaddrs.union(
                    ip for ip in netaddr.iter_unique_ips(deny_ipn))

    # handle the case where there is an access string with a trailing comma
    allow_tokens = access_string.strip(',').split(',')
    ipaddrs = set()
    for ipstring in allow_tokens:
        for ipn in _convert_ipstring_to_ipn(ipstring):
            ipaddrs = ipaddrs.union(
                ip for ip in netaddr.iter_unique_ips(ipn))

    allowed = sorted(ipaddrs - deny_ipaddrs)
    return ','.join(str(ip) for ip in allowed)
|
||||
|
||||
|
||||
def _publish_local_config(configpath, pre_lines, exports):
    """Write the given export config to *configpath* on the local node.

    The current config is copied to a temp file, rewritten there (the
    pre-export lines followed by every EXPORT block), then moved back over
    the original path so a half-written file is never left in place.

    :raises GPFSGaneshaException: if any of the cp/chmod/mv steps fail.
    """
    tmp_path = '%s.tmp.%s' % (configpath, time.time())
    LOG.debug("tmp_path = %s", tmp_path)
    cpcmd = ['cp', configpath, tmp_path]
    try:
        utils.execute(*cpcmd, run_as_root=True)
    except exception.ProcessExecutionError as e:
        # was a (format, arg) tuple; use % so the message is interpolated
        msg = (_('Failed while publishing ganesha config locally. '
                 'Error: %s.') % six.text_type(e))
        LOG.error(msg)
        raise exception.GPFSGaneshaException(msg)

    # change permission of the tmp file, so that it can be edited
    # by a non-root user
    chmodcmd = ['chmod', 'o+w', tmp_path]
    try:
        utils.execute(*chmodcmd, run_as_root=True)
    except exception.ProcessExecutionError as e:
        msg = (_('Failed while publishing ganesha config locally. '
                 'Error: %s.') % six.text_type(e))
        LOG.error(msg)
        raise exception.GPFSGaneshaException(msg)

    with open(tmp_path, 'w+') as f:
        for l in pre_lines:
            f.write('%s\n' % l)
        for e in exports:
            f.write('EXPORT\n{\n')
            for attr in exports[e]:
                f.write('%s = %s ;\n' % (attr, exports[e][attr]))

            f.write('}\n')
    mvcmd = ['mv', tmp_path, configpath]
    try:
        utils.execute(*mvcmd, run_as_root=True)
    except exception.ProcessExecutionError as e:
        msg = (_('Failed while publishing ganesha config locally. '
                 'Error: %s.') % six.text_type(e))
        LOG.error(msg)
        raise exception.GPFSGaneshaException(msg)
    LOG.info(_LI('Ganesha config %s published locally.'), configpath)
|
||||
|
||||
|
||||
def _publish_remote_config(server, sshlogin, sshkey, configpath):
    """Copy the local ganesha config file to *server* via scp.

    :raises GPFSGaneshaException: if the scp fails.
    """
    dest = '%s@%s:%s' % (sshlogin, server, configpath)
    scpcmd = ['scp', '-i', sshkey, configpath, dest]
    try:
        utils.execute(*scpcmd, run_as_root=False)
    except exception.ProcessExecutionError as e:
        # was a (format, arg) tuple; use % so the message is interpolated
        msg = (_('Failed while publishing ganesha config on remote server. '
                 'Error: %s.') % six.text_type(e))
        LOG.error(msg)
        raise exception.GPFSGaneshaException(msg)
    LOG.info(_LI('Ganesha config %(path)s published to %(server)s.'),
             {'path': configpath,
              'server': server})
|
||||
|
||||
|
||||
def publish_ganesha_config(servers, sshlogin, sshkey, configpath,
                           pre_lines, exports):
    """Publish the specified configuration information.

    Save the existing configuration file and then publish a new
    ganesha configuration to the specified path. The pre-export
    lines are written first, followed by the collection of export
    definitions.
    """
    _publish_local_config(configpath, pre_lines, exports)

    # push the file to every configured server that is not this host
    localserver_iplist = socket.gethostbyname_ex(socket.gethostname())[2]
    for gsvr in servers:
        if gsvr not in localserver_iplist:
            _publish_remote_config(gsvr, sshlogin, sshkey, configpath)
|
||||
|
||||
|
||||
def reload_ganesha_config(servers, sshlogin, service='ganesha.nfsd'):
    """Request ganesha server reload updated config.

    Note: dynamic reload of ganesha config is not enabled in ganesha v2.0,
    so the ganesha service is restarted on every server to make sure the
    config changes are picked up.

    :raises GPFSGaneshaException: if restarting the service fails on any
        of the given servers.
    """
    # the local address list is loop-invariant -- compute it once
    localserver_iplist = socket.gethostbyname_ex(socket.gethostname())[2]
    for server in servers:
        # Until reload is fully implemented and if the reload returns a bad
        # status revert to service restart instead
        LOG.info(_LI('Restart service %(service)s on %(server)s to force a '
                     'config file reload'),
                 {'service': service, 'server': server})
        run_local = True

        reload_cmd = ['service', service, 'restart']
        if server not in localserver_iplist:
            remote_login = sshlogin + '@' + server
            reload_cmd = ['ssh', remote_login] + reload_cmd
            run_local = False
        try:
            utils.execute(*reload_cmd, run_as_root=run_local)
        except exception.ProcessExecutionError as e:
            # was a (format, dict) tuple; use % so it is interpolated
            msg = (_('Could not restart service %(service)s on '
                     '%(server)s: %(excmsg)s') %
                   {'service': service,
                    'server': server,
                    'excmsg': six.text_type(e)})
            LOG.error(msg)
            raise exception.GPFSGaneshaException(msg)
|
825
manila/share/drivers/ibm/gpfs.py
Normal file
825
manila/share/drivers/ibm/gpfs.py
Normal file
@ -0,0 +1,825 @@
|
||||
# Copyright 2014 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
GPFS Driver for shares.
|
||||
|
||||
Config Requirements:
|
||||
GPFS file system must have quotas enabled (mmchfs -Q yes).
|
||||
Notes:
|
||||
GPFS independent fileset is used for each share.
|
||||
|
||||
TODO(nileshb): add support for share server creation/deletion/handling.
|
||||
|
||||
Limitation:
|
||||
1. While using remote GPFS node, with Ganesha NFS, 'gpfs_ssh_private_key'
|
||||
for remote login to the GPFS node must be specified and there must be
|
||||
a passwordless authentication already setup between the Manila and the
|
||||
remote GPFS node.
|
||||
|
||||
"""
|
||||
import abc
|
||||
import copy
|
||||
import math
|
||||
import os
|
||||
import pipes
|
||||
import re
|
||||
import socket
|
||||
|
||||
from oslo.config import cfg
|
||||
from oslo.utils import excutils
|
||||
from oslo.utils import importutils
|
||||
from oslo.utils import units
|
||||
from oslo_concurrency import processutils
|
||||
import six
|
||||
|
||||
from manila import exception
|
||||
from manila.i18n import _, _LE, _LI
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.share import driver
|
||||
from manila.share.drivers.ibm import ganesha_utils
|
||||
from manila import utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# matches multiple comma separated avpairs on a line. values with an embedded
# comma must be wrapped in quotation marks
# (raw strings: '\s'/'\w' in a plain literal is an invalid escape sequence
# that newer Pythons warn about and will eventually reject)
AVPATTERN = re.compile(r'\s*(?P<attr>\w+)\s*=\s*(?P<val>'
                       r'(["][a-zA-Z0-9_, ]+["])|(\w+))\s*[,]?')
|
||||
|
||||
|
||||
# Driver-tunable options for the GPFS share backend; registered globally
# below and appended to the driver's own configuration in __init__.
gpfs_share_opts = [
    cfg.StrOpt('gpfs_share_export_ip',
               default=None,
               help='IP to be added to GPFS export string.'),
    cfg.StrOpt('gpfs_mount_point_base',
               default='$state_path/mnt',
               help='Base folder where exported shares are located.'),
    cfg.StrOpt('gpfs_nfs_server_type',
               default='KNFS',
               help=('NFS Server type. Valid choices are "KNFS" (kernel NFS) '
                     'or "GNFS" (Ganesha NFS).')),
    cfg.ListOpt('gpfs_nfs_server_list',
                default=None,
                help=('A list of the fully qualified NFS server names that '
                      'make up the OpenStack Manila configuration.')),
    cfg.IntOpt('gpfs_ssh_port',
               default=22,
               help='GPFS server SSH port.'),
    cfg.StrOpt('gpfs_ssh_login',
               default=None,
               help='GPFS server SSH login name.'),
    cfg.StrOpt('gpfs_ssh_password',
               default=None,
               secret=True,
               help='GPFS server SSH login password. '
                    'The password is not needed, if \'gpfs_ssh_private_key\' '
                    'is configured.'),
    cfg.StrOpt('gpfs_ssh_private_key',
               default=None,
               help='Path to GPFS server SSH private key for login.'),
    cfg.ListOpt('gpfs_share_helpers',
                default=[
                    'KNFS=manila.share.drivers.ibm.gpfs.KNFSHelper',
                    'GNFS=manila.share.drivers.ibm.gpfs.GNFSHelper',
                ],
                help='Specify list of share export helpers.'),
    cfg.StrOpt('knfs_export_options',
               default=('rw,sync,no_root_squash,insecure,no_wdelay,'
                        'no_subtree_check'),
               help=('Options to use when exporting a share using kernel '
                     'NFS server. Note that these defaults can be overridden '
                     'when a share is created by passing metadata with key '
                     'name export_options.')),
    cfg.StrOpt('gnfs_export_options',
               default=('maxread = 65536, prefread = 65536'),
               help=('Options to use when exporting a share using ganesha '
                     'NFS server. Note that these defaults can be overridden '
                     'when a share is created by passing metadata with key '
                     'name export_options. Also note the complete set of '
                     'default ganesha export options is specified in '
                     'ganesha_utils.')),
    cfg.StrOpt('ganesha_config_path',
               default='/etc/ganesha/ganesha_exports.conf',
               help=('Path to ganesha export config file. The config file '
                     'may also contain non-export configuration data but it '
                     'must be placed before the EXPORT clauses.')),
    cfg.StrOpt('ganesha_service_name',
               default='ganesha.nfsd',
               help=('Name of the ganesha nfs service.')),
]


CONF = cfg.CONF
CONF.register_opts(gpfs_share_opts)
|
||||
|
||||
|
||||
class GPFSShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
||||
"""GPFS Share Driver.
|
||||
|
||||
Executes commands relating to Shares.
|
||||
Supports creation of shares on a GPFS cluster.
|
||||
|
||||
API version history:
|
||||
|
||||
1.0 - Initial version.
|
||||
"""
|
||||
|
||||
    def __init__(self, db, *args, **kwargs):
        """Do initialization."""
        super(GPFSShareDriver, self).__init__(*args, **kwargs)
        self.db = db
        # protocol name (upper-cased) -> helper instance; populated by
        # _setup_helpers() during do_setup()
        self._helpers = {}
        self.configuration.append_config_values(gpfs_share_opts)
        self.backend_name = self.configuration.safe_get(
            'share_backend_name') or "IBM Storage System"
        # SSH connection pool; created lazily on first remote command
        self.sshpool = None
        self.ssh_connections = {}
        # bound to _gpfs_local_execute or _gpfs_remote_execute in
        # do_setup(), depending on where the GPFS node lives
        self._gpfs_execute = None
|
||||
|
||||
    def do_setup(self, context):
        """Any initialization the share driver does while starting."""
        super(GPFSShareDriver, self).do_setup(context)
        host = self.configuration.gpfs_share_export_ip
        # if the export IP is one of this host's addresses, GPFS commands
        # run locally; otherwise they are sent over SSH to the remote node
        localserver_iplist = socket.gethostbyname_ex(socket.gethostname())[2]
        if host in localserver_iplist:  # run locally
            self._gpfs_execute = self._gpfs_local_execute
        else:
            self._gpfs_execute = self._gpfs_remote_execute
        self._setup_helpers()
|
||||
|
||||
def _gpfs_local_execute(self, *cmd, **kwargs):
|
||||
if 'run_as_root' not in kwargs:
|
||||
kwargs.update({'run_as_root': True})
|
||||
|
||||
return utils.execute(*cmd, **kwargs)
|
||||
|
||||
    def _gpfs_remote_execute(self, *cmd, **kwargs):
        """Run a GPFS command on the configured remote node over SSH.

        Only the 'check_exit_code' keyword is honoured; any other kwargs
        are silently discarded.
        """
        host = self.configuration.gpfs_share_export_ip
        # NOTE(review): when the caller omits check_exit_code this passes
        # None, overriding _run_ssh's default of True -- confirm intended
        check_exit_code = kwargs.pop('check_exit_code', None)

        return self._run_ssh(host, cmd, check_exit_code)
|
||||
|
||||
def _run_ssh(self, host, cmd_list, check_exit_code=True):
|
||||
command = ' '.join(pipes.quote(cmd_arg) for cmd_arg in cmd_list)
|
||||
|
||||
if not self.sshpool:
|
||||
gpfs_ssh_login = self.configuration.gpfs_ssh_login
|
||||
password = self.configuration.gpfs_ssh_password
|
||||
privatekey = self.configuration.gpfs_ssh_private_key
|
||||
gpfs_ssh_port = self.configuration.gpfs_ssh_port
|
||||
ssh_conn_timeout = self.configuration.ssh_conn_timeout
|
||||
min_size = self.configuration.ssh_min_pool_conn
|
||||
max_size = self.configuration.ssh_max_pool_conn
|
||||
|
||||
self.sshpool = utils.SSHPool(host,
|
||||
gpfs_ssh_port,
|
||||
ssh_conn_timeout,
|
||||
gpfs_ssh_login,
|
||||
password=password,
|
||||
privatekey=privatekey,
|
||||
min_size=min_size,
|
||||
max_size=max_size)
|
||||
try:
|
||||
with self.sshpool.item() as ssh:
|
||||
return processutils.ssh_execute(
|
||||
ssh,
|
||||
command,
|
||||
check_exit_code=check_exit_code)
|
||||
|
||||
except Exception as e:
|
||||
with excutils.save_and_reraise_exception():
|
||||
msg = (_('Error running SSH command: %(cmd)s. '
|
||||
'Error: %(excmsg)s.'),
|
||||
{'cmd': command, 'excmsg': six.text_type(e)})
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
|
||||
def _check_gpfs_state(self):
|
||||
try:
|
||||
out, __ = self._gpfs_execute('mmgetstate', '-Y')
|
||||
except exception.ProcessExecutionError as e:
|
||||
msg = (_('Failed to check GPFS state. Error: %(excmsg)s.'),
|
||||
{'excmsg': six.text_type(e)})
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
lines = out.splitlines()
|
||||
try:
|
||||
state_token = lines[0].split(':').index('state')
|
||||
gpfs_state = lines[1].split(':')[state_token]
|
||||
except (IndexError, ValueError) as e:
|
||||
msg = (_('Failed to check GPFS state. Error: %(excmsg)s.'),
|
||||
{'excmsg': six.text_type(e)})
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
if gpfs_state != 'active':
|
||||
return False
|
||||
return True
|
||||
|
||||
def _is_dir(self, path):
|
||||
try:
|
||||
output, __ = self._gpfs_execute('stat', '--format=%F', path,
|
||||
run_as_root=False)
|
||||
except exception.ProcessExecutionError as e:
|
||||
msg = (_('%(path)s is not a directory. Error: %(excmsg)s'),
|
||||
{'path': path, 'excmsg': six.text_type(e)})
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
|
||||
return output.strip() == 'directory'
|
||||
|
||||
def _is_gpfs_path(self, directory):
|
||||
try:
|
||||
self._gpfs_execute('mmlsattr', directory)
|
||||
except exception.ProcessExecutionError as e:
|
||||
msg = (_('%(dir)s is not on GPFS filesystem. Error: %(excmsg)s.'),
|
||||
{'dir': directory, 'excmsg': six.text_type(e)})
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
|
||||
return True
|
||||
|
||||
def _setup_helpers(self):
|
||||
"""Initializes protocol-specific NAS drivers."""
|
||||
self._helpers = {}
|
||||
for helper_str in self.configuration.gpfs_share_helpers:
|
||||
share_proto, _, import_str = helper_str.partition('=')
|
||||
helper = importutils.import_class(import_str)
|
||||
self._helpers[share_proto.upper()] = helper(self._gpfs_execute,
|
||||
self.configuration)
|
||||
|
||||
def _local_path(self, sharename):
|
||||
"""Get local path for a share or share snapshot by name."""
|
||||
return os.path.join(self.configuration.gpfs_mount_point_base,
|
||||
sharename)
|
||||
|
||||
def _get_gpfs_device(self):
|
||||
fspath = self.configuration.gpfs_mount_point_base
|
||||
try:
|
||||
(out, _) = self._gpfs_execute('df', fspath)
|
||||
except exception.ProcessExecutionError as e:
|
||||
msg = (_('Failed to get GPFS device for %(fspath)s.'
|
||||
'Error: %(excmsg)s'),
|
||||
{'fspath': fspath, 'excmsg': six.text_type(e)})
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
|
||||
lines = out.splitlines()
|
||||
fs = lines[1].split()[0]
|
||||
return fs
|
||||
|
||||
def _create_share(self, shareobj):
|
||||
"""Create a linked fileset file in GPFS.
|
||||
|
||||
Note: GPFS file system must have quotas enabled
|
||||
(mmchfs -Q yes).
|
||||
"""
|
||||
sharename = shareobj['name']
|
||||
sizestr = '%sG' % shareobj['size']
|
||||
sharepath = self._local_path(sharename)
|
||||
fsdev = self._get_gpfs_device()
|
||||
|
||||
# create fileset for the share, link it to root path and set max size
|
||||
try:
|
||||
self._gpfs_execute('mmcrfileset', fsdev, sharename,
|
||||
'--inode-space', 'new')
|
||||
except exception.ProcessExecutionError as e:
|
||||
msg = (_('Failed to create fileset on %(fsdev)s for '
|
||||
'the share %(sharename)s. Error: %(excmsg)s.'),
|
||||
{'fsdev': fsdev, 'sharename': sharename,
|
||||
'excmsg': six.text_type(e)})
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
|
||||
try:
|
||||
self._gpfs_execute('mmlinkfileset', fsdev, sharename, '-J',
|
||||
sharepath)
|
||||
except exception.ProcessExecutionError as e:
|
||||
msg = (_('Failed to link fileset for the share %(sharename)s. '
|
||||
'Error: %(excmsg)s.'),
|
||||
{'sharename': sharename, 'excmsg': six.text_type(e)})
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
|
||||
try:
|
||||
self._gpfs_execute('mmsetquota', '-j', sharename, '-h',
|
||||
sizestr, fsdev)
|
||||
except exception.ProcessExecutionError as e:
|
||||
msg = (_('Failed to set quota for the share %(sharename)s. '
|
||||
'Error: %(excmsg)s.'),
|
||||
{'sharename': sharename, 'excmsg': six.text_type(e)})
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
|
||||
try:
|
||||
self._gpfs_execute('chmod', '777', sharepath)
|
||||
except exception.ProcessExecutionError as e:
|
||||
msg = (_('Failed to set permissions for share %(sharename)s. '
|
||||
'Error: %(excmsg).'),
|
||||
{'sharename': sharename, 'excmsg': six.text_type(e)})
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
|
||||
def _delete_share(self, shareobj):
|
||||
"""Remove container by removing GPFS fileset."""
|
||||
sharename = shareobj['name']
|
||||
fsdev = self._get_gpfs_device()
|
||||
|
||||
# unlink and delete the share's fileset
|
||||
try:
|
||||
self._gpfs_execute('mmunlinkfileset', fsdev, sharename, '-f')
|
||||
except exception.ProcessExecutionError as e:
|
||||
msg = (_('Failed unlink fileset for share %(sharename)s. '
|
||||
'Error: %(excmsg)s.'),
|
||||
{'sharename': sharename, 'excmsg': six.text_type(e)})
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
|
||||
try:
|
||||
self._gpfs_execute('mmdelfileset', fsdev, sharename, '-f')
|
||||
except exception.ProcessExecutionError as e:
|
||||
msg = (_('Failed delete fileset for share %(sharename)s. '
|
||||
'Error: %(excmsg)s.'),
|
||||
{'sharename': sharename, 'excmsg': six.text_type(e)})
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
|
||||
def _get_available_capacity(self, path):
|
||||
"""Calculate available space on path."""
|
||||
try:
|
||||
out, __ = self._gpfs_execute('df', '-P', '-B', '1', path)
|
||||
except exception.ProcessExecutionError as e:
|
||||
msg = (_('Failed to check available capacity for %(path)s.'
|
||||
'Error: %(excmsg)s.'),
|
||||
{'path': path, 'excmsg': six.text_type(e)})
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
|
||||
out = out.splitlines()[1]
|
||||
size = int(out.split()[1])
|
||||
available = int(out.split()[3])
|
||||
return available, size
|
||||
|
||||
def _create_share_snapshot(self, snapshot):
|
||||
"""Create a snapshot of the share."""
|
||||
sharename = snapshot['share_name']
|
||||
snapshotname = snapshot['name']
|
||||
fsdev = self._get_gpfs_device()
|
||||
LOG.debug("sharename = %s, snapshotname = %s, fsdev = %s",
|
||||
(sharename, snapshotname, fsdev))
|
||||
|
||||
try:
|
||||
self._gpfs_execute('mmcrsnapshot', fsdev, snapshot['name'],
|
||||
'-j', sharename)
|
||||
except exception.ProcessExecutionError as e:
|
||||
msg = (_('Failed to create snapshot %(snapshot)s. '
|
||||
'Error: %(excmsg)s.'),
|
||||
{'snapshot': snapshot['name'], 'excmsg': six.text_type(e)})
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
|
||||
def _delete_share_snapshot(self, snapshot):
|
||||
"""Delete a snapshot of the share."""
|
||||
sharename = snapshot['share_name']
|
||||
fsdev = self._get_gpfs_device()
|
||||
|
||||
try:
|
||||
self._gpfs_execute('mmdelsnapshot', fsdev, snapshot['name'],
|
||||
'-j', sharename)
|
||||
except exception.ProcessExecutionError as e:
|
||||
msg = (_('Failed to delete snapshot %(snapshot)s. '
|
||||
'Error: %(excmsg)s.'),
|
||||
{'snapshot': snapshot['name'], 'excmsg': six.text_type(e)})
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
|
||||
def _create_share_from_snapshot(self, share, snapshot, share_path):
|
||||
"""Create share from a share snapshot."""
|
||||
self._create_share(share)
|
||||
snapshot_path = self._get_snapshot_path(snapshot)
|
||||
snapshot_path = snapshot_path + "/"
|
||||
try:
|
||||
self._gpfs_execute('rsync', '-rp', snapshot_path, share_path)
|
||||
except exception.ProcessExecutionError as e:
|
||||
msg = (_('Failed to create share %(share)s from '
|
||||
'snapshot %(snapshot)s. Error: %(excmsg)s.'),
|
||||
{'share': share['name'], 'snapshot': snapshot['name'],
|
||||
'excmsg': six.text_type(e)})
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
|
||||
def create_share(self, ctx, share, share_server=None):
|
||||
"""Create GPFS directory that will be represented as share."""
|
||||
self._create_share(share)
|
||||
share_path = self._get_share_path(share)
|
||||
location = self._get_helper(share).create_export(share_path)
|
||||
return location
|
||||
|
||||
def create_share_from_snapshot(self, ctx, share, snapshot,
|
||||
share_server=None):
|
||||
"""Is called to create share from a snapshot."""
|
||||
share_path = self._get_share_path(share)
|
||||
self._create_share_from_snapshot(share, snapshot, share_path)
|
||||
location = self._get_helper(share).create_export(share_path)
|
||||
return location
|
||||
|
||||
    def create_snapshot(self, context, snapshot, share_server=None):
        """Creates a snapshot.

        Delegates to _create_share_snapshot, which runs mmcrsnapshot
        against the share's fileset.
        """
        self._create_share_snapshot(snapshot)
def delete_share(self, ctx, share, share_server=None):
|
||||
"""Remove and cleanup share storage."""
|
||||
location = self._get_share_path(share)
|
||||
self._get_helper(share).remove_export(location, share)
|
||||
self._delete_share(share)
|
||||
|
||||
def delete_snapshot(self, context, snapshot, share_server=None):
|
||||
"""Deletes a snapshot."""
|
||||
self._delete_share_snapshot(snapshot)
|
||||
|
||||
    def ensure_share(self, ctx, share, share_server=None):
        """Ensure that storage are mounted and exported.

        Intentionally a no-op here; presumably GPFS filesets and NFS
        exports persist across service restarts -- TODO confirm.
        """
def allow_access(self, ctx, share, access, share_server=None):
|
||||
"""Allow access to the share."""
|
||||
location = self._get_share_path(share)
|
||||
self._get_helper(share).allow_access(location, share,
|
||||
access['access_type'],
|
||||
access['access_to'])
|
||||
|
||||
def deny_access(self, ctx, share, access, share_server=None):
|
||||
"""Deny access to the share."""
|
||||
location = self._get_share_path(share)
|
||||
self._get_helper(share).deny_access(location, share,
|
||||
access['access_type'],
|
||||
access['access_to'])
|
||||
|
||||
def check_for_setup_error(self):
|
||||
"""Returns an error if prerequisites aren't met."""
|
||||
if not self._check_gpfs_state():
|
||||
msg = (_('GPFS is not active.'))
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
|
||||
if not self.configuration.gpfs_share_export_ip:
|
||||
msg = (_('gpfs_share_export_ip must be specified.'))
|
||||
LOG.error(msg)
|
||||
raise exception.InvalidParameterValue(err=msg)
|
||||
|
||||
gpfs_base_dir = self.configuration.gpfs_mount_point_base
|
||||
if not gpfs_base_dir.startswith('/'):
|
||||
msg = (_('%s must be an absolute path.'), gpfs_base_dir)
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
|
||||
if not self._is_dir(gpfs_base_dir):
|
||||
msg = (_('%s is not a directory.'), gpfs_base_dir)
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
|
||||
if not self._is_gpfs_path(gpfs_base_dir):
|
||||
msg = (_('%s is not on GPFS. Perhaps GPFS not mounted.'),
|
||||
gpfs_base_dir)
|
||||
LOG.error(msg)
|
||||
raise exception.GPFSException(msg)
|
||||
|
||||
if self.configuration.gpfs_nfs_server_type not in ['KNFS', 'GNFS']:
|
||||
msg = (_('Invalid gpfs_nfs_server_type value: %s. '
|
||||
'Valid values are: "KNFS", "GNFS".'),
|
||||
self.configuration.gpfs_nfs_server_type)
|
||||
LOG.error(msg)
|
||||
raise exception.InvalidParameterValue(err=msg)
|
||||
|
||||
if self.configuration.gpfs_nfs_server_list is None:
|
||||
msg = (_('Missing value for gpfs_nfs_server_list.'))
|
||||
LOG.error(msg)
|
||||
raise exception.InvalidParameterValue(err=msg)
|
||||
|
||||
def get_share_stats(self, refresh=False):
|
||||
"""Get share status.
|
||||
|
||||
If 'refresh' is True, run update the stats first.
|
||||
"""
|
||||
if refresh:
|
||||
self._update_share_status()
|
||||
|
||||
return self._stats
|
||||
|
||||
def _update_share_status(self):
|
||||
"""Retrieve status info from share volume group."""
|
||||
|
||||
LOG.debug("Updating share status")
|
||||
data = {}
|
||||
|
||||
data["share_backend_name"] = self.backend_name
|
||||
data["vendor_name"] = 'IBM'
|
||||
data["driver_version"] = '1.0'
|
||||
data["storage_protocol"] = 'NFS'
|
||||
|
||||
data['reserved_percentage'] = \
|
||||
self.configuration.reserved_share_percentage
|
||||
data['QoS_support'] = False
|
||||
|
||||
free, capacity = self._get_available_capacity(
|
||||
self.configuration.gpfs_mount_point_base)
|
||||
|
||||
data['total_capacity_gb'] = math.ceil(capacity / units.Gi)
|
||||
data['free_capacity_gb'] = math.ceil(free / units.Gi)
|
||||
|
||||
self._stats = data
|
||||
|
||||
def _get_helper(self, share):
|
||||
if share['share_proto'].startswith('NFS'):
|
||||
return self._helpers[self.configuration.gpfs_nfs_server_type]
|
||||
else:
|
||||
msg = (_('Share protocol %s not supported by GPFS driver.'),
|
||||
share['share_proto'])
|
||||
LOG.error(msg)
|
||||
raise exception.InvalidShare(reason=msg)
|
||||
|
||||
def _get_share_path(self, share):
|
||||
"""Returns share path on storage provider."""
|
||||
return os.path.join(self.configuration.gpfs_mount_point_base,
|
||||
share['name'])
|
||||
|
||||
def _get_snapshot_path(self, snapshot):
|
||||
"""Returns share path on storage provider."""
|
||||
snapshot_dir = ".snapshots"
|
||||
return os.path.join(self.configuration.gpfs_mount_point_base,
|
||||
snapshot["share_name"], snapshot_dir,
|
||||
snapshot["name"])
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
class NASHelperBase(object):
    """Abstract interface for NFS protocol helpers."""

    def __init__(self, execute, config_object):
        self._execute = execute
        self.configuration = config_object

    def create_export(self, local_path):
        """Return the export location string '<export_ip>:<local_path>'."""
        export_ip = self.configuration.gpfs_share_export_ip
        return ':'.join([export_ip, local_path])

    @abc.abstractmethod
    def remove_export(self, local_path, share):
        """Remove export."""

    @abc.abstractmethod
    def allow_access(self, local_path, share, access_type, access):
        """Allow access to the host."""

    @abc.abstractmethod
    def deny_access(self, local_path, share, access_type, access,
                    force=False):
        """Deny access to the host."""
class KNFSHelper(NASHelperBase):
    """Wrapper for Kernel NFS Commands.

    Publishes and retracts exports with ``exportfs`` on every server in
    ``gpfs_nfs_server_list``, running locally or over ssh as appropriate.
    """

    def __init__(self, execute, config_object):
        super(KNFSHelper, self).__init__(execute, config_object)
        # Fail fast at start-up if the kernel NFS tooling is unavailable.
        # (The base class already stores `execute`; the original
        # redundantly assigned self._execute a second time.)
        try:
            self._execute('exportfs', check_exit_code=True, run_as_root=True)
        except exception.ProcessExecutionError as e:
            msg = (_('NFS server not found. Error: %s.') % six.text_type(e))
            LOG.error(msg)
            raise exception.GPFSException(msg)

    def _publish_access(self, *cmd):
        """Run *cmd* on every configured NFS server (locally or via ssh).

        :raises ProcessExecutionError: if any invocation fails
        """
        # Loop-invariant: resolve the local host's addresses once.
        localserver_iplist = socket.gethostbyname_ex(
            socket.gethostname())[2]
        base_cmd = list(cmd)
        for server in self.configuration.gpfs_nfs_server_list:
            if server in localserver_iplist:
                run_cmd = base_cmd
                run_local = True
            else:
                # Rebuild from base_cmd for each server; the original
                # rebound `cmd` in place, so a second remote server was
                # wrongly prefixed with two nested ssh invocations.
                sshlogin = self.configuration.gpfs_ssh_login
                remote_login = sshlogin + '@' + server
                run_cmd = ['ssh', remote_login] + base_cmd
                run_local = False
            utils.execute(*run_cmd,
                          run_as_root=run_local,
                          check_exit_code=True)

    def _get_export_options(self, share):
        """Return export options for *share*.

        Share metadata ('export_options') takes precedence over the
        configured knfs_export_options default.

        :raises InvalidInput: for unexpected metadata keys
        """
        metadata = share.get('share_metadata')
        options = None
        for item in metadata:
            if item['key'] == 'export_options':
                options = item['value']
            else:
                msg = (_('Unknown metadata key %s.') % item['key'])
                LOG.error(msg)
                raise exception.InvalidInput(reason=msg)
        if not options:
            options = self.configuration.knfs_export_options

        return options

    def remove_export(self, local_path, share):
        """Remove export.

        No-op: kernel NFS exports are retracted per-client in
        deny_access, so there is nothing share-wide to tear down.
        """

    def allow_access(self, local_path, share, access_type, access):
        """Allow access to one or more vm instances.

        :raises InvalidShareAccess: for non-ip access types
        :raises ShareAccessExists: if the rule already exists
        :raises GPFSException: if exportfs fails
        """
        if access_type != 'ip':
            raise exception.InvalidShareAccess('Only ip access type '
                                               'supported.')

        # check if present in export
        try:
            out, __ = self._execute('exportfs', run_as_root=True)
        except exception.ProcessExecutionError as e:
            msg = (_('Failed to check exports on the systems. '
                     ' Error: %s.') % six.text_type(e))
            LOG.error(msg)
            raise exception.GPFSException(msg)

        out = re.search(re.escape(local_path) + r'[\s\n]*'
                        + re.escape(access), out)
        if out is not None:
            raise exception.ShareAccessExists(access_type=access_type,
                                              access=access)

        export_opts = self._get_export_options(share)

        cmd = ['exportfs', '-o', export_opts,
               ':'.join([access, local_path])]
        try:
            self._publish_access(*cmd)
        except exception.ProcessExecutionError as e:
            # Interpolate immediately (originally left as a tuple).
            msg = (_('Failed to allow access for share %(sharename)s. '
                     'Error: %(excmsg)s.') %
                   {'sharename': share['name'],
                    'excmsg': six.text_type(e)})
            LOG.error(msg)
            raise exception.GPFSException(msg)

    def deny_access(self, local_path, share, access_type, access,
                    force=False):
        """Remove access for one or more vm instances.

        :raises GPFSException: if exportfs -u fails
        """
        cmd = ['exportfs', '-u', ':'.join([access, local_path])]
        try:
            self._publish_access(*cmd)
        except exception.ProcessExecutionError as e:
            # Original format string had a typo ('%excmsg)s') which would
            # itself crash while formatting the error message.
            msg = (_('Failed to deny access for share %(sharename)s. '
                     'Error: %(excmsg)s.') %
                   {'sharename': share['name'],
                    'excmsg': six.text_type(e)})
            LOG.error(msg)
            raise exception.GPFSException(msg)
class GNFSHelper(NASHelperBase):
    """Wrapper for Ganesha NFS Commands.

    Manages access by rewriting the Ganesha export configuration file and
    republishing it to every server in gpfs_nfs_server_list.
    """

    def __init__(self, execute, config_object):
        super(GNFSHelper, self).__init__(execute, config_object)
        # Parse the configured "attr=val" pairs once; they seed every
        # export definition unless overridden by share metadata.
        self.default_export_options = dict()
        for m in AVPATTERN.finditer(self.configuration.gnfs_export_options):
            self.default_export_options[m.group('attr')] = m.group('val')

    def _get_export_options(self, share):
        """Set various export attributes for share."""

        # load default options first - any options passed as share metadata
        # will take precedence
        options = copy.copy(self.default_export_options)

        metadata = share.get('share_metadata')
        for item in metadata:
            attr = item['key']
            if attr in ganesha_utils.valid_flags():
                options[attr] = item['value']
            else:
                # Unknown keys are logged and skipped, not fatal
                # (contrast with KNFSHelper, which raises).
                LOG.error(_LE('Invalid metadata %(attr)s for share '
                              '%(share)s.'),
                          {'attr': attr, 'share': share['name']})

        return options

    @utils.synchronized("ganesha-process-req")
    def _ganesha_process_request(self, req_type, local_path,
                                 share, access_type=None,
                                 access=None, force=False):
        # Serialized read-modify-write of the shared Ganesha config file.
        # req_type is one of "allow_access", "deny_access",
        # "remove_export".
        cfgpath = self.configuration.ganesha_config_path
        gservice = self.configuration.ganesha_service_name
        gservers = self.configuration.gpfs_nfs_server_list
        sshlogin = self.configuration.gpfs_ssh_login
        sshkey = self.configuration.gpfs_ssh_private_key
        pre_lines, exports = ganesha_utils.parse_ganesha_config(cfgpath)
        reload_needed = True

        if (req_type == "allow_access"):
            export_opts = self._get_export_options(share)
            # add the new share if it's not already defined
            if not ganesha_utils.export_exists(exports, local_path):
                # Add a brand new export definition
                new_id = ganesha_utils.get_next_id(exports)
                export = ganesha_utils.get_export_template()
                export['fsal'] = '"GPFS"'
                export['export_id'] = new_id
                export['tag'] = '"fs%s"' % new_id
                export['path'] = '"%s"' % local_path
                export['pseudo'] = '"%s"' % local_path
                export['rw_access'] = (
                    '"%s"' % ganesha_utils.format_access_list(access)
                )
                for key in export_opts:
                    export[key] = export_opts[key]

                exports[new_id] = export
                LOG.info(_LI('Add %(share)s with access from %(access)s'),
                         {'share': share['name'], 'access': access})
            else:
                # Update existing access with new/extended access information
                export = ganesha_utils.get_export_by_path(exports, local_path)
                initial_access = export['rw_access'].strip('"')
                merged_access = ','.join([access, initial_access])
                updated_access = ganesha_utils.format_access_list(
                    merged_access
                )
                if initial_access != updated_access:
                    LOG.info(_LI('Update %(share)s with access from '
                                 '%(access)s'),
                             {'share': share['name'], 'access': access})
                    export['rw_access'] = '"%s"' % updated_access
                else:
                    # Access already present -- skip the expensive
                    # publish/reload below.
                    LOG.info(_LI('Do not update %(share)s, access from '
                                 '%(access)s already defined'),
                             {'share': share['name'], 'access': access})
                    reload_needed = False

        elif (req_type == "deny_access"):
            export = ganesha_utils.get_export_by_path(exports, local_path)
            initial_access = export['rw_access'].strip('"')
            updated_access = ganesha_utils.format_access_list(
                initial_access,
                deny_access=access
            )

            if initial_access != updated_access:
                LOG.info(_LI('Update %(share)s removing access from '
                             '%(access)s'),
                         {'share': share['name'], 'access': access})
                export['rw_access'] = '"%s"' % updated_access
            else:
                LOG.info(_LI('Do not update %(share)s, access from %(access)s '
                             'already removed'), {'share': share['name'],
                                                  'access': access})
                reload_needed = False

        elif (req_type == "remove_export"):
            export = ganesha_utils.get_export_by_path(exports, local_path)
            if export:
                exports.pop(export['export_id'])
                LOG.info(_LI('Remove export for %s'), share['name'])
            else:
                LOG.info(_LI('Export for %s is not defined in Ganesha '
                             'config.'),
                         share['name'])
                reload_needed = False

        if reload_needed:
            # publish config to all servers and reload or restart
            ganesha_utils.publish_ganesha_config(gservers, sshlogin, sshkey,
                                                 cfgpath, pre_lines, exports)
            ganesha_utils.reload_ganesha_config(gservers, sshlogin, gservice)

    def remove_export(self, local_path, share):
        """Remove export."""
        self._ganesha_process_request("remove_export", local_path, share)

    def allow_access(self, local_path, share, access_type, access):
        """Allow access to the host."""
        # TODO(nileshb): add support for read only, metadata, and other
        # access types
        if access_type != 'ip':
            raise exception.InvalidShareAccess('Only ip access type '
                                               'supported.')

        self._ganesha_process_request("allow_access", local_path,
                                      share, access_type, access)

    def deny_access(self, local_path, share, access_type, access,
                    force=False):
        """Deny access to the host."""
        self._ganesha_process_request("deny_access", local_path,
                                      share, access_type, access, force)
0
manila/tests/share/drivers/ibm/__init__.py
Normal file
0
manila/tests/share/drivers/ibm/__init__.py
Normal file
267
manila/tests/share/drivers/ibm/test_ganesha_utils.py
Normal file
267
manila/tests/share/drivers/ibm/test_ganesha_utils.py
Normal file
@ -0,0 +1,267 @@
|
||||
# Copyright (c) 2014 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Unit tests for the Ganesha Utils module."""
|
||||
|
||||
import socket
|
||||
import time
|
||||
|
||||
import mock
|
||||
from oslo.config import cfg
|
||||
|
||||
from manila import exception
|
||||
import manila.share.drivers.ibm.ganesha_utils as ganesha_utils
|
||||
from manila import test
|
||||
from manila import utils
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
def fake_pre_lines(**kwargs):
    """Return the banner lines that precede export blocks in a config."""
    separator = '###################################################'
    return [
        separator,
        '# Export entries',
        separator,
        '',
        '',
        '# First export entry',
    ]
def fake_exports(**kwargs):
    """Return two canned Ganesha export definitions keyed by export id."""
    common = {
        'anonymous_root_uid': '-2',
        'filesystem_id': '192.168',
        'fsal': '"GPFS"',
        'maxread': '65536',
        'maxwrite': '65536',
        'nfs_protocols': '"3,4"',
        'prefread': '65536',
        'prefwrite': '65536',
        'root_access': '"*"',
        'sectype': '"sys"',
        'transport_protocols': '"UDP,TCP"',
    }

    def make_export(export_id, path, rw_access):
        export = dict(common)
        export.update({
            'export_id': export_id,
            'tag': '"fs%s"' % export_id,
            'path': '"%s"' % path,
            'pseudo': '"%s"' % path,
            'rw_access': rw_access,
        })
        return export

    return {
        '100': make_export('100', '/fs0/share-1234', '""'),
        '101': make_export('101', '/fs0/share-5678', '"172.24.4.4"'),
    }
class GaneshaUtilsTestCase(test.TestCase):
    """Tests Ganesha Utils."""

    def setUp(self):
        super(GaneshaUtilsTestCase, self).setUp()
        self.fake_path = "/fs0/share-1234"
        self.fake_pre_lines = fake_pre_lines()
        self.fake_exports = fake_exports()
        self.fake_configpath = "/etc/ganesha/ganesha.exports.conf"
        self.local_ip = ["192.11.22.1"]
        self.remote_ips = ["192.11.22.2", "192.11.22.3"]
        self.servers = self.local_ip + self.remote_ips
        self.sshlogin = "fake_login"
        self.sshkey = "fake_sshkey"
        self.STARTING_EXPORT_ID = 100
        # Make hostname resolution deterministic so the utils can tell
        # local servers from remote ones.
        self.stubs.Set(socket, 'gethostname',
                       mock.Mock(return_value="testserver"))
        self.stubs.Set(socket, 'gethostbyname_ex', mock.Mock(
            return_value=('localhost',
                          ['localhost.localdomain', 'testserver'],
                          ['127.0.0.1'] + self.local_ip)
        ))

    def test_get_export_by_path(self):
        fake_export = {'export_id': '100'}
        self.stubs.Set(ganesha_utils, '_get_export_by_path',
                       mock.Mock(return_value=fake_export))
        export = ganesha_utils.get_export_by_path(self.fake_exports,
                                                  self.fake_path)
        self.assertEqual(export, fake_export)
        ganesha_utils._get_export_by_path.assert_called_once_with(
            self.fake_exports, self.fake_path
        )

    def test_export_exists(self):
        fake_export = {'export_id': '100'}
        self.stubs.Set(ganesha_utils, '_get_export_by_path',
                       mock.Mock(return_value=fake_export))
        result = ganesha_utils.export_exists(self.fake_exports, self.fake_path)
        self.assertTrue(result)
        ganesha_utils._get_export_by_path.assert_called_once_with(
            self.fake_exports, self.fake_path
        )

    def test__get_export_by_path_export_exists(self):
        # Expected value mirrors entry '100' from fake_exports().
        expected_export = {
            'anonymous_root_uid': '-2',
            'export_id': '100',
            'filesystem_id': '192.168',
            'fsal': '"GPFS"',
            'maxread': '65536',
            'maxwrite': '65536',
            'nfs_protocols': '"3,4"',
            'path': '"/fs0/share-1234"',
            'prefread': '65536',
            'prefwrite': '65536',
            'pseudo': '"/fs0/share-1234"',
            'root_access': '"*"',
            'rw_access': '""',
            'sectype': '"sys"',
            'tag': '"fs100"',
            'transport_protocols': '"UDP,TCP"',
        }
        export = ganesha_utils._get_export_by_path(self.fake_exports,
                                                   self.fake_path)
        self.assertEqual(export, expected_export)

    def test__get_export_by_path_export_does_not_exists(self):
        share_path = '/fs0/share-1111'
        export = ganesha_utils._get_export_by_path(self.fake_exports,
                                                   share_path)
        self.assertEqual(export, None)

    def test_get_next_id(self):
        # fake_exports holds ids 100 and 101, so the next free id is 102.
        expected_id = 102
        result = ganesha_utils.get_next_id(self.fake_exports)
        self.assertEqual(result, expected_id)

    @mock.patch('six.moves.builtins.map')
    def test_get_next_id_first_export(self, mock_map):
        # When no numeric ids can be derived, get_next_id falls back to
        # the starting export id.
        expected_id = self.STARTING_EXPORT_ID
        mock_map.side_effect = ValueError
        result = ganesha_utils.get_next_id(self.fake_exports)
        self.assertEqual(result, expected_id)

    def test_format_access_list(self):
        access_string = "9.123.12.1,9.123.12.2,9.122"
        result = ganesha_utils.format_access_list(access_string, None)
        self.assertEqual(result, "9.122.0.0,9.123.12.1,9.123.12.2")

    def test_format_access_list_deny_access(self):
        access_string = "9.123.12.1,9.123,12.2"
        deny_access = "9.123,12.2"
        result = ganesha_utils.format_access_list(access_string,
                                                  deny_access=deny_access)
        self.assertEqual(result, "9.123.12.1")

    def test_publish_ganesha_config(self):
        configpath = self.fake_configpath
        methods = ('_publish_local_config', '_publish_remote_config')
        for method in methods:
            self.stubs.Set(ganesha_utils, method, mock.Mock())
        ganesha_utils.publish_ganesha_config(self.servers, self.sshlogin,
                                             self.sshkey, configpath,
                                             self.fake_pre_lines,
                                             self.fake_exports)
        ganesha_utils._publish_local_config.assert_called_once_with(
            configpath, self.fake_pre_lines, self.fake_exports
        )
        for remote_ip in self.remote_ips:
            ganesha_utils._publish_remote_config.assert_any_call(
                remote_ip, self.sshlogin, self.sshkey, configpath
            )

    def test_reload_ganesha_config(self):
        self.stubs.Set(utils, 'execute', mock.Mock(return_value=True))
        service = 'ganesha.nfsd'
        ganesha_utils.reload_ganesha_config(self.servers, self.sshlogin)
        # Local server restarts directly; remote ones go through ssh.
        reload_cmd = ['service', service, 'restart']
        utils.execute.assert_any_call(*reload_cmd, run_as_root=True)
        for remote_ip in self.remote_ips:
            reload_cmd = ['service', service, 'restart']
            remote_login = self.sshlogin + '@' + remote_ip
            reload_cmd = ['ssh', remote_login] + reload_cmd
            utils.execute.assert_any_call(
                *reload_cmd, run_as_root=False
            )

    @mock.patch('six.moves.builtins.open')
    def test__publish_local_config(self, mock_open):
        self.stubs.Set(utils, 'execute', mock.Mock(return_value=True))
        fake_timestamp = 1415506949.75
        self.stubs.Set(time, 'time', mock.Mock(return_value=fake_timestamp))
        configpath = self.fake_configpath
        tmp_path = '%s.tmp.%s' % (configpath, fake_timestamp)
        ganesha_utils._publish_local_config(configpath,
                                            self.fake_pre_lines,
                                            self.fake_exports)
        # Verify the copy -> chmod -> move-into-place sequence.
        cpcmd = ['cp', configpath, tmp_path]
        utils.execute.assert_any_call(*cpcmd, run_as_root=True)
        chmodcmd = ['chmod', 'o+w', tmp_path]
        utils.execute.assert_any_call(*chmodcmd, run_as_root=True)
        mvcmd = ['mv', tmp_path, configpath]
        utils.execute.assert_any_call(*mvcmd, run_as_root=True)
        self.assertTrue(time.time.called)

    @mock.patch('six.moves.builtins.open')
    def test__publish_local_config_exception(self, mock_open):
        self.stubs.Set(utils, 'execute',
                       mock.Mock(side_effect=exception.ProcessExecutionError))
        fake_timestamp = 1415506949.75
        self.stubs.Set(time, 'time', mock.Mock(return_value=fake_timestamp))
        configpath = self.fake_configpath
        tmp_path = '%s.tmp.%s' % (configpath, fake_timestamp)
        self.assertRaises(exception.GPFSGaneshaException,
                          ganesha_utils._publish_local_config, configpath,
                          self.fake_pre_lines, self.fake_exports)
        cpcmd = ['cp', configpath, tmp_path]
        utils.execute.assert_called_once_with(*cpcmd, run_as_root=True)
        self.assertTrue(time.time.called)

    def test__publish_remote_config(self):
        utils.execute = mock.Mock(return_value=True)
        server = self.remote_ips[1]
        dest = '%s@%s:%s' % (self.sshlogin, server, self.fake_configpath)
        scpcmd = ['scp', '-i', self.sshkey, self.fake_configpath, dest]

        ganesha_utils._publish_remote_config(server, self.sshlogin,
                                             self.sshkey, self.fake_configpath)
        utils.execute.assert_called_once_with(*scpcmd, run_as_root=False)

    def test__publish_remote_config_exception(self):
        self.stubs.Set(utils, 'execute',
                       mock.Mock(side_effect=exception.ProcessExecutionError))
        server = self.remote_ips[1]
        dest = '%s@%s:%s' % (self.sshlogin, server, self.fake_configpath)
        scpcmd = ['scp', '-i', self.sshkey, self.fake_configpath, dest]

        self.assertRaises(exception.GPFSGaneshaException,
                          ganesha_utils._publish_remote_config, server,
                          self.sshlogin, self.sshkey, self.fake_configpath)
        utils.execute.assert_called_once_with(*scpcmd, run_as_root=False)
717
manila/tests/share/drivers/ibm/test_gpfs.py
Normal file
717
manila/tests/share/drivers/ibm/test_gpfs.py
Normal file
@ -0,0 +1,717 @@
|
||||
# Copyright (c) 2014 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Unit tests for the IBM GPFS driver module."""
|
||||
|
||||
import re
|
||||
import socket
|
||||
|
||||
import mock
|
||||
from oslo.config import cfg
|
||||
|
||||
from manila import context
|
||||
from manila import exception
|
||||
import manila.share.configuration as config
|
||||
import manila.share.drivers.ibm.ganesha_utils as ganesha_utils
|
||||
import manila.share.drivers.ibm.gpfs as gpfs
|
||||
from manila import test
|
||||
from manila.tests.db import fakes as db_fakes
|
||||
from manila import utils
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
def fake_share(**kwargs):
    """Build a fake share model; keyword arguments override defaults."""
    defaults = dict(
        id='fakeid',
        name='fakename',
        size=1,
        share_proto='NFS',
        export_location='127.0.0.1:/mnt/nfs/share-1',
    )
    defaults.update(kwargs)
    return db_fakes.FakeModel(defaults)
def fake_snapshot(**kwargs):
    """Build a fake snapshot model; keyword arguments override defaults."""
    defaults = dict(
        id='fakesnapshotid',
        share_name='fakename',
        share_id='fakeid',
        name='fakesnapshotname',
        share_size=1,
        share_proto='NFS',
        export_location='127.0.0.1:/mnt/nfs/volume-00002',
    )
    defaults.update(kwargs)
    return db_fakes.FakeModel(defaults)
def fake_access(**kwargs):
    """Build a fake access rule; keyword arguments override defaults."""
    defaults = dict(
        id='fakeaccid',
        access_type='ip',
        access_to='10.0.0.2',
        state='active',
    )
    defaults.update(kwargs)
    return db_fakes.FakeModel(defaults)
class GPFSShareDriverTestCase(test.TestCase):
|
||||
"""Tests GPFSShareDriver."""
|
||||
|
||||
    def setUp(self):
        """Build a driver plus KNFS/GNFS helpers wired to mock executors."""
        super(GPFSShareDriverTestCase, self).setUp()
        self._context = context.get_admin_context()
        self._gpfs_execute = mock.Mock(return_value=('', ''))

        self._helper_fake = mock.Mock()
        self.fake_conf = config.Configuration(None)
        self._db = mock.Mock()
        self._driver = gpfs.GPFSShareDriver(self._db,
                                            execute=self._gpfs_execute,
                                            configuration=self.fake_conf)
        self._knfs_helper = gpfs.KNFSHelper(self._gpfs_execute,
                                            self.fake_conf)
        self._gnfs_helper = gpfs.GNFSHelper(self._gpfs_execute,
                                            self.fake_conf)
        self.fakedev = "/dev/gpfs0"
        self.fakefspath = "/gpfs0"
        self.fakesharepath = "/gpfs0/share-fakeid"
        self.fakesnapshotpath = "/gpfs0/.snapshots/snapshot-fakesnapshotid"
        # Every path the driver checks is treated as existing.
        self.stubs.Set(gpfs.os.path, 'exists', mock.Mock(return_value=True))
        # Replace real helpers with a mock so driver tests can assert calls.
        self._driver._helpers = {
            'KNFS': self._helper_fake
        }
        self.share = fake_share()
        self.server = {
            'backend_details': {
                'ip': '1.2.3.4',
                'instance_id': 'fake'
            }
        }
        self.access = fake_access()
        self.snapshot = fake_snapshot()
        self.local_ip = "192.11.22.1"
        self.remote_ip = "192.11.22.2"
        gpfs_nfs_server_list = [self.local_ip, self.remote_ip]
        self._knfs_helper.configuration.gpfs_nfs_server_list = \
            gpfs_nfs_server_list
        self._gnfs_helper.configuration.gpfs_nfs_server_list = \
            gpfs_nfs_server_list
        self._gnfs_helper.configuration.ganesha_config_path = \
            "fake_ganesha_config_path"
        self.sshlogin = "fake_login"
        self.sshkey = "fake_sshkey"
        self.gservice = "fake_ganesha_service"
        self._gnfs_helper.configuration.gpfs_ssh_login = self.sshlogin
        self._gnfs_helper.configuration.gpfs_ssh_private_key = self.sshkey
        self._gnfs_helper.configuration.ganesha_service_name = self.gservice
        # Deterministic hostname resolution: the test host resolves to
        # local_ip so helpers can distinguish local vs remote servers.
        self.stubs.Set(socket, 'gethostname',
                       mock.Mock(return_value="testserver"))
        self.stubs.Set(socket, 'gethostbyname_ex', mock.Mock(
            return_value=('localhost',
                          ['localhost.localdomain', 'testserver'],
                          ['127.0.0.1', self.local_ip])
        ))
def test_do_setup(self):
    """do_setup must invoke _setup_helpers.

    The original check used ``assert_called_any()``, which is not a real
    Mock assertion method: Mock auto-creates unknown attributes, so the
    call silently did nothing and the test could never fail.  Assert on
    the mock's recorded ``called`` flag instead.
    """
    self.stubs.Set(self._driver, '_setup_helpers', mock.Mock())
    self._driver.do_setup(self._context)
    self.assertTrue(self._driver._setup_helpers.called)
|
||||
|
||||
def test_setup_helpers(self):
    """_setup_helpers imports each configured helper class exactly once.

    With gpfs_share_helpers set to a single 'KNFS=fakenfs' entry and
    importutils.import_class stubbed, one helper must end up registered.
    """
    self._driver._helpers = {}
    CONF.set_default('gpfs_share_helpers', ['KNFS=fakenfs'])
    self.stubs.Set(gpfs.importutils, 'import_class',
                   mock.Mock(return_value=self._helper_fake))
    self._driver._setup_helpers()
    gpfs.importutils.import_class.assert_has_calls(
        [mock.call('fakenfs')]
    )
    self.assertEqual(len(self._driver._helpers), 1)
|
||||
|
||||
def test_create_share(self):
    """create_share builds the share, resolves its path and exports it.

    The export location returned by the helper's create_export must be
    propagated back to the caller.
    """
    self._helper_fake.create_export.return_value = 'fakelocation'
    methods = ('_create_share', '_get_share_path')
    for method in methods:
        self.stubs.Set(self._driver, method, mock.Mock())
    result = self._driver.create_share(self._context, self.share,
                                       share_server=self.server)
    self._driver._create_share.assert_called_once_with(self.share)
    self._driver._get_share_path.assert_called_once_with(self.share)

    self.assertEqual(result, 'fakelocation')
|
||||
|
||||
def test_create_share_from_snapshot(self):
    """create_share_from_snapshot clones from the snapshot into the
    share path and returns the helper's export location."""
    self._helper_fake.create_export.return_value = 'fakelocation'
    self._driver._get_share_path = mock.Mock(return_value=self.
                                             fakesharepath)
    self._driver._create_share_from_snapshot = mock.Mock()
    result = self._driver.create_share_from_snapshot(self._context,
                                                     self.share,
                                                     self.snapshot,
                                                     share_server=None)
    self._driver._get_share_path.assert_called_once_with(self.share)
    self._driver._create_share_from_snapshot.assert_called_once_with(
        self.share, self.snapshot,
        self.fakesharepath
    )
    self.assertEqual(result, 'fakelocation')
|
||||
|
||||
def test_create_snapshot(self):
    """create_snapshot delegates the work to _create_share_snapshot."""
    snapshot_creator = mock.Mock()
    self._driver._create_share_snapshot = snapshot_creator
    self._driver.create_snapshot(self._context, self.snapshot,
                                 share_server=None)
    snapshot_creator.assert_called_once_with(self.snapshot)
|
||||
|
||||
def test_delete_share(self):
    """delete_share removes the export first, then deletes the share.

    Verifies the helper's remove_export gets the resolved share path
    and that _delete_share runs for the share.
    """
    self._driver._get_share_path = mock.Mock(
        return_value=self.fakesharepath
    )
    self._driver._delete_share = mock.Mock()

    self._driver.delete_share(self._context, self.share,
                              share_server=None)

    self._driver._get_share_path.assert_called_once_with(self.share)
    self._driver._delete_share.assert_called_once_with(self.share)
    self._helper_fake.remove_export.assert_called_once_with(
        self.fakesharepath, self.share
    )
|
||||
|
||||
def test_delete_snapshot(self):
    """delete_snapshot delegates the work to _delete_share_snapshot."""
    snapshot_deleter = mock.Mock()
    self._driver._delete_share_snapshot = snapshot_deleter
    self._driver.delete_snapshot(self._context, self.snapshot,
                                 share_server=None)
    snapshot_deleter.assert_called_once_with(self.snapshot)
|
||||
|
||||
def test__delete_share_snapshot(self):
    """_delete_share_snapshot runs mmdelsnapshot on the GPFS device."""
    self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
    self._driver._gpfs_execute = mock.Mock(return_value=0)
    self._driver._delete_share_snapshot(self.snapshot)
    self._driver._gpfs_execute.assert_called_once_with(
        'mmdelsnapshot', self.fakedev, self.snapshot['name'],
        '-j', self.snapshot['share_name']
    )
    self._driver._get_gpfs_device.assert_called_once_with()
|
||||
|
||||
def test__delete_share_snapshot_exception(self):
    """A failing mmdelsnapshot is surfaced as GPFSException."""
    self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
    self._driver._gpfs_execute = mock.Mock(
        side_effect=exception.ProcessExecutionError
    )
    self.assertRaises(exception.GPFSException,
                      self._driver._delete_share_snapshot, self.snapshot)
    self._driver._get_gpfs_device.assert_called_once_with()
    self._driver._gpfs_execute.assert_called_once_with(
        'mmdelsnapshot', self.fakedev, self.snapshot['name'],
        '-j', self.snapshot['share_name']
    )
|
||||
|
||||
def test_allow_access(self):
    """allow_access resolves the share path and forwards the access
    type and target to the protocol helper."""
    self._driver._get_share_path = mock.Mock(
        return_value=self.fakesharepath
    )
    self._helper_fake.allow_access = mock.Mock()
    self._driver.allow_access(self._context, self.share,
                              self.access, share_server=None)
    self._helper_fake.allow_access.assert_called_once_with(
        self.fakesharepath, self.share,
        self.access['access_type'],
        self.access['access_to']
    )
    self._driver._get_share_path.assert_called_once_with(self.share)
|
||||
|
||||
def test_deny_access(self):
    """deny_access resolves the share path and forwards the access
    type and target to the protocol helper."""
    self._driver._get_share_path = mock.Mock(return_value=self.
                                             fakesharepath)
    self._helper_fake.deny_access = mock.Mock()
    self._driver.deny_access(self._context, self.share,
                             self.access, share_server=None)
    self._helper_fake.deny_access.assert_called_once_with(
        self.fakesharepath, self.share,
        self.access['access_type'],
        self.access['access_to']
    )
    self._driver._get_share_path.assert_called_once_with(self.share)
|
||||
|
||||
def test__check_gpfs_state_active(self):
    """'active' in the mmgetstate -Y output means GPFS is up."""
    canned_state = "mmgetstate::state:\nmmgetstate::active:"
    executor = mock.Mock(return_value=(canned_state, ''))
    self._driver._gpfs_execute = executor
    state_ok = self._driver._check_gpfs_state()
    executor.assert_called_once_with('mmgetstate', '-Y')
    self.assertEqual(state_ok, True)
|
||||
|
||||
def test__check_gpfs_state_down(self):
    """'down' in the mmgetstate -Y output means GPFS is not running."""
    canned_state = "mmgetstate::state:\nmmgetstate::down:"
    executor = mock.Mock(return_value=(canned_state, ''))
    self._driver._gpfs_execute = executor
    state_ok = self._driver._check_gpfs_state()
    executor.assert_called_once_with('mmgetstate', '-Y')
    self.assertEqual(state_ok, False)
|
||||
|
||||
def test__check_gpfs_state_exception(self):
    """A failing mmgetstate run is surfaced as GPFSException."""
    self._driver._gpfs_execute = mock.Mock(
        side_effect=exception.ProcessExecutionError
    )
    self.assertRaises(exception.GPFSException,
                      self._driver._check_gpfs_state)
    self._driver._gpfs_execute.assert_called_once_with('mmgetstate', '-Y')
|
||||
|
||||
def test__is_dir_success(self):
    """stat reporting 'directory' makes _is_dir return True."""
    fakeoutput = "directory"
    self._driver._gpfs_execute = mock.Mock(return_value=(fakeoutput, ''))
    result = self._driver._is_dir(self.fakefspath)
    self._driver._gpfs_execute.assert_called_once_with(
        'stat', '--format=%F', self.fakefspath, run_as_root=False
    )
    self.assertEqual(result, True)
|
||||
|
||||
def test__is_dir_failure(self):
    """A non-directory stat result makes _is_dir return False.

    The fixture is corrected to "regular file" — the string GNU
    ``stat --format=%F`` actually prints for a plain file (the original
    misspelled it "regulalr file").  The test only requires any value
    other than "directory", so behavior is unchanged.
    """
    fakeoutput = "regular file"
    self._driver._gpfs_execute = mock.Mock(return_value=(fakeoutput, ''))
    result = self._driver._is_dir(self.fakefspath)
    self._driver._gpfs_execute.assert_called_once_with(
        'stat', '--format=%F', self.fakefspath, run_as_root=False
    )
    self.assertEqual(result, False)
|
||||
|
||||
def test__is_dir_exception(self):
    """A failing stat run is surfaced as GPFSException."""
    self._driver._gpfs_execute = mock.Mock(
        side_effect=exception.ProcessExecutionError
    )
    self.assertRaises(exception.GPFSException,
                      self._driver._is_dir, self.fakefspath)
    self._driver._gpfs_execute.assert_called_once_with(
        'stat', '--format=%F', self.fakefspath, run_as_root=False
    )
|
||||
|
||||
def test__is_gpfs_path_ok(self):
    """A clean mmlsattr run means the path lives on GPFS."""
    executor = mock.Mock(return_value=0)
    self._driver._gpfs_execute = executor
    verdict = self._driver._is_gpfs_path(self.fakefspath)
    executor.assert_called_once_with('mmlsattr', self.fakefspath)
    self.assertEqual(verdict, True)
|
||||
|
||||
def test__is_gpfs_path_exception(self):
    """A failing mmlsattr run is surfaced as GPFSException."""
    self._driver._gpfs_execute = mock.Mock(
        side_effect=exception.ProcessExecutionError
    )
    self.assertRaises(exception.GPFSException,
                      self._driver._is_gpfs_path,
                      self.fakefspath)
    self._driver._gpfs_execute.assert_called_once_with('mmlsattr',
                                                       self.fakefspath)
|
||||
|
||||
def test__get_gpfs_device(self):
    """_get_gpfs_device extracts the device from df output for the
    configured mount point base.

    The config value is saved and restored manually so other tests see
    the original gpfs_mount_point_base.
    """
    fakeout = "Filesystem\n" + self.fakedev
    orig_val = self._driver.configuration.gpfs_mount_point_base
    self._driver.configuration.gpfs_mount_point_base = self.fakefspath
    self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
    result = self._driver._get_gpfs_device()
    self._driver._gpfs_execute.assert_called_once_with('df',
                                                       self.fakefspath)
    self.assertEqual(result, self.fakedev)
    self._driver.configuration.gpfs_mount_point_base = orig_val
|
||||
|
||||
def test__create_share(self):
    """_create_share issues the full GPFS command sequence:
    mmcrfileset, mmlinkfileset, mmsetquota (share size as a hard
    limit) and a chmod 777 on the share path."""
    sizestr = '%sG' % self.share['size']
    self._driver._gpfs_execute = mock.Mock(return_value=True)
    self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
    self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
    self._driver._create_share(self.share)
    self._driver._gpfs_execute.assert_any_call('mmcrfileset',
                                               self.fakedev,
                                               self.share['name'],
                                               '--inode-space', 'new')
    self._driver._gpfs_execute.assert_any_call('mmlinkfileset',
                                               self.fakedev,
                                               self.share['name'],
                                               '-J', self.fakesharepath)
    self._driver._gpfs_execute.assert_any_call('mmsetquota', '-j',
                                               self.share['name'], '-h',
                                               sizestr,
                                               self.fakedev)
    self._driver._gpfs_execute.assert_any_call('chmod',
                                               '777',
                                               self.fakesharepath)

    self._driver._local_path.assert_called_once_with(self.share['name'])
    self._driver._get_gpfs_device.assert_called_once_with()
|
||||
|
||||
def test__create_share_exception(self):
    """A failure on the first GPFS command (mmcrfileset) aborts
    _create_share with GPFSException."""
    self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
    self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
    self._driver._gpfs_execute = mock.Mock(
        side_effect=exception.ProcessExecutionError
    )
    self.assertRaises(exception.GPFSException,
                      self._driver._create_share, self.share)
    self._driver._get_gpfs_device.assert_called_once_with()
    self._driver._local_path.assert_called_once_with(self.share['name'])
    # only the first command should have been attempted
    self._driver._gpfs_execute.assert_called_once_with('mmcrfileset',
                                                       self.fakedev,
                                                       self.share['name'],
                                                       '--inode-space',
                                                       'new')
|
||||
|
||||
def test__delete_share(self):
    """_delete_share force-unlinks and then force-deletes the
    fileset backing the share."""
    self._driver._gpfs_execute = mock.Mock(return_value=True)
    self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
    self._driver._delete_share(self.share)
    self._driver._gpfs_execute.assert_any_call('mmunlinkfileset',
                                               self.fakedev,
                                               self.share['name'],
                                               '-f')
    self._driver._gpfs_execute.assert_any_call('mmdelfileset',
                                               self.fakedev,
                                               self.share['name'],
                                               '-f')
    self._driver._get_gpfs_device.assert_called_once_with()
|
||||
|
||||
def test__delete_share_exception(self):
    """A failing mmunlinkfileset aborts _delete_share with
    GPFSException before mmdelfileset runs."""
    self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
    self._driver._gpfs_execute = mock.Mock(
        side_effect=exception.ProcessExecutionError
    )
    self.assertRaises(exception.GPFSException,
                      self._driver._delete_share, self.share)
    self._driver._get_gpfs_device.assert_called_once_with()
    self._driver._gpfs_execute.assert_called_once_with('mmunlinkfileset',
                                                       self.fakedev,
                                                       self.share['name'],
                                                       '-f')
|
||||
|
||||
def test__create_share_snapshot(self):
    """_create_share_snapshot runs mmcrsnapshot on the GPFS device."""
    self._driver._gpfs_execute = mock.Mock(return_value=True)
    self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
    self._driver._create_share_snapshot(self.snapshot)
    self._driver._gpfs_execute.assert_called_once_with(
        'mmcrsnapshot', self.fakedev, self.snapshot['name'],
        '-j', self.snapshot['share_name']
    )
    self._driver._get_gpfs_device.assert_called_once_with()
|
||||
|
||||
def test__create_share_snapshot_exception(self):
    """A failing mmcrsnapshot is surfaced as GPFSException."""
    self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
    self._driver._gpfs_execute = mock.Mock(
        side_effect=exception.ProcessExecutionError
    )
    self.assertRaises(exception.GPFSException,
                      self._driver._create_share_snapshot, self.snapshot)
    self._driver._get_gpfs_device.assert_called_once_with()
    self._driver._gpfs_execute.assert_called_once_with(
        'mmcrsnapshot', self.fakedev, self.snapshot['name'],
        '-j', self.snapshot['share_name']
    )
|
||||
|
||||
def test__create_share_from_snapshot(self):
    """_create_share_from_snapshot creates the new share and rsyncs
    the snapshot contents into its path."""
    self._driver._gpfs_execute = mock.Mock(return_value=True)
    self._driver._create_share = mock.Mock(return_value=True)
    self._driver._get_snapshot_path = mock.Mock(return_value=self.
                                                fakesnapshotpath)
    self._driver._create_share_from_snapshot(self.share, self.snapshot,
                                             self.fakesharepath)
    # trailing '/' on the source copies directory contents, not the dir
    self._driver._gpfs_execute.assert_called_once_with(
        'rsync', '-rp', self.fakesnapshotpath + '/', self.fakesharepath
    )
    self._driver._create_share.assert_called_once_with(self.share)
    self._driver._get_snapshot_path.assert_called_once_with(self.snapshot)
|
||||
|
||||
def test__create_share_from_snapshot_exception(self):
    """A failing rsync is surfaced as GPFSException."""
    self._driver._create_share = mock.Mock(return_value=True)
    self._driver._get_snapshot_path = mock.Mock(return_value=self.
                                                fakesnapshotpath)
    self._driver._gpfs_execute = mock.Mock(
        side_effect=exception.ProcessExecutionError
    )
    self.assertRaises(exception.GPFSException,
                      self._driver._create_share_from_snapshot,
                      self.share, self.snapshot, self.fakesharepath)
    self._driver._create_share.assert_called_once_with(self.share)
    self._driver._get_snapshot_path.assert_called_once_with(self.snapshot)
    self._driver._gpfs_execute.assert_called_once_with(
        'rsync', '-rp', self.fakesnapshotpath + '/', self.fakesharepath
    )
|
||||
|
||||
def test__gpfs_local_execute(self):
    """Local command execution funnels through utils.execute as root."""
    self.stubs.Set(utils, 'execute', mock.Mock(return_value=True))
    fake_cmd = "testcmd"
    self._driver._gpfs_local_execute(fake_cmd)
    utils.execute.assert_called_once_with(fake_cmd, run_as_root=True)
|
||||
|
||||
def test__gpfs_remote_execute(self):
    """Remote execution runs the command over SSH against the
    configured export IP.

    The config value is saved and restored manually so other tests see
    the original gpfs_share_export_ip.
    """
    self._driver._run_ssh = mock.Mock(return_value=True)
    cmd = "testcmd"
    orig_value = self._driver.configuration.gpfs_share_export_ip
    self._driver.configuration.gpfs_share_export_ip = self.local_ip
    self._driver._gpfs_remote_execute(cmd, check_exit_code=True)
    self._driver._run_ssh.assert_called_once_with(
        self.local_ip, tuple([cmd]), True
    )
    self._driver.configuration.gpfs_share_export_ip = orig_value
|
||||
|
||||
def test_knfs_allow_access(self):
    """allow_access publishes an exportfs entry when no matching
    export already exists (re.search stubbed to find none).

    The original used ``re.search.assert_called_any()``, which is not a
    real Mock assertion method — Mock auto-creates it as a no-op, so it
    could never fail.  Assert on the recorded ``called`` flag instead,
    matching the style used in test_knfs_allow_access_access_exists.
    """
    self._knfs_helper._execute = mock.Mock(
        return_value=['/fs0 <world>', 0]
    )
    self.stubs.Set(re, 'search', mock.Mock(return_value=None))
    export_opts = None
    self._knfs_helper._get_export_options = mock.Mock(
        return_value=export_opts
    )
    self._knfs_helper._publish_access = mock.Mock()
    access_type = self.access['access_type']
    access = self.access['access_to']
    local_path = self.fakesharepath
    self._knfs_helper.allow_access(local_path, self.share,
                                   access_type, access)
    self._knfs_helper._execute.assert_called_once_with('exportfs',
                                                       run_as_root=True)
    self.assertTrue(re.search.called)
    self._knfs_helper._get_export_options.assert_any_call(self.share)
    cmd = ['exportfs', '-o', export_opts, ':'.join([access, local_path])]
    self._knfs_helper._publish_access.assert_called_once_with(*cmd)
|
||||
|
||||
def test_knfs_allow_access_access_exists(self):
    """If re.search finds an existing export entry, allow_access
    raises ShareAccessExists and never builds export options."""
    out = ['/fs0 <world>', 0]
    self._knfs_helper._execute = mock.Mock(return_value=out)
    self.stubs.Set(re, 'search', mock.Mock(return_value="fake"))
    self._knfs_helper._get_export_options = mock.Mock()
    access_type = self.access['access_type']
    access = self.access['access_to']
    local_path = self.fakesharepath
    self.assertRaises(exception.ShareAccessExists,
                      self._knfs_helper.allow_access,
                      local_path, self.share,
                      access_type, access)
    self._knfs_helper._execute.assert_any_call('exportfs',
                                               run_as_root=True)
    self.assertTrue(re.search.called)
    self.assertFalse(self._knfs_helper._get_export_options.called)
|
||||
|
||||
def test_knfs_allow_access_invalid_access(self):
    """An unsupported access type is rejected with InvalidShareAccess."""
    self.assertRaises(exception.InvalidShareAccess,
                      self._knfs_helper.allow_access,
                      self.fakesharepath, self.share,
                      'invalid_access_type',
                      self.access['access_to'])
|
||||
|
||||
def test_knfs_allow_access_exception(self):
    """A failing exportfs listing is surfaced as GPFSException."""
    self._knfs_helper._execute = mock.Mock(
        side_effect=exception.ProcessExecutionError
    )
    access_type = self.access['access_type']
    access = self.access['access_to']
    local_path = self.fakesharepath
    self.assertRaises(exception.GPFSException,
                      self._knfs_helper.allow_access,
                      local_path, self.share,
                      access_type, access)
    self._knfs_helper._execute.assert_called_once_with('exportfs',
                                                       run_as_root=True)
|
||||
|
||||
def test_knfs_deny_access(self):
    """deny_access unexports host:path via 'exportfs -u'."""
    publisher = mock.Mock()
    self._knfs_helper._publish_access = publisher
    host = self.access['access_to']
    kind = self.access['access_type']
    path = self.fakesharepath
    self._knfs_helper.deny_access(path, self.share, kind, host)
    publisher.assert_called_once_with(
        'exportfs', '-u', ':'.join([host, path]))
|
||||
|
||||
def test_knfs_deny_access_exception(self):
    """A failing 'exportfs -u' publish is surfaced as GPFSException."""
    self._knfs_helper._publish_access = mock.Mock(
        side_effect=exception.ProcessExecutionError
    )
    access = self.access['access_to']
    access_type = self.access['access_type']
    local_path = self.fakesharepath
    cmd = ['exportfs', '-u', ':'.join([access, local_path])]
    self.assertRaises(exception.GPFSException,
                      self._knfs_helper.deny_access, local_path,
                      self.share, access_type, access)
    self._knfs_helper._publish_access.assert_called_once_with(*cmd)
|
||||
|
||||
def test_knfs__publish_access(self):
    """_publish_access runs the command locally as root and again over
    ssh (not as root) on the remote NFS server.

    setUp stubs socket.gethostname/gethostbyname_ex so local_ip maps to
    this host; the remote_ip entry therefore gets the ssh invocation.
    """
    self.stubs.Set(utils, 'execute', mock.Mock())
    cmd = ['fakecmd']
    self._knfs_helper._publish_access(*cmd)
    utils.execute.assert_any_call(*cmd, run_as_root=True,
                                  check_exit_code=True)
    remote_login = self.sshlogin + '@' + self.remote_ip
    cmd = ['ssh', remote_login] + list(cmd)
    utils.execute.assert_any_call(*cmd, run_as_root=False,
                                  check_exit_code=True)
    self.assertTrue(socket.gethostbyname_ex.called)
    self.assertTrue(socket.gethostname.called)
|
||||
|
||||
def test_knfs__publish_access_exception(self):
    """A failing local execute stops _publish_access before the ssh
    leg; the ProcessExecutionError propagates to the caller."""
    self.stubs.Set(utils, 'execute',
                   mock.Mock(side_effect=exception.ProcessExecutionError))
    cmd = ['fakecmd']
    self.assertRaises(exception.ProcessExecutionError,
                      self._knfs_helper._publish_access, *cmd)
    self.assertTrue(socket.gethostbyname_ex.called)
    self.assertTrue(socket.gethostname.called)
    utils.execute.assert_called_once_with(*cmd, run_as_root=True,
                                          check_exit_code=True)
|
||||
|
||||
def test_gnfs_allow_access(self):
    """allow_access forwards the request to _ganesha_process_request."""
    request_handler = mock.Mock()
    self._gnfs_helper._ganesha_process_request = request_handler
    path = self.fakesharepath
    kind = self.access['access_type']
    host = self.access['access_to']
    self._gnfs_helper.allow_access(path, self.share, kind, host)
    request_handler.assert_called_once_with(
        "allow_access", path, self.share, kind, host)
|
||||
|
||||
def test_gnfs_allow_access_invalid_access(self):
    """An unsupported access type is rejected with InvalidShareAccess."""
    self.assertRaises(exception.InvalidShareAccess,
                      self._gnfs_helper.allow_access,
                      self.fakesharepath, self.share,
                      'invalid_access_type',
                      self.access['access_to'])
|
||||
|
||||
def test_gnfs_deny_access(self):
    """deny_access forwards to _ganesha_process_request with the
    trailing force flag set to False."""
    request_handler = mock.Mock()
    self._gnfs_helper._ganesha_process_request = request_handler
    path = self.fakesharepath
    kind = self.access['access_type']
    host = self.access['access_to']
    self._gnfs_helper.deny_access(path, self.share, kind, host)
    request_handler.assert_called_once_with(
        "deny_access", path, self.share, kind, host, False)
|
||||
|
||||
def test_gnfs_remove_export(self):
    """remove_export forwards the request to _ganesha_process_request."""
    request_handler = mock.Mock()
    self._gnfs_helper._ganesha_process_request = request_handler
    self._gnfs_helper.remove_export(self.fakesharepath, self.share)
    request_handler.assert_called_once_with(
        "remove_export", self.fakesharepath, self.share)
|
||||
|
||||
def test_gnfs__ganesha_process_request_allow_access(self):
    """allow_access path: when no export exists for the path, a new
    export is templated, the config is published to all NFS servers
    and ganesha is reloaded."""
    access = self.access['access_to']
    access_type = self.access['access_type']
    local_path = self.fakesharepath
    cfgpath = self._gnfs_helper.configuration.ganesha_config_path
    gservers = self._gnfs_helper.configuration.gpfs_nfs_server_list
    export_opts = []
    pre_lines = []
    exports = {}
    self._gnfs_helper._get_export_options = mock.Mock(
        return_value=export_opts
    )
    self.stubs.Set(ganesha_utils, 'parse_ganesha_config', mock.Mock(
        return_value=(pre_lines, exports)
    ))
    # export_exists=False forces the "create new export" branch
    self.stubs.Set(ganesha_utils, 'export_exists', mock.Mock(
        return_value=False
    ))
    self.stubs.Set(ganesha_utils, 'get_next_id', mock.Mock(
        return_value=101
    ))
    self.stubs.Set(ganesha_utils, 'get_export_template', mock.Mock(
        return_value={}
    ))
    self.stubs.Set(ganesha_utils, 'publish_ganesha_config', mock.Mock())
    self.stubs.Set(ganesha_utils, 'reload_ganesha_config', mock.Mock())
    self._gnfs_helper._ganesha_process_request(
        "allow_access", local_path, self.share, access_type, access
    )
    self._gnfs_helper._get_export_options.assert_called_once_with(
        self.share
    )
    ganesha_utils.export_exists.assert_called_once_with(exports,
                                                        local_path)
    ganesha_utils.parse_ganesha_config.assert_called_once_with(cfgpath)
    ganesha_utils.publish_ganesha_config.assert_called_once_with(
        gservers, self.sshlogin, self.sshkey, cfgpath, pre_lines, exports
    )
    ganesha_utils.reload_ganesha_config.assert_called_once_with(
        gservers, self.sshlogin, self.gservice
    )
|
||||
|
||||
def test_gnfs__ganesha_process_request_deny_access(self):
    """deny_access path: the denied host is filtered out of the
    export's rw_access list, then the config is published and ganesha
    is reloaded."""
    access = self.access['access_to']
    access_type = self.access['access_type']
    local_path = self.fakesharepath
    cfgpath = self._gnfs_helper.configuration.ganesha_config_path
    gservers = self._gnfs_helper.configuration.gpfs_nfs_server_list
    pre_lines = []
    initial_access = "10.0.0.1,10.0.0.2"
    export = {"rw_access": initial_access}
    exports = {}
    self.stubs.Set(ganesha_utils, 'parse_ganesha_config', mock.Mock(
        return_value=(pre_lines, exports)
    ))
    self.stubs.Set(ganesha_utils, 'get_export_by_path', mock.Mock(
        return_value=export
    ))
    self.stubs.Set(ganesha_utils, 'format_access_list', mock.Mock(
        return_value="10.0.0.1"
    ))
    self.stubs.Set(ganesha_utils, 'publish_ganesha_config', mock.Mock())
    self.stubs.Set(ganesha_utils, 'reload_ganesha_config', mock.Mock())
    self._gnfs_helper._ganesha_process_request(
        "deny_access", local_path, self.share, access_type, access
    )
    ganesha_utils.parse_ganesha_config.assert_called_once_with(cfgpath)
    ganesha_utils.get_export_by_path.assert_called_once_with(exports,
                                                             local_path)
    ganesha_utils.format_access_list.assert_called_once_with(
        initial_access, deny_access=access
    )
    ganesha_utils.publish_ganesha_config.assert_called_once_with(
        gservers, self.sshlogin, self.sshkey, cfgpath, pre_lines, exports
    )
    ganesha_utils.reload_ganesha_config.assert_called_once_with(
        gservers, self.sshlogin, self.gservice
    )
|
||||
|
||||
def test_gnfs__ganesha_process_request_remove_export(self):
    """remove_export path: the export is looked up and removed from
    the parsed config, but no publish/reload happens for this
    operation."""
    local_path = self.fakesharepath
    cfgpath = self._gnfs_helper.configuration.ganesha_config_path
    pre_lines = []
    exports = {}
    export = {}
    self.stubs.Set(ganesha_utils, 'parse_ganesha_config', mock.Mock(
        return_value=(pre_lines, exports)
    ))
    self.stubs.Set(ganesha_utils, 'get_export_by_path', mock.Mock(
        return_value=export
    ))
    self.stubs.Set(ganesha_utils, 'publish_ganesha_config', mock.Mock())
    self.stubs.Set(ganesha_utils, 'reload_ganesha_config', mock.Mock())
    self._gnfs_helper._ganesha_process_request(
        "remove_export", local_path, self.share
    )
    ganesha_utils.parse_ganesha_config.assert_called_once_with(cfgpath)
    ganesha_utils.get_export_by_path.assert_called_once_with(exports,
                                                             local_path)
    self.assertFalse(ganesha_utils.publish_ganesha_config.called)
    self.assertFalse(ganesha_utils.reload_ganesha_config.called)
|
Loading…
Reference in New Issue
Block a user