diff --git a/doc/source/admin/share_back_ends_feature_support_mapping.rst b/doc/source/admin/share_back_ends_feature_support_mapping.rst index 49788426d1..c46ed45f6b 100644 --- a/doc/source/admin/share_back_ends_feature_support_mapping.rst +++ b/doc/source/admin/share_back_ends_feature_support_mapping.rst @@ -67,7 +67,9 @@ Mapping of share drivers and share features support +----------------------------------------+-----------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+--------------------+--------------------+ | INFINIDAT | Q | \- | Q | \- | Q | Q | \- | Q | Q | +----------------------------------------+-----------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+--------------------+--------------------+ -| INSPUR | R | \- | R | \- | R | R | \- | \- | \- | +| INSPUR AS13000 | R | \- | R | \- | R | R | \- | \- | \- | ++----------------------------------------+-----------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+--------------------+--------------------+ +| INSPUR InStorage | T | \- | T | \- | \- | \- | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+--------------------+--------------------+ | LVM | M | \- | M | \- | M | M | \- | O | O | +----------------------------------------+-----------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+--------------------+--------------------+ @@ -138,7 +140,9 @@ Mapping of share drivers and share access rules support 
+----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | INFINIDAT | NFS (Q) | \- | \- | \- | \- | NFS (Q) | \- | \- | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ -| INSPUR | NFS (R) | \- | CIFS (R) | \- | \- | NFS (R) | \- | CIFS (R) | \- | \- | +| INSPUR AS13000 | NFS (R) | \- | CIFS (R) | \- | \- | NFS (R) | \- | CIFS (R) | \- | \- | ++----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ +| INSPUR InStorage | NFS (T) | \- | CIFS (T) | \- | \- | NFS (T) | \- | CIFS (T) | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Oracle ZFSSA | NFS,CIFS(K) | \- | \- | \- | \- | \- | \- | \- | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ @@ -201,7 +205,9 @@ Mapping of share drivers and security services support +----------------------------------------+------------------+-----------------+------------------+ | INFINIDAT | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ -| INSPUR | \- | \- | \- | +| INSPUR AS13000 | \- | \- | \- | ++----------------------------------------+------------------+-----------------+------------------+ +| INSPUR InStorage | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Oracle ZFSSA | \- | 
\- | \- | +----------------------------------------+------------------+-----------------+------------------+ @@ -280,7 +286,9 @@ More information: :ref:`capabilities_and_extra_specs` +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+ | QNAP | \- | O | Q | Q | O | Q | \- | O | \- | \- | P | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+ -| INSPUR | \- | R | \- | \- | R | \- | \- | R | \- | \- | R | \- | +| INSPUR AS13000 | \- | R | \- | \- | R | \- | \- | R | \- | \- | R | \- | ++----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+ +| INSPUR InStorage | \- | T | \- | \- | \- | T | \- | \- | \- | \- | T | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+ .. 
note:: diff --git a/manila/opts.py b/manila/opts.py index af9fdff07f..b41e7c161c 100644 --- a/manila/opts.py +++ b/manila/opts.py @@ -73,6 +73,7 @@ import manila.share.drivers.huawei.huawei_nas import manila.share.drivers.ibm.gpfs import manila.share.drivers.infinidat.infinibox import manila.share.drivers.inspur.as13000.as13000_nas +import manila.share.drivers.inspur.instorage.instorage import manila.share.drivers.lvm import manila.share.drivers.maprfs.maprfs_native import manila.share.drivers.netapp.options @@ -159,6 +160,7 @@ _global_opt_lists = [ manila.share.drivers.infinidat.infinibox.infinidat_connection_opts, manila.share.drivers.infinidat.infinibox.infinidat_general_opts, manila.share.drivers.inspur.as13000.as13000_nas.inspur_as13000_opts, + manila.share.drivers.inspur.instorage.instorage.instorage_opts, manila.share.drivers.maprfs.maprfs_native.maprfs_native_share_opts, manila.share.drivers.lvm.share_opts, manila.share.drivers.netapp.options.netapp_proxy_opts, diff --git a/manila/share/drivers/inspur/instorage/__init__.py b/manila/share/drivers/inspur/instorage/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/manila/share/drivers/inspur/instorage/cli_helper.py b/manila/share/drivers/inspur/instorage/cli_helper.py new file mode 100644 index 0000000000..4a86d134fe --- /dev/null +++ b/manila/share/drivers/inspur/instorage/cli_helper.py @@ -0,0 +1,476 @@ +# Copyright 2019 Inspur Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +CLI helpers for Inspur InStorage +""" + +import paramiko +import re +import six + +from eventlet import greenthread + +from oslo_concurrency import processutils +from oslo_log import log +from oslo_utils import excutils + +from manila import exception +from manila.i18n import _ +from manila import utils as manila_utils + +LOG = log.getLogger(__name__) + + +class SSHRunner(object): + """SSH runner is used to run ssh command on inspur instorage system.""" + + def __init__(self, host, port, login, password, privatekey=None): + self.host = host + self.port = port + self.login = login + self.password = password + self.privatekey = privatekey + + self.ssh_conn_timeout = 60 + self.ssh_min_pool_size = 1 + self.ssh_max_pool_size = 10 + + self.sshpool = None + + def __call__(self, cmd_list, check_exit_code=True, attempts=1): + """SSH tool""" + manila_utils.check_ssh_injection(cmd_list) + command = ' '.join(cmd_list) + if not self.sshpool: + try: + self.sshpool = manila_utils.SSHPool( + self.host, + self.port, + self.ssh_conn_timeout, + self.login, + password=self.password, + privatekey=self.privatekey, + min_size=self.ssh_min_pool_size, + max_size=self.ssh_max_pool_size + ) + except paramiko.SSHException: + LOG.error("Unable to create SSHPool") + raise + try: + return self._ssh_execute(self.sshpool, command, + check_exit_code, attempts) + except Exception: + LOG.error("Error running SSH command: %s", command) + raise + + def _ssh_execute(self, sshpool, command, + check_exit_code=True, attempts=1): + try: + with sshpool.item() as ssh: + last_exception = None + while attempts > 0: + attempts -= 1 + try: + return processutils.ssh_execute( + ssh, + command, + check_exit_code=check_exit_code) + except Exception as e: + LOG.exception('Error has occurred') + last_exception = e + greenthread.sleep(1) + + try: + raise processutils.ProcessExecutionError( + 
exit_code=last_exception.exit_code, + stdout=last_exception.stdout, + stderr=last_exception.stderr, + cmd=last_exception.cmd) + except AttributeError: + raise processutils.ProcessExecutionError( + exit_code=-1, + stdout="", + stderr="Error running SSH command", + cmd=command) + + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error("Error running SSH command: %s", command) + + +class CLIParser(object): + """Parse MCS CLI output and generate iterable.""" + + def __init__(self, raw, ssh_cmd=None, delim='!', with_header=True): + super(CLIParser, self).__init__() + if ssh_cmd: + self.ssh_cmd = ' '.join(ssh_cmd) + else: + self.ssh_cmd = 'None' + self.raw = raw + self.delim = delim + self.with_header = with_header + self.result = self._parse() + + def __getitem__(self, key): + try: + return self.result[key] + except KeyError: + msg = (_('Did not find the expected key %(key)s in %(fun)s: ' + '%(raw)s.') % {'key': key, 'fun': self.ssh_cmd, + 'raw': self.raw}) + raise exception.ShareBackendException(msg=msg) + + def __iter__(self): + for a in self.result: + yield a + + def __len__(self): + return len(self.result) + + def _parse(self): + def get_reader(content, delim): + for line in content.lstrip().splitlines(): + line = line.strip() + if line: + yield line.split(delim) + else: + yield [] + + if isinstance(self.raw, six.string_types): + stdout, stderr = self.raw, '' + else: + stdout, stderr = self.raw + reader = get_reader(stdout, self.delim) + result = [] + + if self.with_header: + hds = tuple() + for row in reader: + hds = row + break + for row in reader: + cur = dict() + if len(hds) != len(row): + msg = (_('Unexpected CLI response: header/row mismatch. 
' + 'header: %(header)s, row: %(row)s.') + % {'header': hds, + 'row': row}) + raise exception.ShareBackendException(msg=msg) + for k, v in zip(hds, row): + CLIParser.append_dict(cur, k, v) + result.append(cur) + else: + cur = dict() + for row in reader: + if row: + CLIParser.append_dict(cur, row[0], ' '.join(row[1:])) + elif cur: # start new section + result.append(cur) + cur = dict() + if cur: + result.append(cur) + return result + + @staticmethod + def append_dict(dict_, key, value): + key, value = key.strip(), value.strip() + obj = dict_.get(key, None) + if obj is None: + dict_[key] = value + elif isinstance(obj, list): + obj.append(value) + dict_[key] = obj + else: + dict_[key] = [obj, value] + return dict_ + + +class InStorageSSH(object): + """SSH interface to Inspur InStorage systems.""" + + def __init__(self, ssh_runner): + self._ssh = ssh_runner + + def _run_ssh(self, ssh_cmd): + try: + return self._ssh(ssh_cmd) + except processutils.ProcessExecutionError as e: + msg = (_('CLI Exception output:\n command: %(cmd)s\n ' + 'stdout: %(out)s\n stderr: %(err)s.') % + {'cmd': ssh_cmd, + 'out': e.stdout, + 'err': e.stderr}) + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + + def run_ssh_inq(self, ssh_cmd, delim='!', with_header=False): + """Run an SSH command and return parsed output.""" + raw = self._run_ssh(ssh_cmd) + LOG.debug('Response for cmd %s is %s', ssh_cmd, raw) + return CLIParser(raw, ssh_cmd=ssh_cmd, delim=delim, + with_header=with_header) + + def run_ssh_assert_no_output(self, ssh_cmd): + """Run an SSH command and assert no output returned.""" + out, err = self._run_ssh(ssh_cmd) + if len(out.strip()) != 0: + msg = (_('Expected no output from CLI command %(cmd)s, ' + 'got %(out)s.') % {'cmd': ' '.join(ssh_cmd), 'out': out}) + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + + def run_ssh_check_created(self, ssh_cmd): + """Run an SSH command and return the ID of the created object.""" + out, err = self._run_ssh(ssh_cmd) 
+ try: + match_obj = re.search(r'\[([0-9]+)\],? successfully created', out) + return match_obj.group(1) + except (AttributeError, IndexError): + msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n ' + 'stdout: %(out)s\n stderr: %(err)s.') % + {'cmd': ssh_cmd, + 'out': out, + 'err': err}) + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + + def lsnode(self, node_id=None): + with_header = True + ssh_cmd = ['mcsinq', 'lsnode', '-delim', '!'] + if node_id: + with_header = False + ssh_cmd.append(node_id) + return self.run_ssh_inq(ssh_cmd, with_header=with_header) + + def lsnaspool(self, pool_id=None): + ssh_cmd = ['mcsinq', 'lsnaspool', '-delim', '!'] + if pool_id: + ssh_cmd.append(pool_id) + return self.run_ssh_inq(ssh_cmd, with_header=True) + + def lsfs(self, node_name=None, fsname=None): + if fsname and not node_name: + msg = _('Node name should be set when file system name is set.') + LOG.error(msg) + raise exception.InvalidParameterValue(msg) + + ssh_cmd = ['mcsinq', 'lsfs', '-delim', '!'] + to_append = [] + + if node_name: + to_append += ['-node', '"%s"' % node_name] + + if fsname: + to_append += ['-name', '"%s"' % fsname] + + if not to_append: + to_append += ['-all'] + + ssh_cmd += to_append + return self.run_ssh_inq(ssh_cmd, with_header=True) + + def addfs(self, fsname, pool_name, size, node_name): + """Create a file system on the storage. + + :param fsname: file system name + :param pool_name: pool in which to create the file system + :param size: file system size in GB + :param node_name: the primary node name + :return: + """ + + ssh_cmd = ['mcsop', 'addfs', '-name', '"%s"' % fsname, '-pool', + '"%s"' % pool_name, '-size', '%dg' % size, + '-node', '"%s"' % node_name] + self.run_ssh_assert_no_output(ssh_cmd) + + def rmfs(self, fsname): + """Remove the specific file system. 
+ + :param fsname: file system name to be removed + :return: + """ + + ssh_cmd = ['mcsop', 'rmfs', '-name', '"%s"' % fsname] + self.run_ssh_assert_no_output(ssh_cmd) + + def expandfs(self, fsname, size): + """Expand the space of the specific file system. + + :param fsname: file system name + :param size: the size(GB) to be expanded, origin + size = result + :return: + """ + + ssh_cmd = ['mcsop', 'expandfs', '-name', '"%s"' % fsname, + '-size', '%dg' % size] + self.run_ssh_assert_no_output(ssh_cmd) + + # NAS directory operation + def lsnasdir(self, dirpath): + """List the child directory under dirpath. + + :param dirpath: the parent directory to list with + :return: + """ + + ssh_cmd = ['mcsinq', 'lsnasdir', '-delim', '!', '"%s"' % dirpath] + return self.run_ssh_inq(ssh_cmd, with_header=True) + + def addnasdir(self, dirpath): + """Create a new NAS directory indicated by dirpath.""" + + ssh_cmd = ['mcsop', 'addnasdir', '"%s"' % dirpath] + self.run_ssh_assert_no_output(ssh_cmd) + + def chnasdir(self, old_path, new_path): + """Rename the NAS directory name.""" + + ssh_cmd = ['mcsop', 'chnasdir', '-oldpath', '"%s"' % old_path, + '-newpath', '"%s"' % new_path] + self.run_ssh_assert_no_output(ssh_cmd) + + def rmnasdir(self, dirpath): + """Remove the specific dirpath.""" + + ssh_cmd = ['mcsop', 'rmnasdir', '"%s"' % dirpath] + self.run_ssh_assert_no_output(ssh_cmd) + + # NFS operation + def rmnfs(self, share_path): + """Remove the NFS indicated by path.""" + + ssh_cmd = ['mcsop', 'rmnfs', '"%s"' % share_path] + self.run_ssh_assert_no_output(ssh_cmd) + + def lsnfslist(self, prefix=None): + """List NFS shares on a system.""" + + ssh_cmd = ['mcsinq', 'lsnfslist', '-delim', '!'] + if prefix: + ssh_cmd.append('"%s"' % prefix) + + return self.run_ssh_inq(ssh_cmd, with_header=True) + + def lsnfsinfo(self, share_path): + """List a specific NFS share's information.""" + + ssh_cmd = ['mcsinq', 'lsnfsinfo', '-delim', '!', '"%s"' % share_path] + return self.run_ssh_inq(ssh_cmd, 
with_header=True) + + def addnfsclient(self, share_path, client_spec): + """Add a client access rule to NFS share. + + :param share_path: the NFS share path. + :param client_spec: IP/MASK:RIGHTS:ALL_SQUASH:ROOT_SQUASH. + :return: + """ + + ssh_cmd = ['mcsop', 'addnfsclient', '-path', '"%s"' % share_path, + '-client', client_spec] + self.run_ssh_assert_no_output(ssh_cmd) + + def chnfsclient(self, share_path, client_spec): + """Change a NFS share's client info.""" + + ssh_cmd = ['mcsop', 'chnfsclient', '-path', '"%s"' % share_path, + '-client', client_spec] + self.run_ssh_assert_no_output(ssh_cmd) + + def rmnfsclient(self, share_path, client_spec): + """Remove a client info from the NFS share.""" + + # client_spec parameter for rmnfsclient is IP/MASK, + # so we need remove the right part + client_spec = client_spec.split(':')[0] + + ssh_cmd = ['mcsop', 'rmnfsclient', '-path', '"%s"' % share_path, + '-client', client_spec] + self.run_ssh_assert_no_output(ssh_cmd) + + # CIFS operation + def lscifslist(self, filter=None): + """List CIFS shares on the system.""" + + ssh_cmd = ['mcsinq', 'lscifslist', '-delim', '!'] + if filter: + ssh_cmd.append('"%s"' % filter) + + return self.run_ssh_inq(ssh_cmd, with_header=True) + + def lscifsinfo(self, share_name): + """List a specific CIFS share's information.""" + + ssh_cmd = ['mcsinq', 'lscifsinfo', '-delim', '!', '"%s"' % share_name] + return self.run_ssh_inq(ssh_cmd, with_header=True) + + def addcifs(self, share_name, dirpath, oplocks='off'): + """Create a CIFS share with given path.""" + ssh_cmd = ['mcsop', 'addcifs', '-name', share_name, '-path', dirpath, + '-oplocks', oplocks] + self.run_ssh_assert_no_output(ssh_cmd) + + def rmcifs(self, share_name): + """Remove a CIFS share.""" + + ssh_cmd = ['mcsop', 'rmcifs', share_name] + self.run_ssh_assert_no_output(ssh_cmd) + + def chcifs(self, share_name, oplocks='off'): + """Change a CIFS share's attribute. 
+ + :param share_name: share's name + :param oplocks: 'off' or 'on' + :return: + """ + ssh_cmd = ['mcsop', 'chcifs', '-name', share_name, '-oplocks', oplocks] + self.run_ssh_assert_no_output(ssh_cmd) + + def addcifsuser(self, share_name, rights): + """Add a user access rule to CIFS share. + + :param share_name: share's name + :param rights: [LU|LG]:xxx:[rw|ro] + :return: + """ + ssh_cmd = ['mcsop', 'addcifsuser', '-name', share_name, + '-rights', rights] + self.run_ssh_assert_no_output(ssh_cmd) + + def chcifsuser(self, share_name, rights): + """Change a user access rule.""" + + ssh_cmd = ['mcsop', 'chcifsuser', '-name', share_name, + '-rights', rights] + self.run_ssh_assert_no_output(ssh_cmd) + + def rmcifsuser(self, share_name, rights): + """Remove CIFS user from a CIFS share.""" + + # the rights parameter for rmcifsuser is LU:NAME + rights = ':'.join(rights.split(':')[0:-1]) + + ssh_cmd = ['mcsop', 'rmcifsuser', '-name', share_name, + '-rights', rights] + self.run_ssh_assert_no_output(ssh_cmd) + + # NAS port ip + def lsnasportip(self): + """List NAS service port ip address.""" + + ssh_cmd = ['mcsinq', 'lsnasportip', '-delim', '!'] + return self.run_ssh_inq(ssh_cmd, with_header=True) diff --git a/manila/share/drivers/inspur/instorage/instorage.py b/manila/share/drivers/inspur/instorage/instorage.py new file mode 100644 index 0000000000..34e6c04dbc --- /dev/null +++ b/manila/share/drivers/inspur/instorage/instorage.py @@ -0,0 +1,623 @@ +# Copyright 2019 Inspur Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Driver for Inspur InStorage +""" + +import ipaddress +import itertools +import six + +from oslo_config import cfg +from oslo_log import log +from oslo_utils import units + +from manila import coordination +from manila import exception +from manila.i18n import _ +from manila.share import driver +from manila.share import utils as share_utils + +from manila.share.drivers.inspur.instorage.cli_helper import InStorageSSH +from manila.share.drivers.inspur.instorage.cli_helper import SSHRunner + +instorage_opts = [ + cfg.HostAddressOpt( + 'instorage_nas_ip', + required=True, + help='IP address for the InStorage.' + ), + cfg.PortOpt( + 'instorage_nas_port', + default=22, + help='Port number for the InStorage.' + ), + cfg.StrOpt( + 'instorage_nas_login', + required=True, + help='Username for the InStorage.' + ), + cfg.StrOpt( + 'instorage_nas_password', + required=True, + secret=True, + help='Password for the InStorage.' + ), + cfg.ListOpt( + 'instorage_nas_pools', + required=True, + help='The Storage Pools Manila should use, a comma separated list.' + ) +] + +CONF = cfg.CONF +CONF.register_opts(instorage_opts) +LOG = log.getLogger(__name__) + + +class InStorageShareDriver(driver.ShareDriver): + """Inspur InStorage NAS driver. Allows for NFS and CIFS NAS. + + .. code::none + Version history: + 1.0.0 - Initial driver. 
+ Driver support: + share create/delete + extend size + update_access + protocol: NFS/CIFS + """ + + VENDOR = 'INSPUR' + VERSION = '1.0.0' + PROTOCOL = 'NFS_CIFS' + + def __init__(self, *args, **kwargs): + super(InStorageShareDriver, self).__init__(False, *args, **kwargs) + self.configuration.append_config_values(instorage_opts) + + self.backend_name = self.configuration.safe_get('share_backend_name') + self.backend_pools = self.configuration.instorage_nas_pools + + self.ssh_runner = SSHRunner(**{ + 'host': self.configuration.instorage_nas_ip, + 'port': 22, + 'login': self.configuration.instorage_nas_login, + 'password': self.configuration.instorage_nas_password + }) + + self.assistant = InStorageAssistant(self.ssh_runner) + + def check_for_setup_error(self): + nodes = self.assistant.get_nodes_info() + if len(nodes) == 0: + msg = _('No valid node, be sure the NAS Port IP is configured') + raise exception.ShareBackendException(msg=msg) + + pools = self.assistant.get_available_pools() + not_exist = set(self.backend_pools).difference(set(pools)) + if not_exist: + msg = _('Pool %s not exist on the storage system') % not_exist + raise exception.InvalidParameterValue(msg) + + def _update_share_stats(self, **kwargs): + """Retrieve share stats information.""" + + try: + stats = { + 'share_backend_name': self.backend_name, + 'vendor_name': self.VENDOR, + 'driver_version': self.VERSION, + 'storage_protocol': 'NFS_CIFS', + 'reserved_percentage': + self.configuration.reserved_share_percentage, + 'max_over_subscription_ratio': + self.configuration.max_over_subscription_ratio, + 'snapshot_support': False, + 'create_share_from_snapshot_support': False, + 'revert_to_snapshot_support': False, + 'qos': False, + 'total_capacity_gb': 0.0, + 'free_capacity_gb': 0.0, + 'pools': [] + } + + pools = self.assistant.get_pools_attr(self.backend_pools) + total_capacity_gb = 0 + free_capacity_gb = 0 + for pool in pools.values(): + total_capacity_gb += pool['total_capacity_gb'] + 
free_capacity_gb += pool['free_capacity_gb'] + stats['pools'].append(pool) + + stats['total_capacity_gb'] = total_capacity_gb + stats['free_capacity_gb'] = free_capacity_gb + + LOG.debug('share status %s', stats) + + super(InStorageShareDriver, self)._update_share_stats(stats) + except Exception: + msg = _('Unexpected error while trying to get the ' + 'usage stats from array.') + LOG.exception(msg) + raise + + @staticmethod + def generate_share_name(share): + # Generate a name with id of the share as base, and do follows: + # 1. Remove the '-' in the id string. + # 2. Transform all alpha to lower case. + # 3. If the first char of the id is a num, + # transform it to an Upper case alpha start from 'A', + # such as '0' -> 'A', '1' -> 'B'. + # e.g. + # generate_share_name({ + # 'id': '46CF5E85-D618-4023-8727-6A1EA9292954', + # ... + # }) + # returns 'E6cf5e85d618402387276a1ea9292954' + + name = share['id'].replace('-', '').lower() + if name[0] in '0123456789': + name = chr(ord('A') + ord(name[0]) - ord('0')) + name[1:] + return name + + def get_network_allocations_number(self): + """Get the number of network interfaces to be created.""" + + return 0 + + def create_share(self, context, share, share_server=None): + """Create a new share instance.""" + share_name = self.generate_share_name(share) + share_size = share['size'] + share_proto = share['share_proto'] + + pool_name = share_utils.extract_host(share['host'], level='pool') + + self.assistant.create_share( + share_name, + pool_name, + share_size, + share_proto + ) + + return self.assistant.get_export_locations(share_name, share_proto) + + def delete_share(self, context, share, share_server=None): + """Delete the given share instance.""" + share_name = self.generate_share_name(share) + share_proto = share['share_proto'] + + self.assistant.delete_share(share_name, share_proto) + + def extend_share(self, share, new_size, share_server=None): + """Extend the share instance's size to new size.""" + share_name = 
self.generate_share_name(share) + + self.assistant.extend_share(share_name, new_size) + + def ensure_share(self, context, share, share_server=None): + """Ensure that the share instance is exported.""" + share_name = self.generate_share_name(share) + share_proto = share['share_proto'] + + return self.assistant.get_export_locations(share_name, share_proto) + + def update_access(self, context, share, access_rules, add_rules, + delete_rules, share_server=None): + """Update the share instance's access rule.""" + share_name = self.generate_share_name(share) + share_proto = share['share_proto'] + + @coordination.synchronized('inspur-instorage-access-' + share_name) + def _update_access(name, proto, rules, add_rules, delete_rules): + self.assistant.update_access( + name, proto, rules, add_rules, delete_rules + ) + + _update_access( + share_name, share_proto, access_rules, add_rules, delete_rules + ) + + +class InStorageAssistant(object): + + NFS_CLIENT_SPEC_PATTERN = ( + '%(ip)s/%(mask)s:%(rights)s:%(all_squash)s:%(root_squash)s' + ) + + CIFS_CLIENT_RIGHT_PATTERN = ( + '%(type)s:%(name)s:%(rights)s' + ) + + def __init__(self, ssh_runner): + self.ssh = InStorageSSH(ssh_runner) + + @staticmethod + def handle_keyerror(cmd, out): + msg = (_('Could not find key in output of command %(cmd)s: %(out)s.') + % {'out': out, 'cmd': cmd}) + raise exception.ShareBackendException(msg=msg) + + def size_to_gb(self, size): + new_size = 0 + + if 'P' in size: + new_size = int(float(size.rstrip('PB')) * units.Mi) + elif 'T' in size: + new_size = int(float(size.rstrip('TB')) * units.Ki) + elif 'G' in size: + new_size = int(float(size.rstrip('GB')) * 1) + elif 'M' in size: + mb_size = float(size.rstrip('MB')) + new_size = int((mb_size + units.Ki - 1) / units.Ki) + + return new_size + + def get_available_pools(self): + nas_pools = self.ssh.lsnaspool() + return [pool['pool_name'] for pool in nas_pools] + + def get_pools_attr(self, backend_pools): + pools = {} + fs_attr = self.ssh.lsfs() + 
nas_pools = self.ssh.lsnaspool() + for pool_attr in nas_pools: + pool_name = pool_attr['pool_name'] + if pool_name not in backend_pools: + continue + + total_used_capacity = 0 + total_allocated_capacity = 0 + for fs in fs_attr: + if fs['pool_name'] != pool_name: + continue + allocated = self.size_to_gb(fs['total_capacity']) + used = self.size_to_gb(fs['used_capacity']) + + total_allocated_capacity += allocated + total_used_capacity += used + + available = self.size_to_gb(pool_attr['available_capacity']) + + pool = { + 'pool_name': pool_name, + 'total_capacity_gb': total_allocated_capacity + available, + 'free_capacity_gb': available, + 'allocated_capacity_gb': total_allocated_capacity, + 'reserved_percentage': 0, + 'qos': False, + 'dedupe': False, + 'compression': False, + 'thin_provisioning': False, + 'max_over_subscription_ratio': 0 + } + + pools[pool_name] = pool + + return pools + + def get_nodes_info(self): + """Return a dictionary containing information of system's nodes.""" + nodes = {} + resp = self.ssh.lsnasportip() + for port in resp: + try: + # Port is invalid if it has no IP configured. 
+ if port['ip'] == '': + continue + + node_name = port['node_name'] + if node_name not in nodes: + nodes[node_name] = {} + + node = nodes[node_name] + node[port['id']] = port + except KeyError: + self.handle_keyerror('lsnasportip', port) + + return nodes + + @staticmethod + def get_fsname_by_name(name): + return ('%(fsname)s' % {'fsname': name})[0:32] + + @staticmethod + def get_dirname_by_name(name): + return ('%(dirname)s' % {'dirname': name})[0:32] + + def get_dirpath_by_name(self, name): + fsname = self.get_fsname_by_name(name) + dirname = self.get_dirname_by_name(name) + + return '/fs/%(fsname)s/%(dirname)s' % { + 'fsname': fsname, 'dirname': dirname + } + + def create_share(self, name, pool, size, proto): + """Create a share with given info.""" + + # use one available node as the primary node + nodes = self.get_nodes_info() + if len(nodes) == 0: + msg = _('No valid node, be sure the NAS Port IP is configured') + raise exception.ShareBackendException(msg=msg) + + node_name = [key for key in nodes.keys()][0] + + # first create the file system on which share will be created + fsname = self.get_fsname_by_name(name) + self.ssh.addfs(fsname, pool, size, node_name) + + # then create the directory used for the share + dirpath = self.get_dirpath_by_name(name) + self.ssh.addnasdir(dirpath) + + # For CIFS, we need to create a CIFS share. + # For NAS, the share is automatically added when the first + # 'access spec' is added on it. + if proto == 'CIFS': + self.ssh.addcifs(name, dirpath) + + def check_share_exist(self, name): + """Check whether the specified share exist on backend.""" + + fsname = self.get_fsname_by_name(name) + for fs in self.ssh.lsfs(): + if fs['fs_name'] == fsname: + return True + return False + + def delete_share(self, name, proto): + """Delete the given share.""" + + if not self.check_share_exist(name): + LOG.warning('Share %s does not exist on the backend.', name) + return + + # For CIFS, we have to delete the share first. 
+ # For NAS, when the last client access spec is removed from + # it, the share is automatically deleted. + if proto == 'CIFS': + self.ssh.rmcifs(name) + + # then delete the directory + dirpath = self.get_dirpath_by_name(name) + self.ssh.rmnasdir(dirpath) + + # at last delete the file system + fsname = self.get_fsname_by_name(name) + self.ssh.rmfs(fsname) + + def extend_share(self, name, new_size): + """Extend a given share to a new size. + + :param name: the name of the share. + :param new_size: the new size the share should be. + :return: + """ + # first get the original capacity + old_size = None + fsname = self.get_fsname_by_name(name) + for fs in self.ssh.lsfs(): + if fs['fs_name'] == fsname: + old_size = self.size_to_gb(fs['total_capacity']) + break + + if old_size is None: + msg = _('share %s is not available') % name + raise exception.ShareBackendException(msg=msg) + + LOG.debug('Extend fs %s from %dGB to %dGB', fsname, old_size, new_size) + self.ssh.expandfs(fsname, new_size - old_size) + + def get_export_locations(self, name, share_proto): + """Get the export locations of a given share. + + :param name: the name of the share. + :param share_proto: the protocol of the share. + :return: a list of export locations. 
+ """ + + if share_proto == 'NFS': + dirpath = self.get_dirpath_by_name(name) + pattern = '%(ip)s:' + dirpath + elif share_proto == 'CIFS': + pattern = '\\\\%(ip)s\\' + name + else: + msg = _('share protocol %s is not supported') % share_proto + raise exception.ShareBackendException(msg=msg) + + # we need get the node so that we know which port ip we can use + node_name = None + fsname = self.get_fsname_by_name(name) + for node in self.ssh.lsnode(): + for fs in self.ssh.lsfs(node['name']): + if fs['fs_name'] == fsname: + node_name = node['name'] + break + if node_name: + break + + if node_name is None: + msg = _('share %s is not available') % name + raise exception.ShareBackendException(msg=msg) + + locations = [] + ports = self.ssh.lsnasportip() + for port in ports: + if port['node_name'] == node_name and port['ip'] != '': + location = pattern % {'ip': port['ip']} + + locations.append({ + 'path': location, + 'is_admin_only': False, + 'metadata': {} + }) + + return locations + + def classify_nfs_client_spec(self, client_spec, dirpath): + nfslist = self.ssh.lsnfslist(dirpath) + if len(nfslist): + nfsinfo = self.ssh.lsnfsinfo(dirpath) + spec_set = set([ + self.NFS_CLIENT_SPEC_PATTERN % i for i in nfsinfo + ]) + else: + spec_set = set() + + client_spec_set = set(client_spec) + + del_spec = spec_set.difference(client_spec_set) + add_spec = client_spec_set.difference(spec_set) + + return list(add_spec), list(del_spec) + + def access_rule_to_client_spec(self, access_rule): + if access_rule['access_type'] != 'ip': + msg = _('only ip access type is supported when using NFS protocol') + raise exception.ShareBackendException(msg=msg) + + network = ipaddress.ip_network(six.text_type(access_rule['access_to'])) + if network.version != 4: + msg = _('only IPV4 is accepted when using NFS protocol') + raise exception.ShareBackendException(msg=msg) + + client_spec = self.NFS_CLIENT_SPEC_PATTERN % { + 'ip': six.text_type(network.network_address), + 'mask': 
six.text_type(network.netmask), + 'rights': access_rule['access_level'], + 'all_squash': 'all_squash', + 'root_squash': 'root_squash' + } + + return client_spec + + def update_nfs_access(self, share_name, access_rules, add_rules, + delete_rules): + """Update a NFS share's access rule.""" + + dirpath = self.get_dirpath_by_name(share_name) + if add_rules or delete_rules: + add_spec = [ + self.access_rule_to_client_spec(r) for r in add_rules + ] + del_spec = [ + self.access_rule_to_client_spec(r) for r in delete_rules + ] + + _, can_del_spec = self.classify_nfs_client_spec( + [], dirpath + ) + to_del_set = set(del_spec) + can_del_set = set(can_del_spec) + will_del_set = to_del_set.intersection(can_del_set) + del_spec = list(will_del_set) + else: + access_spec = [ + self.access_rule_to_client_spec(r) for r in access_rules + ] + + add_spec, del_spec = self.classify_nfs_client_spec( + access_spec, dirpath + ) + + for spec in del_spec: + self.ssh.rmnfsclient(dirpath, spec) + for spec in add_spec: + self.ssh.addnfsclient(dirpath, spec) + + def classify_cifs_rights(self, access_rights, share_name): + cifsinfo = self.ssh.lscifsinfo(share_name) + rights_set = set([ + self.CIFS_CLIENT_RIGHT_PATTERN % i for i in cifsinfo + ]) + access_rights_set = set(access_rights) + + del_rights = rights_set.difference(access_rights_set) + add_rights = access_rights_set.difference(rights_set) + + return list(add_rights), list(del_rights) + + def access_rule_to_rights(self, access_rule): + if access_rule['access_type'] != 'user': + msg = _('only user access type is supported' + ' when using CIFS protocol') + raise exception.ShareBackendException(msg=msg) + + rights = self.CIFS_CLIENT_RIGHT_PATTERN % { + 'type': 'LU', + 'name': access_rule['access_to'], + 'rights': access_rule['access_level'] + } + + return rights + + def update_cifs_access(self, share_name, access_rules, add_rules, + delete_rules): + """Update a CIFS share's access rule.""" + + if add_rules or delete_rules: + add_rights = [ + 
self.access_rule_to_rights(r) for r in add_rules + ] + del_rights = [ + self.access_rule_to_rights(r) for r in delete_rules + ] + else: + access_rights = [ + self.access_rule_to_rights(r) for r in access_rules + ] + + add_rights, del_rights = self.classify_cifs_rights( + access_rights, share_name + ) + + for rights in del_rights: + self.ssh.rmcifsuser(share_name, rights) + for rights in add_rights: + self.ssh.addcifsuser(share_name, rights) + + @staticmethod + def check_access_type(access_type, *rules): + rule_chain = itertools.chain(*rules) + if all([r['access_type'] == access_type for r in rule_chain]): + return True + else: + return False + + def update_access(self, share_name, share_proto, + access_rules, add_rules, delete_rules): + if share_proto == 'CIFS': + if self.check_access_type('user', access_rules, + add_rules, delete_rules): + self.update_cifs_access(share_name, access_rules, + add_rules, delete_rules) + else: + msg = _("Only %s access type allowed.") % "user" + raise exception.InvalidShareAccess(reason=msg) + elif share_proto == 'NFS': + if self.check_access_type('ip', access_rules, + add_rules, delete_rules): + self.update_nfs_access(share_name, access_rules, + add_rules, delete_rules) + else: + msg = _("Only %s access type allowed.") % "ip" + raise exception.InvalidShareAccess(reason=msg) + else: + msg = _('share protocol %s is not supported') % share_proto + raise exception.ShareBackendException(msg=msg) diff --git a/manila/tests/conf_fixture.py b/manila/tests/conf_fixture.py index 4c485b5752..2518f133e9 100644 --- a/manila/tests/conf_fixture.py +++ b/manila/tests/conf_fixture.py @@ -60,6 +60,11 @@ def set_defaults(conf): _safe_set_of_opts(conf, 'as13000_nas_password', 'password') _safe_set_of_opts(conf, 'as13000_share_pools', 'pool0') + _safe_set_of_opts(conf, 'instorage_nas_ip', '1.1.1.1') + _safe_set_of_opts(conf, 'instorage_nas_login', 'admin') + _safe_set_of_opts(conf, 'instorage_nas_password', 'password') + _safe_set_of_opts(conf, 
'instorage_nas_pools', 'pool0') + _safe_set_of_opts(conf, 'qnap_management_url', 'http://1.2.3.4:8080') _safe_set_of_opts(conf, 'qnap_share_ip', '1.2.3.4') _safe_set_of_opts(conf, 'qnap_nas_login', 'admin') diff --git a/manila/tests/share/drivers/inspur/instorage/__init__.py b/manila/tests/share/drivers/inspur/instorage/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/manila/tests/share/drivers/inspur/instorage/test_instorage.py b/manila/tests/share/drivers/inspur/instorage/test_instorage.py new file mode 100644 index 0000000000..a2cdf2355c --- /dev/null +++ b/manila/tests/share/drivers/inspur/instorage/test_instorage.py @@ -0,0 +1,1534 @@ +# Copyright 2019 Inspur Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Share driver test for Inspur InStorage +""" + +import ddt +import mock +import paramiko + +from eventlet import greenthread + +from oslo_concurrency import processutils +from oslo_config import cfg + +from manila import context +from manila import exception +from manila.share import driver +from manila.share.drivers.inspur.instorage import cli_helper +from manila.share.drivers.inspur.instorage import instorage +from manila import test +from manila.tests import fake_share +from manila import utils as manila_utils + +CONF = cfg.CONF + + +class FakeConfig(object): + def __init__(self, *args, **kwargs): + self.driver_handles_share_servers = False + self.share_driver = 'fake_share_driver_name' + self.share_backend_name = 'fake_instorage' + self.instorage_nas_ip = kwargs.get( + 'instorage_nas_ip', 'some_ip') + self.instorage_nas_port = kwargs.get( + 'instorage_nas_port', 'some_port') + self.instorage_nas_login = kwargs.get( + 'instorage_nas_login', 'username') + self.instorage_nas_password = kwargs.get( + 'instorage_nas_password', 'password') + self.instorage_nas_pools = kwargs.get( + 'instorage_nas_pools', ['fakepool']) + self.network_config_group = kwargs.get( + "network_config_group", "fake_network_config_group") + self.admin_network_config_group = kwargs.get( + "admin_network_config_group", "fake_admin_network_config_group") + self.config_group = kwargs.get("config_group", "fake_config_group") + self.reserved_share_percentage = kwargs.get( + "reserved_share_percentage", 0) + self.max_over_subscription_ratio = kwargs.get( + "max_over_subscription_ratio", 0) + self.filter_function = kwargs.get("filter_function", None) + self.goodness_function = kwargs.get("goodness_function", None) + + def safe_get(self, key): + return getattr(self, key) + + def append_config_values(self, *args, **kwargs): + pass + + +@ddt.ddt +class InStorageShareDriverTestCase(test.TestCase): + def __init__(self, *args, **kwargs): + super(InStorageShareDriverTestCase, self).__init__(*args, 
**kwargs) + self._ctxt = context.get_admin_context() + self.configuration = FakeConfig() + self.share = fake_share.fake_share() + self.share_instance = fake_share.fake_share_instance( + self.share, host='H@B#P' + ) + + def setUp(self): + self.mock_object(instorage.CONF, '_check_required_opts') + self.driver = instorage.InStorageShareDriver( + configuration=self.configuration + ) + super(InStorageShareDriverTestCase, self).setUp() + + def test_check_for_setup_error_failed_no_nodes(self): + mock_gni = mock.Mock(return_value={}) + self.mock_object( + instorage.InStorageAssistant, 'get_nodes_info', mock_gni + ) + + self.assertRaises( + exception.ShareBackendException, + self.driver.check_for_setup_error + ) + + def test_check_for_setup_error_failed_pool_invalid(self): + mock_gni = mock.Mock(return_value={'node1': {}}) + self.mock_object( + instorage.InStorageAssistant, 'get_nodes_info', mock_gni + ) + mock_gap = mock.Mock(return_value=['pool0']) + self.mock_object( + instorage.InStorageAssistant, 'get_available_pools', mock_gap + ) + + self.assertRaises( + exception.InvalidParameterValue, + self.driver.check_for_setup_error + ) + + def test_check_for_setup_error_success(self): + mock_gni = mock.Mock(return_value={'node1': {}}) + self.mock_object( + instorage.InStorageAssistant, 'get_nodes_info', mock_gni + ) + mock_gap = mock.Mock(return_value=['fakepool', 'pool0']) + self.mock_object( + instorage.InStorageAssistant, 'get_available_pools', mock_gap + ) + + self.driver.check_for_setup_error() + mock_gni.assert_called_once() + mock_gap.assert_called_once() + + def test__update_share_stats(self): + pool_attr = { + 'pool0': { + 'pool_name': 'pool0', + 'total_capacity_gb': 110, + 'free_capacity_gb': 100, + 'allocated_capacity_gb': 10, + 'reserved_percentage': 0, + 'qos': False, + 'dedupe': False, + 'compression': False, + 'thin_provisioning': False, + 'max_over_subscription_ratio': 0 + } + } + mock_gpa = mock.Mock(return_value=pool_attr) + self.mock_object( + 
instorage.InStorageAssistant, 'get_pools_attr', mock_gpa + ) + mock_uss = mock.Mock() + self.mock_object(driver.ShareDriver, '_update_share_stats', mock_uss) + + self.driver._update_share_stats() + + mock_gpa.assert_called_once_with(['fakepool']) + stats = { + 'share_backend_name': 'fake_instorage', + 'vendor_name': 'INSPUR', + 'driver_version': '1.0.0', + 'storage_protocol': 'NFS_CIFS', + 'reserved_percentage': 0, + 'max_over_subscription_ratio': 0, + 'snapshot_support': False, + 'create_share_from_snapshot_support': False, + 'revert_to_snapshot_support': False, + 'qos': False, + 'total_capacity_gb': 110, + 'free_capacity_gb': 100, + 'pools': [pool_attr['pool0']] + } + mock_uss.assert_called_once_with(stats) + + @ddt.data( + {'id': 'abc-123', 'real': 'abc123'}, + {'id': '123-abc', 'real': 'B23abc'}) + @ddt.unpack + def test_generate_share_name(self, id, real): + ret = self.driver.generate_share_name({'id': id}) + self.assertEqual(real, ret) + + def test_get_network_allocations_number(self): + ret = self.driver.get_network_allocations_number() + self.assertEqual(0, ret) + + def test_create_share(self): + mock_cs = self.mock_object( + instorage.InStorageAssistant, 'create_share' + ) + mock_gel = self.mock_object( + instorage.InStorageAssistant, + 'get_export_locations', + mock.Mock(return_value=['fake_export_location']) + ) + + ret = self.driver.create_share(self._ctxt, self.share_instance) + + self.assertEqual(['fake_export_location'], ret) + mock_cs.assert_called_once_with('fakeinstanceid', 'P', 1, 'fake_proto') + mock_gel.assert_called_once_with('fakeinstanceid', 'fake_proto') + + def test_delete_share(self): + mock_ds = self.mock_object( + instorage.InStorageAssistant, 'delete_share' + ) + + self.driver.delete_share(self._ctxt, self.share_instance) + + mock_ds.assert_called_once_with('fakeinstanceid', 'fake_proto') + + def test_extend_share(self): + mock_es = self.mock_object( + instorage.InStorageAssistant, 'extend_share' + ) + + 
self.driver.extend_share(self.share_instance, 3) + + mock_es.assert_called_once_with('fakeinstanceid', 3) + + def test_ensure_share(self): + mock_gel = self.mock_object( + instorage.InStorageAssistant, + 'get_export_locations', + mock.Mock(return_value=['fake_export_location']) + ) + + ret = self.driver.ensure_share(self._ctxt, self.share_instance) + + self.assertEqual(['fake_export_location'], ret) + mock_gel.assert_called_once_with('fakeinstanceid', 'fake_proto') + + def test_update_access(self): + mock_ua = self.mock_object( + instorage.InStorageAssistant, 'update_access' + ) + + self.driver.update_access(self._ctxt, self.share_instance, [], [], []) + + mock_ua.assert_called_once_with( + 'fakeinstanceid', 'fake_proto', [], [], [] + ) + + +class FakeSSH(object): + def __enter__(self): + return self + + def __exit__(self, exec_type, exec_val, exec_tb): + if exec_val: + raise + + +class FakeSSHPool(object): + def __init__(self, ssh): + self.fakessh = ssh + + def item(self): + return self.fakessh + + +class SSHRunnerTestCase(test.TestCase): + def setUp(self): + self.fakessh = FakeSSH() + self.fakePool = FakeSSHPool(self.fakessh) + super(SSHRunnerTestCase, self).setUp() + + def test___call___success(self): + mock_csi = self.mock_object(manila_utils, 'check_ssh_injection') + mock_sshpool = mock.Mock(return_value=self.fakePool) + self.mock_object(manila_utils, 'SSHPool', mock_sshpool) + mock_se = mock.Mock(return_value='fake_value') + self.mock_object(cli_helper.SSHRunner, '_ssh_execute', mock_se) + + runner = cli_helper.SSHRunner( + '127.0.0.1', '22', 'fakeuser', 'fakepassword' + ) + ret = runner(['mcsinq', 'lsvdisk']) + + mock_csi.assert_called_once_with(['mcsinq', 'lsvdisk']) + mock_sshpool.assert_called_once_with( + '127.0.0.1', '22', 60, 'fakeuser', + password='fakepassword', + privatekey=None, + min_size=1, + max_size=10 + ) + mock_se.assert_called_once_with( + self.fakePool, + 'mcsinq lsvdisk', + True, + 1 + ) + self.assertEqual('fake_value', ret) + + def 
test___call___ssh_pool_failed(self): + mock_csi = self.mock_object(manila_utils, 'check_ssh_injection') + mock_sshpool = mock.Mock(side_effect=paramiko.SSHException()) + self.mock_object(manila_utils, 'SSHPool', mock_sshpool) + + runner = cli_helper.SSHRunner( + '127.0.0.1', '22', 'fakeuser', 'fakepassword' + ) + + self.assertRaises(paramiko.SSHException, runner, ['mcsinq', 'lsvdisk']) + mock_csi.assert_called_once_with(['mcsinq', 'lsvdisk']) + + def test___call___ssh_exec_failed(self): + mock_csi = self.mock_object(manila_utils, 'check_ssh_injection') + mock_sshpool = mock.Mock(return_value=self.fakePool) + self.mock_object(manila_utils, 'SSHPool', mock_sshpool) + exception = processutils.ProcessExecutionError() + mock_se = mock.Mock(side_effect=exception) + self.mock_object(cli_helper.SSHRunner, '_ssh_execute', mock_se) + + runner = cli_helper.SSHRunner( + '127.0.0.1', '22', 'fakeuser', 'fakepassword' + ) + + self.assertRaises( + processutils.ProcessExecutionError, + runner, + ['mcsinq', 'lsvdisk'] + ) + mock_csi.assert_called_once_with(['mcsinq', 'lsvdisk']) + mock_sshpool.assert_called_once_with( + '127.0.0.1', '22', 60, 'fakeuser', + password='fakepassword', + privatekey=None, + min_size=1, + max_size=10 + ) + + def test__ssh_execute_success(self): + mock_se = mock.Mock(return_value='fake_value') + self.mock_object(processutils, 'ssh_execute', mock_se) + + runner = cli_helper.SSHRunner( + '127.0.0.1', '22', 'fakeuser', 'fakepassword' + ) + ret = runner._ssh_execute(self.fakePool, 'mcsinq lsvdisk') + + mock_se.assert_called_once_with( + self.fakessh, + 'mcsinq lsvdisk', + check_exit_code=True + ) + self.assertEqual('fake_value', ret) + + def test__ssh_execute_success_run_again(self): + mock_se = mock.Mock(side_effect=[Exception(), 'fake_value']) + self.mock_object(processutils, 'ssh_execute', mock_se) + mock_sleep = self.mock_object(greenthread, 'sleep') + + runner = cli_helper.SSHRunner( + '127.0.0.1', '22', 'fakeuser', 'fakepassword' + ) + ret = 
runner._ssh_execute( + self.fakePool, + 'mcsinq lsvdisk', + check_exit_code=True, + attempts=2 + ) + + call = mock.call(self.fakessh, 'mcsinq lsvdisk', check_exit_code=True) + mock_se.assert_has_calls([call, call]) + mock_sleep.assert_called_once() + self.assertEqual('fake_value', ret) + + def test__ssh_execute_failed_exec_failed(self): + exception = Exception() + exception.exit_code = '1' + exception.stdout = 'fake_stdout' + exception.stderr = 'fake_stderr' + exception.cmd = 'fake_cmd_list' + mock_se = mock.Mock(side_effect=exception) + self.mock_object(processutils, 'ssh_execute', mock_se) + mock_sleep = self.mock_object(greenthread, 'sleep') + + runner = cli_helper.SSHRunner( + '127.0.0.1', '22', 'fakeuser', 'fakepassword' + ) + + self.assertRaises( + processutils.ProcessExecutionError, + runner._ssh_execute, + self.fakePool, + 'mcsinq lsvdisk', + check_exit_code=True, + attempts=1 + ) + mock_se.assert_called_once_with( + self.fakessh, + 'mcsinq lsvdisk', + check_exit_code=True + ) + mock_sleep.assert_called_once() + + def test__ssh_execute_failed_exec_failed_exception_error(self): + mock_se = mock.Mock(side_effect=Exception()) + self.mock_object(processutils, 'ssh_execute', mock_se) + mock_sleep = self.mock_object(greenthread, 'sleep') + + runner = cli_helper.SSHRunner( + '127.0.0.1', '22', 'fakeuser', 'fakepassword' + ) + + self.assertRaises( + processutils.ProcessExecutionError, + runner._ssh_execute, + self.fakePool, + 'mcsinq lsvdisk', + check_exit_code=True, + attempts=1 + ) + mock_se.assert_called_once_with( + self.fakessh, + 'mcsinq lsvdisk', + check_exit_code=True + ) + mock_sleep.assert_called_once() + + +class CLIParserTestCase(test.TestCase): + def test_cliparser_with_header(self): + cmdlist = ['mcsinq', 'lsnasportip', '-delim', '!'] + response = [ + 'head1!head2', + 'r1c1!r1c2', + 'r2c1!r2c2' + ] + response = '\n'.join(response) + + ret = cli_helper.CLIParser( + response, cmdlist, delim='!', with_header=True + ) + + self.assertEqual(2, len(ret)) + 
self.assertEqual('r1c1', ret[0]['head1']) + self.assertEqual('r1c2', ret[0]['head2']) + self.assertEqual('r2c1', ret[1]['head1']) + self.assertEqual('r2c2', ret[1]['head2']) + + value = [(v['head1'], v['head2']) for v in ret] + self.assertEqual([('r1c1', 'r1c2'), ('r2c1', 'r2c2')], value) + + def test_cliparser_without_header(self): + cmdlist = ['mcsinq', 'lsnasportip', '-delim', '!'] + response = [ + 'head1!p1v1', + 'head2!p1v2', + '', + 'head1!p2v1', + 'head2!p2v2' + ] + response = '\n'.join(response) + + ret = cli_helper.CLIParser( + response, cmdlist, delim='!', with_header=False + ) + + self.assertEqual(2, len(ret)) + self.assertEqual('p1v1', ret[0]['head1']) + self.assertEqual('p1v2', ret[0]['head2']) + self.assertEqual('p2v1', ret[1]['head1']) + self.assertEqual('p2v2', ret[1]['head2']) + + +@ddt.ddt +class InStorageSSHTestCase(test.TestCase): + def setUp(self): + self.sshMock = mock.Mock() + self.ssh = cli_helper.InStorageSSH(self.sshMock) + super(InStorageSSHTestCase, self).setUp() + + def tearDown(self): + super(InStorageSSHTestCase, self).tearDown() + + @ddt.data(None, 'node1') + def test_lsnode(self, node_id): + if node_id: + cmd = ['mcsinq', 'lsnode', '-delim', '!', node_id] + response = [ + 'id!1', + 'name!node1' + ] + else: + cmd = ['mcsinq', 'lsnode', '-delim', '!'] + response = [ + 'id!name', + '1!node1', + '2!node2' + ] + + response = '\n'.join(response) + self.sshMock.return_value = (response, '') + + ret = self.ssh.lsnode(node_id) + + if node_id: + self.sshMock.assert_called_once_with(cmd) + self.assertEqual('node1', ret[0]['name']) + else: + self.sshMock.assert_called_once_with(cmd) + self.assertEqual('node1', ret[0]['name']) + self.assertEqual('node2', ret[1]['name']) + + @ddt.data(None, 'Pool0') + def test_lsnaspool(self, pool_id): + response = [ + 'pool_name!available_capacity', + 'Pool0!2GB' + ] + if pool_id is None: + response.append('Pool1!3GB') + + response = '\n'.join(response) + self.sshMock.return_value = (response, '') + + ret = 
self.ssh.lsnaspool(pool_id) + + if pool_id is None: + cmd = ['mcsinq', 'lsnaspool', '-delim', '!'] + self.sshMock.assert_called_once_with(cmd) + self.assertEqual('Pool0', ret[0]['pool_name']) + self.assertEqual('2GB', ret[0]['available_capacity']) + self.assertEqual('Pool1', ret[1]['pool_name']) + self.assertEqual('3GB', ret[1]['available_capacity']) + else: + cmd = ['mcsinq', 'lsnaspool', '-delim', '!', pool_id] + self.sshMock.assert_called_once_with(cmd) + self.assertEqual('Pool0', ret[0]['pool_name']) + self.assertEqual('2GB', ret[0]['available_capacity']) + + @ddt.data({'node_name': 'node1', 'fsname': 'fs1'}, + {'node_name': 'node1', 'fsname': None}, + {'node_name': None, 'fsname': 'fs1'}, + {'node_name': None, 'fsname': None}) + @ddt.unpack + def test_lsfs(self, node_name, fsname): + response = [ + 'pool_name!fs_name!total_capacity!used_capacity', + 'pool0!fs0!10GB!1GB', + 'pool1!fs1!8GB!3GB' + ] + response = '\n'.join(response) + self.sshMock.return_value = (response, '') + + if fsname and not node_name: + self.assertRaises(exception.InvalidParameterValue, + self.ssh.lsfs, + node_name=node_name, + fsname=fsname) + else: + ret = self.ssh.lsfs(node_name, fsname) + + cmdlist = [] + if node_name and not fsname: + cmdlist = ['mcsinq', 'lsfs', '-delim', '!', '-node', '"node1"'] + elif node_name and fsname: + cmdlist = ['mcsinq', 'lsfs', '-delim', '!', + '-node', '"node1"', '-name', '"fs1"'] + else: + cmdlist = ['mcsinq', 'lsfs', '-delim', '!', '-all'] + + self.sshMock.assert_called_once_with(cmdlist) + self.assertEqual('pool0', ret[0]['pool_name']) + self.assertEqual('fs0', ret[0]['fs_name']) + self.assertEqual('10GB', ret[0]['total_capacity']) + self.assertEqual('1GB', ret[0]['used_capacity']) + self.assertEqual('pool1', ret[1]['pool_name']) + self.assertEqual('fs1', ret[1]['fs_name']) + self.assertEqual('8GB', ret[1]['total_capacity']) + self.assertEqual('3GB', ret[1]['used_capacity']) + + def test_addfs(self): + self.sshMock.return_value = ('', '') + + 
self.ssh.addfs('fsname', 'fake_pool', 1, 'node1') + + cmdlist = ['mcsop', 'addfs', '-name', '"fsname"', + '-pool', '"fake_pool"', '-size', '1g', '-node', '"node1"'] + self.sshMock.assert_called_once_with(cmdlist) + + def test_rmfs(self): + self.sshMock.return_value = ('', '') + + self.ssh.rmfs('fsname') + + cmdlist = ['mcsop', 'rmfs', '-name', '"fsname"'] + self.sshMock.assert_called_once_with(cmdlist) + + def test_expandfs(self): + self.sshMock.return_value = ('', '') + + self.ssh.expandfs('fsname', 2) + + cmdlist = ['mcsop', 'expandfs', '-name', '"fsname"', '-size', '2g'] + self.sshMock.assert_called_once_with(cmdlist) + + def test_lsnasdir(self): + response = [ + 'parent_dir!name', + '/fs/test_01!share_01' + ] + + response = '\n'.join(response) + self.sshMock.return_value = (response, '') + + ret = self.ssh.lsnasdir('/fs/test_01') + + cmdlist = ['mcsinq', 'lsnasdir', '-delim', '!', '"/fs/test_01"'] + self.sshMock.assert_called_once_with(cmdlist) + self.assertEqual('/fs/test_01', ret[0]['parent_dir']) + self.assertEqual('share_01', ret[0]['name']) + + def test_addnasdir(self): + self.sshMock.return_value = ('', '') + + self.ssh.addnasdir('/fs/test_01/share_01') + + cmdlist = ['mcsop', 'addnasdir', '"/fs/test_01/share_01"'] + self.sshMock.assert_called_once_with(cmdlist) + + def test_chnasdir(self): + self.sshMock.return_value = ('', '') + + self.ssh.chnasdir('/fs/test_01/share_01', '/fs/test_01/share_02') + + cmdlist = ['mcsop', 'chnasdir', '-oldpath', '"/fs/test_01/share_01"', + '-newpath', '"/fs/test_01/share_02"'] + self.sshMock.assert_called_once_with(cmdlist) + + def test_rmnasdir(self): + self.sshMock.return_value = ('', '') + + self.ssh.rmnasdir('/fs/test_01/share_01') + + cmdlist = ['mcsop', 'rmnasdir', '"/fs/test_01/share_01"'] + self.sshMock.assert_called_once_with(cmdlist) + + def test_rmnfs(self): + self.sshMock.return_value = ('', '') + + self.ssh.rmnfs('/fs/test_01/share_01') + + cmdlist = ['mcsop', 'rmnfs', '"/fs/test_01/share_01"'] + 
self.sshMock.assert_called_once_with(cmdlist) + + @ddt.data(None, '/fs/test_01') + def test_lsnfslist(self, prefix): + cmdlist = ['mcsinq', 'lsnfslist', '-delim', '!'] + if prefix: + cmdlist.append('"/fs/test_01"') + response = '\n'.join([ + 'path', + '/fs/test_01/share_01', + '/fs/test_01/share_02' + ]) + self.sshMock.return_value = (response, '') + + ret = self.ssh.lsnfslist(prefix) + self.sshMock.assert_called_once_with(cmdlist) + self.assertEqual('/fs/test_01/share_01', ret[0]['path']) + self.assertEqual('/fs/test_01/share_02', ret[1]['path']) + + def test_lsnfsinfo(self): + cmdlist = [ + 'mcsinq', 'lsnfsinfo', '-delim', '!', '"/fs/test_01/share_01"' + ] + response = '\n'.join([ + 'ip!mask!rights!root_squash!all_squash', + '192.168.1.0!255.255.255.0!rw!root_squash!all_squash' + ]) + self.sshMock.return_value = (response, '') + + ret = self.ssh.lsnfsinfo('/fs/test_01/share_01') + + self.sshMock.assert_called_once_with(cmdlist) + self.assertEqual('192.168.1.0', ret[0]['ip']) + self.assertEqual('255.255.255.0', ret[0]['mask']) + self.assertEqual('rw', ret[0]['rights']) + + def test_addnfsclient(self): + self.sshMock.return_value = ('', '') + + cmdlist = [ + 'mcsop', 'addnfsclient', '-path', '"/fs/test_01/share_01"', + '-client', '192.168.1.0/255.255.255.0:rw:ALL_SQUASH:ROOT_SQUASH' + ] + + self.ssh.addnfsclient( + '/fs/test_01/share_01', + '192.168.1.0/255.255.255.0:rw:ALL_SQUASH:ROOT_SQUASH' + ) + + self.sshMock.assert_called_once_with(cmdlist) + + def test_chnfsclient(self): + self.sshMock.return_value = ('', '') + + cmdlist = [ + 'mcsop', 'chnfsclient', '-path', '"/fs/test_01/share_01"', + '-client', '192.168.1.0/255.255.255.0:rw:ALL_SQUASH:ROOT_SQUASH' + ] + + self.ssh.chnfsclient( + '/fs/test_01/share_01', + '192.168.1.0/255.255.255.0:rw:ALL_SQUASH:ROOT_SQUASH' + ) + + self.sshMock.assert_called_once_with(cmdlist) + + def test_rmnfsclient(self): + self.sshMock.return_value = ('', '') + + cmdlist = [ + 'mcsop', 'rmnfsclient', '-path', '"/fs/test_01/share_01"', 
+ '-client', '192.168.1.0/255.255.255.0' + ] + + self.ssh.rmnfsclient( + '/fs/test_01/share_01', + '192.168.1.0/255.255.255.0:rw:ALL_SQUASH:ROOT_SQUASH' + ) + + self.sshMock.assert_called_once_with(cmdlist) + + @ddt.data(None, 'cifs') + def test_lscifslist(self, filter): + cmdlist = ['mcsinq', 'lscifslist', '-delim', '!'] + if filter: + cmdlist.append('"%s"' % filter) + response = '\n'.join([ + 'name!path', + 'cifs!/fs/test_01/share_01' + ]) + self.sshMock.return_value = (response, '') + + ret = self.ssh.lscifslist(filter) + + self.sshMock.assert_called_once_with(cmdlist) + self.assertEqual('cifs', ret[0]['name']) + self.assertEqual('/fs/test_01/share_01', ret[0]['path']) + + def test_lscifsinfo(self): + cmdlist = ['mcsinq', 'lscifsinfo', '-delim', '!', '"cifs"'] + response = '\n'.join([ + 'path!oplocks!type!name!rights', + '/fs/test_01/share_01!on!LU!user1!rw' + ]) + self.sshMock.return_value = (response, '') + + ret = self.ssh.lscifsinfo('cifs') + + self.sshMock.assert_called_once_with(cmdlist) + self.assertEqual('/fs/test_01/share_01', ret[0]['path']) + self.assertEqual('on', ret[0]['oplocks']) + self.assertEqual('LU', ret[0]['type']) + self.assertEqual('user1', ret[0]['name']) + self.assertEqual('rw', ret[0]['rights']) + + def test_addcifs(self): + self.sshMock.return_value = ('', '') + + cmdlist = [ + 'mcsop', 'addcifs', '-name', 'cifs', + '-path', '/fs/test_01/share_01', '-oplocks', 'off' + ] + + self.ssh.addcifs('cifs', '/fs/test_01/share_01', 'off') + self.sshMock.assert_called_once_with(cmdlist) + + def test_rmcifs(self): + self.sshMock.return_value = ('', '') + + cmdlist = ['mcsop', 'rmcifs', 'cifs'] + + self.ssh.rmcifs('cifs') + self.sshMock.assert_called_once_with(cmdlist) + + def test_chcifs(self): + self.sshMock.return_value = ('', '') + + cmdlist = ['mcsop', 'chcifs', '-name', 'cifs', '-oplocks', 'off'] + + self.ssh.chcifs('cifs', 'off') + self.sshMock.assert_called_once_with(cmdlist) + + def test_addcifsuser(self): + self.sshMock.return_value = ('', 
'') + + cmdlist = [ + 'mcsop', 'addcifsuser', '-name', 'cifs', '-rights', 'LU:user1:rw' + ] + + self.ssh.addcifsuser('cifs', 'LU:user1:rw') + self.sshMock.assert_called_once_with(cmdlist) + + def test_chcifsuser(self): + self.sshMock.return_value = ('', '') + + cmdlist = [ + 'mcsop', 'chcifsuser', '-name', 'cifs', '-rights', 'LU:user1:rw' + ] + + self.ssh.chcifsuser('cifs', 'LU:user1:rw') + self.sshMock.assert_called_once_with(cmdlist) + + def test_rmcifsuser(self): + self.sshMock.return_value = ('', '') + + cmdlist = [ + 'mcsop', 'rmcifsuser', '-name', 'cifs', '-rights', 'LU:user1' + ] + + self.ssh.rmcifsuser('cifs', 'LU:user1:rw') + self.sshMock.assert_called_once_with(cmdlist) + + def test_lsnasportip(self): + cmdlist = ['mcsinq', 'lsnasportip', '-delim', '!'] + response = '\n'.join([ + 'node_name!id!ip!mask!gw!link_state', + 'node1!1!192.168.10.1!255.255.255.0!192.168.10.254!active', + 'node2!1!192.168.10.2!255.255.255.0!192.168.10.254!inactive' + ]) + + self.sshMock.return_value = (response, '') + + ret = self.ssh.lsnasportip() + + self.sshMock.assert_called_once_with(cmdlist) + self.assertEqual('node1', ret[0]['node_name']) + self.assertEqual('1', ret[0]['id']) + self.assertEqual('192.168.10.1', ret[0]['ip']) + self.assertEqual('255.255.255.0', ret[0]['mask']) + self.assertEqual('192.168.10.254', ret[0]['gw']) + self.assertEqual('active', ret[0]['link_state']) + self.assertEqual('node2', ret[1]['node_name']) + self.assertEqual('1', ret[1]['id']) + self.assertEqual('192.168.10.2', ret[1]['ip']) + self.assertEqual('255.255.255.0', ret[1]['mask']) + self.assertEqual('192.168.10.254', ret[1]['gw']) + self.assertEqual('inactive', ret[1]['link_state']) + + +@ddt.ddt +class InStorageAssistantTestCase(test.TestCase): + def setUp(self): + self.sshMock = mock.Mock() + self.assistant = instorage.InStorageAssistant(self.sshMock) + super(InStorageAssistantTestCase, self).setUp() + + def tearDown(self): + super(InStorageAssistantTestCase, self).tearDown() + + @ddt.data( + 
{'size': '1000MB', 'gb_size': 1}, + {'size': '3GB', 'gb_size': 3}, + {'size': '4TB', 'gb_size': 4096}, + {'size': '5PB', 'gb_size': 5242880}) + @ddt.unpack + def test_size_to_gb(self, size, gb_size): + ret = self.assistant.size_to_gb(size) + self.assertEqual(gb_size, ret) + + def test_get_available_pools(self): + response_for_lsnaspool = ('\n'.join([ + 'pool_name!available_capacity', + 'pool0!100GB', + 'pool1!150GB' + ]), '') + cmdlist = ['mcsinq', 'lsnaspool', '-delim', '!'] + self.sshMock.return_value = response_for_lsnaspool + + ret = self.assistant.get_available_pools() + + pools = ['pool0', 'pool1'] + self.assertEqual(pools, ret) + self.sshMock.assert_called_once_with(cmdlist) + + def test_get_pools_attr(self): + response_for_lsfs = ('\n'.join([ + 'pool_name!fs_name!total_capacity!used_capacity', + 'pool0!fs0!10GB!1GB', + 'pool1!fs1!8GB!3GB' + ]), '') + call_for_lsfs = mock.call(['mcsinq', 'lsfs', '-delim', '!', '-all']) + response_for_lsnaspool = ('\n'.join([ + 'pool_name!available_capacity', + 'pool0!100GB', + 'pool1!150GB' + ]), '') + call_for_lsnaspool = mock.call(['mcsinq', 'lsnaspool', '-delim', '!']) + self.sshMock.side_effect = [ + response_for_lsfs, + response_for_lsnaspool + ] + + ret = self.assistant.get_pools_attr(['pool0']) + pools = { + 'pool0': { + 'pool_name': 'pool0', + 'total_capacity_gb': 110, + 'free_capacity_gb': 100, + 'allocated_capacity_gb': 10, + 'qos': False, + 'reserved_percentage': 0, + 'dedupe': False, + 'compression': False, + 'thin_provisioning': False, + 'max_over_subscription_ratio': 0 + } + } + self.assertEqual(pools, ret) + self.sshMock.assert_has_calls([call_for_lsfs, call_for_lsnaspool]) + + def test_get_nodes_info(self): + response_for_lsnasportip = ('\n'.join([ + 'node_name!id!ip!mask!gw!link_state', + 'node1!1!192.168.10.1!255.255.255.0!192.168.10.254!active', + 'node2!1!192.168.10.2!255.255.255.0!192.168.10.254!inactive', + 'node1!2!!!!inactive', + 'node2!2!!!!inactive' + ]), '') + call_for_lsnasportip = mock.call([ + 
'mcsinq', 'lsnasportip', '-delim', '!' + ]) + self.sshMock.side_effect = [response_for_lsnasportip] + + ret = self.assistant.get_nodes_info() + nodes = { + 'node1': { + '1': { + 'node_name': 'node1', + 'id': '1', + 'ip': '192.168.10.1', + 'mask': '255.255.255.0', + 'gw': '192.168.10.254', + 'link_state': 'active' + } + }, + 'node2': { + '1': { + 'node_name': 'node2', + 'id': '1', + 'ip': '192.168.10.2', + 'mask': '255.255.255.0', + 'gw': '192.168.10.254', + 'link_state': 'inactive' + } + } + } + self.assertEqual(nodes, ret) + self.sshMock.assert_has_calls([call_for_lsnasportip]) + + @ddt.data( + {'name': '1' * 30, 'fsname': '1' * 30}, + {'name': '1' * 40, 'fsname': '1' * 32}) + @ddt.unpack + def test_get_fsname_by_name(self, name, fsname): + ret = self.assistant.get_fsname_by_name(name) + + self.assertEqual(fsname, ret) + + @ddt.data( + {'name': '1' * 30, 'dirname': '1' * 30}, + {'name': '1' * 40, 'dirname': '1' * 32}) + @ddt.unpack + def test_get_dirsname_by_name(self, name, dirname): + ret = self.assistant.get_dirname_by_name(name) + + self.assertEqual(dirname, ret) + + @ddt.data( + {'name': '1' * 30, 'dirpath': '/fs/' + '1' * 30 + '/' + '1' * 30}, + {'name': '1' * 40, 'dirpath': '/fs/' + '1' * 32 + '/' + '1' * 32}) + @ddt.unpack + def test_get_dirpath_by_name(self, name, dirpath): + ret = self.assistant.get_dirpath_by_name(name) + + self.assertEqual(dirpath, ret) + + @ddt.data('CIFS', 'NFS') + def test_create_share(self, proto): + response_for_lsnasportip = ('\n'.join([ + 'node_name!id!ip!mask!gw!link_state', + 'node1!1!192.168.10.1!255.255.255.0!192.168.10.254!active' + ]), '') + call_for_lsnasportip = mock.call([ + 'mcsinq', 'lsnasportip', '-delim', '!' 
+ ]) + response_for_addfs = ('', '') + call_for_addfs = mock.call([ + 'mcsop', 'addfs', '-name', '"fakename"', '-pool', '"fakepool"', + '-size', '10g', '-node', '"node1"' + ]) + response_for_addnasdir = ('', '') + call_for_addnasdir = mock.call([ + 'mcsop', 'addnasdir', '"/fs/fakename/fakename"' + ]) + response_for_addcifs = ('', '') + call_for_addcifs = mock.call([ + 'mcsop', 'addcifs', '-name', 'fakename', + '-path', '/fs/fakename/fakename', '-oplocks', 'off' + ]) + + side_effect = [ + response_for_lsnasportip, + response_for_addfs, + response_for_addnasdir + ] + calls = [call_for_lsnasportip, call_for_addfs, call_for_addnasdir] + if proto == 'CIFS': + side_effect.append(response_for_addcifs) + calls.append(call_for_addcifs) + self.sshMock.side_effect = side_effect + + self.assistant.create_share('fakename', 'fakepool', 10, proto) + + self.sshMock.assert_has_calls(calls) + + @ddt.data(True, False) + def test_check_share_exist(self, exist): + response_for_lsfs = ('\n'.join([ + 'pool_name!fs_name!total_capacity!used_capacity', + 'pool0!fs0!10GB!1GB', + 'pool1!fs1!8GB!3GB' + ]), '') + call_for_lsfs = mock.call([ + 'mcsinq', 'lsfs', '-delim', '!', '-all' + ]) + self.sshMock.side_effect = [ + response_for_lsfs + ] + + share_name = 'fs0' if exist else 'fs2' + + ret = self.assistant.check_share_exist(share_name) + + self.assertEqual(exist, ret) + self.sshMock.assert_has_calls([call_for_lsfs]) + + @ddt.data({'proto': 'CIFS', 'share_exist': False}, + {'proto': 'CIFS', 'share_exist': True}, + {'proto': 'NFS', 'share_exist': False}, + {'proto': 'NFS', 'share_exist': True}) + @ddt.unpack + def test_delete_share(self, proto, share_exist): + mock_cse = self.mock_object( + instorage.InStorageAssistant, + 'check_share_exist', + mock.Mock(return_value=share_exist) + ) + response_for_rmcifs = ('', '') + call_for_rmcifs = mock.call([ + 'mcsop', 'rmcifs', 'fakename' + ]) + response_for_rmnasdir = ('', '') + call_for_rmnasdir = mock.call([ + 'mcsop', 'rmnasdir', 
'"/fs/fakename/fakename"' + ]) + response_for_rmfs = ('', '') + call_for_rmfs = mock.call([ + 'mcsop', 'rmfs', '-name', '"fakename"' + ]) + + side_effect = [response_for_rmnasdir, response_for_rmfs] + calls = [call_for_rmnasdir, call_for_rmfs] + if proto == 'CIFS': + side_effect.insert(0, response_for_rmcifs) + calls.insert(0, call_for_rmcifs) + self.sshMock.side_effect = side_effect + + self.assistant.delete_share('fakename', proto) + + mock_cse.assert_called_once_with('fakename') + if share_exist: + self.sshMock.assert_has_calls(calls) + else: + self.sshMock.assert_not_called() + + def test_extend_share(self): + response_for_lsfs = ('\n'.join([ + 'pool_name!fs_name!total_capacity!used_capacity', + 'pool0!fs0!10GB!1GB', + 'pool1!fs1!8GB!3GB' + ]), '') + call_for_lsfs = mock.call([ + 'mcsinq', 'lsfs', '-delim', '!', '-all' + ]) + response_for_expandfs = ('', '') + call_for_expandfs = mock.call([ + 'mcsop', 'expandfs', '-name', '"fs0"', '-size', '2g' + ]) + self.sshMock.side_effect = [response_for_lsfs, response_for_expandfs] + + self.assistant.extend_share('fs0', 12) + + self.sshMock.assert_has_calls([call_for_lsfs, call_for_expandfs]) + + @ddt.data('CIFS', 'NFS') + def test_get_export_locations(self, proto): + response_for_lsnode = ('\n'.join([ + 'id!name', + '1!node1', + '2!node2' + ]), '') + call_for_lsnode = mock.call([ + 'mcsinq', 'lsnode', '-delim', '!' 
+ ]) + response_for_lsfs_node1 = ('\n'.join([ + 'pool_name!fs_name!total_capacity!used_capacity', + 'pool0!fs0!10GB!1GB' + ]), '') + call_for_lsfs_node1 = mock.call([ + 'mcsinq', 'lsfs', '-delim', '!', '-node', '"node1"' + ]) + response_for_lsfs_node2 = ('\n'.join([ + 'pool_name!fs_name!total_capacity!used_capacity', + 'pool1!fs1!10GB!1GB' + ]), '') + call_for_lsfs_node2 = mock.call([ + 'mcsinq', 'lsfs', '-delim', '!', '-node', '"node2"' + ]) + response_for_lsnasportip = ('\n'.join([ + 'node_name!id!ip!mask!gw!link_state', + 'node1!1!192.168.10.1!255.255.255.0!192.168.10.254!active', + 'node1!2!192.168.10.2!255.255.255.0!192.168.10.254!active', + 'node1!3!!!!inactive', + 'node2!1!192.168.10.3!255.255.255.0!192.168.10.254!active', + 'node2!2!192.168.10.4!255.255.255.0!192.168.10.254!active', + 'node2!3!!!!inactive' + ]), '') + call_for_lsnasportip = mock.call([ + 'mcsinq', 'lsnasportip', '-delim', '!' + ]) + self.sshMock.side_effect = [ + response_for_lsnode, + response_for_lsfs_node1, + response_for_lsfs_node2, + response_for_lsnasportip + ] + calls = [ + call_for_lsnode, + call_for_lsfs_node1, + call_for_lsfs_node2, + call_for_lsnasportip + ] + + ret = self.assistant.get_export_locations('fs1', proto) + if proto == 'CIFS': + locations = [ + { + 'path': '\\\\192.168.10.3\\fs1', + 'is_admin_only': False, + 'metadata': {} + }, + { + 'path': '\\\\192.168.10.4\\fs1', + 'is_admin_only': False, + 'metadata': {} + } + ] + else: + locations = [ + { + 'path': '192.168.10.3:/fs/fs1/fs1', + 'is_admin_only': False, + 'metadata': {} + }, + { + 'path': '192.168.10.4:/fs/fs1/fs1', + 'is_admin_only': False, + 'metadata': {} + } + ] + self.assertEqual(locations, ret) + self.sshMock.assert_has_calls(calls) + + def test_classify_nfs_client_spec_has_nfsinfo(self): + response_for_lsnfslist = ('\n'.join([ + 'path', + '/fs/fs01/fs01' + ]), '') + call_for_lsnfslist = mock.call([ + 'mcsinq', 'lsnfslist', '-delim', '!', '"/fs/fs01/fs01"' + ]) + response_for_lsnfsinfo = ('\n'.join([ + 
'ip!mask!rights!all_squash!root_squash', + '192.168.1.0!255.255.255.0!rw!all_squash!root_squash', + '192.168.2.0!255.255.255.0!rw!all_squash!root_squash' + ]), '') + call_for_lsnfsinfo = mock.call([ + 'mcsinq', 'lsnfsinfo', '-delim', '!', '"/fs/fs01/fs01"' + ]) + self.sshMock.side_effect = [ + response_for_lsnfslist, response_for_lsnfsinfo + ] + calls = [call_for_lsnfslist, call_for_lsnfsinfo] + + client_spec = [ + '192.168.2.0/255.255.255.0:rw:all_squash:root_squash', + '192.168.3.0/255.255.255.0:rw:all_squash:root_squash' + ] + add_spec, del_spec = self.assistant.classify_nfs_client_spec( + client_spec, '/fs/fs01/fs01' + ) + + self.assertEqual( + add_spec, ['192.168.3.0/255.255.255.0:rw:all_squash:root_squash'] + ) + self.assertEqual( + del_spec, ['192.168.1.0/255.255.255.0:rw:all_squash:root_squash'] + ) + self.sshMock.assert_has_calls(calls) + + def test_classify_nfs_client_spec_has_no_nfsinfo(self): + cmdlist = [ + 'mcsinq', 'lsnfslist', '-delim', '!', '"/fs/fs01/fs01"' + ] + self.sshMock.return_value = ('', '') + + client_spec = [ + '192.168.2.0/255.255.255.0:rw:all_squash:root_squash', + ] + add_spec, del_spec = self.assistant.classify_nfs_client_spec( + client_spec, '/fs/fs01/fs01' + ) + + self.assertEqual(client_spec, add_spec) + self.assertEqual([], del_spec) + self.sshMock.assert_called_once_with(cmdlist) + + def test_access_rule_to_client_spec(self): + rule = { + 'access_type': 'ip', + 'access_to': '192.168.10.0/24', + 'access_level': 'rw' + } + + ret = self.assistant.access_rule_to_client_spec(rule) + + spec = '192.168.10.0/255.255.255.0:rw:all_squash:root_squash' + self.assertEqual(spec, ret) + + def test_access_rule_to_client_spec_type_failed(self): + rule = { + 'access_type': 'user', + 'access_to': 'test01', + 'access_level': 'rw' + } + + self.assertRaises( + exception.ShareBackendException, + self.assistant.access_rule_to_client_spec, + rule + ) + + def test_access_rule_to_client_spec_ipversion_failed(self): + rule = { + 'access_type': 'ip', + 
'access_to': '2001:db8::/64', + 'access_level': 'rw' + } + + self.assertRaises( + exception.ShareBackendException, + self.assistant.access_rule_to_client_spec, + rule + ) + + @ddt.data(True, False) + def test_update_nfs_access(self, check_del_add): + response_for_rmnfsclient = ('', '') + call_for_rmnfsclient = mock.call( + ['mcsop', 'rmnfsclient', '-path', '"/fs/fs01/fs01"', '-client', + '192.168.1.0/255.255.255.0'] + ) + response_for_addnfsclient = ('', '') + call_for_addnfsclient = mock.call( + ['mcsop', 'addnfsclient', '-path', '"/fs/fs01/fs01"', '-client', + '192.168.3.0/255.255.255.0:rw:all_squash:root_squash'] + ) + access_rules = [ + { + 'access_type': 'ip', + 'access_to': '192.168.2.0/24', + 'access_level': 'rw' + }, + { + 'access_type': 'ip', + 'access_to': '192.168.3.0/24', + 'access_level': 'rw' + } + ] + add_rules = [ + { + 'access_type': 'ip', + 'access_to': '192.168.3.0/24', + 'access_level': 'rw' + } + ] + del_rules = [ + { + 'access_type': 'ip', + 'access_to': '192.168.1.0/24', + 'access_level': 'rw' + }, + { + 'access_type': 'ip', + 'access_to': '192.168.4.0/24', + 'access_level': 'rw' + } + ] + + cncs_mock = mock.Mock(return_value=( + ['192.168.3.0/255.255.255.0:rw:all_squash:root_squash'], + ['192.168.1.0/255.255.255.0:rw:all_squash:root_squash'] + )) + self.mock_object(self.assistant, 'classify_nfs_client_spec', cncs_mock) + self.sshMock.side_effect = [ + response_for_rmnfsclient, response_for_addnfsclient + ] + + if check_del_add: + self.assistant.update_nfs_access('fs01', [], add_rules, del_rules) + else: + self.assistant.update_nfs_access('fs01', access_rules, [], []) + + if check_del_add: + cncs_mock.assert_called_once_with( + [], '/fs/fs01/fs01' + ) + else: + cncs_mock.assert_called_once_with( + [ + '192.168.2.0/255.255.255.0:rw:all_squash:root_squash', + '192.168.3.0/255.255.255.0:rw:all_squash:root_squash' + ], + '/fs/fs01/fs01' + ) + + self.sshMock.assert_has_calls( + [call_for_rmnfsclient, call_for_addnfsclient] + ) + + def 
test_classify_cifs_rights(self): + cmdlist = ['mcsinq', 'lscifsinfo', '-delim', '!', '"fs01"'] + response_for_lscifsinfo = '\n'.join([ + 'path!oplocks!type!name!rights', + '/fs/fs01/fs01!on!LU!user1!rw', + '/fs/fs01/fs01!on!LU!user2!rw' + ]) + self.sshMock.return_value = (response_for_lscifsinfo, '') + + access_rights = [ + 'LU:user2:rw', + 'LU:user3:rw' + ] + add_rights, del_rights = self.assistant.classify_cifs_rights( + access_rights, 'fs01' + ) + + self.sshMock.assert_called_once_with(cmdlist) + self.assertEqual(['LU:user3:rw'], add_rights) + self.assertEqual(['LU:user1:rw'], del_rights) + + def test_access_rule_to_rights(self): + rule = { + 'access_type': 'user', + 'access_to': 'test01', + 'access_level': 'rw' + } + + ret = self.assistant.access_rule_to_rights(rule) + self.assertEqual('LU:test01:rw', ret) + + def test_access_rule_to_rights_fail_type(self): + rule = { + 'access_type': 'ip', + 'access_to': '192.168.1.0/24', + 'access_level': 'rw' + } + + self.assertRaises( + exception.ShareBackendException, + self.assistant.access_rule_to_rights, + rule + ) + + @ddt.data(True, False) + def test_update_cifs_access(self, check_del_add): + response_for_rmcifsuser = ('', None) + call_for_rmcifsuser = mock.call( + ['mcsop', 'rmcifsuser', '-name', 'fs01', '-rights', 'LU:user1'] + ) + response_for_addcifsuser = ('', None) + call_for_addcifsuser = mock.call( + ['mcsop', 'addcifsuser', '-name', 'fs01', '-rights', 'LU:user3:rw'] + ) + access_rules = [ + { + 'access_type': 'user', + 'access_to': 'user2', + 'access_level': 'rw' + }, + { + 'access_type': 'user', + 'access_to': 'user3', + 'access_level': 'rw' + } + ] + add_rules = [ + { + 'access_type': 'user', + 'access_to': 'user3', + 'access_level': 'rw' + } + ] + del_rules = [ + { + 'access_type': 'user', + 'access_to': 'user1', + 'access_level': 'rw' + } + ] + + ccr_mock = mock.Mock(return_value=(['LU:user3:rw'], ['LU:user1:rw'])) + self.mock_object(self.assistant, 'classify_cifs_rights', ccr_mock) + 
self.sshMock.side_effect = [ + response_for_rmcifsuser, response_for_addcifsuser + ] + + if check_del_add: + self.assistant.update_cifs_access('fs01', [], add_rules, del_rules) + else: + self.assistant.update_cifs_access('fs01', access_rules, [], []) + + if not check_del_add: + ccr_mock.assert_called_once_with( + ['LU:user2:rw', 'LU:user3:rw'], 'fs01' + ) + + self.sshMock.assert_has_calls( + [call_for_rmcifsuser, call_for_addcifsuser] + ) + + def test_check_access_type(self): + rules1 = { + 'access_type': 'ip', + 'access_to': '192.168.1.0/24', + 'access_level': 'rw' + } + rules2 = { + 'access_type': 'ip', + 'access_to': '192.168.2.0/24', + 'access_level': 'rw' + } + rules3 = { + 'access_type': 'user', + 'access_to': 'user1', + 'access_level': 'rw' + } + rules4 = { + 'access_type': 'user', + 'access_to': 'user2', + 'access_level': 'rw' + } + + ret = self.assistant.check_access_type('ip', [rules1], [rules2]) + self.assertTrue(ret) + ret = self.assistant.check_access_type('user', [rules3], [rules4]) + self.assertTrue(ret) + ret = self.assistant.check_access_type('ip', [rules1], [rules3]) + self.assertFalse(ret) + ret = self.assistant.check_access_type('user', [rules3], [rules1]) + self.assertFalse(ret) + + @ddt.data( + {'proto': 'CIFS', 'ret': True}, + {'proto': 'CIFS', 'ret': False}, + {'proto': 'NFS', 'ret': True}, + {'proto': 'NFS', 'ret': False}, + {'proto': 'unknown', 'ret': True}) + @ddt.unpack + def test_update_access(self, proto, ret): + uca_mock = self.mock_object( + self.assistant, 'update_cifs_access', mock.Mock() + ) + una_mock = self.mock_object( + self.assistant, 'update_nfs_access', mock.Mock() + ) + cat_mock = self.mock_object( + self.assistant, 'check_access_type', mock.Mock(return_value=ret) + ) + + if proto == 'unknown': + self.assertRaises( + exception.ShareBackendException, + self.assistant.update_access, + 'fs01', + proto, + [], + [], + [] + ) + cat_mock.assert_not_called() + elif ret is False: + self.assertRaises( + exception.InvalidShareAccess, 
+ self.assistant.update_access, + 'fs01', + proto, + [], + [], + [] + ) + cat_mock.assert_called_once() + else: + self.assistant.update_access( + 'fs01', + proto, + [], + [], + [] + ) + if proto == 'CIFS': + uca_mock.assert_called_once_with('fs01', [], [], []) + una_mock.assert_not_called() + else: + una_mock.assert_called_once_with('fs01', [], [], []) + uca_mock.assert_not_called() + cat_mock.assert_called_once() diff --git a/releasenotes/notes/inspur-instorage-driver-51d7a67f253f3ecd.yaml b/releasenotes/notes/inspur-instorage-driver-51d7a67f253f3ecd.yaml new file mode 100644 index 0000000000..e1d3a4d33d --- /dev/null +++ b/releasenotes/notes/inspur-instorage-driver-51d7a67f253f3ecd.yaml @@ -0,0 +1,6 @@ +--- +prelude: > + Added the Inspur InStorage driver. +features: + - Added the new Inspur InStorage driver, which supports share creation, deletion, + extension, and access control through the NFS and CIFS protocols.