From 4705a7d52015353d8259574d2a89ffe2df44a7a9 Mon Sep 17 00:00:00 2001
From: Ratnakaram Rajesh
Date: Mon, 28 Dec 2020 05:06:56 +0200
Subject: [PATCH] Add Zadara Manila driver

Implements: blueprint zadara-manila-driver
Change-Id: Iae0eaf8b37d5eecc352af6546fd0cfa4cadab497
---
 doc/source/admin/index.rst                     |    1 +
 doc/source/admin/zadara_driver.rst             |  132 +++
 manila/exception.py                            |   69 ++
 manila/share/drivers/zadara/__init__.py        |    0
 manila/share/drivers/zadara/common.py          |  496 ++++++++
 manila/share/drivers/zadara/zadara.py          |  748 ++++++++++++
 manila/tests/share/drivers/zadara/__init__.py  |    0
 .../tests/share/drivers/zadara/test_zadara.py  | 1052 +++++++++++++++++
 ...zadara-manila-driver-cb22b647e60f7ab8.yaml  |    3 +
 9 files changed, 2501 insertions(+)
 create mode 100644 doc/source/admin/zadara_driver.rst
 create mode 100644 manila/share/drivers/zadara/__init__.py
 create mode 100644 manila/share/drivers/zadara/common.py
 create mode 100644 manila/share/drivers/zadara/zadara.py
 create mode 100644 manila/tests/share/drivers/zadara/__init__.py
 create mode 100644 manila/tests/share/drivers/zadara/test_zadara.py
 create mode 100644 releasenotes/notes/zadara-manila-driver-cb22b647e60f7ab8.yaml

diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
index 2cacb207fe..058854f79e 100644
--- a/doc/source/admin/index.rst
+++ b/doc/source/admin/index.rst
@@ -94,3 +94,4 @@ each back end.
    tegile_driver
    nexentastor5_driver
    ../configuration/shared-file-systems/drivers/windows-smb-driver
+   zadara_driver
diff --git a/doc/source/admin/zadara_driver.rst b/doc/source/admin/zadara_driver.rst
new file mode 100644
index 0000000000..24a69c13c8
--- /dev/null
+++ b/doc/source/admin/zadara_driver.rst
@@ -0,0 +1,132 @@
+..
+   Copyright (c) 2021 Zadara Inc.
+   All Rights Reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License"); you may
+   not use this file except in compliance with the License. You may obtain
+   a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+   License for the specific language governing permissions and limitations
+   under the License.
+
+=======================================
+Zadara VPSA Driver for OpenStack Manila
+=======================================
+
+Zadara's Virtual Private Storage Array (VPSA) is the first software-defined
+Enterprise-Storage-as-a-Service. It is an elastic and private block and file
+storage system which provides enterprise-grade data protection and data
+management storage services.
+
+The Manila VPSA driver provides seamless management capabilities for VPSA
+volumes (in this case, NFS and SMB volumes) without losing the added value
+provided by the VPSA Storage Array/Flash-Array.
+
+Requirements
+------------
+
+- VPSA Storage Array/Flash-Array running version 20.12 or higher.
+
+- Networking preparation - the Zadara VPSA driver for Manila supports only
+  DHSS=False (driver_handles_share_servers). The driver does not handle
+  network configuration; it is up to the administrator to ensure
+  connectivity from the manila-share node and the OpenStack cloud to the
+  VPSA front-end network (such as a neutron flat/VLAN network), as
+  illustrated below.
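+
+Assuming a neutron provider network is used for the VPSA front-end, the
+network preparation could look like the following sketch. The network name
+``vpsa-fe``, the physical network ``physnet1`` and the subnet range are
+illustrative placeholders, not values required by the driver:
+
+.. code-block:: console
+
+   $ openstack network create --provider-network-type flat \
+       --provider-physical-network physnet1 vpsa-fe
+   $ openstack subnet create --network vpsa-fe \
+       --subnet-range 10.2.1.0/24 vpsa-fe-subnet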
+
+Supported shared filesystems and operations
+-------------------------------------------
+
+Supported share protocols
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- SMB (CIFS)
+- NFS
+
+Supported operations
+~~~~~~~~~~~~~~~~~~~~
+
+The following operations are supported:
+
+- Create a share.
+- Delete a share.
+- Extend a share.
+- Create a snapshot.
+- Delete a snapshot.
+- Create a share from snapshot.
+- Allow share access.
+- Manage a share.
+
+.. note::
+
+   - Only the IP access type is supported.
+   - Both RW and RO access levels are supported.
+
+
+Backend Configuration
+~~~~~~~~~~~~~~~~~~~~~
+
+The following parameters need to be configured in the [DEFAULT] section
+of the manila configuration file (/etc/manila/manila.conf):
+
+- `enabled_share_backends` - Name of the section in manila.conf used to
+  specify a backend, e.g. *enabled_share_backends = zadaravpsa*
+
+- `enabled_share_protocols` - Specify a list of protocols to be allowed
+  for share creation. The VPSA driver supports the following options:
+  *NFS*, *CIFS*, or *NFS, CIFS*
+
+The following parameters need to be configured in the [backend] section
+of the manila configuration file (/etc/manila/manila.conf):
+
+Driver options
+--------------
+
+- `zadara_vpsa_host` - VPSA management host name or IP address.
+- `zadara_vpsa_port` - VPSA port number.
+- `zadara_vpsa_use_ssl` - Use an SSL connection to the VPSA.
+- `zadara_driver_ssl_cert_path` - Non-default path to a CA_BUNDLE file
+  or directory with certificates of trusted CAs, used to validate the
+  backend.
+- `zadara_access_key` - VPSA access key.
+- `zadara_vpsa_poolname` - VPSA storage pool assigned for volumes.
+- `zadara_vol_encrypt` - Default encryption policy for volumes.
+- `zadara_gen3_vol_compress` - Enable compression for volumes.
+- `zadara_share_name_template` - Default template for VPSA share names.
+- `zadara_share_snap_name_template` - Default template for VPSA share
+  snapshot names.
+- `driver_handles_share_servers` - Must be set to False.
+- `share_driver` = manila.share.drivers.zadara.zadara.ZadaraVPSAShareDriver
+
+Back-end configuration example
+------------------------------
+
+.. code-block:: ini
+
+   [DEFAULT]
+   enabled_share_backends = zadaravpsa
+   enabled_share_protocols = NFS,CIFS
+
+   [zadaravpsa]
+   driver_handles_share_servers = False
+   zadara_vpsa_host = vsa-00000010-mycloud.zadaravpsa.com
+   zadara_vpsa_port = 443
+   zadara_access_key = MYSUPERSECRETACCESSKEY
+   zadara_vpsa_poolname = pool-00010001
+   share_backend_name = zadaravpsa
+   zadara_vpsa_use_ssl = true
+   share_driver = manila.share.drivers.zadara.zadara.ZadaraVPSAShareDriver
diff --git a/manila/exception.py b/manila/exception.py
index 509cbc890f..51767bd926 100644
--- a/manila/exception.py
+++ b/manila/exception.py
@@ -1009,3 +1009,72 @@ class InfortrendCLIException(ShareBackendException):
 
 class InfortrendNASException(ShareBackendException):
     message = _("Infortrend NAS exception: %(err)s")
+
+
+# Zadara storage driver
+class ZadaraUnknownCmd(ShareBackendException):
+    message = _("Unknown or unsupported command %(cmd)s")
+
+
+class ZadaraSessionRequestException(ShareBackendException):
+    message = _("%(msg)s")
+
+
+class ZadaraBadHTTPResponseStatus(ShareBackendException):
+    message = _("Bad HTTP response status %(status)s")
+
+
+class ZadaraFailedCmdWithDump(ShareBackendException):
+    message = _("Operation failed with status=%(status)s. Full dump: %(data)s")
+
+
+class ZadaraVPSANoActiveController(ShareBackendException):
+    message = _("Unable to find any active VPSA controller")
+
+
+class ZadaraServerCreateFailure(ShareBackendException):
+    message = _("Unable to create server object for initiator %(name)s")
+
+
+class ZadaraAttachmentsNotFound(ShareBackendException):
+    message = _("Failed to retrieve attachments for volume %(name)s")
+
+
+class ZadaraManilaInvalidAccessKey(ShareBackendException):
+    message = _("Invalid VPSA access key")
+
+
+class ZadaraVPSAVolumeShareFailed(ShareBackendException):
+    message = _("Failed to create VPSA backend share. 
Error: %(error)s")
+
+
+class ZadaraInvalidShareAccessType(ShareBackendException):
+    message = _("Only the ip access type is allowed for Zadara manila "
+                "shares.")
+
+
+class ZadaraShareNotFound(ShareBackendException):
+    message = _("Share %(name)s could not be found.")
+
+
+class ZadaraExtendShareFailed(ShareBackendException):
+    message = _("Failed to extend VPSA backend share. Error: %(error)s")
+
+
+class ZadaraInvalidProtocol(ShareBackendException):
+    message = _("The protocol %(protocol_type)s is not supported by the "
+                "Zadara manila driver. Only the NFS or CIFS protocol is "
+                "supported.")
+
+
+class ZadaraShareNotValid(ShareBackendException):
+    message = _("Share %(name)s is not valid.")
+
+
+class ZadaraVPSASnapshotCreateFailed(ShareBackendException):
+    message = _("Failed to create snapshot of VPSA share %(name)s. "
+                "Error: %(error)s")
+
+
+class ZadaraVPSASnapshotManageFailed(ShareBackendException):
+    message = _("Failed to manage VPSA share snapshot with id %(snap_id)s. "
+                "Error: %(error)s")
diff --git a/manila/share/drivers/zadara/__init__.py b/manila/share/drivers/zadara/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/manila/share/drivers/zadara/common.py b/manila/share/drivers/zadara/common.py
new file mode 100644
index 0000000000..048ffce7d1
--- /dev/null
+++ b/manila/share/drivers/zadara/common.py
@@ -0,0 +1,496 @@
+# Copyright (c) 2020 Zadara Storage, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import re
+
+from oslo_config import cfg
+from oslo_log import log as logging
+import requests
+
+LOG = logging.getLogger(__name__)
+
+# Timeout, in seconds, within which a response to a request sent to the
+# VPSA is expected; requests exceeding it time out. Initially set to
+# 300 seconds.
+vpsa_timeout = 300
+
+
+# Common exception class wrapping all the exceptions that are
+# redirected to the driver-specific exceptions.
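+# A usage sketch (this mirrors how ZadaraVPSAShareDriver.vpsa_send_cmd in
+# zadara.py translates these into the manila-level exceptions):
+#
+#     try:
+#         response = vpsa.send_cmd('list_volumes')
+#     except exception.BadHTTPResponseStatus as e:
+#         raise manila_exception.ZadaraBadHTTPResponseStatus(
+#             status=e.status)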
+class CommonException(Exception): + def __init__(self): + pass + + class UnknownCmd(Exception): + def __init__(self, cmd): + self.cmd = cmd + + class BadHTTPResponseStatus(Exception): + def __init__(self, status): + self.status = status + + class FailedCmdWithDump(Exception): + def __init__(self, status, data): + self.status = status + self.data = data + + class SessionRequestException(Exception): + def __init__(self, msg): + self.msg = msg + + class ZadaraInvalidAccessKey(Exception): + pass + + +exception = CommonException() + + +zadara_opts = [ + cfg.HostAddressOpt('zadara_vpsa_host', + default=None, + help='VPSA - Management Host name or IP address'), + cfg.PortOpt('zadara_vpsa_port', + default=None, + help='VPSA - Port number'), + cfg.BoolOpt('zadara_vpsa_use_ssl', + default=False, + help='VPSA - Use SSL connection'), + cfg.BoolOpt('zadara_ssl_cert_verify', + default=True, + help='If set to True the http client will validate the SSL ' + 'certificate of the VPSA endpoint.'), + cfg.StrOpt('zadara_access_key', + default=None, + help='VPSA access key', + secret=True), + cfg.StrOpt('zadara_vpsa_poolname', + default=None, + help='VPSA - Storage Pool assigned for volumes'), + cfg.BoolOpt('zadara_vol_encrypt', + default=False, + help='VPSA - Default encryption policy for volumes. ' + 'If the option is neither configured nor provided ' + 'as metadata, the VPSA will inherit the default value.'), + cfg.BoolOpt('zadara_gen3_vol_dedupe', + default=False, + help='VPSA - Enable deduplication for volumes. ' + 'If the option is neither configured nor provided ' + 'as metadata, the VPSA will inherit the default value.'), + cfg.BoolOpt('zadara_gen3_vol_compress', + default=False, + help='VPSA - Enable compression for volumes. ' + 'If the option is neither configured nor provided ' + 'as metadata, the VPSA will inherit the default value.'), + cfg.BoolOpt('zadara_default_snap_policy', + default=False, + help="VPSA - Attach snapshot policy for volumes. " + "If the option is neither configured nor provided " + "as metadata, the VPSA will inherit the default value.")] + + +# Class used to connect and execute the commands on +# Zadara Virtual Private Storage Array (VPSA). +class ZadaraVPSAConnection(object): + """Executes driver commands on VPSA.""" + + def __init__(self, conf, driver_ssl_cert_path, block): + self.conf = conf + self.access_key = conf.zadara_access_key + if not self.access_key: + raise exception.ZadaraInvalidAccessKey() + self.driver_ssl_cert_path = driver_ssl_cert_path + # Choose the volume type of either block or file-type + # that will help to filter volumes. 
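+        # For example, with block=False (the manila case) the file-type
+        # filter makes the 'list_volumes' command below query
+        # /api/volumes.json?showonlyfile=YES.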
+ self.vol_type_str = 'showonlyblock' if block else 'showonlyfile' + + def _generate_vpsa_cmd(self, cmd, **kwargs): + """Generate command to be sent to VPSA.""" + + # Dictionary of applicable VPSA commands in the following format: + # 'command': (method, API_URL, {optional parameters}) + vpsa_commands = { + # Volume operations + 'create_volume': ('POST', + '/api/volumes.json', + {'name': kwargs.get('name'), + 'capacity': kwargs.get('size'), + 'pool': self.conf.zadara_vpsa_poolname, + 'block': 'YES' + if self.vol_type_str == 'showonlyblock' + else 'NO', + 'thin': 'YES', + 'crypt': 'YES' + if self.conf.zadara_vol_encrypt else 'NO', + 'compress': 'YES' + if self.conf.zadara_gen3_vol_compress else 'NO', + 'dedupe': 'YES' + if self.conf.zadara_gen3_vol_dedupe else 'NO', + 'attachpolicies': 'NO' + if not self.conf.zadara_default_snap_policy + else 'YES'}), + 'delete_volume': ('DELETE', + '/api/volumes/%s.json' % kwargs.get('vpsa_vol'), + {'force': 'YES'}), + 'expand_volume': ('POST', + '/api/volumes/%s/expand.json' + % kwargs.get('vpsa_vol'), + {'capacity': kwargs.get('size')}), + 'rename_volume': ('POST', + '/api/volumes/%s/rename.json' + % kwargs.get('vpsa_vol'), + {'new_name': kwargs.get('new_name')}), + # Snapshot operations + # Snapshot request is triggered for a single volume though the + # API call implies that snapshot is triggered for CG (legacy API). + 'create_snapshot': ('POST', + '/api/consistency_groups/%s/snapshots.json' + % kwargs.get('cg_name'), + {'display_name': kwargs.get('snap_name')}), + 'delete_snapshot': ('DELETE', + '/api/snapshots/%s.json' + % kwargs.get('snap_id'), + {}), + 'rename_snapshot': ('POST', + '/api/snapshots/%s/rename.json' + % kwargs.get('snap_id'), + {'newname': kwargs.get('new_name')}), + 'create_clone_from_snap': ('POST', + '/api/consistency_groups/%s/clone.json' + % kwargs.get('cg_name'), + {'name': kwargs.get('name'), + 'snapshot': kwargs.get('snap_id')}), + 'create_clone': ('POST', + '/api/consistency_groups/%s/clone.json' + % kwargs.get('cg_name'), + {'name': kwargs.get('name')}), + # Server operations + 'create_server': ('POST', + '/api/servers.json', + {'iqn': kwargs.get('iqn'), + 'iscsi': kwargs.get('iscsi_ip'), + 'display_name': kwargs.get('iqn') + if kwargs.get('iqn') + else kwargs.get('iscsi_ip')}), + # Attach/Detach operations + 'attach_volume': ('POST', + '/api/servers/%s/volumes.json' + % kwargs.get('vpsa_srv'), + {'volume_name[]': kwargs.get('vpsa_vol'), + 'access_type': kwargs.get('share_proto'), + 'readonly': kwargs.get('read_only'), + 'force': 'YES'}), + 'detach_volume': ('POST', + '/api/volumes/%s/detach.json' + % kwargs.get('vpsa_vol'), + {'server_name[]': kwargs.get('vpsa_srv'), + 'force': 'YES'}), + # Update volume comment + 'update_volume': ('POST', + '/api/volumes/%s/update_comment.json' + % kwargs.get('vpsa_vol'), + {'new_comment': kwargs.get('new_comment')}), + + # Get operations + 'list_volumes': ('GET', + '/api/volumes.json?%s=YES' % self.vol_type_str, + {}), + 'get_volume': ('GET', + '/api/volumes/%s.json' % kwargs.get('vpsa_vol'), + {}), + 'get_volume_by_name': ('GET', + '/api/volumes.json?display_name=%s' + % kwargs.get('display_name'), + {}), + 'get_pool': ('GET', + '/api/pools/%s.json' % kwargs.get('pool_name'), + {}), + 'list_controllers': ('GET', + '/api/vcontrollers.json', + {}), + 'list_servers': ('GET', + '/api/servers.json', + {}), + 'list_vol_snapshots': ('GET', + '/api/consistency_groups/%s/snapshots.json' + % kwargs.get('cg_name'), + {}), + 'list_vol_attachments': ('GET', + '/api/volumes/%s/servers.json' + % 
kwargs.get('vpsa_vol'), + {}), + 'list_snapshots': ('GET', + '/api/snapshots.json', + {}), + # Put operations + 'change_export_name': ('PUT', + '/api/volumes/%s/export_name.json' + % kwargs.get('vpsa_vol'), + {'exportname': kwargs.get('exportname')})} + try: + method, url, params = vpsa_commands[cmd] + # Populate the metadata for the volume creation + metadata = kwargs.get('metadata') + if metadata: + for key, value in metadata.items(): + params[key] = value + except KeyError: + raise exception.UnknownCmd(cmd=cmd) + + if method == 'GET': + params = dict(page=1, start=0, limit=0) + body = None + + elif method in ['DELETE', 'POST', 'PUT']: + body = params + params = None + + else: + msg = ('Method %(method)s is not defined' % {'method': method}) + LOG.error(msg) + raise AssertionError(msg) + + # 'access_key' was generated using username and password + # or it was taken from the input file + headers = {'X-Access-Key': self.access_key} + + return method, url, params, body, headers + + def send_cmd(self, cmd, **kwargs): + """Send command to VPSA Controller.""" + + if not self.access_key: + raise exception.ZadaraInvalidAccessKey() + + method, url, params, body, headers = self._generate_vpsa_cmd(cmd, + **kwargs) + LOG.debug('Invoking %(cmd)s using %(method)s request.', + {'cmd': cmd, 'method': method}) + + host = self._get_target_host(self.conf.zadara_vpsa_host) + port = int(self.conf.zadara_vpsa_port) + + protocol = "https" if self.conf.zadara_vpsa_use_ssl else "http" + if protocol == "https": + if not self.conf.zadara_ssl_cert_verify: + verify = False + else: + verify = (self.driver_ssl_cert_path + if self.driver_ssl_cert_path else True) + else: + verify = False + + if port: + api_url = "%s://%s:%d%s" % (protocol, host, port, url) + else: + api_url = "%s://%s%s" % (protocol, host, url) + + try: + with requests.Session() as session: + session.headers.update(headers) + response = session.request(method, api_url, params=params, + data=body, headers=headers, + verify=verify, timeout=vpsa_timeout) + except requests.exceptions.RequestException as e: + msg = ('Exception: %s') % e + raise exception.SessionRequestException(msg=msg) + + if response.status_code != 200: + raise exception.BadHTTPResponseStatus( + status=response.status_code) + + data = response.content + json_data = json.loads(data) + response = json_data['response'] + status = int(response['status']) + if status == 5: + # Invalid Credentials + raise exception.ZadaraInvalidAccessKey() + + if status != 0: + raise exception.FailedCmdWithDump(status=status, data=data) + + if method in ['POST', 'DELETE']: + LOG.debug('Operation completed with status code %(status)s', + {'status': status}) + return response + + def _get_target_host(self, vpsa_host): + """Helper for target host formatting.""" + ipv6_without_brackets = ':' in vpsa_host and vpsa_host[-1] != ']' + if ipv6_without_brackets: + return ('[%s]' % vpsa_host) + return ('%s' % vpsa_host) + + def _get_active_controller_details(self): + """Return details of VPSA's active controller.""" + data = self.send_cmd('list_controllers') + ctrl = None + vcontrollers = data.get('vcontrollers', []) + for controller in vcontrollers: + if controller['state'] == 'active': + ctrl = controller + break + + if ctrl is not None: + target_ip = (ctrl['iscsi_ipv6'] if + ctrl['iscsi_ipv6'] else + ctrl['iscsi_ip']) + return dict(target=ctrl['target'], + ip=target_ip, + chap_user=ctrl['vpsa_chap_user'], + chap_passwd=ctrl['vpsa_chap_secret']) + return None + + def _check_access_key_validity(self): + """Check VPSA 
access key."""
+        if not self.access_key:
+            raise exception.ZadaraInvalidAccessKey()
+        active_ctrl = self._get_active_controller_details()
+        if active_ctrl is None:
+            raise exception.ZadaraInvalidAccessKey()
+
+    def _get_vpsa_volume(self, name):
+        """Returns a single vpsa volume based on the display name"""
+        volume = None
+        display_name = name
+        if re.search(r"\s", name):
+            display_name = re.split(r"\s", name)[0]
+        data = self.send_cmd('get_volume_by_name',
+                             display_name=display_name)
+        if data['status'] != 0:
+            return None
+        volumes = data['volumes']
+
+        for vol in volumes:
+            if vol['display_name'] == name:
+                volume = vol
+                break
+        return volume
+
+    def _get_vpsa_volume_by_id(self, vpsa_vol):
+        """Returns a single vpsa volume based on the volume id"""
+        data = self.send_cmd('get_volume', vpsa_vol=vpsa_vol)
+        return data['volume']
+
+    def _get_volume_cg_name(self, name):
+        """Return name of the consistency group for the volume.
+
+        cg-name is a unique volume identifier (legacy attribute)
+        and not a consistency group, as the name may imply.
+        """
+        volume = self._get_vpsa_volume(name)
+        if volume is not None:
+            return volume['cg_name']
+
+        return None
+
+    def _get_all_vpsa_snapshots(self):
+        """Returns snapshots from all vpsa volumes"""
+        data = self.send_cmd('list_snapshots')
+        return data['snapshots']
+
+    def _get_all_vpsa_volumes(self):
+        """Returns all vpsa volumes from the configured pool"""
+        data = self.send_cmd('list_volumes')
+        # FIXME: Workaround to filter volumes belonging to the given
+        # pool. Remove this once the API supports filtering based on
+        # pools. Today this API does not have the virtual_capacity field.
+        volumes = []
+
+        for volume in data['volumes']:
+            if volume['pool_name'] == self.conf.zadara_vpsa_poolname:
+                volumes.append(volume)
+
+        return volumes
+
+    def _get_server_name(self, initiator, share):
+        """Return VPSA's name for server object.
+ + 'share' will be true to search for filesystem volumes + """ + data = self.send_cmd('list_servers') + servers = data.get('servers', []) + for server in servers: + if share: + if server['iscsi_ip'] == initiator: + return server['name'] + else: + if server['iqn'] == initiator: + return server['name'] + return None + + def _create_vpsa_server(self, iqn=None, iscsi_ip=None): + """Create server object within VPSA (if doesn't exist).""" + initiator = iscsi_ip if iscsi_ip else iqn + share = True if iscsi_ip else False + vpsa_srv = self._get_server_name(initiator, share) + if not vpsa_srv: + data = self.send_cmd('create_server', iqn=iqn, iscsi_ip=iscsi_ip) + if data['status'] != 0: + return None + vpsa_srv = data['server_name'] + return vpsa_srv + + def _get_servers_attached_to_volume(self, vpsa_vol): + """Return all servers attached to volume.""" + servers = vpsa_vol.get('server_ext_names') + list_servers = [] + if servers: + list_servers = servers.split(',') + return list_servers + + def _detach_vpsa_volume(self, vpsa_vol, vpsa_srv=None): + """Detach volume from all attached servers.""" + if vpsa_srv: + list_servers_ids = [vpsa_srv] + else: + list_servers_ids = self._get_servers_attached_to_volume(vpsa_vol) + + for server_id in list_servers_ids: + # Detach volume from server + self.send_cmd('detach_volume', vpsa_srv=server_id, + vpsa_vol=vpsa_vol['name']) + + def _get_volume_snapshots(self, cg_name): + """Get snapshots in the consistency group""" + data = self.send_cmd('list_vol_snapshots', cg_name=cg_name) + snapshots = data.get('snapshots', []) + return snapshots + + def _get_snap_id(self, cg_name, snap_name): + """Return snapshot ID for particular volume.""" + snapshots = self._get_volume_snapshots(cg_name) + for snap_vol in snapshots: + if snap_vol['display_name'] == snap_name: + return snap_vol['name'] + + return None + + def _get_pool_capacity(self, pool_name): + """Return pool's total and available capacities.""" + data = self.send_cmd('get_pool', pool_name=pool_name) + pool = data.get('pool') + if pool is not None: + total = int(pool['capacity']) + free = int(pool['available_capacity']) + provisioned = int(pool['provisioned_capacity']) + LOG.debug('Pool %(name)s: %(total)sGB total, %(free)sGB free, ' + '%(provisioned)sGB provisioned', + {'name': pool_name, 'total': total, + 'free': free, 'provisioned': provisioned}) + return total, free, provisioned + + return 'unknown', 'unknown', 'unknown' diff --git a/manila/share/drivers/zadara/zadara.py b/manila/share/drivers/zadara/zadara.py new file mode 100644 index 0000000000..2ccf8b1319 --- /dev/null +++ b/manila/share/drivers/zadara/zadara.py @@ -0,0 +1,748 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Shared File system services driver for Zadara +Virtual Private Storage Array (VPSA). 
+""" + +import socket + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import strutils + +from manila import exception as manila_exception +from manila.i18n import _ +from manila.share import api +from manila.share import driver +from manila.share.drivers.zadara import common + +CONF = cfg.CONF +CONF.register_opts(common.zadara_opts) + +LOG = logging.getLogger(__name__) + +manila_opts = [ + cfg.StrOpt('zadara_share_name_template', + default='OS_share-%s', + help='VPSA - Default template for VPSA share names'), + cfg.StrOpt('zadara_share_snap_name_template', + default='OS_share-snapshot-%s', + help='VPSA - Default template for VPSA share names'), + cfg.StrOpt('zadara_driver_ssl_cert_path', + default=None, + help='Can be used to specify a non default path to a ' + 'CA_BUNDLE file or directory with certificates ' + 'of trusted CAs, which will be used to validate ' + 'the backend')] + + +class ZadaraVPSAShareDriver(driver.ShareDriver): + """Zadara VPSA Share driver. + + Version history:: + + 20.12-01 - Driver changes intended and aligned with + openstack latest release. + 20.12-02 - Fixed #18723 - Manila: Parsing the export location in a + more generic way while managing the vpsa share + 20.12-03 - Adding the metadata support while creating share to + configure vpsa. + 20.12-20 - IPv6 connectivity support for Manila driver + 20.12-21 - Adding unit tests and fixing review comments from the + openstack community. + 20.12-22 - Addressing review comments from the manila community. + 20.12-23 - Addressing review comments from the manila community. + 20.12-24 - Addressing review comments from the manila community. + """ + + VERSION = '20.12-24' + + # ThirdPartySystems wiki page + CI_WIKI_NAME = "ZadaraStorage_VPSA_CI" + + def __init__(self, *args, **kwargs): + """Do initialization.""" + super(ZadaraVPSAShareDriver, self).__init__(False, *args, **kwargs) + self.vpsa = None + self.configuration.append_config_values(common.zadara_opts) + self.configuration.append_config_values(manila_opts) + self.api = api.API() + # The valid list of share options that can be specified + # as the metadata while creating manila share + self.share_options = ['smbguest', 'smbonly', 'smbwindowsacl', + 'smbfilecreatemask', 'smbbrowseable', + 'smbhiddenfiles', 'smbhideunreadable', + 'smbhideunwriteable', 'smbhidedotfiles', + 'smbstoredosattributes', 'smbdircreatemask', + 'smbmaparchive', 'smbencryptionmode', + 'smbenableoplocks', 'smbaiosize', + 'nfsrootsquash', 'nfsallsquash', + 'nfsanongid', 'nfsanonuid', + 'atimeupdate', 'readaheadkb', 'crypt', + 'compress', 'dedupe', 'attachpolicies'] + + def _check_access_key_validity(self): + try: + self.vpsa._check_access_key_validity() + except common.exception.ZadaraInvalidAccessKey: + raise manila_exception.ZadaraManilaInvalidAccessKey() + + def do_setup(self, context): + """Any initialization the share driver does while starting. + + Establishes initial connection with VPSA and retrieves access_key. + Need to pass driver_ssl_cert_path here (and not fetch it from the + config opts directly in common code), because this config option is + different for different drivers and so cannot be figured in the + common code. 
+ """ + driver_ssl_cert_path = self.configuration.zadara_driver_ssl_cert_path + self.vpsa = common.ZadaraVPSAConnection(self.configuration, + driver_ssl_cert_path, False) + + def check_for_setup_error(self): + """Returns an error (exception) if prerequisites aren't met.""" + self._check_access_key_validity() + + def vpsa_send_cmd(self, cmd, **kwargs): + try: + response = self.vpsa.send_cmd(cmd, **kwargs) + except common.exception.UnknownCmd as e: + raise manila_exception.ZadaraUnknownCmd(cmd=e.cmd) + except common.exception.SessionRequestException as e: + raise manila_exception.ZadaraSessionRequestException(msg=e.msg) + except common.exception.BadHTTPResponseStatus as e: + raise manila_exception.ZadaraBadHTTPResponseStatus(status=e.status) + except common.exception.FailedCmdWithDump as e: + raise manila_exception.ZadaraFailedCmdWithDump(status=e.status, + data=e.data) + except common.exception.ZadaraInvalidAccessKey: + raise manila_exception.ZadaraManilaInvalidAccessKey() + return response + + def _get_zadara_share_template_name(self, share_id): + return self.configuration.zadara_share_name_template % share_id + + def _get_share_export_location(self, share): + export_location = '' + share_proto = share['share_proto'].upper() + + share_name = self._get_zadara_share_template_name(share['id']) + vpsa_volume = self.vpsa._get_vpsa_volume(share_name) + if not vpsa_volume: + msg = (_('VPSA volume for share %s ' + 'could not be found.') % share['id']) + LOG.error(msg) + raise manila_exception.ZadaraShareNotFound(name=share['id']) + + if share_proto == 'NFS': + export_location = vpsa_volume['nfs_export_path'] + if share_proto == 'CIFS': + export_location = vpsa_volume['smb_export_path'] + return export_location + + def _check_share_protocol(self, share): + share_proto = share['share_proto'].upper() + if share_proto not in ('NFS', 'CIFS'): + msg = _("Only NFS or CIFS protocol are currently supported. " + "Share provided %(share)s with protocol " + "%(proto)s.") % {'share': share['id'], + 'proto': share['share_proto']} + LOG.error(msg) + raise manila_exception.ZadaraInvalidProtocol( + protocol_type=share_proto) + + def is_valid_metadata(self, metadata): + LOG.debug('Metadata while creating share: %(metadata)s', + {'metadata': metadata}) + for key, value in metadata.items(): + if key in self.share_options: + # Check for the values allowed with provided metadata + if key in ['smbguest', 'smbonly', 'smbwindowsacl', + 'smbbrowseable', 'smbhideunreadable', + 'smbhideunwriteable', 'smbhidedotfiles', + 'smbstoredosattributes', 'smbmaparchive', + 'smbenableoplocks', 'nfsrootsquash', + 'nfsallsquash', 'atimeupdate', 'crypt', + 'compress', 'dedupe', 'attachpolicies']: + if value in ['YES', 'NO']: + continue + else: + return False + if key in ['smbfilecreatemask', 'smbdircreatemask']: + if value.isdigit(): + # The valid permissions should be for user,group,other + # with another special digit for attributes. Ex:0755 + if len(value) != 4: + return False + # No special permission bits for suid,sgid, + # stickybit are allowed for vpsa share. 
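+                        # For example: '0755' is accepted, while '4755'
+                        # (setuid bit set) and '0758' (invalid octal
+                        # digit) are rejected by the checks below.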
+ if int(value[0]) != 0: + return False + # The permissions are always specified in octal + for i in range(1, len(value)): + if int(value[i]) > 7: + return False + continue + else: + return False + if key == 'smbaiosize': + if value.isdigit() and value in ['16384', '1']: + continue + else: + return False + if key == 'smbencryptionmode': + if value in ['off', 'desired', 'required']: + continue + else: + return False + if key in ['nfsanongid', 'nfsanonuid']: + if value.isdigit() and int(value) != 0: + continue + else: + return False + if key == 'readaheadkb': + if value in ['16', '64', '128', '256', '512']: + continue + else: + return False + return True + + def create_share(self, context, share, share_server=None): + """Create a Zadara share and export it. + + :param context: A RequestContext. + :param share: A Share. + :param share_server: Not used currently + :return: The export locations dictionary. + """ + # Check share's protocol. + # Throw an exception immediately if it is an invalid protocol. + self._check_share_protocol(share) + share_name = self._get_zadara_share_template_name(share['id']) + + # Collect the share metadata provided and validate it + metadata = self.api.get_share_metadata(context, + {'id': share['share_id']}) + if not self.is_valid_metadata(metadata): + raise manila_exception.ManilaException(_( + "Not a valid metadata provided for the share %s") + % share['id']) + + data = self.vpsa_send_cmd('create_volume', + name=share_name, + size=share['size'], + metadata=metadata) + if data['status'] != 0: + raise manila_exception.ZadaraVPSAVolumeShareFailed( + error=data['status']) + + export_location = self._get_share_export_location(share) + return {'path': export_location} + + def _allow_access(self, context, share, access): + """Allow access to the share.""" + access_type = access['access_type'] + share_proto = share['share_proto'].upper() + if share_proto == 'CIFS': + share_proto = 'SMB' + + if access_type != 'ip': + raise manila_exception.ZadaraInvalidShareAccessType() + access_ip = access['access_to'] + access_level = 'YES' + if access['access_level'] == 'rw': + access_level = 'NO' + + # First: Check Active controller: if not valid, raise exception + ctrl = self.vpsa._get_active_controller_details() + if not ctrl: + raise manila_exception.ZadaraVPSANoActiveController() + + # Get volume name + vol_name = self._get_zadara_share_template_name(share['id']) + vpsa_volume = self.vpsa._get_vpsa_volume(vol_name) + + if not vpsa_volume: + msg = (_('VPSA volume for share %s ' + 'could not be found.') % share['id']) + LOG.error(msg) + raise manila_exception.ZadaraShareNotFound(name=share['id']) + + # Get/Create server name for given IP + vpsa_srv = self.vpsa._create_vpsa_server(iscsi_ip=access_ip) + if not vpsa_srv: + raise manila_exception.ZadaraServerCreateFailure(name=access_ip) + + servers = self.vpsa._get_servers_attached_to_volume(vpsa_volume) + attach = None + for server in servers: + if server == vpsa_srv: + attach = server + break + # Attach volume to server + if attach is None: + self.vpsa_send_cmd('attach_volume', + vpsa_srv=vpsa_srv, + vpsa_vol=vpsa_volume['name'], + share_proto=share_proto, + read_only=access_level) + + data = self.vpsa_send_cmd('list_vol_attachments', + vpsa_vol=vpsa_volume['name']) + server = None + servers = data.get('servers', []) + for srv in servers: + if srv['iscsi_ip'] == access_ip: + server = srv + break + + if server is None: + raise manila_exception.ZadaraAttachmentsNotFound( + name=vpsa_volume['name']) + + ctrl_ip = 
self.vpsa._get_target_host(ctrl['ip']) + properties = {'target_discovered': False, + 'target_portal': (('%s:%s') % (ctrl_ip, '3260')), + 'target_ip': server['iscsi_ip'], + 'id': share['id'], + 'auth_method': 'CHAP', + 'auth_username': ctrl['chap_user'], + 'auth_password': ctrl['chap_passwd']} + + LOG.debug('Attach properties: %(properties)s', + {'properties': strutils.mask_password(properties)}) + return {'driver_volume_type': share['share_proto'], 'data': properties} + + def delete_share(self, context, share, share_server=None): + """Delete share. Auto detach from all servers. + + """ + # Get share name + share_name = self._get_zadara_share_template_name(share['id']) + volume = self.vpsa._get_vpsa_volume(share_name) + if not volume: + LOG.warning('Volume %s could not be found. ' + 'It might be already deleted', share['id']) + return + + self.vpsa._detach_vpsa_volume(vpsa_vol=volume) + + # Delete volume associate with the share + self.vpsa_send_cmd('delete_volume', vpsa_vol=volume['name']) + + def _deny_access(self, context, share, access, share_server=None): + """Deny access to the share from the host. + + Auto detach from all servers. + """ + # First: Check Active controller: if not valid, raise exception + ctrl = self.vpsa._get_active_controller_details() + if not ctrl: + raise manila_exception.ZadaraVPSANoActiveController() + + # Get share name + share_name = self._get_zadara_share_template_name(share['id']) + volume = self.vpsa._get_vpsa_volume(share_name) + if not volume: + LOG.error('Volume %s could not be found.' + 'It might be already deleted', share['id']) + return + + self.vpsa._detach_vpsa_volume(vpsa_vol=volume) + + def update_access(self, context, share, access_rules, add_rules, + delete_rules, share_server=None): + access_updates = {} + if add_rules: + # Add rules for accessing share + for access_rule in add_rules: + try: + self._allow_access(context, share, access_rule) + except manila_exception.ZadaraInvalidShareAccessType: + LOG.error("Only ip access type allowed for Zadara share. " + "Failed to allow %(access_level)s access to " + "%(access_to)s for rule %(id)s. Setting rule " + "to 'error' state.", + {'access_level': access_rule['access_level'], + 'access_to': access_rule['access_to'], + 'id': access_rule['access_id']}) + access_updates.update( + {access_rule['access_id']: {'state': 'error'}}) + if delete_rules: + # Delete access rules for provided share + for access_rule in delete_rules: + self._deny_access(context, share, access_rule) + return access_updates + + def extend_share(self, share, new_size, share_server=None): + """Extend an existing share. + + """ + # Get the backend volume name for the share + share_name = self._get_zadara_share_template_name(share['id']) + vpsa_volume = self.vpsa._get_vpsa_volume(share_name) + if not vpsa_volume: + msg = (_('VPSA volume for share %s ' + 'could not be found.') % share['id']) + LOG.error(msg) + raise manila_exception.ZadaraShareNotFound(name=share['id']) + + size = vpsa_volume['virtual_capacity'] + expand_size = new_size - size + data = self.vpsa_send_cmd('expand_volume', + vpsa_vol=vpsa_volume['name'], + size=expand_size) + if data['status'] != 0: + raise manila_exception.ZadaraExtendShareFailed( + error=data['status']) + + def _ensure_share(self, context, share, share_server=None): + """Ensure that the share has a backend volume and it is exported. 
+ + """ + # Get the backend volume name for the share + share_name = self._get_zadara_share_template_name(share['id']) + vpsa_volume = self.vpsa._get_vpsa_volume(share_name) + if not vpsa_volume: + msg = (_('VPSA volume for share %s ' + 'could not be found.') % share['id']) + LOG.error(msg) + raise manila_exception.ZadaraShareNotFound(name=share['id']) + + export_locations = share['export_locations'] + if export_locations: + return export_locations + else: + servers_list = (self.vpsa._get_servers_attached_to_volume( + vpsa_volume)) + if len(servers_list) != 0: + msg = (_('Servers attached to the VPSA volume %s without ' + 'any locations exported.') % vpsa_volume['name']) + LOG.error(msg) + raise manila_exception.ZadaraShareNotValid( + name=share['id']) + + def _update_share_stats(self): + + backend_name = self.configuration.share_backend_name + dhss = self.configuration.driver_handles_share_servers + vpsa_poolname = self.configuration.zadara_vpsa_poolname + (total, free, provisioned) = ( + self.vpsa._get_pool_capacity(vpsa_poolname)) + ctrl = self.vpsa._get_active_controller_details() + if not ctrl: + raise manila_exception.ZadaraVPSANoActiveController() + ipv4_support = False if ':' in ctrl['ip'] else True + + # VPSA backend pool + single_pool = dict( + pool_name=vpsa_poolname, + total_capacity_gb=total, + free_capacity_gb=free, + allocated_capacity_gb=(total - free), + provisioned_capacity_gb=provisioned, + reserved_percentage=self.configuration.reserved_share_percentage, + compression=[True, False], + dedupe=[True, False], + thin_provisioning=True + ) + + data = dict( + share_backend_name=backend_name, + driver_handles_share_servers=dhss, + vendor_name='Zadara Storage', + driver_version=self.VERSION, + storage_protocol='NFS_CIFS', + pools=[single_pool], + snapshot_support=True, + create_share_from_snapshot_support=True, + revert_to_snapshot_support=False, + mount_snapshot_support=False, + ipv4_support=ipv4_support, + ipv6_support=not ipv4_support + ) + super(ZadaraVPSAShareDriver, self)._update_share_stats(data) + + def create_snapshot(self, context, snapshot, share_server=None): + """Creates a snapshot.""" + LOG.debug('Create snapshot: %s', snapshot['id']) + + # Retrieve the CG name for the base volume + share = snapshot['share'] + volume_name = self._get_zadara_share_template_name(share['id']) + cg_name = self.vpsa._get_volume_cg_name(volume_name) + if not cg_name: + msg = (_('VPSA volume for share %s ' + 'could not be found.') % share['id']) + LOG.error(msg) + raise manila_exception.ZadaraShareNotFound(name=share['id']) + + snap_name = (self.configuration.zadara_share_snap_name_template + % snapshot['id']) + data = self.vpsa_send_cmd('create_snapshot', + cg_name=cg_name, + snap_name=snap_name) + if data['status'] != 0: + raise manila_exception.ZadaraVPSASnapshotCreateFailed( + name=share['id'], error=data['status']) + + return {'provider_location': data['snapshot_name']} + + def delete_snapshot(self, context, snapshot, share_server=None): + """Deletes a snapshot.""" + LOG.debug('Delete snapshot: %s', snapshot['id']) + + # Retrieve the CG name for the base volume + share = snapshot['share'] + volume_name = self._get_zadara_share_template_name(share['id']) + cg_name = self.vpsa._get_volume_cg_name(volume_name) + if not cg_name: + # If the volume isn't present, then don't attempt to delete + LOG.warning('snapshot: original volume %s not found, ' + 'skipping delete operation', + volume_name) + return + + snap_name = (self.configuration.zadara_share_snap_name_template + % snapshot['id']) 
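+        # With the default template this resolves to
+        # 'OS_share-snapshot-<snapshot id>' (assuming
+        # zadara_share_snap_name_template was not overridden).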
+ snap_id = self.vpsa._get_snap_id(cg_name, snap_name) + if not snap_id: + # If the snapshot isn't present, then don't attempt to delete + LOG.warning('snapshot: snapshot %s not found, ' + 'skipping delete operation', snap_name) + return + + self.vpsa_send_cmd('delete_snapshot', + snap_id=snap_id) + + def create_share_from_snapshot(self, context, share, snapshot, + share_server=None, parent_share=None): + """Creates a share from a snapshot. + + """ + LOG.debug('Creating share from snapshot: %s', snapshot['id']) + + # Retrieve the CG name for the base volume + volume_name = (self._get_zadara_share_template_name( + snapshot['share_instance_id'])) + cg_name = self.vpsa._get_volume_cg_name(volume_name) + if not cg_name: + msg = (_('VPSA volume for share %s ' + 'could not be found.') % share['id']) + LOG.error(msg) + raise manila_exception.ZadaraShareNotFound(name=share['id']) + + snap_name = (self.configuration.zadara_share_snap_name_template + % snapshot['id']) + snap_id = self.vpsa._get_snap_id(cg_name, snap_name) + if not snap_id: + msg = _('Snapshot %(name)s not found') % {'name': snap_name} + LOG.error(msg) + raise manila_exception.ShareSnapshotNotFound( + snapshot_id=snap_name) + + self._check_share_protocol(share) + + share_name = self._get_zadara_share_template_name(share['id']) + self.vpsa_send_cmd('create_clone_from_snap', + cg_name=cg_name, + name=share_name, + snap_id=snap_id) + + if share['size'] > snapshot['size']: + self.extend_share(share, share['size']) + + export_location = self._get_share_export_location(share) + return [{'path': export_location}] + + def _get_export_name_from_export_path(self, proto, export_path): + if proto == 'nfs' and '\\' in export_path: + return None + if proto == 'cifs' and '/' in export_path: + return None + + # Extract the export name from the provided export path + if proto == 'nfs': + separator = '/' + export_location = export_path.strip(separator) + export_name = export_location.split(separator)[-1] + else: + separator = '\\' + export_location = export_path.strip(separator) + export_name = export_location.split(separator)[-1] + return export_name + + def _extract_vpsa_volume_from_share(self, share): + """Returns a vpsa volume based on the export location""" + if not share['export_locations'][0]['path']: + return None + + share_proto = share['share_proto'].lower() + export_path = share['export_locations'][0]['path'] + export_name = self._get_export_name_from_export_path(share_proto, + export_path) + if export_name is None: + msg = (_('Please verify the specifed protocol and export path.')) + LOG.error(msg) + raise manila_exception.ManilaException(msg) + + volume = None + volumes = self.vpsa._get_all_vpsa_volumes() + # Find the volume with the corresponding export name + for vol in volumes: + if share_proto == 'nfs': + vol_export_path = vol.get('nfs_export_path', None) + else: + vol_export_path = vol.get('smb_export_path', None) + + vol_export_name = self._get_export_name_from_export_path( + share_proto, vol_export_path) + if export_name == vol_export_name: + volume = vol + break + + # Check the additional smb export paths of the volume + if (share_proto == 'cifs' and + vol['additional_smb_export_paths_count'] > 0): + for additional_path in vol['additional_smb_export_paths']: + vol_export_name = self._get_export_name_from_export_path( + share_proto, additional_path) + if export_name == vol_export_name: + volume = vol + break + if volume: + return volume + else: + msg = (_('Manage backend share could not be found. 
It might be ' + 'deleted or please verify the specifed protocol and ' + 'export path.')) + LOG.error(msg) + raise manila_exception.ManilaException(msg) + + def manage_existing(self, share, driver_options): + # Check whether the specified protocol is supported or not. + self._check_share_protocol(share) + + LOG.info("Share %(shr_path)s will be managed with share %(shr_name)s.", + {'shr_path': share['export_locations'][0]['path'], + 'shr_name': share['id']}) + + # Find the backend vpsa volume for the provided export location + vpsa_volume = self._extract_vpsa_volume_from_share(share) + + # Check if the volume is available + if vpsa_volume['status'] != 'Available': + msg = (_('Existing share %(name)s is not available') + % {'name': vpsa_volume['name']}) + LOG.error(msg) + raise manila_exception.ManilaException(msg) + + new_share_name = self._get_zadara_share_template_name(share['id']) + new_vpsa_share = self.vpsa._get_vpsa_volume(new_share_name) + if new_vpsa_share: + msg = (_('Share %(new_name)s already exists') + % {'new_name': new_share_name}) + LOG.error(msg) + raise manila_exception.ManilaException(msg) + + # Rename the volume to the manila share specified name + data = self.vpsa_send_cmd('rename_volume', + vpsa_vol=vpsa_volume['name'], + new_name=new_share_name) + if data['status'] != 0: + msg = (_('Renaming volume %(old_name)s to %(new_name)s ' + 'has failed.') % {'old_name': vpsa_volume['name'], + 'new_name': new_share_name}) + LOG.error(msg) + raise manila_exception.ManilaException(msg) + + return {'size': vpsa_volume['provisioned_capacity'], + 'export_locations': share['export_locations'][0]['path']} + + def unmanage(self, share): + """Removes the specified volume from Manila management""" + pass + + def manage_existing_snapshot(self, snapshot, driver_options): + share = snapshot['share'] + share_name = self._get_zadara_share_template_name(share['id']) + + vpsa_volume = self.vpsa._get_vpsa_volume(share_name) + if not vpsa_volume: + msg = (_('Volume %(name)s could not be found. ' + 'It might be already deleted') % {'name': share_name}) + LOG.error(msg) + raise manila_exception.ZadaraShareNotFound(name=share['id']) + + # Check if the provider_location is specified + if not snapshot['provider_location']: + msg = (_('Provider location as snap id of the VPSA backend ' + 'should be provided')) + LOG.error(msg) + raise manila_exception.ManilaException(msg) + + new_name = (self.configuration.zadara_share_snap_name_template + % snapshot['id']) + new_snap_id = self.vpsa._get_snap_id(vpsa_volume['cg_name'], + new_name) + if new_snap_id: + msg = (_('Snapshot with name %s already exists') % new_name) + LOG.debug(msg) + return + + data = self.vpsa_send_cmd('rename_snapshot', + snap_id=snapshot['provider_location'], + new_name=new_name) + if data['status'] != 0: + raise manila_exception.ZadaraVPSASnapshotManageFailed( + snap_id=snapshot['provider_location'], + error=data['status']) + + def unmanage_snapshot(self, snapshot): + """Removes the specified snapshot from Manila management""" + pass + + def get_configured_ip_versions(self): + """"Get allowed IP versions. + + The shares created should have export location as per the + IP version. Currently, zadara backend doesn't support both + ipv4 and ipv6. 
Collect the supported IP version from the + vpsa's active controller + """ + ctrl = self.vpsa._get_active_controller_details() + if not ctrl: + raise manila_exception.ZadaraVPSANoActiveController() + + if ':' in ctrl['ip']: + return [6] + else: + return [4] + + def get_backend_info(self, context): + return { + 'version': self.VERSION, + 'vsa_feip': socket.gethostbyname(self.vpsa.conf.zadara_vpsa_host), + 'vsa_port': self.vpsa.conf.zadara_vpsa_port + } + + def ensure_shares(self, context, shares): + updates = {} + for share in shares: + updates[share['id']] = { + 'export_locations': self._ensure_share(context, share)} + return updates diff --git a/manila/tests/share/drivers/zadara/__init__.py b/manila/tests/share/drivers/zadara/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/manila/tests/share/drivers/zadara/test_zadara.py b/manila/tests/share/drivers/zadara/test_zadara.py new file mode 100644 index 0000000000..72d6f5ea4a --- /dev/null +++ b/manila/tests/share/drivers/zadara/test_zadara.py @@ -0,0 +1,1052 @@ +# Copyright (c) 2021 Zadara Storage, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests for Zadara VPSA Share driver +""" + +import copy +import requests + +from unittest import mock +from urllib import parse + +from manila import context +from manila import exception as manila_exception +from manila.share import configuration +from manila.share.drivers.zadara import zadara +from manila import test +from manila.tests import fake_share + + +def check_access_key(func): + """A decorator for all operations that needed an API before executing""" + def wrap(self, *args, **kwargs): + if not self._is_correct_access_key(): + return RUNTIME_VARS['bad_login'] + return func(self, *args, **kwargs) + + return wrap + + +DEFAULT_RUNTIME_VARS = { + 'status': 200, + 'user': 'test', + 'password': 'test_password', + 'access_key': '0123456789ABCDEF', + 'volumes': [], + 'servers': [], + 'controllers': [('active_ctrl', {'display-name': 'test_ctrl'})], + 'counter': 1000, + + "login": """ + { + "response": { + "user": { + "updated-at": "2021-01-22", + "access-key": "%s", + "id": 1, + "created-at": "2021-01-22", + "email": "jsmith@example.com", + "username": "jsmith" + }, + "status": 0 + } + }""", + "good": """ + { + "response": { + "status": 0 + } + }""", + "good_snapshot": """ + { + "response": { + "snapshot_name": "fakesnaplocation", + "status": 0 + } + }""", + "bad_login": """ + { + "response": { + "status": 5, + "status-msg": "Some message..." 
+ } + }""", + "bad_volume": """ + { + "response": { + "status": 10081, + "status-msg": "Virtual volume xxx should be found" + } + }""", + "fake_volume": """ + { + "response": { + "volumes": [], + "status": 0, + "status-msg": "Virtual volume xxx doesn't exist" + } + }""", + "bad_server": """ + { + "response": { + "status": 10086, + "status-msg": "Server xxx not found" + } + }""", + "server_created": """ + { + "response": { + "server_name": "%s", + "status": 0 + } + }""", +} + +RUNTIME_VARS = None + + +class FakeResponse(object): + def __init__(self, method, url, params, body, headers, **kwargs): + # kwargs include: verify, timeout + self.method = method + self.url = url + self.body = body + self.params = params + self.headers = headers + self.status = RUNTIME_VARS['status'] + + @property + def access_key(self): + """Returns Response Access Key""" + return self.headers["X-Access-Key"] + + def read(self): + ops = {'POST': [('/api/users/login.json', self._login), + ('/api/volumes.json', self._create_volume), + ('/api/servers.json', self._create_server), + ('/api/servers/*/volumes.json', self._attach), + ('/api/volumes/*/rename.json', self._rename), + ('/api/volumes/*/detach.json', self._detach), + ('/api/volumes/*/expand.json', self._expand), + ('/api/consistency_groups/*/snapshots.json', + self._create_snapshot), + ('/api/snapshots/*/rename.json', + self._rename_snapshot), + ('/api/consistency_groups/*/clone.json', + self._create_clone_from_snapshot), + ('/api/consistency_groups/*/clone.json', + self._create_clone)], + 'DELETE': [('/api/volumes/*', self._delete), + ('/api/snapshots/*', self._delete_snapshot)], + 'GET': [('/api/volumes.json?showonlyfile=YES', + self._list_volumes), + ('/api/volumes.json?display_name=*', + self._get_volume_by_name), + ('/api/pools/*.json', self._get_pool), + ('/api/vcontrollers.json', self._list_controllers), + ('/api/servers.json', self._list_servers), + ('/api/consistency_groups/*/snapshots.json', + self._list_vol_snapshots), + ('/api/volumes/*/servers.json', + self._list_vol_attachments)] + } + + ops_list = ops[self.method] + for (templ_url, func) in ops_list: + if self._compare_url(self.url, templ_url): + result = func() + return result + + @staticmethod + def _compare_url(url, template_url): + items = url.split('/') + titems = template_url.split('/') + for (i, titem) in enumerate(titems): + if '*' not in titem and titem != items[i]: + return False + if '?' 
in titem and titem.split('=')[0] != items[i].split('=')[0]: + return False + + return True + + @staticmethod + def _get_counter(): + cnt = RUNTIME_VARS['counter'] + RUNTIME_VARS['counter'] += 1 + return cnt + + def _login(self): + params = self.body + if (params['user'] == RUNTIME_VARS['user'] and + params['password'] == RUNTIME_VARS['password']): + return RUNTIME_VARS['login'] % RUNTIME_VARS['access_key'] + else: + return RUNTIME_VARS['bad_login'] + + def _is_correct_access_key(self): + return self.access_key == RUNTIME_VARS['access_key'] + + @check_access_key + def _create_volume(self): + params = self.body + params['display-name'] = params['name'] + params['cg-name'] = params['name'] + params['snapshots'] = [] + params['server_ext_names'] = '' + params['provisioned-capacity'] = 1 + vpsa_vol = 'volume-%07d' % self._get_counter() + params['nfs-export-path'] = '10.2.1.56:/export/%s' % vpsa_vol + RUNTIME_VARS['volumes'].append((vpsa_vol, params)) + return RUNTIME_VARS['good'] + + @check_access_key + def _create_server(self): + params = self.body + + params['display-name'] = params['display_name'] + vpsa_srv = 'srv-%07d' % self._get_counter() + RUNTIME_VARS['servers'].append((vpsa_srv, params)) + return RUNTIME_VARS['server_created'] % vpsa_srv + + @check_access_key + def _attach(self): + srv = self.url.split('/')[3] + + params = self.body + + vol = params['volume_name[]'] + + for (vol_name, params) in RUNTIME_VARS['volumes']: + if params['name'] == vol: + attachments = params['server_ext_names'].split(',') + if srv in attachments: + # already attached - ok + return RUNTIME_VARS['good'] + else: + if not attachments[0]: + params['server_ext_names'] = srv + else: + params['server_ext_names'] += ',' + srv + return RUNTIME_VARS['good'] + + return RUNTIME_VARS['bad_volume'] + + @check_access_key + def _detach(self): + params = self.body + vol = self.url.split('/')[3] + srv = params['server_name[]'] + + for (vol_name, params) in RUNTIME_VARS['volumes']: + if params['name'] == vol: + attachments = params['server_ext_names'].split(',') + if srv not in attachments: + return RUNTIME_VARS['bad_server'] + else: + attachments.remove(srv) + params['server_ext_names'] = (','.join([str(elem) + for elem in attachments])) + return RUNTIME_VARS['good'] + + return RUNTIME_VARS['bad_volume'] + + @check_access_key + def _expand(self): + params = self.body + vol = self.url.split('/')[3] + capacity = params['capacity'] + + for (vol_name, params) in RUNTIME_VARS['volumes']: + if params['name'] == vol: + params['capacity'] = capacity + return RUNTIME_VARS['good'] + + return RUNTIME_VARS['bad_volume'] + + @check_access_key + def _rename(self): + params = self.body + vol = self.url.split('/')[3] + + for (vol_name, vol_params) in RUNTIME_VARS['volumes']: + if vol_params['name'] == vol: + vol_params['name'] = params['new_name'] + vol_params['display-name'] = params['new_name'] + vol_params['cg-name'] = params['new_name'] + return RUNTIME_VARS['good'] + + return RUNTIME_VARS['bad_volume'] + + @check_access_key + def _rename_snapshot(self): + params = self.body + vpsa_snapshot = self.url.split('/')[3] + + for (vol_name, vol_params) in RUNTIME_VARS['volumes']: + for snapshot in vol_params['snapshots']: + if vpsa_snapshot == snapshot['provider-location']: + snapshot['name'] = params['newname'] + snapshot['display-name'] = params['newname'] + return RUNTIME_VARS['good'] + + return RUNTIME_VARS['bad_volume'] + + @check_access_key + def _create_snapshot(self): + params = self.body + cg_name = self.url.split('/')[3] + snap_name 
= params['display_name'] + + for (vol_name, params) in RUNTIME_VARS['volumes']: + if params['cg-name'] == cg_name: + snapshots = params['snapshots'] + if snap_name in snapshots: + # already attached + return RUNTIME_VARS['bad_volume'] + else: + snapshots.append(snap_name) + return RUNTIME_VARS['good_snapshot'] + + return RUNTIME_VARS['bad_volume'] + + @check_access_key + def _delete_snapshot(self): + snap = self.url.split('/')[3].split('.')[0] + + for (vol_name, params) in RUNTIME_VARS['volumes']: + if snap in params['snapshots']: + params['snapshots'].remove(snap) + return RUNTIME_VARS['good'] + + return RUNTIME_VARS['bad_volume'] + + @check_access_key + def _create_clone_from_snapshot(self): + params = self.body + params['display-name'] = params['name'] + params['cg-name'] = params['name'] + params['capacity'] = 1 + params['snapshots'] = [] + params['server_ext_names'] = '' + params['pool'] = 'pool-0001' + params['provisioned-capacity'] = 1 + vpsa_vol = 'volume-%07d' % self._get_counter() + params['nfs-export-path'] = '10.2.1.56:/export/%s' % vpsa_vol + RUNTIME_VARS['volumes'].append((vpsa_vol, params)) + return RUNTIME_VARS['good'] + + @check_access_key + def _create_clone(self): + params = self.body + params['display-name'] = params['name'] + params['cg-name'] = params['name'] + params['capacity'] = 1 + params['snapshots'] = [] + params['server_ext_names'] = '' + vpsa_vol = 'volume-%07d' % self._get_counter() + RUNTIME_VARS['volumes'].append((vpsa_vol, params)) + return RUNTIME_VARS['good'] + + def _delete(self): + vol = self.url.split('/')[3].split('.')[0] + + for (vol_name, params) in RUNTIME_VARS['volumes']: + if params['name'] == vol: + if params['server_ext_names']: + # there are attachments - should be volume busy error + return RUNTIME_VARS['bad_volume'] + else: + RUNTIME_VARS['volumes'].remove((vol_name, params)) + return RUNTIME_VARS['good'] + + return RUNTIME_VARS['bad_volume'] + + def _generate_list_resp(self, null_body, body, lst, vol): + resp = '' + for (obj, params) in lst: + if vol: + resp += body % (params['name'], + params['display-name'], + params['cg-name'], + params['capacity'], + params['pool'], + params['provisioned-capacity'], + params['nfs-export-path']) + else: + resp += body % (obj, params['display-name']) + if resp: + return resp + else: + return null_body + + def _list_volumes(self): + null_body = """ + { + "response": { + "volumes": [ + ], + "status": 0 + } + }""" + body = """ + { + "response": { + "volumes": %s, + "status": 0 + } + }""" + + volume_obj = """ + { + "name": "%s", + "display_name": "%s", + "cg_name": "%s", + "status": "Available", + "virtual_capacity": %d, + "pool_name": "%s", + "allocated-capacity": 1, + "provisioned_capacity": "%s", + "raid-group-name": "r5", + "cache": "write-through", + "created-at": "2021-01-22", + "modified-at": "2021-01-22", + "nfs_export_path": "%s" + } + """ + if len(RUNTIME_VARS['volumes']) == 0: + return null_body + resp = '' + volume_list = '' + count = 0 + for (vol_name, params) in RUNTIME_VARS['volumes']: + volume_dict = volume_obj % (params['name'], + params['display-name'], + params['cg-name'], + params['capacity'], + params['pool'], + params['provisioned-capacity'], + params['nfs-export-path']) + if count == 0: + volume_list += volume_dict + count += 1 + elif count != len(RUNTIME_VARS['volumes']): + volume_list = volume_list + ',' + volume_dict + count += 1 + if volume_list: + volume_list = '[' + volume_list + ']' + resp = body % volume_list + return resp + + return RUNTIME_VARS['bad_volume'] + + def 
+        volume_name = self.url.split('=')[1]
+        body = """
+        {
+            "response": {
+                "volumes": [
+                    {
+                        "name": "%s",
+                        "display_name": "%s",
+                        "cg_name": "%s",
+                        "status": "Available",
+                        "virtual_capacity": %d,
+                        "pool_name": "%s",
+                        "allocated-capacity": 1,
+                        "provisioned_capacity": %d,
+                        "raid-group-name": "r5",
+                        "cache": "write-through",
+                        "created-at": "2021-01-22",
+                        "modified-at": "2021-01-22",
+                        "nfs_export_path": "%s",
+                        "server_ext_names": "%s"
+                    }
+                ],
+                "status": 0
+            }
+        }"""
+        for (vol_name, params) in RUNTIME_VARS['volumes']:
+            if params['name'] == volume_name:
+                resp = body % (volume_name, params['display-name'],
+                               params['cg-name'], params['capacity'],
+                               params['pool'], params['provisioned-capacity'],
+                               params['nfs-export-path'],
+                               params['server_ext_names'])
+                return resp
+
+        return RUNTIME_VARS['fake_volume']
+
+    def _list_controllers(self):
+        null_body = """
+        {
+            "response": {
+                "vcontrollers": [
+                ],
+                "status": 0
+            }
+        }"""
+        body = """
+        {
+            "response": {
+                "vcontrollers": [
+                    {
+                        "name": "%s",
+                        "display_name": "%s",
+                        "state": "active",
+                        "target":
+                        "iqn.2011-04.zadarastorage:vsa-xxx:1",
+                        "iscsi_ip": "1.1.1.1",
+                        "iscsi_ipv6": "",
+                        "mgmt-ip": "1.1.1.1",
+                        "software-ver": "0.0.09-05.1--77.7",
+                        "heartbeat1": "ok",
+                        "heartbeat2": "ok",
+                        "vpsa_chap_user": "test_chap_user",
+                        "vpsa_chap_secret": "test_chap_secret"
+                    }
+                ],
+                "status": 0
+            }
+        }"""
+        return self._generate_list_resp(null_body,
+                                        body,
+                                        RUNTIME_VARS['controllers'],
+                                        False)
+
+    def _get_pool(self):
+        response = """
+        {
+            "response": {
+                "pool": {
+                    "name": "pool-0001",
+                    "capacity": 100,
+                    "available_capacity": 99,
+                    "provisioned_capacity": 1
+                },
+                "status": 0
+            }
+        }"""
+        return response
+
+    def _list_servers(self):
+        null_body = """
+        {
+            "response": {
+                "servers": [
+                ],
+                "status": 0
+            }
+        }"""
+        body = """
+        {
+            "response": {
+                "servers": %s,
+                "status": 0
+            }
+        }"""
+
+        server_obj = """
+        {
+            "name": "%s",
+            "display_name": "%s",
+            "iscsi_ip": "%s",
+            "status": "Active",
+            "created-at": "2021-01-22",
+            "modified-at": "2021-01-22"
+        }
+        """
+        resp = ''
+        server_list = ''
+        count = 0
+        for (obj, params) in RUNTIME_VARS['servers']:
+            server_dict = server_obj % (obj,
+                                        params['display-name'],
+                                        params['iqn'])
+            if count == 0:
+                server_list += server_dict
+                count += 1
+            elif count != len(RUNTIME_VARS['servers']):
+                server_list = server_list + ',' + server_dict
+                count += 1
+        server_list = '[' + server_list + ']'
+        resp = body % server_list
+        if resp:
+            return resp
+        else:
+            return null_body
+
+    def _get_server_obj(self, name):
+        for (srv_name, params) in RUNTIME_VARS['servers']:
+            if srv_name == name:
+                return params
+
+    def _list_vol_attachments(self):
+        vol = self.url.split('/')[3]
+        null_body = """
+        {
+            "response": {
+                "servers": [
+                ],
+                "status": 0
+            }
+        }"""
+        body = """
+        {
+            "response": {
+                "servers": %s,
+                "status": 0
+            }
+        }"""
+
+        server_obj = """
+        {
+            "name": "%s",
+            "display_name": "%s",
+            "iscsi_ip": "%s",
+            "target":
+            "iqn.2011-04.zadarastorage:vsa-xxx:1",
+            "lun": 0
+        }
+        """
+        for (vol_name, params) in RUNTIME_VARS['volumes']:
+            if params['name'] == vol:
+                attachments = params['server_ext_names'].split(',')
+                if not attachments[0]:
+                    return null_body
+                resp = ''
+                server_list = ''
+                count = 0
+                for server in attachments:
+                    srv_params = self._get_server_obj(server)
+                    server_dict = (server_obj % (server,
+                                   srv_params['display_name'],
+                                   srv_params['iscsi']))
+                    if count == 0:
+                        server_list += server_dict
+                        count += 1
+                    elif count != len(attachments):
+                        server_list = server_list + ',' + server_dict
+                        count += 1
+                server_list = '[' + server_list + ']'
+                resp = body % server_list
+                return resp
+
+        return RUNTIME_VARS['bad_volume']
+
+    def _list_vol_snapshots(self):
+        cg_name = self.url.split('/')[3]
+
+        null_body = """
+        {
+            "response": {
+                "snapshots": [
+                ],
+                "status": 0
+            }
+        }"""
+
+        body = """
+        {
+            "response": {
+                "snapshots": %s,
+                "status": 0
+            }
+        }"""
+
+        snapshot_obj = """
+        {
+            "name": "%s",
+            "display_name": "%s",
+            "status": "normal",
+            "cg-name": "%s",
+            "pool-name": "pool-00000001"
+        }
+        """
+        for (vol_name, params) in RUNTIME_VARS['volumes']:
+            if params['cg-name'] == cg_name:
+                snapshots = params['snapshots']
+                if len(snapshots) == 0:
+                    return null_body
+                resp = ''
+                snapshot_list = ''
+                count = 0
+
+                for snapshot in snapshots:
+                    snapshot_dict = snapshot_obj % (snapshot, snapshot,
+                                                    cg_name)
+                    if count == 0:
+                        snapshot_list += snapshot_dict
+                        count += 1
+                    elif count != len(snapshots):
+                        snapshot_list = snapshot_list + ',' + snapshot_dict
+                        count += 1
+                snapshot_list = '[' + snapshot_list + ']'
+                resp = body % snapshot_list
+                return resp
+
+        return RUNTIME_VARS['bad_volume']
+
+
+class FakeRequests(object):
+    """A fake requests module for Zadara share driver tests."""
+    def __init__(self, method, api_url, params=None, data=None,
+                 headers=None, **kwargs):
+        apiurl_items = parse.urlparse(api_url)
+        if apiurl_items.query:
+            url = apiurl_items.path + '?' + apiurl_items.query
+        else:
+            url = apiurl_items.path
+        res = FakeResponse(method, url, params, data, headers, **kwargs)
+        self.content = res.read()
+        self.status_code = res.status
+
+
+class ZadaraVPSAShareDriverTestCase(test.TestCase):
+
+    @mock.patch.object(requests.Session, 'request', FakeRequests)
+    def setUp(self):
+        super(ZadaraVPSAShareDriverTestCase, self).setUp()
+
+        def _safe_get(opt):
+            return getattr(self.configuration, opt)
+
+        self._context = context.get_admin_context()
+        self.configuration = mock.Mock(spec=configuration.Configuration)
+        self.configuration.safe_get = mock.Mock(side_effect=_safe_get)
+
+        global RUNTIME_VARS
+        RUNTIME_VARS = copy.deepcopy(DEFAULT_RUNTIME_VARS)
+
+        self.configuration.driver_handles_share_servers = False
+        self.configuration.network_config_group = (
+            'fake_network_config_group')
+        self.configuration.admin_network_config_group = (
+            'fake_admin_network_config_group')
+        self.configuration.reserved_percentage = 0
+        self.configuration.zadara_use_iser = True
+        self.configuration.zadara_vpsa_host = '192.168.5.5'
+        self.configuration.zadara_vpsa_port = '80'
+        self.configuration.zadara_user = 'test'
+        self.configuration.zadara_password = 'test_password'
+        self.configuration.zadara_access_key = '0123456789ABCDEF'
+        self.configuration.zadara_vpsa_poolname = 'pool-0001'
+        self.configuration.zadara_vol_encrypt = False
+        self.configuration.zadara_share_name_template = 'OS_share-%s'
+        self.configuration.zadara_share_snap_name_template = (
+            'OS_share-snapshot-%s')
+        self.configuration.zadara_vpsa_use_ssl = False
+        self.configuration.zadara_ssl_cert_verify = False
+        self.configuration.zadara_default_snap_policy = False
+        self.configuration.zadara_driver_ssl_cert_path = None
+        self.configuration.zadara_gen3_vol_compress = True
+        self.configuration.zadara_gen3_vol_dedupe = True
+        self.configuration.share_backend_name = 'zadaravpsa'
+        self.configuration.reserved_share_percentage = '0'
+        self.configuration.replication_domain = None
+        self.configuration.filter_function = None
+        self.configuration.goodness_function = None
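+        # Everything above is consumed through safe_get(); the driver
+        # built below runs against the FakeRequests transport patched
+        # into requests.Session, so setup never reaches a real VPSA.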
+        self.driver = (zadara.ZadaraVPSAShareDriver(
+                       configuration=self.configuration))
+        self.driver.do_setup(None)
+        self.driver.api.get_share_metadata = mock.Mock(return_value={})
+        self.driver._get_share_export_location = mock.Mock()
+
+    @mock.patch.object(requests.Session, 'request', FakeRequests)
+    def test_do_setup(self):
+        self.driver.do_setup(self._context)
+        self.assertIsNotNone(self.driver.vpsa)
+        self.assertEqual(self.driver.vpsa.access_key,
+                         self.configuration.zadara_access_key)
+
+    @mock.patch.object(requests.Session, 'request', FakeRequests)
+    def test_no_active_ctrl(self):
+        share = fake_share.fake_share(id='fakeid', share_proto='NFS',
+                                      share_id='fakeshareid')
+        self.driver.create_share(self._context, share)
+        access = fake_share.fake_access()
+
+        RUNTIME_VARS['controllers'] = []
+        self.assertRaises(manila_exception.ZadaraVPSANoActiveController,
+                          self.driver._allow_access,
+                          self._context,
+                          share, access)
+
+    @mock.patch.object(requests.Session, 'request', FakeRequests)
+    def test_create_share_unsupported_proto(self):
+        share = fake_share.fake_share(share_proto='INVALID')
+        self.assertRaises(manila_exception.ZadaraInvalidProtocol,
+                          self.driver.create_share,
+                          self._context,
+                          share)
+
+    @mock.patch.object(requests.Session, 'request', FakeRequests)
+    def test_create_delete_share(self):
+        """Create and delete a share."""
+        share = fake_share.fake_share(share_proto='NFS',
+                                      share_id='fakeshareid')
+        self.driver.create_share(self._context, share)
+        self.driver.delete_share(self._context, share)
+
+    @mock.patch.object(requests.Session, 'request', FakeRequests)
+    def test_create_delete_multiple_shares(self):
+        """Create/Delete multiple shares."""
+        share1 = fake_share.fake_share(id='fakeid1', share_proto='NFS',
+                                       share_id='fakeshareid1')
+        self.driver.create_share(self._context, share1)
+
+        share2 = fake_share.fake_share(id='fakeid2', share_proto='CIFS',
+                                       share_id='fakeshareid2')
+        self.driver.create_share(self._context, share2)
+
+        self.driver.delete_share(self._context, share1)
+        self.driver.delete_share(self._context, share2)
+
+    @mock.patch.object(requests.Session, 'request', FakeRequests)
+    def test_delete_non_existent(self):
+        """Delete a non-existent share."""
+        share = fake_share.fake_share(share_proto='NFS',
+                                      share_id='fakeshareid')
+        self.driver.delete_share(self._context, share)
+
+    @mock.patch.object(requests.Session, 'request', FakeRequests)
+    def test_create_delete_share_snapshot(self):
+        """Create/Delete share snapshot."""
+        share1 = fake_share.fake_share(id='fakeid1', share_proto='NFS',
+                                       share_id='fakeshareid1')
+        self.driver.create_share(self._context, share1)
+        snapshot = fake_share.fake_snapshot(name='fakesnap',
+                                            share=share1,
+                                            share_name=share1['name'],
+                                            share_id=share1['id'],
+                                            provider_location='fakelocation')
+
+        share2 = fake_share.fake_share(id='fakeid2', share_proto='NFS',
+                                       share_id='fakeshareid2')
+        self.assertRaises(manila_exception.ManilaException,
+                          self.driver.create_snapshot,
+                          self._context,
+                          {'name': snapshot['name'],
+                           'id': snapshot['id'],
+                           'share': share2})
+
+        self.driver.create_snapshot(self._context, snapshot)
+
+        # Delete should succeed for a missing volume
+        self.driver.delete_snapshot(self._context,
+                                    {'name': snapshot['name'],
+                                     'id': snapshot['id'],
+                                     'share': share2})
+        # Delete should succeed for a missing snapshot
+        self.driver.delete_snapshot(self._context,
+                                    {'name': 'wrong_snap',
+                                     'id': 'wrong_id',
+                                     'share': share1})
+
+        self.driver.delete_snapshot(self._context, snapshot)
+        self.driver.delete_share(self._context, share1)
+
+    @mock.patch.object(requests.Session, 'request', FakeRequests)
+    def test_extend_share(self):
+        """Extend share test."""
+        share1 = fake_share.fake_share(id='fakeid1', share_proto='NFS',
+                                       share_id='fakeshareid', size=10)
+        share2 = fake_share.fake_share(id='fakeid2',
+                                       share_proto='NFS', size=10)
+        self.driver.create_share(self._context, share1)
+
+        self.assertRaises(manila_exception.ZadaraShareNotFound,
+                          self.driver.extend_share,
+                          share2, 15)
+
+        self.driver.extend_share(share1, 15)
+        self.driver.delete_share(self._context, share1)
+
+    @mock.patch.object(requests.Session, 'request', FakeRequests)
+    def test_create_share_from_snapshot(self):
+        """Create a share from snapshot test."""
+        share1 = fake_share.fake_share(id='fakeid1', share_proto='NFS',
+                                       share_id='fakeshareid1')
+        share2 = fake_share.fake_share(id='fakeid2', share_proto='NFS',
+                                       share_id='fakeshareid2')
+        self.driver.create_share(self._context, share1)
+
+        snapshot = fake_share.fake_snapshot(name='fakesnap',
+                                            share=share1,
+                                            share_name=share1['name'],
+                                            share_id=share1['id'],
+                                            share_instance_id=share1['id'],
+                                            provider_location='fakelocation')
+        self.driver.create_snapshot(self._context, snapshot)
+
+        self.assertRaises(manila_exception.ManilaException,
+                          self.driver.create_share_from_snapshot,
+                          self._context,
+                          share2,
+                          {'name': snapshot['name'],
+                           'id': snapshot['id'],
+                           'share': share2,
+                           'share_instance_id': share2['id']})
+
+        self.assertRaises(manila_exception.ManilaException,
+                          self.driver.create_share_from_snapshot,
+                          self._context,
+                          share2,
+                          {'name': 'fakesnapname',
+                           'id': 'fakesnapid',
+                           'share': share1,
+                           'share_instance_id': share1['id']})
+
+        self.driver.create_share_from_snapshot(self._context,
+                                               share2, snapshot)
+        self.driver.delete_share(self._context, share1)
+        self.driver.delete_share(self._context, share2)
+
+    def create_vpsa_backend_share(self):
+        vpsashare_params = {}
+        vpsashare_params['id'] = 'fake_id'
+        vpsashare_params['name'] = 'fake_name'
+        vpsashare_params['display-name'] = 'fake_name'
+        vpsashare_params['cg-name'] = 'fake_name'
+        vpsashare_params['size'] = 1
+        vpsashare_params['capacity'] = 1
+        vpsashare_params['pool'] = 'pool-0001'
+        vpsashare_params['share_proto'] = 'NFS'
+        vpsashare_params['nfs-export-path'] = '10.2.1.56:/export/manage_id'
+        vpsashare_params['provisioned-capacity'] = 1
+        vpsashare_params['server_ext_names'] = ''
+        vpsa_volname = 'fake-volume'
+        vpsa_share = (vpsa_volname, vpsashare_params)
+        RUNTIME_VARS['volumes'].append(vpsa_share)
+        return vpsa_share
+
+    @mock.patch.object(requests.Session, 'request', FakeRequests)
+    def test_manage_existing_share(self):
+        share1 = {'id': 'manage_name',
+                  'name': 'manage_name',
+                  'display-name': 'manage_name',
+                  'size': 1,
+                  'share_proto': 'NFS',
+                  'export_locations':
+                      [{'path': '10.2.1.56:/export/manage_id'}]}
+        driver_options = {}
+        vpsa_share = self.create_vpsa_backend_share()
+
+        self.driver.manage_existing(share1, driver_options)
+        # manage_existing renames the backend share with the name template
+        self.assertEqual(vpsa_share[1]['display-name'].split('-')[1],
+                         share1['display-name'])
+        self.driver.delete_share(self._context, share1)
+
+    @mock.patch.object(requests.Session, 'request', FakeRequests)
+    def test_get_share_stats(self):
+        """Get stats test."""
+        self.configuration.safe_get.return_value = 'ZadaraVPSAShareDriver'
+        data = self.driver.get_share_stats(True)
+        self.assertEqual('Zadara Storage', data['vendor_name'])
+        self.assertEqual('unknown', data['total_capacity_gb'])
+        self.assertEqual('unknown', data['free_capacity_gb'])
+        self.assertEqual(data['reserved_percentage'],
+                         self.configuration.reserved_percentage)
+        self.assertEqual(data['snapshot_support'], True)
+        self.assertEqual(data['create_share_from_snapshot_support'], True)
+        self.assertEqual(data['revert_to_snapshot_support'], False)
+        self.assertEqual(data['vendor_name'], 'Zadara Storage')
+        self.assertEqual(data['driver_version'], self.driver.VERSION)
+        self.assertEqual(data['storage_protocol'], 'NFS_CIFS')
+        self.assertEqual(data['share_backend_name'],
+                         self.configuration.share_backend_name)
+
+    def test_allow_access_with_incorrect_access_type(self):
+        share = fake_share.fake_share(id='fakeid1', share_proto='NFS')
+        access = fake_share.fake_access(access_type='fake_type')
+
+        self.assertRaises(manila_exception.ZadaraInvalidShareAccessType,
+                          self.driver._allow_access,
+                          self._context, share, access)
+
+    @mock.patch.object(requests.Session, 'request', FakeRequests)
+    def test_share_allow_deny_access(self):
+        """Test share access allow and deny rules."""
+        share = fake_share.fake_share(id='fakeid', share_proto='NFS',
+                                      share_id='fakeshareid')
+        self.driver.create_share(self._context, share)
+        access = fake_share.fake_access()
+
+        # Attach a server to access the share with the fake access rules
+        allow_access = self.driver._allow_access(self._context, share,
+                                                 access)
+        self.assertEqual(allow_access['driver_volume_type'],
+                         share['share_proto'])
+        self.assertEqual('1.1.1.1:3260',
+                         allow_access['data']['target_portal'])
+        (srv_name, srv_params) = RUNTIME_VARS['servers'][0]
+        self.assertEqual(srv_params['iscsi'],
+                         allow_access['data']['target_ip'])
+        self.assertEqual(share['id'], allow_access['data']['id'])
+        self.assertEqual('CHAP', allow_access['data']['auth_method'])
+        self.assertEqual('test_chap_user',
+                         allow_access['data']['auth_username'])
+        self.assertEqual('test_chap_secret',
+                         allow_access['data']['auth_password'])
+
+        # Denying a rule that was never allowed must not raise an error
+        dup_access = fake_share.fake_access()
+        self.driver._deny_access(self._context, share, dup_access)
+        # Detach the server from the share by denying its access rule
+        self.driver._deny_access(self._context, share, access)
+        self.driver.delete_share(self._context, share)
+
+    def create_vpsa_backend_share_snapshot(self, share):
+        vpsasnap_params = {}
+        vpsasnap_params['id'] = 'fakesnapid'
+        vpsasnap_params['name'] = 'fakesnapname'
+        vpsasnap_params['display-name'] = 'fakesnapname'
+        vpsasnap_params['provider-location'] = 'fakesnaplocation'
+        (vol_name, vol_params) = RUNTIME_VARS['volumes'][0]
+        vol_params['snapshots'].append(vpsasnap_params)
+
+    @mock.patch.object(requests.Session, 'request', FakeRequests)
+    def test_manage_existing_snapshot(self):
+        share = {'id': 'fake_id',
+                 'share_id': 'fake_shareid',
+                 'name': 'fake_name',
+                 'display-name': 'fake_name',
+                 'cg-name': 'fake_name',
+                 'size': 1,
+                 'capacity': 1,
+                 'share_proto': 'NFS',
+                 'pool': 'pool-0001',
+                 'nfs-export-path': '10.2.1.56:/export/manage_id',
+                 'provisioned-capacity': 1}
+
+        self.driver.create_share(self._context, share)
+        # Create a backend snapshot that will be managed by manila
+        self.create_vpsa_backend_share_snapshot(share)
+
+        snapshot = {'id': 'manage_snapname',
+                    'name': 'manage_snapname',
+                    'display_name': 'manage_snapname',
+                    'provider_location': 'fakesnaplocation',
+                    'share': share}
+        driver_options = {}
+
+        self.driver.manage_existing_snapshot(snapshot, driver_options)
+
+        # Check that the backend snapshot has been renamed
+        (vol_name, vol_params) = RUNTIME_VARS['volumes'][0]
+        self.assertEqual(
+            vol_params['snapshots'][0]['display-name'].split('-')[2],
+            snapshot['display_name'])
+        self.driver.delete_snapshot(self._context, snapshot)
+        self.driver.delete_share(self._context, share)
diff --git a/releasenotes/notes/zadara-manila-driver-cb22b647e60f7ab8.yaml b/releasenotes/notes/zadara-manila-driver-cb22b647e60f7ab8.yaml
new file mode 100644
index 0000000000..6699175bea
--- /dev/null
+++ b/releasenotes/notes/zadara-manila-driver-cb22b647e60f7ab8.yaml
@@ -0,0 +1,3 @@
+---
+features:
+  - Added Manila driver for Zadara VPSA Storage Array/Flash-Array.
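
The fake-transport pattern used throughout the test module above can be hard
to follow inside a diff, so here is a minimal, self-contained sketch of the
same mechanism. FakeReply and FakeTransportTest are illustrative names
invented for this sketch, not part of the patch:

    import unittest
    from unittest import mock

    import requests


    class FakeReply(object):
        """Stands in for a requests.Response in this sketch."""
        def __init__(self, method, url, **kwargs):
            # Because a class (not a function) is patched in, no session
            # instance gets bound: the call arguments arrive here directly.
            self.status_code = 200
            self.content = b'{"response": {"status": 0}}'


    class FakeTransportTest(unittest.TestCase):
        @mock.patch.object(requests.Session, 'request', FakeReply)
        def test_request_is_intercepted(self):
            # requests.Session.request now constructs a FakeReply instead
            # of touching the network, mirroring how FakeRequests is wired
            # into ZadaraVPSAShareDriverTestCase.
            reply = requests.Session().request('GET', 'http://vpsa/api/pools')
            self.assertEqual(200, reply.status_code)


    if __name__ == '__main__':
        unittest.main()

The patch's tests use the same substitution, with the additional step that
FakeResponse dispatches each method/URL pair to one of the handlers above and
replays shared state from RUNTIME_VARS, so every driver operation is exercised
against an in-memory model of the VPSA instead of a live array.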