[NetApp] Share server migration through SVM migrate

Implements share server migration using a purpose-built mechanism
provided by ONTAP. When the driver identifies that the ONTAP
version supports this mechanism, it automatically chooses it
instead of SVM DR.

- Implemented new methods for migrating a share server using a
new mechanism provided by ONTAP, available when both source and
destination clusters run version 9.10 or newer. This new migration
mechanism supports nondisruptive migrations when no network
changes are involved.

- The NetApp driver no longer needs to create an actual share server
in the backend prior to the migration when SVM Migrate is being
used.

- The NetApp ONTAP driver can now reuse network allocations from
the source share server when no share network change is
identified.
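
A minimal sketch of the mechanism selection this change introduces
(mirroring share_server_migration_start in lib_multi_svm.py below); the
helper name is illustrative and not part of the patch:

    def choose_migration_method(src_client, dest_client):
        # SVM Migrate requires ONTAP >= 9.10 on both the source and the
        # destination cluster; otherwise the driver falls back to SVM DR.
        if (src_client.is_svm_migrate_supported()
                and dest_client.is_svm_migrate_supported()):
            return 'svm_migrate'
        return 'svm_dr'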

Change-Id: Idf1581d933d11280287f6801fd4aa886a627f66f
Depends-On: I48bafd92fe7a4d4ae0bafd5bf1961dace56b6005
silvacarloss 2021-05-13 11:41:49 -03:00
parent c419a7bad0
commit 74d5a1b2cf
18 changed files with 2853 additions and 388 deletions


@ -23,12 +23,14 @@ import re
from lxml import etree
from oslo_log import log
from oslo_serialization import jsonutils
import requests
from requests import auth
import six
from manila import exception
from manila.i18n import _
from manila.share.drivers.netapp.dataontap.client import rest_endpoints
from manila.share.drivers.netapp import utils
LOG = log.getLogger(__name__)
@ -62,28 +64,23 @@ EPOLICYNOTFOUND = '18251'
EEVENTNOTFOUND = '18253'
ESCOPENOTFOUND = '18259'
ESVMDR_CANNOT_PERFORM_OP_FOR_STATUS = '18815'
ENFS_V4_0_ENABLED_MIGRATION_FAILURE = '13172940'
EVSERVER_MIGRATION_TO_NON_AFF_CLUSTER = '13172984'
STYLE_LOGIN_PASSWORD = 'basic_auth'
TRANSPORT_TYPE_HTTP = 'http'
TRANSPORT_TYPE_HTTPS = 'https'
STYLE_CERTIFICATE = 'certificate_auth'
class BaseClient(object):
"""Encapsulates server connection logic."""
def __init__(self, host, transport_type=TRANSPORT_TYPE_HTTP, style=None,
ssl_cert_path=None, username=None, password=None, port=None,
trace=False, api_trace_pattern=None):
super(BaseClient, self).__init__()
self._host = host
self.set_transport_type(transport_type)
self.set_style(style)
if port:
@ -99,9 +96,21 @@ class NaServer(object):
# Note(felipe_rodrigues): it will verify with the mozila CA roots,
# given by certifi package.
self._ssl_verify = True
LOG.debug('Using NetApp controller: %s', self._host)
def get_style(self):
"""Get the authorization style for communicating with the server."""
return self._auth_style
def set_style(self, style):
"""Set the authorization style for communicating with the server.
Supports basic_auth for now; certificate_auth is not yet implemented.
"""
if style.lower() not in (STYLE_LOGIN_PASSWORD, STYLE_CERTIFICATE):
raise ValueError('Unsupported authentication style')
self._auth_style = style.lower()
def get_transport_type(self):
"""Get the transport type protocol."""
return self._protocol
@ -112,38 +121,13 @@ class NaServer(object):
Supports http and https transport types.
"""
if transport_type.lower() not in (
TRANSPORT_TYPE_HTTP, TRANSPORT_TYPE_HTTPS):
raise ValueError('Unsupported transport type')
self._protocol = transport_type.lower()
self._refresh_conn = True
def get_server_type(self):
"""Get the target server type."""
"""Get the server type."""
return self._server_type
def set_server_type(self, server_type):
@ -151,16 +135,7 @@ class NaServer(object):
Supports filer and dfm server types.
"""
raise NotImplementedError()
def set_api_version(self, major, minor):
"""Set the API version."""
@ -216,14 +191,6 @@ class NaServer(object):
return self._timeout
return None
def get_vserver(self):
"""Get the vserver to use in tunneling."""
return self._vserver
@ -242,10 +209,110 @@ class NaServer(object):
self._password = password
self._refresh_conn = True
def invoke_successfully(self, na_element, api_args=None,
enable_tunneling=False, use_zapi=True):
"""Invokes API and checks execution status as success.
Need to set enable_tunneling to True explicitly to achieve it.
This helps to use same connection instance to enable or disable
tunneling. The vserver or vfiler should be set before this call
otherwise tunneling remains disabled.
"""
pass
def _build_session(self):
"""Builds a session in the client."""
if self._auth_style == STYLE_LOGIN_PASSWORD:
auth_handler = self._create_basic_auth_handler()
else:
auth_handler = self._create_certificate_auth_handler()
self._session = requests.Session()
self._session.auth = auth_handler
self._session.verify = self._ssl_verify
headers = self._build_headers()
self._session.headers = headers
def _build_headers(self):
"""Adds the necessary headers to the session."""
raise NotImplementedError()
def _create_basic_auth_handler(self):
"""Creates and returns a basic HTTP auth handler."""
return auth.HTTPBasicAuth(self._username, self._password)
def _create_certificate_auth_handler(self):
"""Creates and returns a certificate auth handler."""
raise NotImplementedError()
def __str__(self):
"""Gets a representation of the client."""
return "server: %s" % (self._host)
class ZapiClient(BaseClient):
SERVER_TYPE_FILER = 'filer'
SERVER_TYPE_DFM = 'dfm'
URL_FILER = 'servlets/netapp.servlets.admin.XMLrequest_filer'
URL_DFM = 'apis/XMLrequest'
NETAPP_NS = 'http://www.netapp.com/filer/admin'
def __init__(self, host, server_type=SERVER_TYPE_FILER,
transport_type=TRANSPORT_TYPE_HTTP,
style=STYLE_LOGIN_PASSWORD, ssl_cert_path=None, username=None,
password=None, port=None, trace=False,
api_trace_pattern=utils.API_TRACE_PATTERN):
super(ZapiClient, self).__init__(
host, transport_type=transport_type, style=style,
ssl_cert_path=ssl_cert_path, username=username, password=password,
port=port, trace=trace, api_trace_pattern=api_trace_pattern)
self.set_server_type(server_type)
self._set_port()
def _set_port(self):
"""Defines which port will be used to communicate with ONTAP."""
if self._protocol == TRANSPORT_TYPE_HTTP:
if self._server_type == ZapiClient.SERVER_TYPE_FILER:
self.set_port(80)
else:
self.set_port(8088)
else:
if self._server_type == ZapiClient.SERVER_TYPE_FILER:
self.set_port(443)
else:
self.set_port(8488)
def set_server_type(self, server_type):
"""Set the target server type.
Supports filer and dfm server types.
"""
if server_type.lower() not in (ZapiClient.SERVER_TYPE_FILER,
ZapiClient.SERVER_TYPE_DFM):
raise ValueError('Unsupported server type')
self._server_type = server_type.lower()
if self._server_type == ZapiClient.SERVER_TYPE_FILER:
self._url = ZapiClient.URL_FILER
else:
self._url = ZapiClient.URL_DFM
self._ns = ZapiClient.NETAPP_NS
self._refresh_conn = True
def get_vfiler(self):
"""Get the vfiler to use in tunneling."""
return self._vfiler
def set_vfiler(self, vfiler):
"""Set the vfiler to use if tunneling gets enabled."""
self._vfiler = vfiler
def invoke_elem(self, na_element, enable_tunneling=False):
"""Invoke the API on the server."""
if na_element and not isinstance(na_element, NaElement):
raise ValueError('NaElement must be supplied to invoke API')
request_element = self._create_request(na_element, enable_tunneling)
request_d = request_element.to_string()
@ -282,7 +349,8 @@ class NaServer(object):
return response_element
def invoke_successfully(self, na_element, api_args=None,
enable_tunneling=False, use_zapi=True):
"""Invokes API and checks execution status as success.
Need to set enable_tunneling to True explicitly to achieve it.
@ -290,7 +358,12 @@ class NaServer(object):
tunneling. The vserver or vfiler should be set before this call
otherwise tunneling remains disabled.
"""
if api_args:
na_element.translate_struct(api_args)
result = self.invoke_elem(
na_element, enable_tunneling=enable_tunneling)
if result.has_attr('status') and result.get_attr('status') == 'passed':
return result
code = (result.get_attr('errno')
@ -336,7 +409,8 @@ class NaServer(object):
raise ValueError('ontapi version has to be at least 1.15'
' to send request to vserver')
@staticmethod
def _parse_response(response):
"""Get the NaElement for the response."""
if not response:
raise NaApiError('No response received')
@ -349,28 +423,287 @@ class NaServer(object):
return processed_response.get_child_by_name('results')
def _get_url(self):
"""Get the base url to send the request."""
host = self._host
if ':' in host:
host = '[%s]' % host
return '%s://%s:%s/%s' % (self._protocol, host, self._port, self._url)
def _build_headers(self):
"""Build and return headers."""
return {'Content-Type': 'text/xml'}
class RestClient(BaseClient):
def __init__(self, host, transport_type=TRANSPORT_TYPE_HTTP,
style=STYLE_LOGIN_PASSWORD, ssl_cert_path=None, username=None,
password=None, port=None, trace=False,
api_trace_pattern=utils.API_TRACE_PATTERN):
super(RestClient, self).__init__(
host, transport_type=transport_type, style=style,
ssl_cert_path=ssl_cert_path, username=username, password=password,
port=port, trace=trace, api_trace_pattern=api_trace_pattern)
self._set_port()
def _set_port(self):
if self._protocol == TRANSPORT_TYPE_HTTP:
self.set_port(80)
else:
self.set_port(443)
def _get_request_info(self, api_name, session):
"""Returns the request method and url to be used in the REST call."""
request_methods = {
'post': session.post,
'get': session.get,
'put': session.put,
'delete': session.delete,
'patch': session.patch,
}
rest_call = rest_endpoints.endpoints.get(api_name)
return request_methods[rest_call['method']], rest_call['url']
def _add_query_params_to_url(self, url, query):
"""Populates the URL with specified filters."""
filters = ""
for k, v in query.items():
filters += "%(key)s=%(value)s&" % {"key": k, "value": v}
url += "?" + filters
return url
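
A quick illustration of the helper above, with hypothetical values:

    url = self._add_query_params_to_url(
        'svm/migrations', {'state': 'ready_for_cutover'})
    # url == 'svm/migrations?state=ready_for_cutover&'
    # (the helper leaves the trailing '&' in place)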
def invoke_elem(self, na_element, api_args=None):
"""Invoke the API on the server."""
if na_element and not isinstance(na_element, NaElement):
raise ValueError('NaElement must be supplied to invoke API')
api_name = na_element.get_name()
api_name_matches_regex = (re.match(self._api_trace_pattern, api_name)
is not None)
data = api_args.get("body") if api_args else {}
if (not hasattr(self, '_session') or not self._session
or self._refresh_conn):
self._build_session()
request_method, action_url = self._get_request_info(
api_name, self._session)
url_params = api_args.get("url_params") if api_args else None
if url_params:
action_url = action_url % url_params
query = api_args.get("query") if api_args else None
if query:
action_url = self._add_query_params_to_url(
action_url, api_args['query'])
url = self._get_base_url() + action_url
data = jsonutils.dumps(data) if data else data
if self._trace and api_name_matches_regex:
message = ("Request: %(method)s %(url)s. Request body "
"%(body)s") % {
"method": request_method,
"url": action_url,
"body": api_args.get("body") if api_args else {}
}
LOG.debug(message)
try:
if hasattr(self, '_timeout'):
response = request_method(
url, data=data, timeout=self._timeout)
else:
response = request_method(url, data=data)
except requests.HTTPError as e:
raise NaApiError(e.errno, e.strerror)
except requests.URLRequired as e:
raise exception.StorageCommunicationException(six.text_type(e))
except Exception as e:
raise NaApiError(message=e)
response = (
jsonutils.loads(response.content) if response.content else None)
if self._trace and api_name_matches_regex:
LOG.debug("Response: %s", response)
return response
def invoke_successfully(self, na_element, api_args=None,
enable_tunneling=False, use_zapi=False):
"""Invokes API and checks execution status as success.
Need to set enable_tunneling to True explicitly to achieve it.
This helps to use same connection instance to enable or disable
tunneling. The vserver or vfiler should be set before this call
otherwise tunneling remains disabled.
"""
result = self.invoke_elem(na_element, api_args=api_args)
if not result.get('error'):
return result
result_error = result.get('error')
code = (result_error.get('code')
or 'ESTATUSFAILED')
if code == ESIS_CLONE_NOT_LICENSED:
msg = 'Clone operation failed: FlexClone not licensed.'
else:
msg = (result_error.get('message')
or 'Execution status is failed due to unknown reason')
raise NaApiError(code, msg)
def _get_base_url(self):
"""Get the base URL for REST requests."""
host = self._host
if ':' in host:
host = '[%s]' % host
return '%s://%s:%s/api/' % (self._protocol, host, self._port)
def _build_headers(self):
"""Build and return headers for a REST request."""
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
return headers
class NaServer(object):
"""Encapsulates server connection logic."""
def __init__(self, host, transport_type=TRANSPORT_TYPE_HTTP,
style=STYLE_LOGIN_PASSWORD, ssl_cert_path=None, username=None,
password=None, port=None, trace=False,
api_trace_pattern=utils.API_TRACE_PATTERN):
self.zapi_client = ZapiClient(
host, transport_type=transport_type, style=style,
ssl_cert_path=ssl_cert_path, username=username, password=password,
port=port, trace=trace, api_trace_pattern=api_trace_pattern)
self.rest_client = RestClient(
host, transport_type=transport_type, style=style,
ssl_cert_path=ssl_cert_path, username=username, password=password,
port=port, trace=trace, api_trace_pattern=api_trace_pattern
)
self._host = host
LOG.debug('Using NetApp controller: %s', self._host)
def get_transport_type(self, use_zapi_client=True):
"""Get the transport type protocol."""
return self.get_client(use_zapi=use_zapi_client).get_transport_type()
def set_transport_type(self, transport_type):
"""Set the transport type protocol for API.
Supports http and https transport types.
"""
self.zapi_client.set_transport_type(transport_type)
self.rest_client.set_transport_type(transport_type)
def get_style(self, use_zapi_client=True):
"""Get the authorization style for communicating with the server."""
return self.get_client(use_zapi=use_zapi_client).get_style()
def set_style(self, style):
"""Set the authorization style for communicating with the server.
Supports basic_auth for now; certificate_auth is not yet implemented.
"""
self.zapi_client.set_style(style)
self.rest_client.set_style(style)
def get_server_type(self, use_zapi_client=True):
"""Get the target server type."""
return self.get_client(use_zapi=use_zapi_client).get_server_type()
def set_server_type(self, server_type):
"""Set the target server type.
Supports filer and dfm server types.
"""
self.zapi_client.set_server_type(server_type)
self.rest_client.set_server_type(server_type)
def set_api_version(self, major, minor):
"""Set the API version."""
self.zapi_client.set_api_version(major, minor)
self.rest_client.set_api_version(1, 0)
def set_system_version(self, system_version):
"""Set the ONTAP system version."""
self.zapi_client.set_system_version(system_version)
self.rest_client.set_system_version(system_version)
def get_api_version(self, use_zapi_client=True):
"""Gets the API version tuple."""
return self.get_client(use_zapi=use_zapi_client).get_api_version()
def get_system_version(self, use_zapi_client=True):
"""Gets the ONTAP system version."""
return self.get_client(use_zapi=use_zapi_client).get_system_version()
def set_port(self, port):
"""Set the server communication port."""
self.zapi_client.set_port(port)
self.rest_client.set_port(port)
def get_port(self, use_zapi_client=True):
"""Get the server communication port."""
return self.get_client(use_zapi=use_zapi_client).get_port()
def set_timeout(self, seconds):
"""Sets the timeout in seconds."""
self.zapi_client.set_timeout(seconds)
self.rest_client.set_timeout(seconds)
def get_timeout(self, use_zapi_client=True):
"""Gets the timeout in seconds if set."""
return self.get_client(use_zapi=use_zapi_client).get_timeout()
def get_vfiler(self):
"""Get the vfiler to use in tunneling."""
return self.zapi_client.get_vfiler()
def set_vfiler(self, vfiler):
"""Set the vfiler to use if tunneling gets enabled."""
self.zapi_client.set_vfiler(vfiler)
def get_vserver(self, use_zapi_client=True):
"""Get the vserver to use in tunneling."""
return self.get_client(use_zapi=use_zapi_client).get_vserver()
def set_vserver(self, vserver):
"""Set the vserver to use if tunneling gets enabled."""
self.zapi_client.set_vserver(vserver)
self.rest_client.set_vserver(vserver)
def set_username(self, username):
"""Set the user name for authentication."""
self.zapi_client.set_username(username)
self.rest_client.set_username(username)
def set_password(self, password):
"""Set the password for authentication."""
self.zapi_client.set_password(password)
self.rest_client.set_password(password)
def get_client(self, use_zapi=True):
"""Chooses the client to be used in the request."""
if use_zapi:
return self.zapi_client
return self.rest_client
def invoke_successfully(self, na_element, api_args=None,
enable_tunneling=False, use_zapi=True):
"""Invokes API and checks execution status as success.
Need to set enable_tunneling to True explicitly to achieve it.
This helps to use same connection instance to enable or disable
tunneling. The vserver or vfiler should be set before this call
otherwise tunneling remains disabled.
"""
return self.get_client(use_zapi=use_zapi).invoke_successfully(
na_element, api_args=api_args, enable_tunneling=enable_tunneling)
def __str__(self):
return "server: %s" % (self._host)


@ -81,12 +81,13 @@ class NetAppBaseClient(object):
return string.split('}', 1)[1]
return string
def send_request(self, api_name, api_args=None, enable_tunneling=True,
use_zapi=True):
"""Sends request to Ontapi."""
request = netapp_api.NaElement(api_name)
return self.connection.invoke_successfully(
request, api_args=api_args, enable_tunneling=enable_tunneling,
use_zapi=use_zapi)
@na_utils.trace
def get_licenses(self):


@ -74,6 +74,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
ontapi_1_120 = ontapi_version >= (1, 120)
ontapi_1_140 = ontapi_version >= (1, 140)
ontapi_1_150 = ontapi_version >= (1, 150)
ontap_9_10 = self.get_system_version()['version-tuple'] >= (9, 10, 0)
self.features.add_feature('SNAPMIRROR_V2', supported=ontapi_1_20)
self.features.add_feature('SYSTEM_METRICS', supported=ontapi_1_2x)
@ -95,6 +96,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
supported=ontapi_1_150)
self.features.add_feature('LDAP_LDAP_SERVERS',
supported=ontapi_1_120)
self.features.add_feature('SVM_MIGRATE', supported=ontap_9_10)
def _invoke_vserver_api(self, na_element, vserver):
server = copy.copy(self.connection)
@ -1040,6 +1042,24 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
return interfaces
@na_utils.trace
def disable_network_interface(self, vserver_name, interface_name):
api_args = {
'administrative-status': 'down',
'interface-name': interface_name,
'vserver': vserver_name,
}
self.send_request('net-interface-modify', api_args)
@na_utils.trace
def delete_network_interface(self, vserver_name, interface_name):
self.disable_network_interface(vserver_name, interface_name)
api_args = {
'interface-name': interface_name,
'vserver': vserver_name
}
self.send_request('net-interface-delete', api_args)
@na_utils.trace
def get_ipspace_name_for_vlan_port(self, vlan_node, vlan_port, vlan_id):
"""Gets IPSpace name for specified VLAN"""
@ -3605,7 +3625,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
# NOTE(cknight): Cannot use deepcopy on the connection context
node_client = copy.copy(self)
node_client.connection = copy.copy(self.connection.get_client())
node_client.connection.set_timeout(25)
try:
@ -5453,3 +5473,173 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
raise exception.NetAppException(msg)
return fpolicy_status
@na_utils.trace
def is_svm_migrate_supported(self):
"""Checks if the cluster supports SVM Migrate."""
return self.features.SVM_MIGRATE
# ------------------------ REST CALLS ONLY ------------------------
@na_utils.trace
def _format_request(self, request_data, headers={}, query={},
url_params={}):
"""Receives the request data and formats it into a request pattern.
:param request_data: the body to be sent to the request.
:param headers: additional headers to the request.
:param query: filters to the request.
:param url_params: parameters to be added to the request.
"""
request = {
"body": request_data,
"headers": headers,
"query": query,
"url_params": url_params
}
return request
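
For example, the pause call later in this file builds its arguments like
this (migration_id is a placeholder):

    api_args = self._format_request(
        {'action': 'pause'},
        url_params={'svm_migration_id': migration_id})
    # api_args == {'body': {'action': 'pause'}, 'headers': {},
    #              'query': {}, 'url_params': {'svm_migration_id': ...}}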
@na_utils.trace
def svm_migration_start(
self, source_cluster_name, source_share_server_name,
dest_aggregates, dest_ipspace=None, check_only=False):
"""Send a request to start the SVM migration in the backend.
:param source_cluster_name: the name of the source cluster.
:param source_share_server_name: the name of the source server.
:param dest_aggregates: the aggregates where volumes will be placed in
the migration.
:param dest_ipspace: created IPspace for the migration.
:param check_only: whether the call should only check the feasibility of
the migration.
"""
request = {
"auto_cutover": False,
"auto_source_cleanup": True,
"check_only": check_only,
"source": {
"cluster": {"name": source_cluster_name},
"svm": {"name": source_share_server_name},
},
"destination": {
"volume_placement": {
"aggregates": dest_aggregates,
},
},
}
if dest_ipspace:
ipspace_data = {
"ipspace": {
"name": dest_ipspace,
}
}
request["destination"].update(ipspace_data)
api_args = self._format_request(request)
return self.send_request(
'svm-migration-start', api_args=api_args, use_zapi=False)
@na_utils.trace
def get_migration_check_job_state(self, job_id):
"""Get the job state of a share server migration.
:param job_id: id of the job to be searched.
"""
try:
job = self.get_job(job_id)
return job
except netapp_api.NaApiError as e:
if e.code == netapp_api.ENFS_V4_0_ENABLED_MIGRATION_FAILURE:
msg = _(
'NFS v4.0 is not supported while migrating vservers.')
LOG.error(msg)
raise exception.NetAppException(message=e.message)
if e.code == netapp_api.EVSERVER_MIGRATION_TO_NON_AFF_CLUSTER:
msg = _('Both source and destination clusters must be AFF '
'systems.')
LOG.error(msg)
raise exception.NetAppException(message=e.message)
msg = (_('Failed to check migration support. Reason: '
'%s') % e.message)
raise exception.NetAppException(msg)
@na_utils.trace
def svm_migrate_complete(self, migration_id):
"""Send a request to complete the SVM migration.
:param migration_id: the id of the migration provided by the storage.
"""
request = {
"action": "cutover"
}
url_params = {
"svm_migration_id": migration_id
}
api_args = self._format_request(
request, url_params=url_params)
return self.send_request(
'svm-migration-complete', api_args=api_args, use_zapi=False)
@na_utils.trace
def svm_migrate_cancel(self, migration_id):
"""Send a request to cancel the SVM migration.
:param migration_id: the id of the migration provided by the storage.
"""
request = {}
url_params = {
"svm_migration_id": migration_id
}
api_args = self._format_request(request, url_params=url_params)
return self.send_request(
'svm-migration-cancel', api_args=api_args, use_zapi=False)
@na_utils.trace
def svm_migration_get(self, migration_id):
"""Send a request to get the progress of the SVM migration.
:param migration_id: the id of the migration provided by the storage.
"""
request = {}
url_params = {
"svm_migration_id": migration_id
}
api_args = self._format_request(request, url_params=url_params)
return self.send_request(
'svm-migration-get', api_args=api_args, use_zapi=False)
@na_utils.trace
def svm_migrate_pause(self, migration_id):
"""Send a request to pause a migration.
:param migration_id: the id of the migration provided by the storage.
"""
request = {
"action": "pause"
}
url_params = {
"svm_migration_id": migration_id
}
api_args = self._format_request(
request, url_params=url_params)
return self.send_request(
'svm-migration-pause', api_args=api_args, use_zapi=False)
@na_utils.trace
def get_job(self, job_uuid):
"""Get a job in ONTAP.
:param job_uuid: uuid of the job to be searched.
"""
request = {}
url_params = {
"job_uuid": job_uuid
}
api_args = self._format_request(request, url_params=url_params)
return self.send_request(
'get-job', api_args=api_args, use_zapi=False)


@ -0,0 +1,49 @@
# Copyright 2021 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
ENDPOINT_MIGRATION_ACTIONS = 'svm/migrations/%(svm_migration_id)s'
ENDPOINT_MIGRATIONS = 'svm/migrations'
ENDPOINT_JOB_ACTIONS = 'cluster/jobs/%(job_uuid)s'
endpoints = {
'system-get-version': {
'method': 'get',
'url': 'cluster?fields=version',
},
'svm-migration-start': {
'method': 'post',
'url': ENDPOINT_MIGRATIONS
},
'svm-migration-complete': {
'method': 'patch',
'url': ENDPOINT_MIGRATION_ACTIONS
},
'svm-migration-cancel': {
'method': 'delete',
'url': ENDPOINT_MIGRATION_ACTIONS
},
'svm-migration-get': {
'method': 'get',
'url': ENDPOINT_MIGRATION_ACTIONS
},
'get-job': {
'method': 'get',
'url': ENDPOINT_JOB_ACTIONS
},
'svm-migration-pause': {
'method': 'patch',
'url': ENDPOINT_MIGRATION_ACTIONS
},
}
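
A short example of how RestClient consumes this table (the migration id
is hypothetical):

    rest_call = endpoints['svm-migration-complete']
    # rest_call == {'method': 'patch',
    #               'url': 'svm/migrations/%(svm_migration_id)s'}
    action_url = rest_call['url'] % {'svm_migration_id': 'some-uuid'}
    # RestClient.invoke_elem then issues session.patch() against the
    # base URL ('<protocol>://<host>:<port>/api/') plus action_url.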


@ -85,6 +85,13 @@ def get_client_for_backend(backend_name, vserver_name=None):
return client
def get_client_for_host(host):
"""Returns a cluster client to the desired host."""
backend_name = share_utils.extract_host(host, level='backend_name')
client = get_client_for_backend(backend_name)
return client
class DataMotionSession(object):
def _get_backend_volume_name(self, config, share_obj):


@ -304,7 +304,7 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
def share_server_migration_start(self, context, src_share_server,
dest_share_server, shares, snapshots):
return self.library.share_server_migration_start(
context, src_share_server, dest_share_server, shares, snapshots)
def share_server_migration_continue(self, context, src_share_server,


@ -1310,7 +1310,8 @@ class NetAppCmodeFileStorageLibrary(object):
@na_utils.trace
def _create_export(self, share, share_server, vserver, vserver_client,
clear_current_export_policy=True,
ensure_share_already_exists=False, replica=False,
share_host=None):
"""Creates NAS storage."""
helper = self._get_helper(share)
helper.set_client(vserver_client)
@ -1325,9 +1326,11 @@ class NetAppCmodeFileStorageLibrary(object):
msg_args = {'vserver': vserver, 'proto': share['share_proto']}
raise exception.NetAppException(msg % msg_args)
host = share_host if share_host else share['host']
# Get LIF addresses with metadata
export_addresses = self._get_export_addresses_with_metadata(
share, share_server, interfaces, host)
# Create the share and get a callback for generating export locations
callback = helper.create_share(
@ -1355,11 +1358,11 @@ class NetAppCmodeFileStorageLibrary(object):
@na_utils.trace
def _get_export_addresses_with_metadata(self, share, share_server,
interfaces, share_host):
"""Return interface addresses with locality and other metadata."""
# Get home node so we can identify preferred paths
aggregate_name = share_utils.extract_host(share_host, level='pool')
home_node = self._get_aggregate_node(aggregate_name)
# Get admin LIF addresses so we can identify admin export locations


@ -44,6 +44,8 @@ SUPPORTED_NETWORK_TYPES = (None, 'flat', 'vlan')
SEGMENTED_NETWORK_TYPES = ('vlan',)
DEFAULT_MTU = 1500
CLUSTER_IPSPACES = ('Cluster', 'Default')
SERVER_MIGRATE_SVM_DR = 'svm_dr'
SERVER_MIGRATE_SVM_MIGRATE = 'svm_migrate'
class NetAppCmodeMultiSVMFileStorageLibrary(
@ -306,10 +308,12 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
return 'ipspace_' + network_id.replace('-', '_')
@na_utils.trace
def _create_ipspace(self, network_info, client=None):
"""If supported, create an IPspace for a new Vserver."""
desired_client = client if client else self._client
if not desired_client.features.IPSPACES:
return None
if (network_info['network_allocations'][0]['network_type']
@ -324,7 +328,7 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
return client_cmode.DEFAULT_IPSPACE
ipspace_name = self._get_valid_ipspace_name(ipspace_id)
desired_client.create_ipspace(ipspace_name)
return ipspace_name
@ -903,6 +907,213 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
manage_existing(share, driver_options,
share_server=share_server))
@na_utils.trace
def _check_compatibility_using_svm_dr(
self, src_client, dest_client, shares_request_spec, pools):
"""Send a request to pause a migration.
:param src_client: source cluster client.
:param dest_client: destination cluster client.
:param shares_request_spec: shares specifications.
:param pools: pools to be used during the migration.
:returns server migration mechanism name and compatibility result
example: (svm_dr, True).
"""
method = SERVER_MIGRATE_SVM_DR
if (not src_client.is_svm_dr_supported()
or not dest_client.is_svm_dr_supported()):
msg = _("Cannot perform server migration because at least one of "
"the backends doesn't support SVM DR.")
LOG.error(msg)
return method, False
# Check capacity.
server_total_size = (shares_request_spec.get('shares_size', 0) +
shares_request_spec.get('snapshots_size', 0))
# NOTE(dviroel): If the backend has a 'max_over_subscription_ratio'
# configured and greater than 1, we'll consider thin provisioning
# enabled for all shares.
thin_provisioning = self.configuration.max_over_subscription_ratio > 1
if self.configuration.netapp_server_migration_check_capacity is True:
if not self._check_capacity_compatibility(pools, thin_provisioning,
server_total_size):
msg = _("Cannot perform server migration because destination "
"host doesn't have enough free space.")
LOG.error(msg)
return method, False
return method, True
@na_utils.trace
def _get_job_uuid(self, job):
"""Get the uuid of a job."""
job = job.get("job", {})
return job.get("uuid")
@na_utils.trace
def _wait_for_operation_status(
self, operation_id, func_get_operation, desired_status='success',
timeout=None):
"""Waits until a given operation reachs the desired status.
:param operation_id: ID of the operation to be searched.
:param func_get_operation: Function to be used to get the operation
details.
:param desired_status: Operation expected status.
:param timeout: How long (in seconds) should the driver wait for the
status to be reached.
"""
if not timeout:
timeout = (
self.configuration.netapp_server_migration_state_change_timeout
)
interval = 10
retries = int(timeout / interval) or 1
@utils.retry(exception.ShareBackendException, interval=interval,
retries=retries, backoff_rate=1)
def wait_for_status():
# Get the job based on its id.
operation = func_get_operation(operation_id)
status = operation.get("status") or operation.get("state")
if status != desired_status:
msg = _(
"Operation %(operation_id)s didn't reach status "
"%(desired_status)s. Current status is %(status)s.") % {
'operation_id': operation_id,
'desired_status': desired_status,
'status': status
}
LOG.debug(msg)
# Failed, no need to retry.
if status == 'error':
msg = _('Operation %(operation_id)s is in error status. '
'Reason: %(message)s')
raise exception.NetAppException(
msg % {'operation_id': operation_id,
'message': operation.get('message')})
# Didn't fail, so we can retry.
raise exception.ShareBackendException(msg)
elif status == desired_status:
msg = _(
'Operation %(operation_id)s reached status %(status)s.')
LOG.debug(
msg, {'operation_id': operation_id, 'status': status})
return
try:
wait_for_status()
except exception.NetAppException:
raise
except exception.ShareBackendException:
msg_args = {'operation_id': operation_id, 'status': desired_status}
msg = _('Timed out while waiting for operation %(operation_id)s '
'to reach status %(status)s') % msg_args
raise exception.NetAppException(msg)
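
For instance, the cancel flow further below chains two of these waits,
first on the pause job and then on the migration state itself:

    job = dest_client.svm_migrate_pause(migration_id)
    self._wait_for_operation_status(self._get_job_uuid(job),
                                    dest_client.get_job)
    self._wait_for_operation_status(
        migration_id, dest_client.svm_migration_get,
        desired_status=na_utils.MIGRATION_STATE_MIGRATE_PAUSED)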
@na_utils.trace
def _check_compatibility_for_svm_migrate(
self, source_cluster_name, source_share_server_name,
source_share_server, dest_aggregates, dest_client):
"""Checks if the migration can be performed using SVM Migrate.
1. Send the request to the backend to check if the migration is possible
2. Wait until the job finishes checking the migration status
"""
# Reuse network information from the source share server for SVM
# Migrate if there were no share network changes.
network_info = {
'network_allocations':
source_share_server['network_allocations'],
'neutron_subnet_id':
source_share_server['share_network_subnet'].get(
'neutron_subnet_id')
}
# Create a new ipspace, port and broadcast domain for the check.
node_name = self._client.list_cluster_nodes()[0]
port = self._get_node_data_port(node_name)
vlan = network_info['network_allocations'][0]['segmentation_id']
destination_ipspace = self._client.get_ipspace_name_for_vlan_port(
node_name, port, vlan) or self._create_ipspace(
network_info, client=dest_client)
self._create_port_and_broadcast_domain(
destination_ipspace, network_info)
def _cleanup_ipspace(ipspace):
try:
dest_client.delete_ipspace(ipspace)
except Exception:
LOG.info(
'Did not delete ipspace used to check the compatibility '
'for SVM Migrate. It is possible that it was reused and '
'there are other entities consuming it.')
# 1. Send the request to the backend.
try:
job = dest_client.svm_migration_start(
source_cluster_name, source_share_server_name, dest_aggregates,
dest_ipspace=destination_ipspace, check_only=True)
except Exception:
LOG.error('Failed to check compatibility for migration.')
_cleanup_ipspace(destination_ipspace)
raise
job_id = self._get_job_uuid(job)
try:
# 2. Wait until the job that checks the migration status concludes.
self._wait_for_operation_status(
job_id, dest_client.get_migration_check_job_state)
_cleanup_ipspace(destination_ipspace)
return True
except exception.NetAppException:
# Performed the check with the given parameters and the backend
# returned an error, so the migration is not compatible
_cleanup_ipspace(destination_ipspace)
return False
@na_utils.trace
def _check_for_migration_support(
self, src_client, dest_client, source_share_server,
shares_request_spec, src_cluster_name, pools):
"""Checks if the migration is supported and chooses the way to do it
In terms of performance, SVM Migrate is more adequate and it should
be prioritised over a SVM DR migration. If both source and destination
clusters do not support SVM Migrate, then SVM DR is the option to be
used.
1. Checks if both source and destination clients support SVM Migrate.
2. Requests the migration.
"""
# 1. Checks if both source and destination clients support SVM Migrate.
if (dest_client.is_svm_migrate_supported()
and src_client.is_svm_migrate_supported()):
source_share_server_name = self._get_vserver_name(
source_share_server['id'])
# Check if the migration is supported.
try:
result = self._check_compatibility_for_svm_migrate(
src_cluster_name, source_share_server_name,
source_share_server, self._find_matching_aggregates(),
dest_client)
return SERVER_MIGRATE_SVM_MIGRATE, result
except Exception:
LOG.error('Failed to check the compatibility for the share '
'server migration using SVM Migrate.')
return SERVER_MIGRATE_SVM_MIGRATE, False
# SVM Migrate is not supported, try to check the compatibility using
# SVM DR.
return self._check_compatibility_using_svm_dr(
src_client, dest_client, shares_request_spec, pools)
@na_utils.trace
def share_server_migration_check_compatibility(
self, context, source_share_server, dest_host, old_share_network,
@ -958,16 +1169,17 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
LOG.error(msg)
return not_compatible
pools = self._get_pools()
# NOTE(dviroel): These clients can only be used for non-tunneling
# requests.
dst_client = data_motion.get_client_for_backend(dest_backend_name,
vserver_name=None)
migration_method, compatibility = self._check_for_migration_support(
src_client, dst_client, source_share_server, shares_request_spec,
src_cluster_name, pools)
if not compatibility:
return not_compatible
# Blocking different security services for now
@ -985,7 +1197,6 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
LOG.error(msg)
return not_compatible
# Check 'netapp_flexvol_encryption' and 'revert_to_snapshot_support'
specs_to_validate = ('netapp_flexvol_encryption',
'revert_to_snapshot_support')
@ -1000,25 +1211,12 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
return not_compatible
# TODO(dviroel): disk_type extra-spec
nondisruptive = (migration_method == SERVER_MIGRATE_SVM_MIGRATE)
compatibility = {
'compatible': True,
'writable': True,
'nondisruptive': nondisruptive,
'preserve_snapshots': True,
'share_network_id': new_share_network['id'],
'migration_cancel': True,
@ -1027,9 +1225,9 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
return compatibility
@na_utils.trace
def _migration_start_using_svm_dr(
self, source_share_server, dest_share_server):
"""Start share server migration using SVM DR.
1. Create vserver peering between source and destination
@ -1078,14 +1276,126 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
msg = _('Could not initialize SnapMirror between %(src)s and '
'%(dest)s vservers.') % msg_args
raise exception.NetAppException(message=msg)
return None
@na_utils.trace
def _migration_start_using_svm_migrate(
self, context, source_share_server, dest_share_server, src_client,
dest_client):
"""Start share server migration using SVM Migrate.
1. Check if the share network can be reused
2. Create a new ipspace, port and broadcast domain for the dest server
3. Send the request to start the share server migration
4. Read the job id and get the id of the migration
5. Return the migration uuid to be saved in the backend details
"""
# 1. Check if the share network can be reused
# NOTE(carloss): if the share network was not changed, SVM Migrate can
# reuse the network allocations from the source share server, so, as
# Manila has not made new allocations, we can just get allocation data
# from the source share server.
if not dest_share_server['network_allocations']:
share_server_to_get_network_info = source_share_server
else:
share_server_to_get_network_info = dest_share_server
# Reuse network information from the source share server for SVM
# Migrate if there were no share network changes.
network_info = {
'network_allocations':
share_server_to_get_network_info['network_allocations'],
'neutron_subnet_id':
share_server_to_get_network_info['share_network_subnet'].get(
'neutron_subnet_id')
}
# 2. Create new ipspace, port and broadcast domain.
node_name = self._client.list_cluster_nodes()[0]
port = self._get_node_data_port(node_name)
vlan = network_info['network_allocations'][0]['segmentation_id']
destination_ipspace = self._client.get_ipspace_name_for_vlan_port(
node_name, port, vlan) or self._create_ipspace(
network_info, client=dest_client)
self._create_port_and_broadcast_domain(
destination_ipspace, network_info)
# Prepare the migration request.
src_cluster_name = src_client.get_cluster_name()
source_share_server_name = self._get_vserver_name(
source_share_server['id'])
# 3. Send the migration request to ONTAP.
try:
result = dest_client.svm_migration_start(
src_cluster_name, source_share_server_name,
self._find_matching_aggregates(),
dest_ipspace=destination_ipspace)
# 4. Read the job id and get the id of the migration.
result_job = result.get("job", {})
job_details = dest_client.get_job(result_job.get("uuid"))
job_description = job_details.get('description')
migration_uuid = job_description.split('/')[-1]
except Exception:
# As it failed, we must remove the ipspace, ports and broadcast
# domain.
dest_client.delete_ipspace(destination_ipspace)
msg = _("Unable to start the migration for share server %s."
% source_share_server['id'])
raise exception.NetAppException(msg)
# 5. Returns migration data to be saved as backend details.
server_info = {
"backend_details": {
na_utils.MIGRATION_OPERATION_ID_KEY: migration_uuid
}
}
return server_info
@na_utils.trace
def share_server_migration_start(
self, context, source_share_server, dest_share_server,
share_instances, snapshot_instances):
"""Start share server migration.
This method will choose the best migration strategy to perform the
migration, based on the storage functionalities support.
"""
src_backend_name = share_utils.extract_host(
source_share_server['host'], level='backend_name')
dest_backend_name = share_utils.extract_host(
dest_share_server['host'], level='backend_name')
dest_client = data_motion.get_client_for_backend(
dest_backend_name, vserver_name=None)
__, src_client = self._get_vserver(
share_server=source_share_server, backend_name=src_backend_name)
use_svm_migrate = (
src_client.is_svm_migrate_supported()
and dest_client.is_svm_migrate_supported())
if use_svm_migrate:
result = self._migration_start_using_svm_migrate(
context, source_share_server, dest_share_server, src_client,
dest_client)
else:
result = self._migration_start_using_svm_dr(
source_share_server, dest_share_server)
msg_args = {
'src': source_share_server['id'],
'dest': dest_share_server['id'],
'migration_method': 'SVM Migrate' if use_svm_migrate else 'SVM DR'
}
msg = _('Starting share server migration from %(src)s to %(dest)s '
'using %(migration_method)s as migration method.')
LOG.info(msg, msg_args)
return result
def _get_snapmirror_svm(self, source_share_server, dest_share_server):
dm_session = data_motion.DataMotionSession()
try:
@ -1104,9 +1414,8 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
return snapmirrors
@na_utils.trace
def _share_server_migration_continue_svm_dr(
self, source_share_server, dest_share_server):
"""Continues a share server migration using SVM DR."""
snapmirrors = self._get_snapmirror_svm(source_share_server,
dest_share_server)
@ -1141,10 +1450,69 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
return False
@na_utils.trace
def _share_server_migration_continue_svm_migrate(self, dest_share_server,
migration_id):
"""Continues the migration for a share server.
:param dest_share_server: reference for the destination share server.
:param migration_id: ID of the migration.
"""
dest_client = data_motion.get_client_for_host(
dest_share_server['host'])
try:
result = dest_client.svm_migration_get(migration_id)
except netapp_api.NaApiError as e:
msg = (_('Failed to continue the migration for share server '
'%(server_id)s. Reason: %(reason)s'
) % {'server_id': dest_share_server['id'],
'reason': e.message}
)
raise exception.NetAppException(message=msg)
return (
result.get("state") == na_utils.MIGRATION_STATE_READY_FOR_CUTOVER)
@na_utils.trace
def share_server_migration_continue(self, context, source_share_server,
dest_share_server, share_instances,
snapshot_instances):
"""Continues the migration of a share server."""
# If the migration operation was started using SVM migrate, it
# returned a migration ID to get information about the job afterwards.
migration_id = self._get_share_server_migration_id(
dest_share_server)
# Checks the progress for a SVM migrate migration.
if migration_id:
return self._share_server_migration_continue_svm_migrate(
dest_share_server, migration_id)
# Checks the progress of a SVM DR Migration.
return self._share_server_migration_continue_svm_dr(
source_share_server, dest_share_server)
def _setup_networking_for_destination_vserver(
self, vserver_client, vserver_name, new_net_allocations):
ipspace_name = vserver_client.get_vserver_ipspace(vserver_name)
# NOTE(dviroel): Security service and NFS configuration should be
# handled by SVM DR, so no changes will be made here.
vlan = new_net_allocations['segmentation_id']
@utils.synchronized('netapp-VLAN-%s' % vlan, external=True)
def setup_network_for_destination_vserver():
self._setup_network_for_vserver(
vserver_name, vserver_client, new_net_allocations,
ipspace_name,
enable_nfs=False,
security_services=None)
setup_network_for_destination_vserver()
@na_utils.trace
def _share_server_migration_complete_svm_dr(
self, source_share_server, dest_share_server, src_vserver,
src_client, share_instances, new_net_allocations):
"""Perform steps to complete the SVM DR migration.
1. Do a last SnapMirror update.
2. Quiesce, abort and then break the relationship.
@ -1152,9 +1520,12 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
4. Configure network interfaces in the destination vserver
5. Start the destination vserver
6. Delete and release the snapmirror
"""
dest_backend_name = share_utils.extract_host(
dest_share_server['host'], level='backend_name')
dest_vserver, dest_client = self._get_vserver(
share_server=dest_share_server, backend_name=dest_backend_name)
dm_session = data_motion.DataMotionSession()
try:
# 1. Start an update to try to get a last minute transfer before we
@ -1165,15 +1536,6 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
# Ignore any errors since the current source may be unreachable
pass
try:
# 2. Attempt to quiesce, abort and then break SnapMirror
dm_session.quiesce_and_break_snapmirror_svm(source_share_server,
@ -1191,20 +1553,8 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
src_client.stop_vserver(src_vserver)
# 4. Setup network configuration
self._setup_networking_for_destination_vserver(
dest_client, dest_vserver, new_net_allocations)
# 5. Start the destination.
dest_client.start_vserver(dest_vserver)
@ -1237,7 +1587,100 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
dm_session.delete_snapmirror_svm(source_share_server,
dest_share_server)
@na_utils.trace
def _share_server_migration_complete_svm_migrate(
self, migration_id, dest_share_server):
"""Completes share server migration using SVM Migrate.
1. Trigger the migration completion for SVM Migrate
2. Wait until the completion job reaches the success status
3. Wait until the migration itself reaches the completed status
"""
dest_client = data_motion.get_client_for_host(
dest_share_server['host'])
try:
# Triggers the migration completion.
job = dest_client.svm_migrate_complete(migration_id)
job_id = self._get_job_uuid(job)
# Wait until the job is successful.
self._wait_for_operation_status(
job_id, dest_client.get_job)
# Wait until the migration is entirely finished.
self._wait_for_operation_status(
migration_id, dest_client.svm_migration_get,
desired_status=na_utils.MIGRATION_STATE_MIGRATE_COMPLETE)
except exception.NetAppException:
msg = _(
"Failed to complete the migration for "
"share server %s.") % dest_share_server['id']
raise exception.NetAppException(msg)
@na_utils.trace
def share_server_migration_complete(self, context, source_share_server,
dest_share_server, share_instances,
snapshot_instances, new_network_alloc):
"""Completes share server migration.
1. Call functions to conclude the migration for SVM DR or SVM Migrate
2. Build the list of export_locations for each share
3. Release all resources from the source share server
"""
src_backend_name = share_utils.extract_host(
source_share_server['host'], level='backend_name')
src_vserver, src_client = self._get_vserver(
share_server=source_share_server, backend_name=src_backend_name)
dest_backend_name = share_utils.extract_host(
dest_share_server['host'], level='backend_name')
migration_id = self._get_share_server_migration_id(dest_share_server)
share_server_to_get_vserver_name_from = (
source_share_server if migration_id else dest_share_server)
dest_vserver, dest_client = self._get_vserver(
share_server=share_server_to_get_vserver_name_from,
backend_name=dest_backend_name)
server_backend_details = {}
# 1. Call functions to conclude the migration for SVM DR or SVM
# Migrate.
if migration_id:
self._share_server_migration_complete_svm_migrate(
migration_id, dest_share_server)
server_backend_details = source_share_server['backend_details']
# If there are new network allocations to be added, do so, and add
# them to the share server's backend details.
if dest_share_server['network_allocations']:
# Teardown the current network allocations
current_network_interfaces = (
dest_client.list_network_interfaces())
# Need a cluster client to be able to remove the current
# network interfaces
dest_cluster_client = data_motion.get_client_for_host(
dest_share_server['host'])
for interface_name in current_network_interfaces:
dest_cluster_client.delete_network_interface(
src_vserver, interface_name)
self._setup_networking_for_destination_vserver(
dest_client, src_vserver, new_network_alloc)
server_backend_details.pop('ports')
ports = {}
for allocation in dest_share_server['network_allocations']:
ports[allocation['id']] = allocation['ip_address']
server_backend_details['ports'] = jsonutils.dumps(ports)
else:
self._share_server_migration_complete_svm_dr(
source_share_server, dest_share_server, src_vserver,
src_client, share_instances, new_network_alloc)
# 2. Build a dict with shares/snapshot location updates.
# NOTE(dviroel): For SVM DR, the share names aren't modified, only the
# export_locations are updated due to network changes.
share_updates = {}
@ -1248,9 +1691,11 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
share_name = self._get_backend_share_name(instance['id'])
volume = dest_client.get_volume(share_name)
dest_aggregate = volume.get('aggregate')
if not migration_id:
# Update share attributes according to the share extra specs.
self._update_share_attributes_after_server_migration(
instance, src_client, dest_aggregate, dest_client)
except Exception:
msg_args = {
@ -1262,36 +1707,58 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
'in the destination vserver.') % msg_args
raise exception.NetAppException(message=msg)
new_share_data = {
'pool_name': volume.get('aggregate')
}
share_host = instance['host']
# If using SVM Migrate, the export policies must already be ensured
# using the new host information.
if migration_id:
old_aggregate = share_host.split('#')[1]
share_host = share_host.replace(
old_aggregate, dest_aggregate)
export_locations = self._create_export(
instance, dest_share_server, dest_vserver, dest_client,
clear_current_export_policy=False,
ensure_share_already_exists=True,
share_host=share_host)
new_share_data.update({'export_locations': export_locations})
share_updates.update({instance['id']: new_share_data})
# NOTE(dviroel): Nothing to update in snapshot instances since the
# provider location didn't change.
# NOTE(carloss): as SVM DR works like a replica, we must delete the
# source shares after the migration. With SVM Migrate, the shares
# were moved to the destination, so there is no need to remove them;
# only the source share server itself must be deleted.
if not migration_id:
# 3. Release source share resources.
for instance in share_instances:
self._delete_share(instance, src_vserver, src_client,
remove_export=True)
# NOTE(dviroel): source share server deletion must be triggered by
# the manager after finishing the migration
LOG.info('Share server migration completed.')
return {
'share_updates': share_updates,
'server_backend_details': server_backend_details
}
@na_utils.trace
def _get_share_server_migration_id(self, dest_share_server):
return dest_share_server['backend_details'].get(
na_utils.MIGRATION_OPERATION_ID_KEY)
@na_utils.trace
def _migration_cancel_using_svm_dr(
self, source_share_server, dest_share_server, shares):
"""Cancel a share server migration that is using SVM DR."""
dm_session = data_motion.DataMotionSession()
dest_backend_name = share_utils.extract_host(dest_share_server['host'],
level='backend_name')
@ -1318,6 +1785,73 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
'and %(dest)s vservers.') % msg_args
raise exception.NetAppException(message=msg)
@na_utils.trace
def _migration_cancel_using_svm_migrate(self, migration_id,
dest_share_server):
"""Cancel a share server migration that is using SVM migrate.
1. Gets information about the migration
2. Pauses the migration, as it can't be cancelled without pausing
3. Asks ONTAP to actually cancel the migration
"""
# 1. Gets information about the migration.
dest_client = data_motion.get_client_for_host(
dest_share_server['host'])
migration_information = dest_client.svm_migration_get(migration_id)
# Gets the ipspace that was created so we can delete it if it's not
# being used anymore.
dest_ipspace_name = (
migration_information["destination"]["ipspace"]["name"])
# 2. Pauses the migration.
try:
# Request the migration to be paused and wait until the job is
# successful.
job = dest_client.svm_migrate_pause(migration_id)
job_id = self._get_job_uuid(job)
self._wait_for_operation_status(job_id, dest_client.get_job)
# Wait until the migration actually gets paused.
self._wait_for_operation_status(
migration_id, dest_client.svm_migration_get,
desired_status=na_utils.MIGRATION_STATE_MIGRATE_PAUSED)
except exception.NetAppException:
msg = _("Failed to pause the share server migration.")
raise exception.NetAppException(message=msg)
try:
# 3. Asks ONTAP to actually cancel the migration.
job = dest_client.svm_migrate_cancel(migration_id)
job_id = self._get_job_uuid(job)
self._wait_for_operation_status(
job_id, dest_client.get_job)
except exception.NetAppException:
msg = _("Failed to cancel the share server migration.")
raise exception.NetAppException(message=msg)
# Remove the ipspace if it is no longer needed.
if (dest_ipspace_name and dest_ipspace_name not in CLUSTER_IPSPACES
and not dest_client.ipspace_has_data_vservers(
dest_ipspace_name)):
dest_client.delete_ipspace(dest_ipspace_name)
return
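The same pause-then-cancel sequence can be pictured as direct calls against the ONTAP REST API. The following is a minimal sketch only: the endpoint path, HTTP methods, and payload are assumptions inferred from the rest_endpoints mapping and the client tests further down, not the driver's actual call path.
import requests

def cancel_svm_migration(session, base_url, migration_id):
    # Pause first: ONTAP will not cancel a migration that is still
    # running (assumed endpoint layout: svm/migrations/<uuid>).
    url = base_url + 'svm/migrations/%s' % migration_id
    resp = session.patch(url, json={'action': 'pause'})
    resp.raise_for_status()
    # ... poll the migration until it reports 'migrate_paused' ...
    # Then request the cancellation itself.
    resp = session.delete(url)
    resp.raise_for_status()
    return resp.json()

# Hypothetical usage:
# session = requests.Session()
# session.auth = ('admin', 'password')
# cancel_svm_migration(session, 'https://10.0.0.3/api/', migration_id)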
@na_utils.trace
def share_server_migration_cancel(self, context, source_share_server,
dest_share_server, shares, snapshots):
"""Send the request to cancel the SVM migration."""
migration_id = self._get_share_server_migration_id(dest_share_server)
if migration_id:
return self._migration_cancel_using_svm_migrate(
migration_id, dest_share_server)
self._migration_cancel_using_svm_dr(
source_share_server, dest_share_server, shares)
LOG.info('Share server migration was cancelled.')
def share_server_migration_get_progress(self, context, src_share_server,


@ -36,6 +36,12 @@ VALID_TRACE_FLAGS = ['method', 'api']
TRACE_METHOD = False
TRACE_API = False
API_TRACE_PATTERN = '(.*)'
SVM_MIGRATE_POLICY_TYPE_NAME = 'migrate'
MIGRATION_OPERATION_ID_KEY = 'migration_operation_id'
MIGRATION_STATE_READY_FOR_CUTOVER = 'ready_for_cutover'
MIGRATION_STATE_READY_FOR_SOURCE_CLEANUP = 'ready_for_source_cleanup'
MIGRATION_STATE_MIGRATE_COMPLETE = 'migrate_complete'
MIGRATION_STATE_MIGRATE_PAUSED = 'migrate_paused'
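A hedged sketch of how these state constants might be consumed by a polling helper; wait_until_paused and its retry parameters are hypothetical and not part of this change, and the client is assumed to expose svm_migration_get() as in the tests below.
import time

def wait_until_paused(client, migration_id, interval=10, retries=60):
    # Poll the migration until ONTAP reports it as paused.
    for _ in range(retries):
        state = client.svm_migration_get(migration_id).get('state')
        if state == MIGRATION_STATE_MIGRATE_PAUSED:
            return
        time.sleep(interval)
    raise RuntimeError('Migration %s never reached the paused state.'
                       % migration_id)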
def validate_driver_instantiation(**kwargs):


@ -87,6 +87,7 @@ VSERVER_INFO = {
'state': VSERVER_STATE,
}
SNAPMIRROR_POLICY_NAME = 'fake_snapmirror_policy'
SNAPMIRROR_POLICY_TYPE = 'async_mirror'
USER_NAME = 'fake_user'
@ -2742,12 +2743,14 @@ SNAPMIRROR_POLICY_GET_ITER_RESPONSE = etree.XML("""
<attributes-list>
<snapmirror-policy-info>
<policy-name>%(policy_name)s</policy-name>
<type>%(policy_type)s</type>
<vserver-name>%(vserver_name)s</vserver-name>
</snapmirror-policy-info>
</attributes-list>
<num-records>1</num-records>
</results>""" % {
'policy_name': SNAPMIRROR_POLICY_NAME,
'policy_type': SNAPMIRROR_POLICY_TYPE,
'vserver_name': VSERVER_NAME,
})
@ -2899,6 +2902,7 @@ FAKE_NA_ELEMENT = api.NaElement(etree.XML(FAKE_VOL_XML))
FAKE_INVOKE_DATA = 'somecontent'
FAKE_XML_STR = 'abc'
FAKE_REST_CALL_STR = 'def'
FAKE_API_NAME = 'volume-get-iter'
@ -2960,3 +2964,136 @@ FAKE_MANAGE_VOLUME = {
FAKE_KEY_MANAGER_ERROR = "The onboard key manager is not enabled. To enable \
it, run \"security key-manager setup\"."
FAKE_ACTION_URL = '/endpoint'
FAKE_BASE_URL = '10.0.0.3/api'
FAKE_HTTP_BODY = {'fake_key': 'fake_value'}
FAKE_HTTP_QUERY = {'type': 'fake_type'}
FAKE_HTTP_HEADER = {"fake_header_key": "fake_header_value"}
FAKE_URL_PARAMS = {"fake_url_key": "fake_url_value_to_be_concatenated"}
FAKE_MIGRATION_RESPONSE_WITH_JOB = {
"_links": {
"self": {
"href": "/api/resourcelink"
}
},
"job": {
"start_time": "2021-08-27T19:23:41.691Z",
"uuid": "1cd8a442-86d1-11e0-ae1c-123478563412",
"description": "Fake Job",
"state": "success",
"message": "Complete: Successful",
"end_time": "2021-08-27T19:23:41.691Z",
"code": "0"
}
}
FAKE_JOB_ID = FAKE_MIGRATION_RESPONSE_WITH_JOB['job']['uuid']
FAKE_MIGRATION_POST_ID = 'fake_migration_id'
FAKE_JOB_SUCCESS_STATE = {
"_links": {
"self": {
"href": "/api/resourcelink"
}
},
"start_time": "2021-08-27T19:23:41.691Z",
"uuid": "1cd8a442-86d1-11e0-ae1c-123478563412",
"description": "POST migrations/%s" % FAKE_MIGRATION_POST_ID,
"state": "success",
"message": "Complete: Successful",
"end_time": "2021-08-27T19:23:41.691Z",
"code": "0"
}
FAKE_MIGRATION_JOB_SUCCESS = {
"auto_cutover": True,
"auto_source_cleanup": True,
"current_operation": "none",
"cutover_complete_time": "2020-12-02T18:36:19-08:00",
"cutover_start_time": "2020-12-02T18:36:19-08:00",
"cutover_trigger_time": "2020-12-02T18:36:19-08:00",
"destination": {
"ipspace": {
"_links": {
"self": {
"href": "/api/resourcelink"
}
},
"name": "exchange",
"uuid": "1cd8a442-86d1-11e0-ae1c-123478563412"
},
"volume_placement": {
"aggregates": [
{
"_links": {
"self": {
"href": "/api/resourcelink"
}
},
"name": "aggr1",
"uuid": "1cd8a442-86d1-11e0-ae1c-123478563412"
}
],
"volumes": [
{
"aggregate": {
"_links": {
"self": {
"href": "/api/resourcelink"
}
},
"name": "aggr1",
"uuid": "1cd8a442-86d1-11e0-ae1c-123478563412"
},
"volume": {
"_links": {
"self": {
"href": "/api/resourcelink"
}
},
"name": "this_volume",
"uuid": "1cd8a442-86d1-11e0-ae1c-123478563412"
}
}
]
}
},
"end_time": "2020-12-02T18:36:19-08:00",
"last_failed_state": "precheck_started",
"last_operation": "none",
"last_pause_time": "2020-12-02T18:36:19-08:00",
"last_resume_time": "2020-12-02T18:36:19-08:00",
"messages": [
{
"code": 852126,
"message": "SVM migrate cannot start since a volume move is "
"running.""Retry the command once volume move has "
"finished."
}
],
"point_of_no_return": True,
"restart_count": 0,
"source": {
"cluster": {
"_links": {
"self": {
"href": "/api/resourcelink"
}
},
"name": "cluster1",
"uuid": "1cd8a442-86d1-11e0-ae1c-123478563412"
},
"svm": {
"_links": {
"self": {
"href": "/api/resourcelink"
}
},
"name": "svm1",
"uuid": "02c9e252-41be-11e9-81d5-00a0986138f7"
}
},
"start_time": "2020-12-02T18:36:19-08:00",
"state": "migrate_complete",
"uuid": "4ea7a442-86d1-11e0-ae1c-123478563412"
}
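This fake mirrors the payload shape returned by the migration GET call; the snippet below is a small sanity sketch, not part of the change, showing the two fields the driver code above actually reads from it (the destination ipspace name and the migration state).
# Fields consumed by _migration_cancel_using_svm_migrate (illustrative):
ipspace_name = FAKE_MIGRATION_JOB_SUCCESS['destination']['ipspace']['name']
assert ipspace_name == 'exchange'
assert FAKE_MIGRATION_JOB_SUCCESS['state'] == 'migrate_complete'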


@ -19,6 +19,7 @@
Tests for NetApp API layer
"""
from oslo_serialization import jsonutils
from unittest import mock
import ddt
@ -26,6 +27,7 @@ import requests
from manila import exception
from manila.share.drivers.netapp.dataontap.client import api
from manila.share.drivers.netapp.dataontap.client import rest_endpoints
from manila import test
from manila.tests.share.drivers.netapp.dataontap.client import fakes as fake
@ -174,11 +176,11 @@ class NetAppApiElementTransTests(test.TestCase):
@ddt.ddt
class NetAppApiServerTests(test.TestCase):
class NetAppApiServerZapiClientTests(test.TestCase):
"""Test case for NetApp API server methods"""
def setUp(self):
self.root = api.NaServer('127.0.0.1')
super(NetAppApiServerTests, self).setUp()
self.root = api.NaServer('127.0.0.1').zapi_client
super(NetAppApiServerZapiClientTests, self).setUp()
@ddt.data(None, fake.FAKE_XML_STR)
def test_invoke_elem_value_error(self, na_element):
@ -262,3 +264,201 @@ class NetAppApiServerTests(test.TestCase):
expected_log_count = 2 if log else 0
self.assertEqual(expected_log_count, api.LOG.debug.call_count)
@ddt.ddt
class NetAppApiServerRestClientTests(test.TestCase):
"""Test case for NetApp API Rest server methods"""
def setUp(self):
self.root = api.NaServer('127.0.0.1').rest_client
super(NetAppApiServerRestClientTests, self).setUp()
def test_invoke_elem_value_error(self):
"""Tests whether invalid NaElement parameter causes error"""
na_element = fake.FAKE_REST_CALL_STR
self.assertRaises(ValueError, self.root.invoke_elem, na_element)
def _setup_mocks_for_invoke_element(self, mock_post_action):
self.mock_object(api, 'LOG')
self.root._session = fake.FAKE_HTTP_SESSION
self.root._session.post = mock_post_action
self.mock_object(self.root, '_build_session')
self.mock_object(
self.root, '_get_request_info', mock.Mock(
return_value=(self.root._session.post, fake.FAKE_ACTION_URL)))
self.mock_object(
self.root, '_get_base_url',
mock.Mock(return_value=fake.FAKE_BASE_URL))
return fake.FAKE_BASE_URL
def test_invoke_elem_http_error(self):
"""Tests handling of HTTPError"""
na_element = fake.FAKE_NA_ELEMENT
element_name = fake.FAKE_NA_ELEMENT.get_name()
self._setup_mocks_for_invoke_element(
mock_post_action=mock.Mock(side_effect=requests.HTTPError()))
self.assertRaises(api.NaApiError, self.root.invoke_elem,
na_element)
self.assertTrue(self.root._get_base_url.called)
self.root._get_request_info.assert_called_once_with(
element_name, self.root._session)
def test_invoke_elem_urlerror(self):
"""Tests handling of URLError"""
na_element = fake.FAKE_NA_ELEMENT
element_name = fake.FAKE_NA_ELEMENT.get_name()
self._setup_mocks_for_invoke_element(
mock_post_action=mock.Mock(side_effect=requests.URLRequired()))
self.assertRaises(exception.StorageCommunicationException,
self.root.invoke_elem,
na_element)
self.assertTrue(self.root._get_base_url.called)
self.root._get_request_info.assert_called_once_with(
element_name, self.root._session)
def test_invoke_elem_unknown_exception(self):
"""Tests handling of Unknown Exception"""
na_element = fake.FAKE_NA_ELEMENT
element_name = fake.FAKE_NA_ELEMENT.get_name()
self._setup_mocks_for_invoke_element(
mock_post_action=mock.Mock(side_effect=Exception))
err = self.assertRaises(api.NaApiError, self.root.invoke_elem,
                        na_element)
self.assertEqual('unknown', err.code)
self.assertTrue(self.root._get_base_url.called)
self.root._get_request_info.assert_called_once_with(
element_name, self.root._session)
@ddt.data(
{'trace_enabled': False,
'trace_pattern': '(.*)',
'log': False,
'query': None,
'body': fake.FAKE_HTTP_BODY
},
{'trace_enabled': True,
'trace_pattern': '(?!(volume)).*',
'log': False,
'query': None,
'body': fake.FAKE_HTTP_BODY
},
{'trace_enabled': True,
'trace_pattern': '(.*)',
'log': True,
'query': fake.FAKE_HTTP_QUERY,
'body': fake.FAKE_HTTP_BODY
},
{'trace_enabled': True,
'trace_pattern': '^volume-(info|get-iter)$',
'log': True,
'query': fake.FAKE_HTTP_QUERY,
'body': fake.FAKE_HTTP_BODY
}
)
@ddt.unpack
def test_invoke_elem_valid(self, trace_enabled, trace_pattern, log, query,
body):
"""Tests the method invoke_elem with valid parameters"""
self.root._session = fake.FAKE_HTTP_SESSION
response = mock.Mock()
response.content = 'fake_response'
self.root._session.post = mock.Mock(return_value=response)
na_element = fake.FAKE_NA_ELEMENT
element_name = fake.FAKE_NA_ELEMENT.get_name()
self.root._trace = trace_enabled
self.root._api_trace_pattern = trace_pattern
expected_url = fake.FAKE_BASE_URL + fake.FAKE_ACTION_URL
api_args = {
"body": body,
"query": query
}
self.mock_object(api, 'LOG')
mock_build_session = self.mock_object(self.root, '_build_session')
mock_get_req_info = self.mock_object(
self.root, '_get_request_info', mock.Mock(
return_value=(self.root._session.post, fake.FAKE_ACTION_URL)))
mock_add_query_params = self.mock_object(
self.root, '_add_query_params_to_url', mock.Mock(
return_value=fake.FAKE_ACTION_URL))
mock_get_base_url = self.mock_object(
self.root, '_get_base_url',
mock.Mock(return_value=fake.FAKE_BASE_URL))
mock_json_loads = self.mock_object(
jsonutils, 'loads', mock.Mock(return_value='fake_response'))
mock_json_dumps = self.mock_object(
jsonutils, 'dumps', mock.Mock(return_value=body))
result = self.root.invoke_elem(na_element, api_args=api_args)
self.assertEqual('fake_response', result)
expected_log_count = 2 if log else 0
self.assertEqual(expected_log_count, api.LOG.debug.call_count)
self.assertTrue(mock_build_session.called)
mock_get_req_info.assert_called_once_with(
element_name, self.root._session)
if query:
mock_add_query_params.assert_called_once_with(
fake.FAKE_ACTION_URL, query)
self.assertTrue(mock_get_base_url.called)
self.root._session.post.assert_called_once_with(
expected_url, data=body)
mock_json_loads.assert_called_once_with('fake_response')
mock_json_dumps.assert_called_once_with(body)
@ddt.data(
('svm-migration-start', rest_endpoints.ENDPOINT_MIGRATIONS, 'post'),
('svm-migration-complete', rest_endpoints.ENDPOINT_MIGRATION_ACTIONS,
'patch')
)
@ddt.unpack
def test__get_request_info(self, api_name, expected_url, expected_method):
self.root._session = fake.FAKE_HTTP_SESSION
for http_method in ['post', 'get', 'put', 'delete', 'patch']:
setattr(self.root._session, http_method, mock.Mock())
method, url = self.root._get_request_info(api_name, self.root._session)
self.assertEqual(method, getattr(self.root._session, expected_method))
self.assertEqual(expected_url, url)
@ddt.data(
{'is_ipv6': False, 'protocol': 'http', 'port': '80'},
{'is_ipv6': False, 'protocol': 'https', 'port': '443'},
{'is_ipv6': True, 'protocol': 'http', 'port': '80'},
{'is_ipv6': True, 'protocol': 'https', 'port': '443'})
@ddt.unpack
def test__get_base_url(self, is_ipv6, protocol, port):
self.root._host = '10.0.0.3' if not is_ipv6 else 'FF01::1'
self.root._protocol = protocol
self.root._port = port
host_formatted_for_url = (
    '[%s]' % self.root._host if is_ipv6 else self.root._host)
# example of the expected format: http://10.0.0.3:80/api/
expected_result = (
    protocol + '://' + host_formatted_for_url + ':' + port + '/api/')
base_url = self.root._get_base_url()
self.assertEqual(expected_result, base_url)
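For reference, a minimal _get_base_url implementation that would satisfy this test; this is a sketch under the assumption that bracketing IPv6 literals is all the host handling needed, and the real method in the api module may differ.
def _get_base_url(self):
    # Wrap IPv6 literals in brackets so they are valid inside a URL.
    host = '[%s]' % self._host if ':' in self._host else self._host
    return '%s://%s:%s/api/' % (self._protocol, host, self._port)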
def test__add_query_params_to_url(self):
url = 'endpoint/to/get/data'
filters = "?"
for k, v in fake.FAKE_HTTP_QUERY.items():
filters += "%(key)s=%(value)s&" % {"key": k, "value": v}
expected_formatted_url = url + filters
formatted_url = self.root._add_query_params_to_url(
    url, fake.FAKE_HTTP_QUERY)
self.assertEqual(expected_formatted_url, formatted_url)
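And a matching sketch of _add_query_params_to_url consistent with the trailing '&' this test builds; illustrative only, since production code would also need to percent-encode the values.
def _add_query_params_to_url(self, url, query):
    # Append each key=value pair, keeping the trailing '&' the test
    # expects; values are used verbatim for simplicity.
    filters = ''.join('%s=%s&' % (k, v) for k, v in query.items())
    return url + '?' + filters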


@ -42,6 +42,8 @@ class NetAppBaseClientTestCase(test.TestCase):
self.client = client_base.NetAppBaseClient(**fake.CONNECTION_INFO)
self.client.connection = mock.MagicMock()
self.connection = self.client.connection
self.connection.zapi_client = mock.Mock()
self.connection.rest_client = mock.Mock()
def test_get_ontapi_version(self):
version_response = netapp_api.NaElement(fake.ONTAPI_VERSION_RESPONSE)
@ -97,16 +99,23 @@ class NetAppBaseClientTestCase(test.TestCase):
self.assertEqual('tag_name', result)
def test_send_request(self):
@ddt.data(True, False)
def test_send_request(self, use_zapi):
element = netapp_api.NaElement('fake-api')
self.client.send_request('fake-api')
self.client.send_request('fake-api', use_zapi=use_zapi)
self.assertEqual(
element.to_string(),
self.connection.invoke_successfully.call_args[0][0].to_string())
self.assertTrue(self.connection.invoke_successfully.call_args[0][1])
self.assertTrue(
self.connection.invoke_successfully.call_args[1][
'enable_tunneling'])
self.assertEqual(
use_zapi,
self.connection.invoke_successfully.call_args[1][
'use_zapi'])
def test_send_request_no_tunneling(self):
@ -117,20 +126,32 @@ class NetAppBaseClientTestCase(test.TestCase):
self.assertEqual(
element.to_string(),
self.connection.invoke_successfully.call_args[0][0].to_string())
self.assertFalse(self.connection.invoke_successfully.call_args[0][1])
self.assertFalse(
self.connection.invoke_successfully.call_args[1][
'enable_tunneling'])
def test_send_request_with_args(self):
@ddt.data(True, False)
def test_send_request_with_args(self, use_zapi):
element = netapp_api.NaElement('fake-api')
api_args = {'arg1': 'data1', 'arg2': 'data2'}
element.translate_struct(api_args)
self.client.send_request('fake-api', api_args=api_args)
self.client.send_request('fake-api', api_args=api_args,
use_zapi=use_zapi)
self.assertEqual(
element.to_string(),
self.connection.invoke_successfully.call_args[0][0].to_string())
self.assertTrue(self.connection.invoke_successfully.call_args[0][1])
self.assertEqual(
api_args, self.connection.invoke_successfully.call_args[1][
'api_args'])
self.assertTrue(
self.connection.invoke_successfully.call_args[1][
'enable_tunneling'])
self.assertEqual(
use_zapi,
self.connection.invoke_successfully.call_args[1][
'use_zapi'])
def test_get_licenses(self):


@ -1769,6 +1769,40 @@ class NetAppClientCmodeTestCase(test.TestCase):
mock.call('net-interface-get-iter', None)])
self.assertListEqual([], result)
def test_disable_network_interface(self):
interface_name = fake.NETWORK_INTERFACES[0]['interface_name']
vserver_name = fake.VSERVER_NAME
expected_api_args = {
'administrative-status': 'down',
'interface-name': interface_name,
'vserver': vserver_name,
}
self.mock_object(self.client, 'send_request')
self.client.disable_network_interface(vserver_name, interface_name)
self.client.send_request.assert_called_once_with(
'net-interface-modify', expected_api_args)
def test_delete_network_interface(self):
interface_name = fake.NETWORK_INTERFACES[0]['interface_name']
vserver_name = fake.VSERVER_NAME
expected_api_args = {
'interface-name': interface_name,
'vserver': vserver_name,
}
self.mock_object(self.client, 'disable_network_interface')
self.mock_object(self.client, 'send_request')
self.client.delete_network_interface(vserver_name, interface_name)
self.client.disable_network_interface.assert_called_once_with(
vserver_name, interface_name)
self.client.send_request.assert_called_once_with(
'net-interface-delete', expected_api_args)
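These two tests encode a disable-then-delete contract; below is a hedged sketch of a client method that would satisfy them, and the real client_cmode implementation may differ in detail.
def delete_network_interface(self, vserver_name, interface_name):
    # A LIF must be administratively down before ONTAP will delete it,
    # so disable it first.
    self.disable_network_interface(vserver_name, interface_name)
    api_args = {
        'interface-name': interface_name,
        'vserver': vserver_name,
    }
    self.send_request('net-interface-delete', api_args)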
def test_get_ipspaces(self):
self.client.features.add_feature('IPSPACES')
@ -7627,8 +7661,10 @@ class NetAppClientCmodeTestCase(test.TestCase):
fake.SNAPMIRROR_POLICY_GET_ITER_RESPONSE)
self.mock_object(self.client, 'send_iter_request',
mock.Mock(return_value=api_response))
result_elem = [fake.SNAPMIRROR_POLICY_NAME]
result = self.client.get_snapmirror_policies(fake.VSERVER_NAME)
result = self.client.get_snapmirror_policies(
fake.VSERVER_NAME)
expected_api_args = {
'query': {
@ -7645,7 +7681,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.client.send_iter_request.assert_called_once_with(
'snapmirror-policy-get-iter', expected_api_args)
self.assertEqual([fake.SNAPMIRROR_POLICY_NAME], result)
self.assertEqual(result_elem, result)
@ddt.data(True, False, None)
def test_start_vserver(self, force):
@ -8254,3 +8290,231 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.assertEqual(expected, result)
self.client.send_iter_request.assert_called_once_with(
'fpolicy-policy-status-get-iter', expected_args)
def test_is_svm_migrate_supported(self):
self.client.features.add_feature('SVM_MIGRATE')
result = self.client.is_svm_migrate_supported()
self.assertTrue(result)
@ddt.data(
{"body": fake.FAKE_HTTP_BODY,
"headers": fake.FAKE_HTTP_HEADER,
"query": {},
"url_params": fake.FAKE_URL_PARAMS
},
{"body": {},
"headers": fake.FAKE_HTTP_HEADER,
"query": fake.FAKE_HTTP_QUERY,
"url_params": fake.FAKE_URL_PARAMS
},
)
@ddt.unpack
def test__format_request(self, body, headers, query, url_params):
expected_result = {
"body": body,
"headers": headers,
"query": query,
"url_params": url_params
}
result = self.client._format_request(
body, headers=headers, query=query, url_params=url_params)
for k, v in expected_result.items():
self.assertIn(k, result)
self.assertEqual(result.get(k), v)
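The url_params produced here are later substituted into the REST endpoint templates; the standalone illustration below assumes the templates use Python %-style named placeholders, as ENDPOINT_MIGRATION_ACTIONS does in rest_endpoints.
template = 'svm/migrations/%(svm_migration_id)s'  # assumed template
url = template % {'svm_migration_id': 'fake_migration_uuid'}
assert url == 'svm/migrations/fake_migration_uuid'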
@ddt.data(
{"dest_ipspace": None, "check_only": True},
{"dest_ipspace": "fake_dest_ipspace", "check_only": False},
)
@ddt.unpack
def test_svm_migration_start(self, dest_ipspace, check_only):
api_args = {
"auto_cutover": False,
"auto_source_cleanup": True,
"check_only": check_only,
"source": {
"cluster": {"name": fake.CLUSTER_NAME},
"svm": {"name": fake.VSERVER_NAME},
},
"destination": {
"volume_placement": {
"aggregates": [fake.SHARE_AGGREGATE_NAME],
},
},
}
if dest_ipspace:
ipspace_data = {
"ipspace": {"name": dest_ipspace}
}
api_args['destination'].update(ipspace_data)
self.mock_object(self.client, '_format_request',
mock.Mock(return_value=api_args))
self.mock_object(
self.client, 'send_request',
mock.Mock(return_value=fake.FAKE_MIGRATION_RESPONSE_WITH_JOB))
result = self.client.svm_migration_start(
fake.CLUSTER_NAME, fake.VSERVER_NAME, [fake.SHARE_AGGREGATE_NAME],
dest_ipspace=dest_ipspace, check_only=check_only)
self.client._format_request.assert_called_once_with(api_args)
self.client.send_request.assert_called_once_with(
'svm-migration-start', api_args=api_args, use_zapi=False)
self.assertEqual(result, fake.FAKE_MIGRATION_RESPONSE_WITH_JOB)
@ddt.data({"check_only": False}, {"check_only": True})
def test_svm_migration_start_failed(self, check_only):
api_args = {}
self.mock_object(self.client, '_format_request',
mock.Mock(return_value=api_args))
self.mock_object(
self.client, 'send_request',
mock.Mock(side_effect=netapp_api.NaApiError(message='fake')))
self.assertRaises(
netapp_api.NaApiError,
self.client.svm_migration_start,
fake.CLUSTER_NAME, fake.VSERVER_NAME,
[fake.SHARE_AGGREGATE_NAME],
check_only=check_only
)
def test_svm_migrate_complete(self):
migration_id = 'ongoing_migration_id'
request = {
'action': 'cutover'
}
expected_url_params = {
'svm_migration_id': migration_id
}
self.mock_object(self.client, '_format_request',
mock.Mock(return_value=request))
self.mock_object(
self.client, 'send_request',
mock.Mock(return_value=fake.FAKE_MIGRATION_RESPONSE_WITH_JOB))
self.client.svm_migrate_complete(migration_id)
self.client._format_request.assert_called_once_with(
request, url_params=expected_url_params)
self.client.send_request.assert_called_once_with(
'svm-migration-complete', api_args=request, use_zapi=False)
def test_get_job(self):
request = {}
job_uuid = 'fake_job_uuid'
url_params = {
'job_uuid': job_uuid
}
self.mock_object(self.client, '_format_request',
mock.Mock(return_value=request))
self.mock_object(self.client, 'send_request',
mock.Mock(return_value=fake.FAKE_JOB_SUCCESS_STATE))
result = self.client.get_job(job_uuid)
self.assertEqual(fake.FAKE_JOB_SUCCESS_STATE, result)
self.client._format_request.assert_called_once_with(
request, url_params=url_params)
self.client.send_request.assert_called_once_with(
'get-job', api_args=request, use_zapi=False)
def test_svm_migrate_cancel(self):
request = {}
migration_id = 'fake_migration_uuid'
url_params = {
"svm_migration_id": migration_id
}
self.mock_object(self.client, '_format_request',
mock.Mock(return_value=request))
self.mock_object(
self.client, 'send_request',
mock.Mock(return_value=fake.FAKE_MIGRATION_RESPONSE_WITH_JOB))
result = self.client.svm_migrate_cancel(migration_id)
self.assertEqual(fake.FAKE_MIGRATION_RESPONSE_WITH_JOB, result)
self.client._format_request.assert_called_once_with(
request, url_params=url_params)
self.client.send_request.assert_called_once_with(
'svm-migration-cancel', api_args=request, use_zapi=False)
def test_svm_migration_get(self):
request = {}
migration_id = 'fake_migration_uuid'
url_params = {
"svm_migration_id": migration_id
}
self.mock_object(self.client, '_format_request',
mock.Mock(return_value=request))
self.mock_object(
self.client, 'send_request',
mock.Mock(return_value=fake.FAKE_MIGRATION_JOB_SUCCESS))
result = self.client.svm_migration_get(migration_id)
self.assertEqual(fake.FAKE_MIGRATION_JOB_SUCCESS, result)
self.client._format_request.assert_called_once_with(
request, url_params=url_params)
self.client.send_request.assert_called_once_with(
'svm-migration-get', api_args=request, use_zapi=False)
def test_svm_migrate_pause(self):
request = {
"action": "pause"
}
migration_id = 'fake_migration_uuid'
url_params = {
"svm_migration_id": migration_id
}
self.mock_object(self.client, '_format_request',
mock.Mock(return_value=request))
self.mock_object(
self.client, 'send_request',
mock.Mock(return_value=fake.FAKE_MIGRATION_RESPONSE_WITH_JOB))
result = self.client.svm_migrate_pause(migration_id)
self.assertEqual(fake.FAKE_MIGRATION_RESPONSE_WITH_JOB, result)
self.client._format_request.assert_called_once_with(
request, url_params=url_params)
self.client.send_request.assert_called_once_with(
'svm-migration-pause', api_args=request, use_zapi=False)
def test_migration_check_job_state(self):
self.mock_object(self.client, 'get_job',
mock.Mock(return_value=fake.FAKE_JOB_SUCCESS_STATE))
result = self.client.get_migration_check_job_state(
fake.FAKE_JOB_ID
)
self.assertEqual(result, fake.FAKE_JOB_SUCCESS_STATE)
self.client.get_job.assert_called_once_with(fake.FAKE_JOB_ID)
@ddt.data(netapp_api.ENFS_V4_0_ENABLED_MIGRATION_FAILURE,
netapp_api.EVSERVER_MIGRATION_TO_NON_AFF_CLUSTER)
def test_migration_check_job_state_failed(self, error_code):
self.mock_object(
self.client, 'get_job',
mock.Mock(side_effect=netapp_api.NaApiError(code=error_code)))
self.assertRaises(
exception.NetAppException,
self.client.get_migration_check_job_state,
fake.FAKE_JOB_ID
)
self.client.get_job.assert_called_once_with(fake.FAKE_JOB_ID)
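A hedged sketch of the error translation this test implies, using the netapp_api and exception imports already present in this test module; the actual get_migration_check_job_state may differ.
def get_migration_check_job_state(self, job_id):
    try:
        return self.get_job(job_id)
    except netapp_api.NaApiError as e:
        # Precheck failures surface as specific ONTAP error codes and
        # are translated into a manila-level exception.
        if e.code in (netapp_api.ENFS_V4_0_ENABLED_MIGRATION_FAILURE,
                      netapp_api.EVSERVER_MIGRATION_TO_NON_AFF_CLUSTER):
            raise exception.NetAppException(message=e.message)
        raise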


@ -26,6 +26,7 @@ from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.client import client_cmode
from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
from manila.share.drivers.netapp import options as na_opts
from manila.share import utils as share_utils
from manila import test
from manila.tests.share.drivers.netapp.dataontap import fakes as fake
from manila.tests.share.drivers.netapp import fakes as na_fakes
@ -93,6 +94,22 @@ class NetAppCDOTDataMotionTestCase(test.TestCase):
ssl_cert_path='/etc/ssl/certs', trace=mock.ANY,
vserver='fake_vserver')
def test_get_client_for_host(self):
mock_extract_host = self.mock_object(
share_utils, 'extract_host',
mock.Mock(return_value=fake.BACKEND_NAME))
mock_get_client = self.mock_object(
data_motion, 'get_client_for_backend',
mock.Mock(return_value=self.mock_cmode_client))
returned_client = data_motion.get_client_for_host(
fake.HOST_NAME)
mock_extract_host.assert_called_once_with(
fake.HOST_NAME, level='backend_name')
mock_get_client.assert_called_once_with(fake.BACKEND_NAME)
self.assertEqual(returned_client, self.mock_cmode_client)
def test_get_config_for_backend(self):
self.mock_object(data_motion, "CONF")
CONF.set_override("netapp_vserver", 'fake_vserver',


@ -1917,12 +1917,14 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
vserver_client.offline_volume.assert_called_with(fake.SHARE_NAME)
vserver_client.delete_volume.assert_called_with(fake.SHARE_NAME)
def test_create_export(self):
@ddt.data(None, fake.MANILA_HOST_NAME_2)
def test_create_export(self, share_host):
protocol_helper = mock.Mock()
callback = (lambda export_address, export_path='fake_export_path':
':'.join([export_address, export_path]))
protocol_helper.create_share.return_value = callback
expected_host = share_host if share_host else fake.SHARE['host']
self.mock_object(self.library,
'_get_helper',
mock.Mock(return_value=protocol_helper))
@ -1937,11 +1939,12 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
result = self.library._create_export(fake.SHARE,
fake.SHARE_SERVER,
fake.VSERVER1,
vserver_client)
vserver_client,
share_host=share_host)
self.assertEqual(fake.NFS_EXPORTS, result)
mock_get_export_addresses_with_metadata.assert_called_once_with(
fake.SHARE, fake.SHARE_SERVER, fake.LIFS)
fake.SHARE, fake.SHARE_SERVER, fake.LIFS, expected_host)
protocol_helper.create_share.assert_called_once_with(
fake.SHARE, fake.SHARE_NAME, clear_current_export_policy=True,
ensure_share_already_exists=False, replica=False)
@ -1969,7 +1972,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock.Mock(return_value=[fake.LIF_ADDRESSES[1]]))
result = self.library._get_export_addresses_with_metadata(
fake.SHARE, fake.SHARE_SERVER, fake.LIFS)
fake.SHARE, fake.SHARE_SERVER, fake.LIFS, fake.SHARE['host'])
self.assertEqual(fake.INTERFACE_ADDRESSES_WITH_METADATA, result)
mock_get_aggregate_node.assert_called_once_with(fake.POOL_NAME)
@ -1986,7 +1989,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock.Mock(return_value=[fake.LIF_ADDRESSES[1]]))
result = self.library._get_export_addresses_with_metadata(
fake.SHARE, fake.SHARE_SERVER, fake.LIFS)
fake.SHARE, fake.SHARE_SERVER, fake.LIFS, fake.SHARE['host'])
expected = copy.deepcopy(fake.INTERFACE_ADDRESSES_WITH_METADATA)
for key, value in expected.items():


@ -520,6 +520,10 @@ SHARE_SERVER = {
'network_allocations': (USER_NETWORK_ALLOCATIONS +
ADMIN_NETWORK_ALLOCATIONS),
'host': SERVER_HOST,
'share_network_subnet': {
'neutron_net_id': 'fake_neutron_net_id',
'neutron_subnet_id': 'fake_neutron_subnet_id'
}
}
SHARE_SERVER_2 = {
@ -531,6 +535,10 @@ SHARE_SERVER_2 = {
'network_allocations': (USER_NETWORK_ALLOCATIONS +
ADMIN_NETWORK_ALLOCATIONS),
'host': SERVER_HOST_2,
'share_network_subnet': {
'neutron_net_id': 'fake_neutron_net_id_2',
'neutron_subnet_id': 'fake_neutron_subnet_id_2'
}
}
VSERVER_INFO = {


@ -0,0 +1,5 @@
---
features:
- |
The NetApp ONTAP driver now supports share server migration through
SVM migrate when both source and destination clusters run ONTAP 9.10
or later. These migrations can be nondisruptive as long as no network
changes are involved.
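
As a usage sketch, an operator could exercise the feature through manila's share server migration commands; the flags and values below are illustrative, so check `manila help share-server-migration-start` on a real deployment.
manila share-server-migration-check <share-server-id> <destination-host> \
    --writable False --nondisruptive True --preserve-snapshots False
manila share-server-migration-start <share-server-id> <destination-host> \
    --writable False --nondisruptive True --preserve-snapshots False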