[NetApp] Share server migration through SVM migrate

Implements share server migration using a purpose-built mechanism
provided by ONTAP (SVM Migrate). When the driver identifies that the
ONTAP version supports this mechanism, it automatically chooses it
instead of SVM DR (a condensed sketch of the selection logic follows
the list below).

- Implemented new methods for migrating a share server through SVM
Migrate.

- Added a new flag to the driver called `cleanup_source_server`,
which allows admins to specify whether the source share server
should be immediately cleaned up after the migration cutover.

- The NetApp driver no longer needs to create an actual share server
in the backend prior to the migration when SVM Migrate is being
used.

- The NetApp ONTAP driver can now reuse network allocations from
the source share server when no share network change is
identified.
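
A condensed sketch of the selection logic, with names taken from the patch
below; `client`, `src_client` and `dest_client` stand for cDOT client
objects (the gate is an ONTAP version feature check):

    # SVM Migrate is only advertised by clusters running ONTAP >= 9.10.
    ontap_9_10 = client.get_system_version()['version-tuple'] >= (9, 10, 0)
    client.features.add_feature('SVM_MIGRATE', supported=ontap_9_10)

    # Both source and destination must support it; otherwise the driver
    # falls back to the SnapMirror-based SVM DR strategy.
    use_svm_migrate = (src_client.is_svm_migrate_supported()
                       and dest_client.is_svm_migrate_supported())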

Change-Id: Idf1581d933d11280287f6801fd4aa886a627f66f
Depends-On: I48bafd92fe7a4d4ae0bafd5bf1961dace56b6005
Authored by silvacarloss on 2021-05-13 11:41:49 -03:00; committed by Carlos Eduardo
Parent: ca9213ef40
Commit: 9e3af2e074
14 changed files with 2253 additions and 334 deletions

View File

@ -23,12 +23,14 @@ import re
from lxml import etree
from oslo_log import log
from oslo_serialization import jsonutils
import requests
from requests import auth
import six
from manila import exception
from manila.i18n import _
from manila.share.drivers.netapp.dataontap.client import rest_endpoints
from manila.share.drivers.netapp import utils
LOG = log.getLogger(__name__)
@ -62,28 +64,23 @@ EPOLICYNOTFOUND = '18251'
EEVENTNOTFOUND = '18253'
ESCOPENOTFOUND = '18259'
ESVMDR_CANNOT_PERFORM_OP_FOR_STATUS = '18815'
ENFS_V4_0_ENABLED_MIGRATION_FAILURE = '13172940'
EVSERVER_MIGRATION_TO_NON_AFF_CLUSTER = '13172984'
STYLE_LOGIN_PASSWORD = 'basic_auth'
TRANSPORT_TYPE_HTTP = 'http'
TRANSPORT_TYPE_HTTPS = 'https'
STYLE_CERTIFICATE = 'certificate_auth'
class BaseClient(object):
"""Encapsulates server connection logic."""
def __init__(self, host, transport_type=TRANSPORT_TYPE_HTTP, style=None,
ssl_cert_path=None, username=None, password=None, port=None,
trace=False, api_trace_pattern=None):
super(BaseClient, self).__init__()
self._host = host
self.set_transport_type(transport_type)
self.set_style(style)
if port:
@ -99,9 +96,21 @@ class NaServer(object):
# Note(felipe_rodrigues): it will verify with the Mozilla CA roots,
# given by the certifi package.
self._ssl_verify = True
def get_style(self):
"""Get the authorization style for communicating with the server."""
return self._auth_style
def set_style(self, style):
"""Set the authorization style for communicating with the server.
Supports basic_auth for now. Certificate_auth mode to be done.
"""
if style.lower() not in (STYLE_LOGIN_PASSWORD, STYLE_CERTIFICATE):
raise ValueError('Unsupported authentication style')
self._auth_style = style.lower()
def get_transport_type(self):
"""Get the transport type protocol."""
return self._protocol
@ -112,38 +121,12 @@ class NaServer(object):
Supports http and https transport types.
"""
if transport_type.lower() not in (
TRANSPORT_TYPE_HTTP, TRANSPORT_TYPE_HTTPS):
raise ValueError('Unsupported transport type')
self._protocol = transport_type.lower()
self._refresh_conn = True
def get_server_type(self):
"""Get the target server type."""
return self._server_type
def set_server_type(self, server_type):
@ -151,16 +134,7 @@ class NaServer(object):
Supports filer and dfm server types.
"""
raise NotImplementedError()
def set_api_version(self, major, minor):
"""Set the API version."""
@ -216,14 +190,6 @@ class NaServer(object):
return self._timeout
return None
def get_vserver(self):
"""Get the vserver to use in tunneling."""
return self._vserver
@ -242,10 +208,104 @@ class NaServer(object):
self._password = password
self._refresh_conn = True
def invoke_successfully(self, na_element, api_args=None,
enable_tunneling=False, use_zapi=True):
"""Invokes API and checks execution status as success.
Need to set enable_tunneling to True explicitly to achieve it.
This helps to use same connection instance to enable or disable
tunneling. The vserver or vfiler should be set before this call
otherwise tunneling remains disabled.
"""
pass
def _build_session(self):
if self._auth_style == STYLE_LOGIN_PASSWORD:
auth_handler = self._create_basic_auth_handler()
else:
auth_handler = self._create_certificate_auth_handler()
self._session = requests.Session()
self._session.auth = auth_handler
self._session.verify = self._ssl_verify
headers = self._build_headers()
self._session.headers = headers
def _build_headers(self):
raise NotImplementedError()
def _create_basic_auth_handler(self):
return auth.HTTPBasicAuth(self._username, self._password)
def _create_certificate_auth_handler(self):
raise NotImplementedError()
def __str__(self):
return "server: %s" % (self._host)
class ZapiClient(BaseClient):
SERVER_TYPE_FILER = 'filer'
SERVER_TYPE_DFM = 'dfm'
URL_FILER = 'servlets/netapp.servlets.admin.XMLrequest_filer'
URL_DFM = 'apis/XMLrequest'
NETAPP_NS = 'http://www.netapp.com/filer/admin'
def __init__(self, host, server_type=SERVER_TYPE_FILER,
transport_type=TRANSPORT_TYPE_HTTP,
style=STYLE_LOGIN_PASSWORD, ssl_cert_path=None, username=None,
password=None, port=None, trace=False,
api_trace_pattern=utils.API_TRACE_PATTERN):
super(ZapiClient, self).__init__(
host, transport_type=transport_type, style=style,
ssl_cert_path=ssl_cert_path, username=username, password=password,
port=port, trace=trace, api_trace_pattern=api_trace_pattern)
self.set_server_type(server_type)
self._set_port()
def _set_port(self):
if self._protocol == TRANSPORT_TYPE_HTTP:
if self._server_type == ZapiClient.SERVER_TYPE_FILER:
self.set_port(80)
else:
self.set_port(8088)
else:
if self._server_type == ZapiClient.SERVER_TYPE_FILER:
self.set_port(443)
else:
self.set_port(8488)
def set_server_type(self, server_type):
"""Set the target server type.
Supports filer and dfm server types.
"""
if server_type.lower() not in (ZapiClient.SERVER_TYPE_FILER,
ZapiClient.SERVER_TYPE_DFM):
raise ValueError('Unsupported server type')
self._server_type = server_type.lower()
if self._server_type == ZapiClient.SERVER_TYPE_FILER:
self._url = ZapiClient.URL_FILER
else:
self._url = ZapiClient.URL_DFM
self._ns = ZapiClient.NETAPP_NS
self._refresh_conn = True
def get_vfiler(self):
"""Get the vfiler to use in tunneling."""
return self._vfiler
def set_vfiler(self, vfiler):
"""Set the vfiler to use if tunneling gets enabled."""
self._vfiler = vfiler
def invoke_elem(self, na_element, enable_tunneling=False):
"""Invoke the API on the server."""
if na_element and not isinstance(na_element, NaElement):
raise ValueError('NaElement must be supplied to invoke API')
request_element = self._create_request(na_element, enable_tunneling)
request_d = request_element.to_string()
@ -282,7 +342,8 @@ class NaServer(object):
return response_element
def invoke_successfully(self, na_element, api_args=None,
enable_tunneling=False, use_zapi=True):
"""Invokes API and checks execution status as success.
Need to set enable_tunneling to True explicitly to achieve it.
@ -290,7 +351,12 @@ class NaServer(object):
tunneling. The vserver or vfiler should be set before this call
otherwise tunneling remains disabled.
"""
if api_args:
na_element.translate_struct(api_args)
result = self.invoke_elem(
na_element, enable_tunneling=enable_tunneling)
if result.has_attr('status') and result.get_attr('status') == 'passed':
return result
code = (result.get_attr('errno')
@ -336,7 +402,8 @@ class NaServer(object):
raise ValueError('ontapi version has to be at least 1.15'
' to send request to vserver')
@staticmethod
def _parse_response(response):
"""Get the NaElement for the response."""
if not response:
raise NaApiError('No response received')
@ -354,23 +421,277 @@ class NaServer(object):
host = '[%s]' % host
return '%s://%s:%s/%s' % (self._protocol, host, self._port, self._url)
def _build_headers(self):
return {'Content-Type': 'text/xml'}
class RestClient(BaseClient):
def __init__(self, host, transport_type=TRANSPORT_TYPE_HTTP,
style=STYLE_LOGIN_PASSWORD, ssl_cert_path=None, username=None,
password=None, port=None, trace=False,
api_trace_pattern=utils.API_TRACE_PATTERN):
super(RestClient, self).__init__(
host, transport_type=transport_type, style=style,
ssl_cert_path=ssl_cert_path, username=username, password=password,
port=port, trace=trace, api_trace_pattern=api_trace_pattern)
self._set_port()
def _set_port(self):
if self._protocol == TRANSPORT_TYPE_HTTP:
self.set_port(80)
else:
self.set_port(443)
def _get_request_info(self, api_name, session):
"""Returns the request method and url to be used in the REST call."""
request_methods = {
'post': session.post,
'get': session.get,
'put': session.put,
'delete': session.delete,
'patch': session.patch,
}
rest_call = rest_endpoints.endpoints.get(api_name)
return request_methods[rest_call['method']], rest_call['url']
def _add_query_params_to_url(self, url, query):
"""Populates the URL with specified filters."""
filters = ""
for k, v in query.items():
filters += "%(key)s=%(value)s&" % {"key": k, "value": v}
url += "?" + filters
return url
def invoke_elem(self, na_element, api_args=None):
"""Invoke the API on the server."""
if na_element and not isinstance(na_element, NaElement):
raise ValueError('NaElement must be supplied to invoke API')
api_name = na_element.get_name()
api_name_matches_regex = (re.match(self._api_trace_pattern, api_name)
is not None)
data = api_args.get("body") if api_args else {}
if (not hasattr(self, '_session') or not self._session
or self._refresh_conn):
self._build_session()
request_method, action_url = self._get_request_info(
api_name, self._session)
url_params = api_args.get("url_params") if api_args else None
if url_params:
action_url = action_url % url_params
if self._trace and api_name_matches_regex:
message = ("Request: %(method)s %(url)s. Request body "
"%(body)s") % {
"method": request_method,
"url": action_url,
"body": data
}
LOG.debug(message)
query = api_args.get("query") if api_args else None
if query:
action_url = self._add_query_params_to_url(
action_url, api_args['query'])
url = self._get_base_url() + action_url
data = jsonutils.dumps(data) if data else data
try:
if hasattr(self, '_timeout'):
response = request_method(
url, data=data, timeout=self._timeout)
else:
response = request_method(url, data=data)
except requests.HTTPError as e:
raise NaApiError(e.errno, e.strerror)
except requests.URLRequired as e:
raise exception.StorageCommunicationException(six.text_type(e))
except Exception as e:
raise NaApiError(message=e)
response = (
jsonutils.loads(response.content) if response.content else None)
if self._trace and api_name_matches_regex:
LOG.debug("Response: %s", response)
return response
def invoke_successfully(self, na_element, api_args=None,
enable_tunneling=False, use_zapi=False):
"""Invokes API and checks execution status as success.
Need to set enable_tunneling to True explicitly to achieve it.
This helps to use same connection instance to enable or disable
tunneling. The vserver or vfiler should be set before this call
otherwise tunneling remains disabled.
"""
result = self.invoke_elem(na_element, api_args=api_args)
if not result.get('error'):
return result
result_error = result.get('error')
code = (result_error.get('code')
or 'ESTATUSFAILED')
if code == ESIS_CLONE_NOT_LICENSED:
msg = 'Clone operation failed: FlexClone not licensed.'
else:
msg = (result_error.get('message')
or 'Execution status is failed due to unknown reason')
raise NaApiError(code, msg)
def _get_base_url(self):
host = self._host
if ':' in host:
host = '[%s]' % host
return '%s://%s:%s/api/' % (self._protocol, host, self._port)
def _build_headers(self):
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
return headers
class NaServer(object):
"""Encapsulates server connection logic."""
def __init__(self, host, transport_type=TRANSPORT_TYPE_HTTP,
style=STYLE_LOGIN_PASSWORD, ssl_cert_path=None, username=None,
password=None, port=None, trace=False,
api_trace_pattern=utils.API_TRACE_PATTERN):
self.zapi_client = ZapiClient(
host, transport_type=transport_type, style=style,
ssl_cert_path=ssl_cert_path, username=username, password=password,
port=port, trace=trace, api_trace_pattern=api_trace_pattern)
self.rest_client = RestClient(
host, transport_type=transport_type, style=style,
ssl_cert_path=ssl_cert_path, username=username, password=password,
port=port, trace=trace, api_trace_pattern=api_trace_pattern
)
self._host = host
LOG.debug('Using NetApp controller: %s', self._host)
def get_transport_type(self, use_zapi_client=True):
"""Get the transport type protocol."""
return self.get_client(use_zapi=use_zapi_client).get_transport_type()
def set_transport_type(self, transport_type):
"""Set the transport type protocol for API.
Supports http and https transport types.
"""
self.zapi_client.set_transport_type(transport_type)
self.rest_client.set_transport_type(transport_type)
def get_style(self, use_zapi_client=True):
"""Get the authorization style for communicating with the server."""
return self.get_client(use_zapi=use_zapi_client).get_style()
def set_style(self, style):
"""Set the authorization style for communicating with the server.
Supports basic_auth for now. Certificate_auth mode to be done.
"""
self.zapi_client.set_style(style)
self.rest_client.set_style(style)
def get_server_type(self, use_zapi_client=True):
"""Get the target server type."""
return self.get_client(use_zapi=use_zapi_client).get_server_type()
def set_server_type(self, server_type):
"""Set the target server type.
Supports filer and dfm server types.
"""
self.zapi_client.set_server_type(server_type)
self.rest_client.set_server_type(server_type)
def set_api_version(self, major, minor):
"""Set the API version."""
self.zapi_client.set_api_version(major, minor)
self.rest_client.set_api_version(1, 0)
def set_system_version(self, system_version):
"""Set the ONTAP system version."""
self.zapi_client.set_system_version(system_version)
self.rest_client.set_system_version(system_version)
def get_api_version(self, use_zapi_client=True):
"""Gets the API version tuple."""
return self.get_client(use_zapi=use_zapi_client).get_api_version()
def get_system_version(self, use_zapi_client=True):
"""Gets the ONTAP system version."""
return self.get_client(use_zapi=use_zapi_client).get_system_version()
def set_port(self, port):
"""Set the server communication port."""
self.zapi_client.set_port(port)
self.rest_client.set_port(port)
def get_port(self, use_zapi_client=True):
"""Get the server communication port."""
return self.get_client(use_zapi=use_zapi_client).get_port()
def set_timeout(self, seconds):
"""Sets the timeout in seconds."""
self.zapi_client.set_timeout(seconds)
self.rest_client.set_timeout(seconds)
def get_timeout(self, use_zapi_client=True):
"""Gets the timeout in seconds if set."""
return self.get_client(use_zapi=use_zapi_client).get_timeout()
def get_vfiler(self):
"""Get the vfiler to use in tunneling."""
return self.zapi_client.get_vfiler()
def set_vfiler(self, vfiler):
"""Set the vfiler to use if tunneling gets enabled."""
self.zapi_client.set_vfiler(vfiler)
def get_vserver(self, use_zapi_client=True):
"""Get the vserver to use in tunneling."""
return self.get_client(use_zapi=use_zapi_client).get_vserver()
def set_vserver(self, vserver):
"""Set the vserver to use if tunneling gets enabled."""
self.zapi_client.set_vserver(vserver)
self.rest_client.set_vserver(vserver)
def set_username(self, username):
"""Set the user name for authentication."""
self.zapi_client.set_username(username)
self.rest_client.set_username(username)
def set_password(self, password):
"""Set the password for authentication."""
self.zapi_client.set_password(password)
self.rest_client.set_password(password)
def get_client(self, use_zapi=True):
"""Chooses the client to be used in the request."""
if use_zapi:
return self.zapi_client
return self.rest_client
def invoke_successfully(self, na_element, api_args=None,
enable_tunneling=False, use_zapi=True):
"""Invokes API and checks execution status as success.
Need to set enable_tunneling to True explicitly to achieve it.
This helps to use same connection instance to enable or disable
tunneling. The vserver or vfiler should be set before this call
otherwise tunneling remains disabled.
"""
return self.get_client(use_zapi=use_zapi).invoke_successfully(
na_element, api_args=api_args, enable_tunneling=enable_tunneling)
def __str__(self):
return "server: %s" % (self._host)

View File

@ -81,12 +81,13 @@ class NetAppBaseClient(object):
return string.split('}', 1)[1]
return string
def send_request(self, api_name, api_args=None, enable_tunneling=True,
use_zapi=True):
"""Sends request to Ontapi."""
request = netapp_api.NaElement(api_name)
return self.connection.invoke_successfully(
request, api_args=api_args, enable_tunneling=enable_tunneling,
use_zapi=use_zapi)
@na_utils.trace
def get_licenses(self):

View File

@ -74,6 +74,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
ontapi_1_120 = ontapi_version >= (1, 120)
ontapi_1_140 = ontapi_version >= (1, 140)
ontapi_1_150 = ontapi_version >= (1, 150)
ontap_9_10 = self.get_system_version()['version-tuple'] >= (9, 10, 0)
self.features.add_feature('SNAPMIRROR_V2', supported=ontapi_1_20)
self.features.add_feature('SYSTEM_METRICS', supported=ontapi_1_2x)
@ -95,6 +96,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
supported=ontapi_1_150)
self.features.add_feature('LDAP_LDAP_SERVERS',
supported=ontapi_1_120)
self.features.add_feature('SVM_MIGRATE', supported=ontap_9_10)
def _invoke_vserver_api(self, na_element, vserver):
server = copy.copy(self.connection)
@ -2853,6 +2855,15 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
@na_utils.trace
def get_volume_junction_path(self, volume_name, is_style_cifs=False):
"""Gets a volume junction path."""
api_args = {
'volume': volume_name,
'is-style-cifs': six.text_type(is_style_cifs).lower(),
@ -3598,7 +3609,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
# NOTE(cknight): Cannot use deepcopy on the connection context
node_client = copy.copy(self)
node_client.connection = copy.copy(self.connection.get_client())
node_client.connection.set_timeout(25)
try:
@ -4637,7 +4648,8 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
raise
@na_utils.trace
def get_snapmirror_policies(self, vserver_name,
desired_attribute='policy-name'):
"""Get all SnapMirror policies associated to a vServer."""
api_args = {
@ -4649,6 +4661,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
'desired-attributes': {
'snapmirror-policy-info': {
'policy-name': None,
'type': None
},
},
}
@ -4656,7 +4669,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
attributes_list = result.get_child_by_name(
'attributes-list') or netapp_api.NaElement('none')
return [policy_info.get_child_content(desired_attribute)
for policy_info in attributes_list.get_children()]
@na_utils.trace
@ -5433,3 +5446,178 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
raise exception.NetAppException(msg)
return fpolicy_status
@na_utils.trace
def is_svm_migrate_supported(self):
"""Checks if the cluster supports SVM Migrate."""
return self.features.SVM_MIGRATE
# ------------------------ REST CALLS ONLY ------------------------
@na_utils.trace
def _format_request(self, request_data, headers={}, query={},
url_params={}):
"""Receives the request data and formats it into a request pattern.
:param request_data: the body to be sent to the request.
:param headers: additional headers to the request.
:param query: filters to the request.
:param url_params: parameters to be interpolated into the request URL.
"""
request = {
"body": request_data,
"headers": headers,
"query": query,
"url_params": url_params
}
return request
@na_utils.trace
def svm_migration_start(
self, source_cluster_name, source_share_server_name,
dest_aggregates, dest_ipspace=None, check_only=False,
auto_source_cleanup=False):
"""Send a request to start the SVM migration in the backend.
:param source_cluster_name: the name of the source cluster.
:param source_share_server_name: the name of the source server.
:param dest_aggregates: the aggregates where volumes will be placed in
the migration.
:param dest_ipspace: created IPspace for the migration.
:param check_only: whether the call should only check the feasibility
of the migration.
:param auto_source_cleanup: whether the source share server must be
deleted after the cutover or not.
"""
request = {
"auto_cutover": False,
"auto_source_cleanup": auto_source_cleanup,
"check_only": check_only,
"source": {
"cluster": {"name": source_cluster_name},
"svm": {"name": source_share_server_name},
},
"destination": {
"volume_placement": {
"aggregates": dest_aggregates,
},
},
}
if dest_ipspace:
ipspace_data = {
"ipspace": {
"uuid": dest_ipspace,
}
}
request["destination"].update(ipspace_data)
api_args = self._format_request(request)
try:
result = self.send_request(
'svm-migration-start', api_args=api_args, use_zapi=False)
except netapp_api.NaApiError:
msg = "Could not start share server migration."
raise exception.NetAppException(msg)
return result
@na_utils.trace
def get_migration_check_job_state(self, job_id):
try:
job = self.get_job(job_id)
return job.get("state")
except netapp_api.NaApiError as e:
if e.code == netapp_api.ENFS_V4_0_ENABLED_MIGRATION_FAILURE:
msg = _(
'NFS v4.0 is not supported while migrating vservers.')
LOG.error(msg)
raise exception.NetAppException(message=e.message)
if e.code == netapp_api.EVSERVER_MIGRATION_TO_NON_AFF_CLUSTER:
msg = _('Both source and destination clusters must be AFF '
'systems.')
LOG.error(msg)
raise exception.NetAppException(message=e.message)
msg = (_('Failed to check migration support. Reason: '
'%s' % e.message))
raise exception.NetAppException(msg)
@na_utils.trace
def svm_migrate_complete(self, migration_id):
"""Send a request to complete the SVM migration.
:param migration_id: the id of the migration provided by the storage.
"""
request = {
"action": "cutover"
}
url_params = {
"svm_migration_id": migration_id
}
api_args = self._format_request(
request, url_params=url_params)
job = self.send_request(
'svm-migration-complete', api_args=api_args, use_zapi=False)
return job
@na_utils.trace
def svm_migrate_cancel(self, migration_id):
"""Send a request to cancel the SVM migration.
:param migration_id: the id of the migration provided by the storage.
"""
request = {}
url_params = {
"svm_migration_id": migration_id
}
api_args = self._format_request(request, url_params=url_params)
job = self.send_request(
'svm-migration-cancel', api_args=api_args, use_zapi=False)
return job
@na_utils.trace
def svm_migration_get(self, migration_id):
"""Send a request to get the progress of the SVM migration.
:param migration_id: the id of the migration provided by the storage.
"""
request = {}
url_params = {
"svm_migration_id": migration_id
}
api_args = self._format_request(request, url_params=url_params)
result = self.send_request(
'svm-migration-get', api_args=api_args, use_zapi=False)
return result
@na_utils.trace
def svm_migrate_pause(self, migration_id):
"""Send a request to pause a migration.
:param migration_id: the id of the migration provided by the storage.
"""
request = {
"action": "pause"
}
url_params = {
"svm_migration_id": migration_id
}
api_args = self._format_request(
request, url_params=url_params)
job = self.send_request(
'svm-migration-pause', api_args=api_args, use_zapi=False)
return job
@na_utils.trace
def get_job(self, job_uuid):
request = {}
url_params = {
"job_uuid": job_uuid
}
api_args = self._format_request(request, url_params=url_params)
result = self.send_request(
'get-job', api_args=api_args, use_zapi=False)
return result
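
Taken together, a hypothetical feasibility-check flow through the methods
above (the client object, cluster/SVM names and aggregate list are
placeholders):

    # Start a dry-run migration and poll the resulting job.
    job = client.svm_migration_start('src-cluster', 'src-svm-name',
                                     ['aggr1'], check_only=True)
    state = client.get_migration_check_job_state(job['job']['uuid'])
    # 'success' means the migration is feasible. A real migration is
    # started the same way without check_only, and is later cut over
    # with svm_migrate_complete(migration_id).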

View File

@ -0,0 +1,49 @@
# Copyright 2021 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
ENDPOINT_MIGRATION_ACTIONS = 'svm/migrations/%(svm_migration_id)s'
ENDPOINT_MIGRATIONS = 'svm/migrations'
ENDPOINT_JOB_ACTIONS = 'cluster/jobs/%(job_uuid)s'
endpoints = {
'system-get-version': {
'method': 'get',
'url': 'cluster?fields=version',
},
'svm-migration-start': {
'method': 'post',
'url': ENDPOINT_MIGRATIONS
},
'svm-migration-complete': {
'method': 'patch',
'url': ENDPOINT_MIGRATION_ACTIONS
},
'svm-migration-cancel': {
'method': 'delete',
'url': ENDPOINT_MIGRATION_ACTIONS
},
'svm-migration-get': {
'method': 'get',
'url': ENDPOINT_MIGRATION_ACTIONS
},
'get-job': {
'method': 'get',
'url': ENDPOINT_JOB_ACTIONS
},
'svm-migration-pause': {
'method': 'patch',
'url': ENDPOINT_MIGRATION_ACTIONS
},
}
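
A sketch of how this table is consumed (the endpoint name and UUID are
examples): RestClient._get_request_info() in api.py above selects the
session method from 'method', and invoke_elem() interpolates 'url_params'
into 'url':

    rest_call = endpoints.get('svm-migration-complete')
    # rest_call['method'] == 'patch'  ->  session.patch
    url = rest_call['url'] % {'svm_migration_id': 'fake-migration-uuid'}
    # url == 'svm/migrations/fake-migration-uuid'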

View File

@ -299,9 +299,11 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
new_share_network, shares_request_spec)
def share_server_migration_start(self, context, src_share_server,
dest_share_server, shares, snapshots,
cleanup_source_server=False):
return self.library.share_server_migration_start(
context, src_share_server, dest_share_server, shares, snapshots,
cleanup_source_server)
def share_server_migration_continue(self, context, src_share_server,
dest_share_server, shares, snapshots):
@ -355,3 +357,13 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
return self.library.check_update_share_server_security_service(
context, share_server, network_info, new_security_service,
current_security_service=current_security_service)
def is_ip_reusage_supported_on_server_migration(
self, source_host, dest_host):
return self.library.is_ip_reusage_supported_on_server_migration(
source_host, dest_host)
def server_migration_mechanism_can_reuse_share_server(
self, source_host, dest_host):
return self.library.server_migration_mechanism_can_reuse_share_server(
source_host, dest_host)

View File

@ -285,7 +285,8 @@ class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver):
return self.library.get_share_status(share_instance, share_server)
def share_server_migration_start(self, context, src_share_server,
dest_share_server, shares, snapshots,
cleanup_source_server):
raise NotImplementedError
def share_server_migration_continue(self, context, src_share_server,
@ -306,8 +307,9 @@ class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver):
new_share_network, shares_request_spec):
raise NotImplementedError
def share_server_migration_get_progress(
self, context, src_share_server, dest_share_server, shares,
snapshots):
raise NotImplementedError
def choose_share_server_compatible_with_share(self, context, share_servers,
@ -331,3 +333,11 @@ class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver):
share_instance_rules, new_security_service,
current_security_service=None):
raise NotImplementedError
def is_ip_reusage_supported_on_server_migration(
self, source_host, dest_host):
raise NotImplementedError
def server_migration_mechanism_can_reuse_share_server(
self, source_host, dest_host):
raise NotImplementedError

View File

@ -24,6 +24,7 @@ import re
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
@ -306,10 +307,12 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
return 'ipspace_' + network_id.replace('-', '_')
@na_utils.trace
def _create_ipspace(self, network_info, client=None):
"""If supported, create an IPspace for a new Vserver."""
client = client if client else self._client
if not client.features.IPSPACES:
return None
if (network_info['network_allocations'][0]['network_type']
@ -324,7 +327,7 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
return client_cmode.DEFAULT_IPSPACE
ipspace_name = self._get_valid_ipspace_name(ipspace_id)
client.create_ipspace(ipspace_name)
return ipspace_name
@ -903,6 +906,153 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
manage_existing(share, driver_options,
share_server=share_server))
def _check_compatibility_using_svm_dr(
self, src_client, dest_client, shares_request_spec, pools):
method = 'svm_dr'
if (not src_client.is_svm_dr_supported()
or not dest_client.is_svm_dr_supported()):
msg = _("Cannot perform server migration because at least one of "
"the backends doesn't support SVM DR.")
LOG.error(msg)
return method, False
# Check capacity
server_total_size = (shares_request_spec.get('shares_size', 0) +
shares_request_spec.get('snapshots_size', 0))
# NOTE(dviroel): If the backend has a 'max_over_subscription_ratio'
# configured and greater than 1, we'll consider thin provisioning
# enabled for all shares.
thin_provisioning = self.configuration.max_over_subscription_ratio > 1
if self.configuration.netapp_server_migration_check_capacity is True:
if not self._check_capacity_compatibility(pools, thin_provisioning,
server_total_size):
msg = _("Cannot perform server migration because destination "
"host doesn't have enough free space.")
LOG.error(msg)
return method, False
return method, True
@na_utils.trace
def _get_job_uuid(self, job):
job = job.get("job")
return job.get("uuid")
def _wait_for_operation_status(
self, operation_id, get_operation, desired_status='success',
timeout=None):
timeout = (
self.configuration.netapp_server_migration_state_change_timeout
if not timeout else timeout)
interval = 10
retries = int(timeout / interval) or 1
@utils.retry(exception.ShareBackendException, interval=interval,
retries=retries, backoff_rate=1)
def wait_for_status():
# Get the job based on its id
operation = get_operation(operation_id)
status = operation.get("status") or operation.get("state")
if status != desired_status:
msg = _("Operation %(operation_id)s didn't reach status "
"%(desired_status)s. Current_status is %(status)s")
msg_payload = {
'operation_id': operation_id,
'desired_status': desired_status,
'status': status
}
LOG.debug(msg, msg_payload)
# Failed, no need to retry
if status == 'error':
msg = _('Operation %(operation_id)s is in error status. '
'Reason: %(message)s')
raise exception.NetAppException(
msg % {'operation_id': operation_id,
'message': operation.get('message')})
# Didn't fail, so we can retry
raise exception.ShareBackendException(msg)
elif status == desired_status:
msg = _('Operation %(operation_id)s reached status %(status)s')
LOG.debug(
msg, {'operation_id': operation_id, 'status': status})
return
try:
wait_for_status()
except exception.NetAppException as e:
raise exception.NetAppException(message=e.message)
except exception.ShareBackendException:
msg_args = {'operation_id': operation_id, 'status': desired_status}
msg = _('Timed out while waiting for operation %(operation_id)s '
'to reach status %(status)s') % msg_args
LOG.debug(msg)
def _check_compatibility_for_svm_migrate(
self, source_cluster_name, source_share_server_name,
source_share_server, dest_aggregates, dest_client):
"""Checks if the migration can be performed using SVM Migrate.
1. Send the request to the backend to check if the migration is possible
2. Wait until the job finishes checking the migration status
"""
src_server_id = source_share_server['id']
# 1. Sends the request to the backend
try:
job = dest_client.svm_migration_start(
source_cluster_name, source_share_server_name, dest_aggregates,
check_only=True)
except Exception:
LOG.debug('Failed to check compatibility for migration.')
raise
job_id = self._get_job_uuid(job)
def _wait_migration_check_result():
job_state = dest_client.get_migration_check_job_state(job_id)
LOG.debug(
'Waiting on share server migration check result for share '
'server %s to complete. Current status is: %s.',
src_server_id, job_state)
if job_state == 'success':
raise loopingcall.LoopingCallDone()
try:
# 2. Wait until the job to check the migration status concludes
timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
_wait_migration_check_result)
timer.start(interval=3, timeout=360).wait()
return True
except loopingcall.LoopingCallTimeOut:
msg = (
_('Timed out while waiting for the migration check of share '
'server %s to complete.'))
raise exception.NetAppException(msg % src_server_id)
except exception.NetAppException:
return False
def _check_for_migration_support(
self, src_client, dest_client, source_share_server,
shares_request_spec, src_cluster_name, pools):
if (dest_client.is_svm_migrate_supported()
and src_client.is_svm_migrate_supported()):
source_share_server_name = self._get_vserver_name(
source_share_server['id'])
try:
result = self._check_compatibility_for_svm_migrate(
src_cluster_name, source_share_server_name,
source_share_server, self._find_matching_aggregates(),
dest_client)
return 'svm_migrate', result
except Exception:
LOG.error('Failed to check the migration compatibility.')
return 'svm_migrate', False
return self._check_compatibility_using_svm_dr(
src_client, dest_client, shares_request_spec, pools)
@na_utils.trace
def share_server_migration_check_compatibility(
self, context, source_share_server, dest_host, old_share_network,
@ -958,16 +1108,18 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
LOG.error(msg)
return not_compatible
pools = self._get_pools()
# NOTE(dviroel): These clients can only be used for non-tunneling
# requests.
dst_client = data_motion.get_client_for_backend(dest_backend_name,
vserver_name=None)
migration_method, compatibility = self._check_for_migration_support(
src_client, dst_client, source_share_server, shares_request_spec,
src_cluster_name, pools)
if not compatibility:
return not_compatible
# Blocking different security services for now
@ -985,7 +1137,6 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
LOG.error(msg)
return not_compatible
# Check 'netapp_flexvol_encryption' and 'revert_to_snapshot_support'
specs_to_validate = ('netapp_flexvol_encryption',
'revert_to_snapshot_support')
@ -1000,25 +1151,14 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
return not_compatible
# TODO(dviroel): disk_type extra-spec
nondisruptive = (
(migration_method == 'svm_migrate')
and (old_share_network['id'] == new_share_network['id']))
compatibility = {
'compatible': True,
'writable': True,
'nondisruptive': nondisruptive,
'preserve_snapshots': True,
'share_network_id': new_share_network['id'],
'migration_cancel': True,
@ -1027,9 +1167,9 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
return compatibility
@na_utils.trace
def _migration_start_using_svm_dr(
self, source_share_server, dest_share_server):
"""Start share server migration using SVM DR.
1. Create vserver peering between source and destination
@ -1078,6 +1218,149 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
msg = _('Could not initialize SnapMirror between %(src)s and '
'%(dest)s vservers.') % msg_args
raise exception.NetAppException(message=msg)
return None
def _get_client_for_svm_migrate(self, share_server_to_get_client):
backend_name = share_utils.extract_host(
share_server_to_get_client['host'], level='backend_name')
client = data_motion.get_client_for_backend(backend_name)
return client
@na_utils.trace
def _ensure_cluster_has_migrate_policy(
self, cluster_client):
cluster_vserver_name = cluster_client.get_cluster_name()
cluster_policy_types = cluster_client.get_snapmirror_policies(
cluster_vserver_name, desired_attribute='type')
type_migrate = 'migrate'
if type_migrate not in cluster_policy_types:
policy_name = self._get_backend_snapmirror_policy_name_svm(
cluster_vserver_name)
cluster_client.create_snapmirror_policy(
policy_name, type=type_migrate, discard_network_info=False,
preserve_snapshots=False)
@na_utils.trace
def _migration_start_using_svm_migrate(
self, context, source_share_server, dest_share_server, src_client,
dest_client, cleanup_source_server):
"""Start share server migration using SVM Migrate.
1. Check if share network reuse is supported
2. Create a new ipspace, port and broadcast domain for the dest server
3. Create the SnapMirror 'migrate' policy if it does not exist yet
4. Send the request to start the share server migration
5. Read the job id and get the id of the migration
6. Set the migration uuid in the backend details
"""
# 1. Check if share network reuse is supported
# NOTE(carloss): If the share network was not changed, SVM Migrate can
# reuse the network allocations from the source share server, so, as
# Manila hasn't made new allocations, we can just get allocation data
# from the source share server
if (source_share_server['share_network_subnet_id'] ==
dest_share_server['share_network_subnet_id']):
share_server_to_get_network_info = source_share_server
else:
share_server_to_get_network_info = dest_share_server
# Reuse network information from the source share server in SVM
# Migrate if there was no share network change
network_info = {
'network_allocations':
share_server_to_get_network_info['network_allocations'],
'neutron_subnet_id':
share_server_to_get_network_info['share_network_subnet'].get(
'neutron_subnet_id')
}
# 2. Create new ipspace, port and broadcast domain
node_name = self._client.list_cluster_nodes()[0]
port = self._get_node_data_port(node_name)
vlan = network_info['network_allocations'][0]['segmentation_id']
destination_ipspace = self._client.get_ipspace_name_for_vlan_port(
node_name, port, vlan) or self._create_ipspace(
network_info, client=dest_client)
self._create_port_and_broadcast_domain(
destination_ipspace, network_info)
# 3. Ensure source and destination clusters contain a snapmirror policy
# with 'migrate' type
self._ensure_cluster_has_migrate_policy(src_client)
self._ensure_cluster_has_migrate_policy(dest_client)
# TODO(carloss): start using ipspace name
ipspace = dest_client.get_ipspaces(ipspace_name=destination_ipspace)
# Prepare the migration request
src_cluster_name = src_client.get_cluster_name()
source_share_server_name = self._get_vserver_name(
source_share_server['id'])
# 4. Send the migration request to ONTAP
try:
result = dest_client.svm_migration_start(
src_cluster_name, source_share_server_name,
self._find_matching_aggregates(),
dest_ipspace=ipspace[0]['uuid'],
auto_source_cleanup=cleanup_source_server)
# 5. Read the job id and get the id of the migration
result_job = result.get("job")
job_details = dest_client.get_job(result_job.get("uuid"))
job_description = job_details.get('description')
migration_uuid = job_description.split('/')[-1]
except Exception:
# remove ipspace, ports and broadcast domain
dest_client.delete_ipspace(destination_ipspace)
msg = _("Unable to start the migration for share server %s."
% source_share_server['id'])
raise exception.NetAppException(msg)
# 6. Returns migration data to be saved as backend details
server_info = {
"backend_details": {
"migration_operation_id": migration_uuid
}
}
return server_info
@na_utils.trace
def share_server_migration_start(
self, context, source_share_server, dest_share_server,
share_instances, snapshot_instances, cleanup_source_server):
"""Start share server migration.
This method will choose the best migration strategy to perform the
migration, based on the functionality supported by the storage system.
"""
src_backend_name = share_utils.extract_host(
source_share_server['host'], level='backend_name')
dest_backend_name = share_utils.extract_host(
dest_share_server['host'], level='backend_name')
dest_client = data_motion.get_client_for_backend(
dest_backend_name, vserver_name=None)
__, src_client = self._get_vserver(
share_server=source_share_server, backend_name=src_backend_name)
use_svm_migrate = (
src_client.is_svm_migrate_supported()
and dest_client.is_svm_migrate_supported())
if use_svm_migrate:
result = self._migration_start_using_svm_migrate(
context, source_share_server, dest_share_server, src_client,
dest_client, cleanup_source_server)
else:
result = self._migration_start_using_svm_dr(
source_share_server, dest_share_server)
msg_args = {
'src': source_share_server['id'],
@ -1086,6 +1369,8 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
msg = _('Starting share server migration from %(src)s to %(dest)s.')
LOG.info(msg, msg_args)
return result
def _get_snapmirror_svm(self, source_share_server, dest_share_server):
dm_session = data_motion.DataMotionSession()
try:
@ -1104,9 +1389,8 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
return snapmirrors
@na_utils.trace
def _share_server_migration_continue_svm_dr(
self, source_share_server, dest_share_server):
"""Continues a share server migration using SVM DR."""
snapmirrors = self._get_snapmirror_svm(source_share_server,
dest_share_server)
@ -1141,10 +1425,29 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
return False
@na_utils.trace
def _share_server_migration_continue_svm_migrate(self, dest_share_server,
migration_id):
dest_client = self._get_client_for_svm_migrate(dest_share_server)
result = dest_client.svm_migration_get(migration_id)
# TODO(carloss): implement the failure scenarios here as well
return result.get("state") == 'ready_for_cutover'
@na_utils.trace
def share_server_migration_continue(self, context, source_share_server,
dest_share_server, share_instances,
snapshot_instances):
migration_id = self._get_share_server_migration_id(
dest_share_server)
if migration_id:
return self._share_server_migration_continue_svm_migrate(
dest_share_server, migration_id)
return self._share_server_migration_continue_svm_dr(
source_share_server, dest_share_server)
def _share_server_migration_complete_svm_dr(
self, source_share_server, dest_share_server, src_vserver,
src_client, share_instances, new_net_allocations):
"""Perform steps to complete the SVM DR migration.
1. Do a last SnapMirror update.
2. Quiesce, abort and then break the relationship.
@ -1152,9 +1455,12 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
4. Configure network interfaces in the destination vserver
5. Start the destination vserver
6. Delete and release the snapmirror
"""
dest_backend_name = share_utils.extract_host(
dest_share_server['host'], level='backend_name')
dest_vserver, dest_client = self._get_vserver(
share_server=dest_share_server, backend_name=dest_backend_name)
dm_session = data_motion.DataMotionSession()
try:
# 1. Start an update to try to get a last minute transfer before we
@ -1165,15 +1471,6 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
# Ignore any errors since the current source may be unreachable
pass
try:
# 2. Attempt to quiesce, abort and then break SnapMirror
dm_session.quiesce_and_break_snapmirror_svm(source_share_server,
@ -1195,12 +1492,13 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
# NOTE(dviroel): Security service and NFS configuration should be
# handled by SVM DR, so no changes will be made here.
vlan = new_net_allocations['segmentation_id']
@utils.synchronized('netapp-VLAN-%s' % vlan, external=True)
def setup_network_for_destination_vserver():
self._setup_network_for_vserver(
dest_vserver, dest_client, new_net_allocations,
ipspace_name,
enable_nfs=False,
security_services=None)
@ -1237,7 +1535,75 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
dm_session.delete_snapmirror_svm(source_share_server,
dest_share_server)
def _share_server_migration_complete_svm_migrate(
self, migration_id, dest_share_server):
"""Completes share server migration using SVM DR.
1. Call functions to conclude the migration for SVM Migrate
2. Waits until the job gets a success status
3. Wait until the migration cancellation reach the desired status
"""
dest_client = self._get_client_for_svm_migrate(dest_share_server)
ongoing_migration = dest_client.svm_migration_get(migration_id)
completion_status = (
"ready_for_source_cleanup"
if ongoing_migration.get("auto_source_cleanup") is False
else "migrate_complete")
try:
# Triggers the migration completion
job = dest_client.svm_migrate_complete(migration_id)
job_id = self._get_job_uuid(job)
# Wait until the job is completed
self._wait_for_operation_status(
job_id, dest_client.get_job)
# Wait until the migration is entirely finished
self._wait_for_operation_status(
migration_id, dest_client.svm_migration_get,
desired_status=completion_status)
except exception.NetAppException:
msg = _(
"Failed to complete the migration for "
"share server %s") % dest_share_server['id']
raise exception.NetAppException(msg)
@na_utils.trace
def share_server_migration_complete(self, context, source_share_server,
dest_share_server, share_instances,
snapshot_instances, new_network_alloc):
"""Completes share server migration.
1. Call functions to conclude the migration for SVM DR or SVM Migrate
2. Build the list of export_locations for each share
3. Release all resources from the source share server
"""
src_backend_name = share_utils.extract_host(
source_share_server['host'], level='backend_name')
src_vserver, src_client = self._get_vserver(
share_server=source_share_server, backend_name=src_backend_name)
dest_backend_name = share_utils.extract_host(
dest_share_server['host'], level='backend_name')
migration_id = self._get_share_server_migration_id(dest_share_server)
share_server_to_get_vserver_name_from = dest_share_server
# 1. Call functions to conclude the migration for SVM DR or SVM Migrate
if migration_id:
self._share_server_migration_complete_svm_migrate(
migration_id, dest_share_server)
share_server_to_get_vserver_name_from = source_share_server
else:
self._share_server_migration_complete_svm_dr(
source_share_server, dest_share_server, src_vserver,
src_client, share_instances, new_network_alloc)
dest_vserver, dest_client = self._get_vserver(
share_server=share_server_to_get_vserver_name_from,
backend_name=dest_backend_name)
# 2. Build a dict with shares/snapshot location updates
# NOTE(dviroel): For SVM DR, the share names aren't modified, only the
# export_locations are updated due to network changes.
share_updates = {}
@ -1262,24 +1628,31 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
'in the destination vserver.') % msg_args
raise exception.NetAppException(message=msg)
changed_network_on_migration = (
source_share_server['share_network_subnet_id'] !=
dest_share_server['share_network_subnet_id'])
new_share_data = {
'pool_name': volume.get('aggregate')
}
if changed_network_on_migration or not migration_id:
export_locations = self._create_export(
instance, dest_share_server, dest_vserver, dest_client,
clear_current_export_policy=False,
ensure_share_already_exists=True)
new_share_data.update({'export_locations': export_locations})
share_updates.update({instance['id']: new_share_data})
# NOTE(dviroel): Nothing to update in snapshot instances since the
# provider location didn't change.
if not migration_id:
# 3. Release source share resources
for instance in share_instances:
self._delete_share(instance, src_vserver, src_client,
remove_export=True)
# NOTE(dviroel): source share server deletion must be triggered by
# the manager after finishing the migration
@ -1288,10 +1661,14 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
'share_updates': share_updates,
}
def _get_share_server_migration_id(self, dest_share_server):
return dest_share_server['backend_details'].get(
'migration_operation_id')
@na_utils.trace
def _migration_cancel_using_svm_dr(
self, source_share_server, dest_share_server, shares):
"""Cancel a share server migration that is using SVM DR."""
dm_session = data_motion.DataMotionSession()
dest_backend_name = share_utils.extract_host(dest_share_server['host'],
level='backend_name')
@ -1318,6 +1695,54 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
'and %(dest)s vservers.') % msg_args
raise exception.NetAppException(message=msg)
@na_utils.trace
def _migration_cancel_using_svm_migrate(self, migration_id,
dest_share_server):
"""Cancel a share server migration that is using SVM migrate."""
dest_client = self._get_client_for_svm_migrate(dest_share_server)
migration_information = dest_client.svm_migration_get(migration_id)
dest_ipspace_name = (
migration_information["destination"]["ipspace"]["name"])
try:
job = dest_client.svm_migrate_pause(migration_id)
job_id = self._get_job_uuid(job)
self._wait_for_operation_status(job_id, dest_client.get_job)
self._wait_for_operation_status(
migration_id, dest_client.svm_migration_get,
desired_status="migrate_paused")
except exception.NetAppException:
msg = _("Failed to pause the share server migration.")
raise exception.NetAppException(message=msg)
try:
job = dest_client.svm_migrate_cancel(migration_id)
job_id = self._get_job_uuid(job)
self._wait_for_operation_status(
job_id, dest_client.get_job)
except exception.NetAppException:
msg = _("Failed to cancel the share server migration.")
raise exception.NetAppException(message=msg)
if (dest_ipspace_name and dest_ipspace_name not in CLUSTER_IPSPACES
and not dest_client.ipspace_has_data_vservers(
dest_ipspace_name)):
dest_client.delete_ipspace(dest_ipspace_name)
return
@na_utils.trace
def share_server_migration_cancel(self, context, source_share_server,
dest_share_server, shares, snapshots):
"""Send the request to cancel the SVM migration."""
migration_id = self._get_share_server_migration_id(dest_share_server)
if migration_id:
self._migration_cancel_using_svm_migrate(
migration_id, dest_share_server)
return
self._migration_cancel_using_svm_dr(
source_share_server, dest_share_server, shares)
LOG.info('Share server migration was cancelled.')
def share_server_migration_get_progress(self, context, src_share_server,
@ -1520,3 +1945,37 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
LOG.info(msg)
return False
return True
@na_utils.trace
def _source_and_dest_host_supports_svm_migrate(
self, source_host, dest_host):
"""Checks the possibility to reuse server allocations in a migration"""
source_backend_name = share_utils.extract_host(
source_host, level='backend_name')
source_client = data_motion.get_client_for_backend(
source_backend_name, vserver_name=None)
dest_backend_name = share_utils.extract_host(
dest_host, level='backend_name')
dest_client = data_motion.get_client_for_backend(
dest_backend_name, vserver_name=None)
source_supports_svm_migrate = source_client.is_svm_migrate_supported()
dest_supports_svm_migrate = dest_client.is_svm_migrate_supported()
return source_supports_svm_migrate and dest_supports_svm_migrate
@na_utils.trace
def is_ip_reusage_supported_on_server_migration(
self, source_host, dest_host):
"""Checks the possibility to reuse server allocations in a migration"""
return self._source_and_dest_host_supports_svm_migrate(
source_host, dest_host)
@na_utils.trace
def server_migration_mechanism_can_reuse_share_server(
self, source_host, dest_host):
return self._source_and_dest_host_supports_svm_migrate(
source_host, dest_host)

View File

@ -87,6 +87,7 @@ VSERVER_INFO = {
'state': VSERVER_STATE,
}
SNAPMIRROR_POLICY_NAME = 'fake_snapmirror_policy'
SNAPMIRROR_POLICY_TYPE = 'async_mirror'
USER_NAME = 'fake_user'
@ -2742,12 +2743,14 @@ SNAPMIRROR_POLICY_GET_ITER_RESPONSE = etree.XML("""
<attributes-list>
<snapmirror-policy-info>
<policy-name>%(policy_name)s</policy-name>
<type>%(policy_type)s</type>
<vserver-name>%(vserver_name)s</vserver-name>
</snapmirror-policy-info>
</attributes-list>
<num-records>1</num-records>
</results>""" % {
'policy_name': SNAPMIRROR_POLICY_NAME,
'policy_type': SNAPMIRROR_POLICY_TYPE,
'vserver_name': VSERVER_NAME,
})
@ -2899,6 +2902,7 @@ FAKE_NA_ELEMENT = api.NaElement(etree.XML(FAKE_VOL_XML))
FAKE_INVOKE_DATA = 'somecontent'
FAKE_XML_STR = 'abc'
FAKE_REST_CALL_STR = 'def'
FAKE_API_NAME = 'volume-get-iter'


@ -19,6 +19,7 @@
Tests for NetApp API layer
"""
from oslo_serialization import jsonutils
from unittest import mock
import ddt
@ -26,6 +27,7 @@ import requests
from manila import exception
from manila.share.drivers.netapp.dataontap.client import api
from manila.share.drivers.netapp.dataontap.client import rest_endpoints
from manila import test
from manila.tests.share.drivers.netapp.dataontap.client import fakes as fake
@ -174,11 +176,11 @@ class NetAppApiElementTransTests(test.TestCase):
@ddt.ddt
class NetAppApiServerTests(test.TestCase):
class NetAppApiServerZapiClientTests(test.TestCase):
"""Test case for NetApp API server methods"""
def setUp(self):
self.root = api.NaServer('127.0.0.1')
super(NetAppApiServerTests, self).setUp()
self.root = api.NaServer('127.0.0.1').zapi_client
super(NetAppApiServerZapiClientTests, self).setUp()
@ddt.data(None, fake.FAKE_XML_STR)
def test_invoke_elem_value_error(self, na_element):
@ -262,3 +264,206 @@ class NetAppApiServerTests(test.TestCase):
expected_log_count = 2 if log else 0
self.assertEqual(expected_log_count, api.LOG.debug.call_count)
@ddt.ddt
class NetAppApiServerRestClientTests(test.TestCase):
"""Test case for NetApp API Rest server methods"""
def setUp(self):
self.root = api.NaServer('127.0.0.1').rest_client
super(NetAppApiServerRestClientTests, self).setUp()
def test_invoke_elem_value_error(self):
"""Tests whether invalid NaElement parameter causes error"""
na_element = fake.FAKE_REST_CALL_STR
self.assertRaises(ValueError, self.root.invoke_elem, na_element)
def _setup_mocks_for_invoke_element(self, mock_post_action):
fake_action_url = '/endpoint'
fake_base_url = '10.0.0.3/api'
self.mock_object(api, 'LOG')
self.root._session = fake.FAKE_HTTP_SESSION
self.root._session.post = mock_post_action
self.mock_object(self.root, '_build_session')
self.mock_object(
self.root, '_get_request_info', mock.Mock(
return_value=(self.root._session.post, fake_action_url)))
self.mock_object(
self.root, '_get_base_url', mock.Mock(return_value=fake_base_url))
return fake_base_url
def test_invoke_elem_http_error(self):
"""Tests handling of HTTPError"""
na_element = fake.FAKE_NA_ELEMENT
element_name = fake.FAKE_NA_ELEMENT.get_name()
self._setup_mocks_for_invoke_element(
mock_post_action=mock.Mock(side_effect=requests.HTTPError()))
self.assertRaises(api.NaApiError, self.root.invoke_elem,
na_element)
self.assertTrue(self.root._get_base_url.called)
self.root._get_request_info.assert_called_once_with(
element_name, self.root._session)
def test_invoke_elem_urlerror(self):
"""Tests handling of URLError"""
na_element = fake.FAKE_NA_ELEMENT
element_name = fake.FAKE_NA_ELEMENT.get_name()
self._setup_mocks_for_invoke_element(
mock_post_action=mock.Mock(side_effect=requests.URLRequired()))
self.assertRaises(exception.StorageCommunicationException,
self.root.invoke_elem,
na_element)
self.assertTrue(self.root._get_base_url.called)
self.root._get_request_info.assert_called_once_with(
element_name, self.root._session)
def test_invoke_elem_unknown_exception(self):
"""Tests handling of Unknown Exception"""
na_element = fake.FAKE_NA_ELEMENT
element_name = fake.FAKE_NA_ELEMENT.get_name()
self._setup_mocks_for_invoke_element(
mock_post_action=mock.Mock(side_effect=Exception))
raised_exception = self.assertRaises(api.NaApiError, self.root.invoke_elem,
na_element)
self.assertEqual('unknown', raised_exception.code)
self.assertTrue(self.root._get_base_url.called)
self.root._get_request_info.assert_called_once_with(
element_name, self.root._session)
@ddt.data(
{'trace_enabled': False,
'trace_pattern': '(.*)',
'log': False,
'query': None,
'body': {'fake_key': 'fake_value'}
},
{'trace_enabled': True,
'trace_pattern': '(?!(volume)).*',
'log': False,
'query': None,
'body': {'fake_key': 'fake_value'}
},
{'trace_enabled': True,
'trace_pattern': '(.*)',
'log': True,
'query': {'name': 'fake_name'},
'body': {'fake_key': 'fake_value'}
},
{'trace_enabled': True,
'trace_pattern': '^volume-(info|get-iter)$',
'log': True,
'query': {'type': 'fake_type'},
'body': {'fake_key': 'fake_value'}
}
)
@ddt.unpack
def test_invoke_elem_valid(self, trace_enabled, trace_pattern, log, query,
body):
"""Tests the method invoke_elem with valid parameters"""
self.root._session = fake.FAKE_HTTP_SESSION
response = mock.Mock()
response.content = 'fake_response'
self.root._session.post = mock.Mock(return_value=response)
na_element = fake.FAKE_NA_ELEMENT
element_name = fake.FAKE_NA_ELEMENT.get_name()
self.root._trace = trace_enabled
self.root._api_trace_pattern = trace_pattern
fake_base_url = '10.0.0.3/api'
fake_action_url = '/endpoint'
expected_url = fake_base_url + fake_action_url
api_args = {
"body": body,
"query": query
}
self.mock_object(api, 'LOG')
mock_build_session = self.mock_object(self.root, '_build_session')
mock_get_req_info = self.mock_object(
self.root, '_get_request_info', mock.Mock(
return_value=(self.root._session.post, fake_action_url)))
mock_add_query_params = self.mock_object(
self.root, '_add_query_params_to_url', mock.Mock(
return_value=fake_action_url))
mock_get_base_url = self.mock_object(
self.root, '_get_base_url', mock.Mock(return_value=fake_base_url))
mock_json_loads = self.mock_object(
jsonutils, 'loads', mock.Mock(return_value='fake_response'))
mock_json_dumps = self.mock_object(
jsonutils, 'dumps', mock.Mock(return_value=body))
result = self.root.invoke_elem(na_element, api_args=api_args)
self.assertEqual('fake_response', result)
expected_log_count = 2 if log else 0
self.assertEqual(expected_log_count, api.LOG.debug.call_count)
self.assertTrue(mock_build_session.called)
mock_get_req_info.assert_called_once_with(
element_name, self.root._session)
if query:
mock_add_query_params.assert_called_once_with(
fake_action_url, query)
self.assertTrue(mock_get_base_url.called)
self.root._session.post.assert_called_once_with(
expected_url, data=body)
mock_json_loads.assert_called_once_with('fake_response')
mock_json_dumps.assert_called_once_with(body)
@ddt.data(
('svm-migration-start', rest_endpoints.ENDPOINT_MIGRATIONS, 'post'),
('svm-migration-complete', rest_endpoints.ENDPOINT_MIGRATION_ACTIONS,
'patch')
)
@ddt.unpack
def test__get_request_info(self, api_name, expected_url, expected_method):
self.root._session = fake.FAKE_HTTP_SESSION
for http_method in ['post', 'get', 'put', 'delete', 'patch']:
setattr(self.root._session, http_method, mock.Mock())
method, url = self.root._get_request_info(api_name, self.root._session)
self.assertEqual(method, getattr(self.root._session, expected_method))
self.assertEqual(expected_url, url)
@ddt.data(
{'is_ipv6': False, 'protocol': 'http', 'port': '80'},
{'is_ipv6': False, 'protocol': 'https', 'port': '443'},
{'is_ipv6': True, 'protocol': 'http', 'port': '80'},
{'is_ipv6': True, 'protocol': 'https', 'port': '443'})
@ddt.unpack
def test__get_base_url(self, is_ipv6, protocol, port):
self.root._host = '10.0.0.3' if not is_ipv6 else 'FF01::1'
self.root._protocol = protocol
self.root._port = port
host_formatted_for_url = (
'[%s]' % self.root._host if is_ipv6 else self.root._host)
# example of the expected format: http://10.0.0.3:80/api/
expected_result = (
protocol + '://' + host_formatted_for_url + ':' + port + '/api/')
base_url = self.root._get_base_url()
self.assertEqual(expected_result, base_url)
@ddt.data(
{"name": "name_to_be_queried"},
{"type": "type_to_be_queried", "name": "name_to_be_queried"}
)
def test__add_query_params_to_url(self, query):
url = 'endpoint/to/get/data'
filters = "?"
for k, v in query.items():
filters += "%(key)s=%(value)s&" % {"key": k, "value": v}
expected_formatted_url = url + filters
formatted_url = self.root._add_query_params_to_url(url, query)
self.assertEqual(expected_formatted_url, formatted_url)
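A quick worked example of the URL shape this test asserts, including the trailing '&' left by the loop-based construction (taken from the test's own expected string, not from any documented client behavior):

query = {'type': 'type_to_be_queried', 'name': 'name_to_be_queried'}
url = 'endpoint/to/get/data'
filters = '?'
for key, value in query.items():
    filters += '%(key)s=%(value)s&' % {'key': key, 'value': value}
print(url + filters)
# endpoint/to/get/data?type=type_to_be_queried&name=name_to_be_queried&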


@ -42,6 +42,8 @@ class NetAppBaseClientTestCase(test.TestCase):
self.client = client_base.NetAppBaseClient(**fake.CONNECTION_INFO)
self.client.connection = mock.MagicMock()
self.connection = self.client.connection
self.connection.zapi_client = mock.Mock()
self.connection.rest_client = mock.Mock()
def test_get_ontapi_version(self):
version_response = netapp_api.NaElement(fake.ONTAPI_VERSION_RESPONSE)
@ -97,16 +99,23 @@ class NetAppBaseClientTestCase(test.TestCase):
self.assertEqual('tag_name', result)
def test_send_request(self):
@ddt.data(True, False)
def test_send_request(self, use_zapi):
element = netapp_api.NaElement('fake-api')
self.client.send_request('fake-api')
self.client.send_request('fake-api', use_zapi=use_zapi)
self.assertEqual(
element.to_string(),
self.connection.invoke_successfully.call_args[0][0].to_string())
self.assertTrue(self.connection.invoke_successfully.call_args[0][1])
self.assertTrue(
self.connection.invoke_successfully.call_args[1][
'enable_tunneling'])
self.assertEqual(
use_zapi,
self.connection.invoke_successfully.call_args[1][
'use_zapi'])
def test_send_request_no_tunneling(self):
@ -117,20 +126,32 @@ class NetAppBaseClientTestCase(test.TestCase):
self.assertEqual(
element.to_string(),
self.connection.invoke_successfully.call_args[0][0].to_string())
self.assertFalse(self.connection.invoke_successfully.call_args[0][1])
self.assertFalse(
self.connection.invoke_successfully.call_args[1][
'enable_tunneling'])
def test_send_request_with_args(self):
@ddt.data(True, False)
def test_send_request_with_args(self, use_zapi):
element = netapp_api.NaElement('fake-api')
api_args = {'arg1': 'data1', 'arg2': 'data2'}
element.translate_struct(api_args)
self.client.send_request('fake-api', api_args=api_args)
self.client.send_request('fake-api', api_args=api_args,
use_zapi=use_zapi)
self.assertEqual(
element.to_string(),
self.connection.invoke_successfully.call_args[0][0].to_string())
self.assertTrue(self.connection.invoke_successfully.call_args[0][1])
self.assertEqual(
api_args, self.connection.invoke_successfully.call_args[1][
'api_args'])
self.assertTrue(
self.connection.invoke_successfully.call_args[1][
'enable_tunneling'])
self.assertEqual(
use_zapi,
self.connection.invoke_successfully.call_args[1][
'use_zapi'])
def test_get_licenses(self):


@ -7588,13 +7588,18 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.client.send_request.assert_called_once_with(
'snapmirror-policy-delete', expected_api_args)
def test_get_snapmirror_policies(self):
@ddt.data('type', 'policy-name')
def test_get_snapmirror_policies(self, desired_attribute):
api_response = netapp_api.NaElement(
fake.SNAPMIRROR_POLICY_GET_ITER_RESPONSE)
self.mock_object(self.client, 'send_iter_request',
mock.Mock(return_value=api_response))
result_elem = (
[fake.SNAPMIRROR_POLICY_TYPE]
if desired_attribute == 'type' else [fake.SNAPMIRROR_POLICY_NAME])
result = self.client.get_snapmirror_policies(fake.VSERVER_NAME)
result = self.client.get_snapmirror_policies(
fake.VSERVER_NAME, desired_attribute=desired_attribute)
expected_api_args = {
'query': {
@ -7605,13 +7610,14 @@ class NetAppClientCmodeTestCase(test.TestCase):
'desired-attributes': {
'snapmirror-policy-info': {
'policy-name': None,
'type': None,
},
},
}
self.client.send_iter_request.assert_called_once_with(
'snapmirror-policy-get-iter', expected_api_args)
self.assertEqual([fake.SNAPMIRROR_POLICY_NAME], result)
self.assertEqual(result_elem, result)
@ddt.data(True, False, None)
def test_start_vserver(self, force):
@ -8220,3 +8226,249 @@ class NetAppClientCmodeTestCase(test.TestCase):
self.assertEqual(expected, result)
self.client.send_iter_request.assert_called_once_with(
'fpolicy-policy-status-get-iter', expected_args)
@ddt.data(
{"body": {"fake_body_key": "fake_value"},
"headers": {"fake_header_key": "fake_header_value"},
"query": {},
"url_params": {"key_to_add_in_url": "value_to_add_in_url"}
},
{"body": {},
"headers": {"fake_header_key": "fake_header_value"},
"query": {"key_to_search": "value_to_search"},
"url_params": {"key_to_add_in_url": "value_to_add_in_url"}
},
)
@ddt.unpack
def test__format_request(self, body, headers, query, url_params):
expected_result = {
"body": body,
"headers": headers,
"query": query,
"url_params": url_params
}
result = self.client._format_request(
body, headers=headers, query=query, url_params=url_params)
for k, v in expected_result.items():
self.assertIn(k, result)
self.assertEqual(result.get(k), v)
@ddt.data(
{"dest_ipspace": None, "check_only": True,
"auto_source_cleanup": False},
{"dest_ipspace": "fake_dest_ipspace", "check_only": False,
"auto_source_cleanup": True},
)
@ddt.unpack
def test_svm_migration_start(self, dest_ipspace, check_only,
auto_source_cleanup):
fake_job_id = 'job_id'
migrate_start_api_response = {
"job": {
"uuid": fake_job_id
}
}
api_args = {
"auto_cutover": False,
"auto_source_cleanup": auto_source_cleanup,
"check_only": check_only,
"source": {
"cluster": {"name": fake.CLUSTER_NAME},
"svm": {"name": fake.VSERVER_NAME},
},
"destination": {
"volume_placement": {
"aggregates": [fake.SHARE_AGGREGATE_NAME],
},
},
}
if dest_ipspace:
ipspace_data = {
"ipspace": {"uuid": dest_ipspace}
}
api_args['destination'].update(ipspace_data)
self.mock_object(self.client, '_format_request',
mock.Mock(return_value=api_args))
self.mock_object(self.client, 'send_request',
mock.Mock(return_value=migrate_start_api_response))
result = self.client.svm_migration_start(
fake.CLUSTER_NAME, fake.VSERVER_NAME, [fake.SHARE_AGGREGATE_NAME],
dest_ipspace=dest_ipspace, check_only=check_only,
auto_source_cleanup=auto_source_cleanup)
self.client._format_request.assert_called_once_with(api_args)
self.client.send_request.assert_called_once_with(
'svm-migration-start', api_args=api_args, use_zapi=False)
self.assertEqual(result, migrate_start_api_response)
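For reference, the request body shape the test above builds for 'svm-migration-start' (values are the test's fakes; the destination 'ipspace' entry is attached only when a dedicated ipspace exists):

svm_migration_start_body = {
    'auto_cutover': False,
    'auto_source_cleanup': True,
    'check_only': False,
    'source': {
        'cluster': {'name': 'fake_cluster'},
        'svm': {'name': 'fake_vserver'},
    },
    'destination': {
        'volume_placement': {'aggregates': ['fake_aggr_1']},
        'ipspace': {'uuid': 'fake_dest_ipspace'},  # optional
    },
}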
@ddt.data({"check_only": False}, {"check_only": True})
def test_svm_migration_start_failed(self, check_only):
api_args = {}
self.mock_object(self.client, '_format_request',
mock.Mock(return_value=api_args))
self.mock_object(
self.client, 'send_request',
mock.Mock(side_effect=netapp_api.NaApiError(message='fake')))
self.assertRaises(
exception.NetAppException,
self.client.svm_migration_start,
fake.CLUSTER_NAME, fake.VSERVER_NAME,
[fake.SHARE_AGGREGATE_NAME],
check_only=check_only
)
def test_svm_migrate_complete(self):
migration_id = 'fake_migration_id'
request = {
'action': 'cutover'
}
expected_url_params = {
'svm_migration_id': migration_id
}
job_info = {
'state': 'success'
}
self.mock_object(self.client, '_format_request',
mock.Mock(return_value=request))
self.mock_object(self.client, 'send_request',
mock.Mock(return_value=job_info))
self.client.svm_migrate_complete(migration_id)
self.client._format_request.assert_called_once_with(
request, url_params=expected_url_params)
self.client.send_request.assert_called_once_with(
'svm-migration-complete', api_args=request, use_zapi=False)
def test_get_job(self):
request = {}
job_uuid = 'fake_job_uuid'
url_params = {
'job_uuid': job_uuid
}
job_info = {
'state': 'success'
}
self.mock_object(self.client, '_format_request',
mock.Mock(return_value=request))
self.mock_object(self.client, 'send_request',
mock.Mock(return_value=job_info))
result = self.client.get_job(job_uuid)
self.assertEqual(job_info, result)
self.client._format_request.assert_called_once_with(
request, url_params=url_params)
self.client.send_request.assert_called_once_with(
'get-job', api_args=request, use_zapi=False)
def test_svm_migrate_cancel(self):
request = {}
migration_id = 'fake_migration_uuid'
url_params = {
"svm_migration_id": migration_id
}
job_info = {
'state': 'success'
}
self.mock_object(self.client, '_format_request',
mock.Mock(return_value=request))
self.mock_object(self.client, 'send_request',
mock.Mock(return_value=job_info))
result = self.client.svm_migrate_cancel(migration_id)
self.assertEqual(job_info, result)
self.client._format_request.assert_called_once_with(
request, url_params=url_params)
self.client.send_request.assert_called_once_with(
'svm-migration-cancel', api_args=request, use_zapi=False)
def test_svm_migration_get(self):
request = {}
migration_id = 'fake_migration_uuid'
url_params = {
"svm_migration_id": migration_id
}
job_info = {
'state': 'success'
}
self.mock_object(self.client, '_format_request',
mock.Mock(return_value=request))
self.mock_object(self.client, 'send_request',
mock.Mock(return_value=job_info))
result = self.client.svm_migration_get(migration_id)
self.assertEqual(job_info, result)
self.client._format_request.assert_called_once_with(
request, url_params=url_params)
self.client.send_request.assert_called_once_with(
'svm-migration-get', api_args=request, use_zapi=False)
def test_svm_migrate_pause(self):
request = {
"action": "pause"
}
migration_id = 'fake_migration_uuid'
url_params = {
"svm_migration_id": migration_id
}
job_info = {
'state': 'success'
}
self.mock_object(self.client, '_format_request',
mock.Mock(return_value=request))
self.mock_object(self.client, 'send_request',
mock.Mock(return_value=job_info))
result = self.client.svm_migrate_pause(migration_id)
self.assertEqual(job_info, result)
self.client._format_request.assert_called_once_with(
request, url_params=url_params)
self.client.send_request.assert_called_once_with(
'svm-migration-pause', api_args=request, use_zapi=False)
def test_migration_check_job_state(self):
job_id = 'job_id'
fake_job = {
"state": "success"
}
self.mock_object(self.client, 'get_job',
mock.Mock(return_value=fake_job))
result = self.client.get_migration_check_job_state(
job_id
)
self.assertEqual(result, fake_job.get("state"))
self.client.get_job.assert_called_once_with(job_id)
@ddt.data(netapp_api.ENFS_V4_0_ENABLED_MIGRATION_FAILURE,
netapp_api.EVSERVER_MIGRATION_TO_NON_AFF_CLUSTER)
def test_migration_check_job_state_failed(self, error_code):
job_id = 'job_id'
self.mock_object(
self.client, 'get_job',
mock.Mock(side_effect=exception.NetAppException(code=error_code)))
self.assertRaises(
exception.NetAppException,
self.client.get_migration_check_job_state,
job_id
)
self.client.get_job.assert_called_once_with(job_id)


@ -1774,13 +1774,22 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.assertEqual(not_compatible, result)
def _init_mocks_for_svm_dr_check_compatibility(
self, src_svm_dr_supported=True, dest_svm_dr_supported=True,
check_capacity_result=True):
self.mock_object(self.mock_src_client, 'is_svm_dr_supported',
mock.Mock(return_value=src_svm_dr_supported))
self.mock_object(self.mock_dest_client, 'is_svm_dr_supported',
mock.Mock(return_value=dest_svm_dr_supported))
self.mock_object(self.library, '_check_capacity_compatibility',
mock.Mock(return_value=check_capacity_result))
def _configure_mocks_share_server_migration_check_compatibility(
self, have_cluster_creds=True,
src_cluster_name=fake.CLUSTER_NAME,
dest_cluster_name=fake.CLUSTER_NAME_2,
src_svm_dr_support=True, dest_svm_dr_support=True,
check_capacity_result=True,
pools=fake.POOLS):
pools=fake.POOLS, is_svm_dr=True, failure_scenario=False):
migration_method = 'svm_dr' if is_svm_dr else 'svm_migrate'
self.library._have_cluster_creds = have_cluster_creds
self.mock_object(self.library, '_get_vserver',
mock.Mock(return_value=(self.fake_src_vserver,
@ -1791,14 +1800,11 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock.Mock(return_value=dest_cluster_name))
self.mock_object(data_motion, 'get_client_for_backend',
mock.Mock(return_value=self.mock_dest_client))
self.mock_object(self.mock_src_client, 'is_svm_dr_supported',
mock.Mock(return_value=src_svm_dr_support))
self.mock_object(self.mock_dest_client, 'is_svm_dr_supported',
mock.Mock(return_value=dest_svm_dr_support))
self.mock_object(self.library, '_check_for_migration_support',
mock.Mock(return_value=(
migration_method, not failure_scenario)))
self.mock_object(self.library, '_get_pools',
mock.Mock(return_value=pools))
self.mock_object(self.library, '_check_capacity_compatibility',
mock.Mock(return_value=check_capacity_result))
def test_share_server_migration_check_compatibility_dest_with_pool(
self):
@ -1832,30 +1838,40 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.assertTrue(self.mock_src_client.get_cluster_name.called)
self.assertTrue(self.client.get_cluster_name.called)
def test_share_server_migration_check_compatibility_svm_dr_not_supported(
self):
not_compatible = fake.SERVER_MIGRATION_CHECK_NOT_COMPATIBLE
self._configure_mocks_share_server_migration_check_compatibility(
dest_svm_dr_support=False,
)
@ddt.data(
{'src_svm_dr_supported': False,
'dest_svm_dr_supported': False,
'check_capacity_result': False
},
{'src_svm_dr_supported': True,
'dest_svm_dr_supported': True,
'check_capacity_result': False
},
)
@ddt.unpack
def test__check_compatibility_svm_dr_not_compatible(
self, src_svm_dr_supported, dest_svm_dr_supported,
check_capacity_result):
server_total_size = (fake.SHARE_REQ_SPEC.get('shares_size', 0) +
fake.SHARE_REQ_SPEC.get('snapshots_size', 0))
result = self.library.share_server_migration_check_compatibility(
None, self.fake_src_share_server,
self.fake_dest_share_server['host'],
None, None, None)
self._init_mocks_for_svm_dr_check_compatibility(
src_svm_dr_supported=src_svm_dr_supported,
dest_svm_dr_supported=dest_svm_dr_supported,
check_capacity_result=check_capacity_result)
self.assertEqual(not_compatible, result)
self.library._get_vserver.assert_called_once_with(
self.fake_src_share_server,
backend_name=self.fake_src_backend_name
)
self.assertTrue(self.mock_src_client.get_cluster_name.called)
self.assertTrue(self.client.get_cluster_name.called)
data_motion.get_client_for_backend.assert_called_once_with(
self.fake_dest_backend_name, vserver_name=None
)
method, result = self.library._check_compatibility_using_svm_dr(
self.mock_src_client, self.mock_dest_client, fake.SHARE_REQ_SPEC,
fake.POOLS)
self.assertEqual(method, 'svm_dr')
self.assertEqual(result, False)
self.assertTrue(self.mock_src_client.is_svm_dr_supported.called)
self.assertTrue(self.mock_dest_client.is_svm_dr_supported.called)
if check_capacity_result and not src_svm_dr_supported:
self.assertFalse(self.mock_dest_client.is_svm_dr_supported.called)
self.library._check_capacity_compatibility.assert_called_once_with(
fake.POOLS, True, server_total_size)
def test_share_server_migration_check_compatibility_different_sec_service(
self):
@ -1882,8 +1898,6 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
data_motion.get_client_for_backend.assert_called_once_with(
self.fake_dest_backend_name, vserver_name=None
)
self.assertTrue(self.mock_src_client.is_svm_dr_supported.called)
self.assertTrue(self.mock_dest_client.is_svm_dr_supported.called)
@ddt.data('netapp_flexvol_encryption', 'revert_to_snapshot_support')
def test_share_server_migration_check_compatibility_invalid_capabilities(
@ -1912,41 +1926,50 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
data_motion.get_client_for_backend.assert_called_once_with(
self.fake_dest_backend_name, vserver_name=None
)
self.assertTrue(self.mock_src_client.is_svm_dr_supported.called)
self.assertTrue(self.mock_dest_client.is_svm_dr_supported.called)
def test_share_server_migration_check_compatibility_capacity_false(
self):
not_compatible = fake.SERVER_MIGRATION_CHECK_NOT_COMPATIBLE
self._configure_mocks_share_server_migration_check_compatibility(
check_capacity_result=False
)
@ddt.data((True, "svm_migrate"), (False, "svm_dr"))
@ddt.unpack
def test__check_for_migration_support(
self, svm_migrate_supported, expected_migration_method):
mock_dest_is_svm_migrate_supported = self.mock_object(
self.mock_dest_client, 'is_svm_migrate_supported',
mock.Mock(return_value=svm_migrate_supported))
mock_src_is_svm_migrate_supported = self.mock_object(
self.mock_src_client, 'is_svm_migrate_supported',
mock.Mock(return_value=svm_migrate_supported))
mock_find_matching_aggregates = self.mock_object(
self.library, '_find_matching_aggregates',
mock.Mock(return_value=fake.AGGREGATES))
mock_get_vserver_name = self.mock_object(
self.library, '_get_vserver_name',
mock.Mock(return_value=fake.VSERVER1))
mock_svm_migration_check_svm_mig = self.mock_object(
self.library, '_check_compatibility_for_svm_migrate',
mock.Mock(return_value=True))
mock_svm_migration_check_svm_dr = self.mock_object(
self.library, '_check_compatibility_using_svm_dr',
mock.Mock(side_effect=[('svm_dr', True)]))
result = self.library.share_server_migration_check_compatibility(
None, self.fake_src_share_server,
self.fake_dest_share_server['host'],
fake.SHARE_NETWORK, fake.SHARE_NETWORK,
fake.SERVER_MIGRATION_REQUEST_SPEC)
migration_method, result = self.library._check_for_migration_support(
self.mock_src_client, self.mock_dest_client, fake.SHARE_SERVER,
fake.SHARE_REQ_SPEC, fake.CLUSTER_NAME, fake.POOLS)
self.assertEqual(not_compatible, result)
self.library._get_vserver.assert_called_once_with(
self.fake_src_share_server,
backend_name=self.fake_src_backend_name
)
self.assertTrue(self.mock_src_client.get_cluster_name.called)
self.assertTrue(self.client.get_cluster_name.called)
data_motion.get_client_for_backend.assert_called_once_with(
self.fake_dest_backend_name, vserver_name=None
)
self.assertTrue(self.mock_src_client.is_svm_dr_supported.called)
self.assertTrue(self.mock_dest_client.is_svm_dr_supported.called)
total_size = (fake.SERVER_MIGRATION_REQUEST_SPEC['shares_size'] +
fake.SERVER_MIGRATION_REQUEST_SPEC['snapshots_size'])
self.library._check_capacity_compatibility.assert_called_once_with(
fake.POOLS,
self.library.configuration.max_over_subscription_ratio > 1,
total_size
)
self.assertIs(True, result)
self.assertEqual(migration_method, expected_migration_method)
mock_dest_is_svm_migrate_supported.assert_called_once()
if svm_migrate_supported:
mock_src_is_svm_migrate_supported.assert_called_once()
mock_find_matching_aggregates.assert_called_once()
mock_get_vserver_name.assert_called_once_with(
fake.SHARE_SERVER['id'])
mock_svm_migration_check_svm_mig.assert_called_once_with(
fake.CLUSTER_NAME, fake.VSERVER1, fake.SHARE_SERVER,
fake.AGGREGATES, self.mock_dest_client)
else:
mock_svm_migration_check_svm_dr.assert_called_once_with(
self.mock_src_client, self.mock_dest_client,
fake.SHARE_REQ_SPEC, fake.POOLS)
def test_share_server_migration_check_compatibility_compatible(self):
compatible = {
@ -1958,7 +1981,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
'migration_get_progress': False,
'share_network_id': fake.SHARE_NETWORK['id']
}
self._configure_mocks_share_server_migration_check_compatibility()
self._configure_mocks_share_server_migration_check_compatibility(
is_svm_dr=True)
result = self.library.share_server_migration_check_compatibility(
None, self.fake_src_share_server,
@ -1976,23 +2000,107 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
data_motion.get_client_for_backend.assert_called_once_with(
self.fake_dest_backend_name, vserver_name=None
)
self.assertTrue(self.mock_src_client.is_svm_dr_supported.called)
self.assertTrue(self.mock_dest_client.is_svm_dr_supported.called)
total_size = (fake.SERVER_MIGRATION_REQUEST_SPEC['shares_size'] +
fake.SERVER_MIGRATION_REQUEST_SPEC['snapshots_size'])
self.library._check_capacity_compatibility.assert_called_once_with(
fake.POOLS,
self.library.configuration.max_over_subscription_ratio > 1,
total_size
def test__get_job_uuid(self):
fake_uuid = 'fake_uuid'
fake_job = {
"job": {
"uuid": fake_uuid,
"description": "fake_description"
}
}
self.assertEqual(
self.library._get_job_uuid(fake_job),
fake_uuid
)
def test__wait_for_operation_status(self):
fake_job_id = 'fake_job_id'
returned_jobs = [
{'status': 'fake_status_1'},
{'status': 'fake_status_2'},
{'status': 'success'},
]
self.mock_object(self.mock_dest_client, 'get_job',
mock.Mock(side_effect=returned_jobs))
self.library._wait_for_operation_status(
fake_job_id, self.mock_dest_client.get_job
)
self.assertEqual(self.mock_dest_client.get_job.call_count, 3)
def test__wait_for_operation_status_error(self):
fake_job_id = 'fake_job_id'
returned_jobs = [
{'status': 'fake_status_1'},
{'status': 'fake_status_2'},
{'status': 'error', 'message': 'failure'},
]
self.mock_object(self.mock_dest_client, 'get_job',
mock.Mock(side_effect=returned_jobs))
self.assertRaises(
exception.NetAppException,
self.library._wait_for_operation_status,
fake_job_id,
self.mock_dest_client.get_job
)
@ddt.data(
{'src_supports_svm_migrate': True, 'dest_supports_svm_migrate': True},
{'src_supports_svm_migrate': True, 'dest_supports_svm_migrate': False},
{'src_supports_svm_migrate': False, 'dest_supports_svm_migrate': True},
{'src_supports_svm_migrate': False, 'dest_supports_svm_migrate': False}
)
@ddt.unpack
def test_share_server_migration_start(self, src_supports_svm_migrate,
dest_supports_svm_migrate):
fake_migration_data = {'fake_migration_key': 'fake_migration_value'}
self.mock_object(
self.library, '_get_vserver',
mock.Mock(
side_effect=[(self.fake_src_vserver, self.mock_src_client)]))
self.mock_object(data_motion, 'get_client_for_backend',
mock.Mock(return_value=self.mock_dest_client))
mock_start_using_svm_migrate = self.mock_object(
self.library, '_migration_start_using_svm_migrate',
mock.Mock(return_value=fake_migration_data))
mock_start_using_svm_dr = self.mock_object(
self.library, '_migration_start_using_svm_dr',
mock.Mock(return_value=fake_migration_data))
self.mock_src_client.is_svm_migrate_supported.return_value = (
src_supports_svm_migrate)
self.mock_dest_client.is_svm_migrate_supported.return_value = (
dest_supports_svm_migrate)
src_and_dest_support_svm_migrate = all(
[src_supports_svm_migrate, dest_supports_svm_migrate])
result = self.library.share_server_migration_start(
None, self.fake_src_share_server, self.fake_dest_share_server,
[fake.SHARE_INSTANCE], [], False)
self.library._get_vserver.assert_called_once_with(
share_server=self.fake_src_share_server,
backend_name=self.fake_src_backend_name)
if src_and_dest_support_svm_migrate:
mock_start_using_svm_migrate.assert_called_once_with(
None, self.fake_src_share_server, self.fake_dest_share_server,
self.mock_src_client, self.mock_dest_client, False)
else:
mock_start_using_svm_dr.assert_called_once_with(
self.fake_src_share_server, self.fake_dest_share_server
)
self.assertEqual(result, fake_migration_data)
@ddt.data({'vserver_peered': True, 'src_cluster': fake.CLUSTER_NAME},
{'vserver_peered': False, 'src_cluster': fake.CLUSTER_NAME},
{'vserver_peered': False,
'src_cluster': fake.CLUSTER_NAME_2})
{'vserver_peered': False, 'src_cluster': fake.CLUSTER_NAME_2})
@ddt.unpack
def test_share_server_migration_start(self, vserver_peered,
src_cluster):
def test__migration_start_using_svm_dr(self, vserver_peered, src_cluster):
dest_cluster = fake.CLUSTER_NAME
dm_session_mock = mock.Mock()
self.mock_object(self.library, '_get_vserver',
@ -2009,9 +2117,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.mock_object(data_motion, "DataMotionSession",
mock.Mock(return_value=dm_session_mock))
self.library.share_server_migration_start(
None, self.fake_src_share_server, self.fake_dest_share_server,
[fake.SHARE_INSTANCE], [])
self.library._migration_start_using_svm_dr(
self.fake_src_share_server, self.fake_dest_share_server)
self.library._get_vserver.assert_has_calls([
mock.call(share_server=self.fake_src_share_server,
@ -2061,10 +2168,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
side_effect=exception.NetAppException(message='fake')))
self.assertRaises(exception.NetAppException,
self.library.share_server_migration_start,
None, self.fake_src_share_server,
self.fake_dest_share_server,
[fake.SHARE_INSTANCE], [])
self.library._migration_start_using_svm_dr,
self.fake_src_share_server,
self.fake_dest_share_server)
self.library._get_vserver.assert_has_calls([
mock.call(share_server=self.fake_src_share_server,
@ -2085,6 +2191,149 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.fake_src_share_server, self.fake_dest_share_server
)
@ddt.data(
{'network_change_during_migration': True, 'source_cleanup': True},
{'network_change_during_migration': False, 'source_cleanup': False})
@ddt.unpack
def test__migration_start_using_svm_migrate(
self, network_change_during_migration, source_cleanup):
self.fake_src_share_server['share_network_subnet_id'] = 'fake_sns_id'
self.fake_dest_share_server['share_network_subnet_id'] = 'fake_sns_id'
node_name = fake.CLUSTER_NODES[0]
fake_migration_id = 'fake_migration_id'
fake_job_id = 'fake_job_id'
migration_job = {
'job': {'uuid': fake_job_id}
}
expected_server_info = {
'backend_details': {
'migration_operation_id': fake_migration_id
}
}
fake_job = {
"description": "POST /api/endpoint/%s" % fake_migration_id
}
server_to_get_network_info = (
self.fake_dest_share_server
if network_change_during_migration else self.fake_src_share_server)
if network_change_during_migration:
self.fake_dest_share_server['share_network_subnet_id'] = (
'different_sns_id')
segmentation_id = (
server_to_get_network_info['network_allocations'][0][
'segmentation_id'])
network_info = {
'network_allocations':
server_to_get_network_info['network_allocations'],
'neutron_subnet_id':
server_to_get_network_info['share_network_subnet'][
'neutron_subnet_id']
}
mock_list_cluster_nodes = self.mock_object(
self.library._client, 'list_cluster_nodes',
mock.Mock(return_value=fake.CLUSTER_NODES))
mock_get_data_port = self.mock_object(
self.library, '_get_node_data_port',
mock.Mock(return_value=fake.NODE_DATA_PORT))
mock_get_ipspace = self.mock_object(
self.library._client, 'get_ipspace_name_for_vlan_port',
mock.Mock(return_value=fake.IPSPACE))
mock_create_port = self.mock_object(
self.library, '_create_port_and_broadcast_domain')
mock_ensure_cluster_has_policy = self.mock_object(
self.library, '_ensure_cluster_has_migrate_policy')
mock_get_ipspaces = self.mock_object(
self.mock_dest_client, 'get_ipspaces',
mock.Mock(return_value=[{'uuid': fake.IPSPACE_ID}]))
mock_get_vserver_name = self.mock_object(
self.library, '_get_vserver_name',
mock.Mock(return_value=fake.VSERVER1))
mock_get_cluster_name = self.mock_object(
self.mock_src_client, 'get_cluster_name',
mock.Mock(return_value=fake.CLUSTER_NAME))
mock_get_aggregates = self.mock_object(
self.library, '_find_matching_aggregates',
mock.Mock(return_value=fake.AGGREGATES))
mock_svm_migration_start = self.mock_object(
self.mock_dest_client, 'svm_migration_start',
mock.Mock(return_value=migration_job))
mock_get_job = self.mock_object(
self.mock_dest_client, 'get_job', mock.Mock(return_value=fake_job))
server_info = self.library._migration_start_using_svm_migrate(
None, self.fake_src_share_server, self.fake_dest_share_server,
self.mock_src_client, self.mock_dest_client, source_cleanup)
self.assertTrue(mock_list_cluster_nodes.called)
mock_get_data_port.assert_called_once_with(node_name)
mock_get_ipspace.assert_called_once_with(
node_name, fake.NODE_DATA_PORT, segmentation_id)
mock_create_port.assert_called_once_with(
fake.IPSPACE, network_info)
mock_ensure_cluster_has_policy.assert_has_calls(
[mock.call(self.mock_src_client), mock.call(self.mock_dest_client)]
)
mock_get_ipspaces.assert_called_once_with(ipspace_name=fake.IPSPACE)
mock_get_vserver_name.assert_called_once_with(
self.fake_src_share_server['id'])
self.assertTrue(mock_get_cluster_name.called)
mock_svm_migration_start.assert_called_once_with(
fake.CLUSTER_NAME, fake.VSERVER1, fake.AGGREGATES,
dest_ipspace=fake.IPSPACE_ID, auto_source_cleanup=source_cleanup)
self.assertTrue(mock_get_aggregates.called)
self.assertEqual(expected_server_info, server_info)
mock_get_job.assert_called_once_with(fake_job_id)
@ddt.data(
['migrate', 'async_mirror'], ['async_mirror'],)
def test__ensure_cluster_has_migrate_policy(self, policy_list):
fake_policy_name = 'cluster_policy_migrate'
mock_get_cluster_name = self.mock_object(
self.mock_src_client, 'get_cluster_name',
mock.Mock(return_value=fake.CLUSTER_NAME))
mock_get_snapmirror_policies = self.mock_object(
self.mock_src_client, 'get_snapmirror_policies',
mock.Mock(return_value=policy_list))
mock_get_policy_name = self.mock_object(
self.library, '_get_backend_snapmirror_policy_name_svm',
mock.Mock(return_value=fake_policy_name))
mock_create_policy = self.mock_object(
self.mock_src_client, 'create_snapmirror_policy')
self.library._ensure_cluster_has_migrate_policy(self.mock_src_client)
mock_get_cluster_name.assert_called_once()
mock_get_snapmirror_policies.assert_called_once_with(
fake.CLUSTER_NAME, desired_attribute='type')
if 'migrate' not in policy_list:
mock_get_policy_name.assert_called_once_with(fake.CLUSTER_NAME)
mock_create_policy.assert_called_once_with(
fake_policy_name, type='migrate', discard_network_info=False,
preserve_snapshots=False)
def test__get_client_for_svm_migrate(self):
mock_extract_host = self.mock_object(
share_utils, 'extract_host',
mock.Mock(return_value=fake.BACKEND_NAME))
mock_get_client = self.mock_object(
data_motion, 'get_client_for_backend',
mock.Mock(return_value=self.fake_src_share_server))
returned_client = self.library._get_client_for_svm_migrate(
self.fake_src_share_server)
mock_extract_host.assert_called_once_with(
self.fake_src_share_server['host'], level='backend_name')
mock_get_client.assert_called_once_with(fake.BACKEND_NAME)
self.assertEqual(returned_client, self.fake_src_share_server)
def test__get_snapmirror_svm(self):
dm_session_mock = mock.Mock()
self.mock_object(data_motion, "DataMotionSession",
@ -2158,56 +2407,31 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.fake_src_share_server, self.fake_dest_share_server
)
def test_share_server_migration_complete(self):
def test__migration_complete_svm_dr(self):
dm_session_mock = mock.Mock()
self.mock_object(self.library, '_get_vserver',
mock.Mock(return_value=(self.fake_dest_vserver,
self.mock_dest_client)))
self.mock_object(data_motion, "DataMotionSession",
mock.Mock(return_value=dm_session_mock))
self.mock_object(self.library, '_get_vserver',
mock.Mock(side_effect=[
(self.fake_src_vserver, self.mock_src_client),
(self.fake_dest_vserver, self.mock_dest_client)]))
fake_ipspace = 'fake_ipspace'
self.mock_object(self.mock_dest_client, 'get_vserver_ipspace',
mock.Mock(return_value=fake_ipspace))
fake_share_name = self.library._get_backend_share_name(
fake.SHARE_INSTANCE['id'])
self.mock_object(self.library, '_setup_network_for_vserver')
fake_volume = copy.deepcopy(fake.CLIENT_GET_VOLUME_RESPONSE)
self.mock_object(self.mock_dest_client, 'get_volume',
mock.Mock(return_value=fake_volume))
self.mock_object(self.library, '_create_export',
mock.Mock(return_value=fake.NFS_EXPORTS))
self.mock_object(self.library, '_delete_share')
mock_update_share_attrs = self.mock_object(
self.library, '_update_share_attributes_after_server_migration')
result = self.library.share_server_migration_complete(
None,
self.fake_src_share_server,
self.fake_dest_share_server,
[fake.SHARE_INSTANCE], [],
fake.NETWORK_INFO
self.library._share_server_migration_complete_svm_dr(
self.fake_src_share_server, self.fake_dest_share_server,
self.fake_src_vserver, self.mock_src_client,
[fake.SHARE_INSTANCE], fake.NETWORK_INFO
)
expected_share_updates = {
fake.SHARE_INSTANCE['id']: {
'export_locations': fake.NFS_EXPORTS,
'pool_name': fake_volume['aggregate']
}
}
expected_result = {
'share_updates': expected_share_updates,
}
self.assertEqual(expected_result, result)
self.library._get_vserver.assert_called_once_with(
share_server=self.fake_dest_share_server,
backend_name=self.fake_dest_backend_name
)
dm_session_mock.update_snapmirror_svm.assert_called_once_with(
self.fake_src_share_server, self.fake_dest_share_server
)
self.library._get_vserver.assert_has_calls([
mock.call(share_server=self.fake_src_share_server,
backend_name=self.fake_src_backend_name),
mock.call(share_server=self.fake_dest_share_server,
backend_name=self.fake_dest_backend_name)])
quiesce_break_mock = dm_session_mock.quiesce_and_break_snapmirror_svm
quiesce_break_mock.assert_called_once_with(
self.fake_src_share_server, self.fake_dest_share_server
@ -2234,14 +2458,103 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
dm_session_mock.delete_snapmirror_svm.assert_called_once_with(
self.fake_src_share_server, self.fake_dest_share_server
)
@ddt.data(
{'is_svm_dr': True, 'network_change': True},
{'is_svm_dr': False, 'network_change': True},
{'is_svm_dr': True, 'network_change': False},
{'is_svm_dr': False, 'network_change': False},
)
@ddt.unpack
def test_share_server_migration_complete(self, is_svm_dr, network_change):
self.mock_object(self.library, '_get_vserver',
mock.Mock(side_effect=[
(self.fake_src_vserver, self.mock_src_client),
(self.fake_dest_vserver, self.mock_dest_client)]))
mock_complete_svm_migrate = self.mock_object(
self.library, '_share_server_migration_complete_svm_migrate')
mock_complete_svm_dr = self.mock_object(
self.library, '_share_server_migration_complete_svm_dr')
fake_share_name = self.library._get_backend_share_name(
fake.SHARE_INSTANCE['id'])
fake_volume = copy.deepcopy(fake.CLIENT_GET_VOLUME_RESPONSE)
self.mock_object(self.mock_dest_client, 'get_volume',
mock.Mock(return_value=fake_volume))
self.mock_object(self.library, '_create_export',
mock.Mock(return_value=fake.NFS_EXPORTS))
self.mock_object(self.library, '_delete_share')
mock_update_share_attrs = self.mock_object(
self.library, '_update_share_attributes_after_server_migration')
sns_id = 'fake_sns_id'
new_sns_id = 'fake_sns_id_2'
self.fake_src_share_server['share_network_subnet_id'] = sns_id
self.fake_dest_share_server['share_network_subnet_id'] = (
sns_id if not network_change else new_sns_id)
share_instances = [fake.SHARE_INSTANCE]
migration_id = 'fake_migration_id'
if not is_svm_dr:
self.fake_dest_share_server['backend_details'][
'migration_operation_id'] = (
migration_id)
should_recreate_export = is_svm_dr or network_change
share_server_to_get_vserver_name = (
self.fake_dest_share_server
if is_svm_dr else self.fake_src_share_server)
result = self.library.share_server_migration_complete(
None,
self.fake_src_share_server,
self.fake_dest_share_server,
share_instances, [],
fake.NETWORK_INFO
)
expected_share_updates = {
fake.SHARE_INSTANCE['id']: {
'pool_name': fake_volume['aggregate']
}
}
if should_recreate_export:
expected_share_updates[fake.SHARE_INSTANCE['id']].update(
{'export_locations': fake.NFS_EXPORTS})
expected_result = {
'share_updates': expected_share_updates,
}
self.assertEqual(expected_result, result)
self.library._get_vserver.assert_has_calls([
mock.call(share_server=self.fake_src_share_server,
backend_name=self.fake_src_backend_name),
mock.call(share_server=share_server_to_get_vserver_name,
backend_name=self.fake_dest_backend_name)])
if is_svm_dr:
mock_complete_svm_dr.assert_called_once_with(
self.fake_src_share_server, self.fake_dest_share_server,
self.fake_src_vserver, self.mock_src_client,
share_instances, fake.NETWORK_INFO
)
self.library._delete_share.assert_called_once_with(
fake.SHARE_INSTANCE, self.fake_src_vserver,
self.mock_src_client, remove_export=True)
else:
mock_complete_svm_migrate.assert_called_once_with(
migration_id, self.fake_dest_share_server)
if should_recreate_export:
create_export_calls = [
mock.call(
instance, self.fake_dest_share_server,
self.fake_dest_vserver, self.mock_dest_client,
clear_current_export_policy=False,
ensure_share_already_exists=True)
for instance in share_instances
]
self.library._create_export.assert_has_calls(create_export_calls)
self.mock_dest_client.get_volume.assert_called_once_with(
fake_share_name)
mock_update_share_attrs.assert_called_once_with(
fake.SHARE_INSTANCE, self.mock_src_client,
fake_volume['aggregate'], self.mock_dest_client)
self.library._delete_share.assert_called_once_with(
fake.SHARE_INSTANCE, self.fake_src_vserver,
self.mock_src_client, remove_export=True)
def test_share_server_migration_complete_failure_breaking(self):
dm_session_mock = mock.Mock()
@ -2293,12 +2606,10 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock.Mock(side_effect=[
(self.fake_src_vserver, self.mock_src_client),
(self.fake_dest_vserver, self.mock_dest_client)]))
fake_ipspace = 'fake_ipspace'
self.mock_object(self.mock_dest_client, 'get_vserver_ipspace',
mock.Mock(return_value=fake_ipspace))
self.mock_object(self.library,
'_share_server_migration_complete_svm_dr')
fake_share_name = self.library._get_backend_share_name(
fake.SHARE_INSTANCE['id'])
self.mock_object(self.library, '_setup_network_for_vserver')
self.mock_object(self.mock_dest_client, 'get_volume',
mock.Mock(side_effect=exception.NetAppException))
@ -2310,45 +2621,16 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
[fake.SHARE_INSTANCE], [],
fake.NETWORK_INFO)
dm_session_mock.update_snapmirror_svm.assert_called_once_with(
self.fake_src_share_server, self.fake_dest_share_server
)
self.library._get_vserver.assert_has_calls([
mock.call(share_server=self.fake_src_share_server,
backend_name=self.fake_src_backend_name),
mock.call(share_server=self.fake_dest_share_server,
backend_name=self.fake_dest_backend_name)])
quiesce_break_mock = dm_session_mock.quiesce_and_break_snapmirror_svm
quiesce_break_mock.assert_called_once_with(
self.fake_src_share_server, self.fake_dest_share_server
)
dm_session_mock.wait_for_vserver_state.assert_called_once_with(
self.fake_dest_vserver, self.mock_dest_client, subtype='default',
state='running', operational_state='stopped',
timeout=(self.library.configuration.
netapp_server_migration_state_change_timeout)
)
self.mock_src_client.stop_vserver.assert_called_once_with(
self.fake_src_vserver
)
self.mock_dest_client.get_vserver_ipspace.assert_called_once_with(
self.fake_dest_vserver
)
self.library._setup_network_for_vserver.assert_called_once_with(
self.fake_dest_vserver, self.mock_dest_client, fake.NETWORK_INFO,
fake_ipspace, enable_nfs=False, security_services=None
)
self.mock_dest_client.start_vserver.assert_called_once_with(
self.fake_dest_vserver
)
dm_session_mock.delete_snapmirror_svm.assert_called_once_with(
self.fake_src_share_server, self.fake_dest_share_server
)
self.mock_dest_client.get_volume.assert_called_once_with(
fake_share_name)
@ddt.data([], ['fake_snapmirror'])
def test_share_server_migration_cancel(self, snapmirrors):
def test_share_server_migration_cancel_svm_dr(self, snapmirrors):
dm_session_mock = mock.Mock()
self.mock_object(data_motion, "DataMotionSession",
mock.Mock(return_value=dm_session_mock))
@ -2359,11 +2641,10 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock.Mock(return_value=snapmirrors))
self.mock_object(self.library, '_delete_share')
self.library.share_server_migration_cancel(
None,
self.library._migration_cancel_using_svm_dr(
self.fake_src_share_server,
self.fake_dest_share_server,
[fake.SHARE_INSTANCE], []
[fake.SHARE_INSTANCE]
)
self.library._get_vserver.assert_called_once_with(
@ -2380,7 +2661,110 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
fake.SHARE_INSTANCE, self.fake_dest_vserver, self.mock_dest_client,
remove_export=False)
def test_share_server_migration_cancel_snapmirror_failure(self):
@ddt.data(True, False)
def test__migration_cancel_using_svm_migrate(self, has_ipspace):
pause_job_uuid = 'fake_pause_job_id'
cancel_job_uuid = 'fake_cancel_job_id'
ipspace_name = 'fake_ipspace_name'
migration_id = 'fake_migration_id'
pause_job = {
'uuid': pause_job_uuid
}
cancel_job = {
'uuid': cancel_job_uuid
}
migration_information = {
"destination": {
"ipspace": {
"name": ipspace_name
}
}
}
if has_ipspace:
migration_information["destination"]["ipspace"]["name"] = (
ipspace_name)
self.mock_object(self.library, '_get_job_uuid',
mock.Mock(
side_effect=[pause_job_uuid, cancel_job_uuid]))
self.mock_object(self.library, '_get_client_for_svm_migrate',
mock.Mock(return_value=self.mock_dest_client))
self.mock_object(self.mock_dest_client, 'svm_migration_get',
mock.Mock(return_value=migration_information))
self.mock_object(self.mock_dest_client, 'svm_migrate_pause',
mock.Mock(return_value=pause_job))
self.mock_object(self.library, '_wait_for_operation_status')
self.mock_object(self.mock_dest_client, 'svm_migrate_cancel',
mock.Mock(return_value=cancel_job))
self.mock_object(self.mock_dest_client, 'ipspace_has_data_vservers',
mock.Mock(return_value=False))
self.mock_object(self.mock_dest_client, 'delete_ipspace')
self.library._migration_cancel_using_svm_migrate(
migration_id, self.fake_dest_share_server)
self.library._get_job_uuid.assert_has_calls(
[mock.call(pause_job), mock.call(cancel_job)]
)
self.library._get_client_for_svm_migrate.assert_called_once_with(
self.fake_dest_share_server)
self.mock_dest_client.svm_migration_get.assert_called_once_with(
migration_id)
self.mock_dest_client.svm_migrate_pause.assert_called_once_with(
migration_id)
self.library._wait_for_operation_status.assert_has_calls(
[mock.call(pause_job_uuid, self.mock_dest_client.get_job),
mock.call(migration_id, self.mock_dest_client.svm_migration_get,
desired_status="migrate_paused"),
mock.call(cancel_job_uuid, self.mock_dest_client.get_job)]
)
self.mock_dest_client.svm_migrate_cancel.assert_called_once_with(
migration_id)
if has_ipspace:
self.mock_dest_client.delete_ipspace.assert_called_once_with(
ipspace_name)
@ddt.data(
(mock.Mock(side_effect=exception.NetAppException()), mock.Mock()),
(mock.Mock(), mock.Mock(side_effect=exception.NetAppException()))
)
@ddt.unpack
def test__migration_cancel_using_svm_migrate_error(
self, mock_pause, mock_cancel):
pause_job_uuid = 'fake_pause_job_id'
cancel_job_uuid = 'fake_cancel_job_id'
migration_id = 'fake_migration_id'
migration_information = {
"destination": {
"ipspace": {
"name": "ipspace_name"
}
}
}
self.mock_object(self.library, '_get_job_uuid',
mock.Mock(
side_effect=[pause_job_uuid, cancel_job_uuid]))
self.mock_object(self.library, '_get_client_for_svm_migrate',
mock.Mock(return_value=self.mock_dest_client))
self.mock_object(self.mock_dest_client, 'svm_migration_get',
mock.Mock(return_value=migration_information))
self.mock_object(self.mock_dest_client, 'svm_migrate_pause',
mock_pause)
self.mock_object(self.library, '_wait_for_operation_status')
self.mock_object(self.mock_dest_client, 'svm_migrate_cancel',
mock_cancel)
self.assertRaises(
exception.NetAppException,
self.library._migration_cancel_using_svm_migrate,
migration_id,
self.fake_dest_share_server
)
def test_share_server_migration_cancel_svm_dr_snapmirror_failure(self):
dm_session_mock = mock.Mock()
self.mock_object(data_motion, "DataMotionSession",
mock.Mock(return_value=dm_session_mock))
@ -2393,11 +2777,10 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock.Mock(side_effect=exception.NetAppException))
self.assertRaises(exception.NetAppException,
self.library.share_server_migration_cancel,
None,
self.library._migration_cancel_using_svm_dr,
self.fake_src_share_server,
self.fake_dest_share_server,
[fake.SHARE_INSTANCE], [])
[fake.SHARE_INSTANCE])
self.library._get_vserver.assert_called_once_with(
share_server=self.fake_dest_share_server,


@ -520,6 +520,10 @@ SHARE_SERVER = {
'network_allocations': (USER_NETWORK_ALLOCATIONS +
ADMIN_NETWORK_ALLOCATIONS),
'host': SERVER_HOST,
'share_network_subnet': {
'neutron_net_id': 'fake_neutron_net_id',
'neutron_subnet_id': 'fake_neutron_subnet_id'
}
}
SHARE_SERVER_2 = {
@ -531,6 +535,10 @@ SHARE_SERVER_2 = {
'network_allocations': (USER_NETWORK_ALLOCATIONS +
ADMIN_NETWORK_ALLOCATIONS),
'host': SERVER_HOST_2,
'share_network_subnet': {
'neutron_net_id': 'fake_neutron_net_id_2',
'neutron_subnet_id': 'fake_neutron_subnet_id_2'
}
}
VSERVER_INFO = {


@ -0,0 +1,6 @@
---
features:
- |
The NetApp ONTAP share back end driver now supports non-disruptive
migration of share servers. The new mechanism also performs automatic
cleanup of the source share server and can reuse its network allocations.