[NetApp] Adds support for replication with DHSS=True

This patch adds support for replication with DHSS=True on the
NetApp driver. The driver now handles the vserver peering operations
required between the share servers that host the replicas.

Change-Id: I93888bcc6a0ca672671cf2aa254ceb23c4cbf692
Lucio Seki 2019-07-05 15:27:00 -03:00 committed by Douglas Viroel
parent 7a3fb6b325
commit 15b085ede1
11 changed files with 513 additions and 47 deletions
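
For orientation, here is a minimal sketch of the vserver peering handshake this patch performs when creating a replica or changing a SnapMirror source with DHSS=True. The function name and the src_client/dst_client parameters are hypothetical stand-ins for the per-backend NetAppCmodeClient instances the driver obtains through data_motion.get_client_for_backend(); only client methods that appear in the diff below are assumed.

def ensure_vserver_peering(src_client, dst_client, src_vserver, dst_vserver):
    """Peer dst_vserver (new replica) with src_vserver (active replica)."""
    # Nothing to do when a peer relationship already exists.
    if dst_client.get_vserver_peers(dst_vserver, src_vserver):
        return
    # The source cluster name is needed because the vservers may live on
    # different (already peered) clusters.
    src_cluster_name = src_client.get_cluster_name()
    # Request the peering from the destination side ...
    dst_client.create_vserver_peer(dst_vserver, src_vserver,
                                   peer_cluster_name=src_cluster_name)
    # ... and accept it from the source side.
    src_client.accept_vserver_peer(src_vserver, dst_vserver)

Both sides of the relationship are driven from Manila, so once the ONTAP clusters themselves are peered no manual vserver-level intervention is needed.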


@ -3085,6 +3085,23 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
else:
raise
@na_utils.trace
def get_cluster_name(self):
"""Gets cluster name."""
api_args = {
'desired-attributes': {
'cluster-identity-info': {
'cluster-name': None,
}
}
}
result = self.send_request('cluster-identity-get', api_args,
enable_tunneling=False)
attributes = result.get_child_by_name('attributes')
cluster_identity = attributes.get_child_by_name(
'cluster-identity-info')
return cluster_identity.get_child_content('cluster-name')
@na_utils.trace
def create_cluster_peer(self, addresses, username=None, password=None,
passphrase=None):
@ -3102,7 +3119,8 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
if passphrase:
api_args['passphrase'] = passphrase
self.send_request('cluster-peer-create', api_args)
self.send_request('cluster-peer-create', api_args,
enable_tunneling=False)
@na_utils.trace
def get_cluster_peers(self, remote_cluster_name=None):
@ -3162,7 +3180,8 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
"""Deletes a cluster peer relationship."""
api_args = {'cluster-name': cluster_name}
self.send_request('cluster-peer-delete', api_args)
self.send_request('cluster-peer-delete', api_args,
enable_tunneling=False)
@na_utils.trace
def get_cluster_peer_policy(self):
@ -3221,7 +3240,8 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
self.send_request('cluster-peer-policy-modify', api_args)
@na_utils.trace
def create_vserver_peer(self, vserver_name, peer_vserver_name):
def create_vserver_peer(self, vserver_name, peer_vserver_name,
peer_cluster_name=None):
"""Creates a Vserver peer relationship for SnapMirrors."""
api_args = {
'vserver': vserver_name,
@ -3230,21 +3250,26 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
{'vserver-peer-application': 'snapmirror'},
],
}
self.send_request('vserver-peer-create', api_args)
if peer_cluster_name:
api_args['peer-cluster'] = peer_cluster_name
self.send_request('vserver-peer-create', api_args,
enable_tunneling=False)
@na_utils.trace
def delete_vserver_peer(self, vserver_name, peer_vserver_name):
"""Deletes a Vserver peer relationship."""
api_args = {'vserver': vserver_name, 'peer-vserver': peer_vserver_name}
self.send_request('vserver-peer-delete', api_args)
self.send_request('vserver-peer-delete', api_args,
enable_tunneling=False)
@na_utils.trace
def accept_vserver_peer(self, vserver_name, peer_vserver_name):
"""Accepts a pending Vserver peer relationship."""
api_args = {'vserver': vserver_name, 'peer-vserver': peer_vserver_name}
self.send_request('vserver-peer-accept', api_args)
self.send_request('vserver-peer-accept', api_args,
enable_tunneling=False)
@na_utils.trace
def get_vserver_peers(self, vserver_name=None, peer_vserver_name=None):


@ -353,8 +353,10 @@ class DataMotionSession(object):
1. Delete all snapmirrors involving the replica, but maintain
snapmirror metadata and snapshots for efficiency
2. Ensure a new source -> replica snapmirror exists
3. Resync new source -> replica snapmirror relationship
2. For DHSS=True scenarios, create a new vserver peer relationship if
it does not exist
3. Ensure a new source -> replica snapmirror exists
4. Resync new source -> replica snapmirror relationship
"""
replica_volume_name, replica_vserver, replica_backend = (
@ -362,7 +364,7 @@ class DataMotionSession(object):
replica_client = get_client_for_backend(replica_backend,
vserver_name=replica_vserver)
new_src_volume_name, new_src_vserver, __ = (
new_src_volume_name, new_src_vserver, new_src_backend = (
self.get_backend_info_for_share(new_source_replica))
# 1. delete
@ -376,14 +378,31 @@ class DataMotionSession(object):
self.delete_snapmirror(other_replica, replica, release=False)
self.delete_snapmirror(replica, other_replica, release=False)
# 2. create
# 2. vserver operations when the driver handles share servers
replica_config = get_backend_configuration(replica_backend)
if replica_config.driver_handles_share_servers:
# create vserver peering if it does not exist
if not replica_client.get_vserver_peers(replica_vserver,
new_src_vserver):
new_src_client = get_client_for_backend(
new_src_backend, vserver_name=new_src_vserver)
# Cluster name is needed for setting up the vserver peering
new_src_cluster_name = new_src_client.get_cluster_name()
replica_client.create_vserver_peer(
replica_vserver, new_src_vserver,
peer_cluster_name=new_src_cluster_name)
new_src_client.accept_vserver_peer(new_src_vserver,
replica_vserver)
# 3. create
# TODO(ameade): Update the schedule if needed.
replica_client.create_snapmirror(new_src_vserver,
new_src_volume_name,
replica_vserver,
replica_volume_name,
schedule='hourly')
# 3. resync
# 4. resync
replica_client.resync_snapmirror(new_src_vserver,
new_src_volume_name,
replica_vserver,


@ -133,42 +133,57 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
def _teardown_server(self, server_details, **kwargs):
self.library.teardown_server(server_details, **kwargs)
def create_replica(self, context, replica_list, replica, access_rules,
def create_replica(self, context, replica_list, new_replica, access_rules,
replica_snapshots, **kwargs):
raise NotImplementedError()
return self.library.create_replica(context, replica_list, new_replica,
access_rules, replica_snapshots)
def delete_replica(self, context, replica_list, replica_snapshots,
replica, **kwargs):
raise NotImplementedError()
self.library.delete_replica(context, replica_list, replica,
replica_snapshots)
def promote_replica(self, context, replica_list, replica, access_rules,
share_server=None):
raise NotImplementedError()
return self.library.promote_replica(context, replica_list, replica,
access_rules,
share_server=share_server)
def update_replica_state(self, context, replica_list, replica,
access_rules, replica_snapshots,
share_server=None):
raise NotImplementedError()
return self.library.update_replica_state(context, replica_list,
replica, access_rules,
replica_snapshots,
share_server)
def create_replicated_snapshot(self, context, replica_list,
replica_snapshots, share_server=None):
raise NotImplementedError()
return self.library.create_replicated_snapshot(
context, replica_list, replica_snapshots,
share_server=share_server)
def delete_replicated_snapshot(self, context, replica_list,
replica_snapshots, share_server=None):
raise NotImplementedError()
return self.library.delete_replicated_snapshot(
context, replica_list, replica_snapshots,
share_server=share_server)
def update_replicated_snapshot(self, context, replica_list,
share_replica, replica_snapshots,
replica_snapshot, share_server=None):
raise NotImplementedError()
return self.library.update_replicated_snapshot(
replica_list, share_replica, replica_snapshots, replica_snapshot,
share_server=share_server)
def revert_to_replicated_snapshot(self, context, active_replica,
replica_list, active_replica_snapshot,
replica_snapshots, share_access_rules,
snapshot_access_rules,
share_server=None):
raise NotImplementedError()
**kwargs):
return self.library.revert_to_replicated_snapshot(
context, active_replica, replica_list, active_replica_snapshot,
replica_snapshots, **kwargs)
def migration_check_compatibility(self, context, source_share,
destination_share, share_server=None,


@ -294,8 +294,7 @@ class NetAppCmodeFileStorageLibrary(object):
},
}
if (self.configuration.replication_domain and
not self.configuration.driver_handles_share_servers):
if self.configuration.replication_domain:
data['replication_type'] = 'dr'
data['replication_domain'] = self.configuration.replication_domain
@ -1463,7 +1462,7 @@ class NetAppCmodeFileStorageLibrary(object):
'netapp_disk_type': disk_types,
})
def _find_active_replica(self, replica_list):
def find_active_replica(self, replica_list):
# NOTE(ameade): Find current active replica. There can only be one
# active replica (SnapMirror source volume) at a time in cDOT.
for r in replica_list:
@ -1478,7 +1477,7 @@ class NetAppCmodeFileStorageLibrary(object):
def create_replica(self, context, replica_list, new_replica,
access_rules, share_snapshots, share_server=None):
"""Creates the new replica on this backend and sets up SnapMirror."""
active_replica = self._find_active_replica(replica_list)
active_replica = self.find_active_replica(replica_list)
dm_session = data_motion.DataMotionSession()
# 1. Create the destination share
@ -1532,7 +1531,7 @@ class NetAppCmodeFileStorageLibrary(object):
def update_replica_state(self, context, replica_list, replica,
access_rules, share_snapshots, share_server=None):
"""Returns the status of the given replica on this backend."""
active_replica = self._find_active_replica(replica_list)
active_replica = self.find_active_replica(replica_list)
share_name = self._get_backend_share_name(replica['id'])
vserver, vserver_client = self._get_vserver(share_server=share_server)
@ -1624,7 +1623,7 @@ class NetAppCmodeFileStorageLibrary(object):
:param share_server: ShareServer class instance of replica
:return: Updated replica_list
"""
orig_active_replica = self._find_active_replica(replica_list)
orig_active_replica = self.find_active_replica(replica_list)
dm_session = data_motion.DataMotionSession()
@ -1640,7 +1639,7 @@ class NetAppCmodeFileStorageLibrary(object):
LOG.exception("Could not communicate with the backend "
"for replica %s during promotion.",
replica['id'])
new_active_replica = copy.deepcopy(replica)
new_active_replica = replica.copy()
new_active_replica['replica_state'] = (
constants.STATUS_ERROR)
new_active_replica['status'] = constants.STATUS_ERROR
@ -1760,7 +1759,7 @@ class NetAppCmodeFileStorageLibrary(object):
dm_session.break_snapmirror(orig_active_replica, replica)
# 3. Setup access rules
new_active_replica = copy.deepcopy(replica)
new_active_replica = replica.copy()
helper = self._get_helper(replica)
helper.set_client(vserver_client)
try:
@ -1817,7 +1816,7 @@ class NetAppCmodeFileStorageLibrary(object):
def create_replicated_snapshot(self, context, replica_list,
snapshot_instances, share_server=None):
active_replica = self._find_active_replica(replica_list)
active_replica = self.find_active_replica(replica_list)
active_snapshot = [x for x in snapshot_instances
if x['share_id'] == active_replica['id']][0]
snapshot_name = self._get_backend_snapshot_name(active_snapshot['id'])
@ -1849,7 +1848,7 @@ class NetAppCmodeFileStorageLibrary(object):
def delete_replicated_snapshot(self, context, replica_list,
snapshot_instances, share_server=None):
active_replica = self._find_active_replica(replica_list)
active_replica = self.find_active_replica(replica_list)
active_snapshot = [x for x in snapshot_instances
if x['share_id'] == active_replica['id']][0]
@ -1878,7 +1877,7 @@ class NetAppCmodeFileStorageLibrary(object):
def update_replicated_snapshot(self, replica_list, share_replica,
snapshot_instances, snapshot_instance,
share_server=None):
active_replica = self._find_active_replica(replica_list)
active_replica = self.find_active_replica(replica_list)
vserver, vserver_client = self._get_vserver(share_server=share_server)
share_name = self._get_backend_share_name(
snapshot_instance['share_id'])


@ -29,11 +29,12 @@ from oslo_utils import excutils
from manila import exception
from manila.i18n import _
from manila.share.drivers.netapp.dataontap.client import client_cmode
from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base
from manila.share.drivers.netapp import utils as na_utils
from manila.share import utils as share_utils
from manila import utils
LOG = log.getLogger(__name__)
SUPPORTED_NETWORK_TYPES = (None, 'flat', 'vlan')
SEGMENTED_NETWORK_TYPES = ('vlan',)
@ -383,6 +384,10 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
vlan_id = None
def _delete_vserver_without_lock():
# NOTE(dviroel): Attempt to delete all vserver peer relationships
# created by replication
self._delete_vserver_peers(vserver)
self._client.delete_vserver(vserver,
vserver_client,
security_services=security_services)
@ -414,6 +419,13 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
except exception.NetAppException:
LOG.exception("Deleting Vserver VLAN failed.")
@na_utils.trace
def _delete_vserver_peers(self, vserver):
vserver_peers = self._get_vserver_peers(vserver=vserver)
for peer in vserver_peers:
self._delete_vserver_peer(peer.get('vserver'),
peer.get('peer-vserver'))
def get_configured_ip_versions(self):
versions = [4]
options = self._client.get_net_options()
@ -421,6 +433,70 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
versions.append(6)
return versions
@na_utils.trace
def create_replica(self, context, replica_list, new_replica,
access_rules, share_snapshots, share_server=None):
"""Creates the new replica on this backend and sets up SnapMirror.
It creates the peering between the associated vservers before creating
the share replica and setting up the SnapMirror.
"""
# 1. Retrieve source and destination vservers from both replicas,
# active and new_replica
src_vserver, dst_vserver = self._get_vservers_from_replicas(
context, replica_list, new_replica)
# 2. Retrieve the active replica host's client and cluster name
src_replica = self.find_active_replica(replica_list)
src_replica_host = share_utils.extract_host(
src_replica['host'], level='backend_name')
src_replica_client = data_motion.get_client_for_backend(
src_replica_host, vserver_name=src_vserver)
# Cluster name is needed for setting up the vserver peering
src_replica_cluster_name = src_replica_client.get_cluster_name()
# 3. Retrieve new replica host's client
new_replica_host = share_utils.extract_host(
new_replica['host'], level='backend_name')
new_replica_client = data_motion.get_client_for_backend(
new_replica_host, vserver_name=dst_vserver)
if not self._get_vserver_peers(dst_vserver, src_vserver):
# 3.1. Request vserver peer creation from new_replica's host
# to active replica's host
new_replica_client.create_vserver_peer(
dst_vserver, src_vserver,
peer_cluster_name=src_replica_cluster_name)
# 3.2. Accept the vserver peering using the active replica host's
# client
src_replica_client.accept_vserver_peer(src_vserver, dst_vserver)
return (super(NetAppCmodeMultiSVMFileStorageLibrary, self).
create_replica(context, replica_list, new_replica,
access_rules, share_snapshots))
def delete_replica(self, context, replica_list, replica, share_snapshots,
share_server=None):
"""Removes the replica on this backend and destroys SnapMirror.
Removes the replica, destroys the SnapMirror and deletes the vserver
peering if needed.
"""
vserver, peer_vserver = self._get_vservers_from_replicas(
context, replica_list, replica)
super(NetAppCmodeMultiSVMFileStorageLibrary, self).delete_replica(
context, replica_list, replica, share_snapshots)
# If no SnapMirror relationships remain in either direction and a
# vserver peering still exists, delete it.
snapmirrors = self._get_snapmirrors(vserver, peer_vserver)
snapmirrors_from_peer = self._get_snapmirrors(peer_vserver, vserver)
peers = self._get_vserver_peers(peer_vserver, vserver)
if not (snapmirrors or snapmirrors_from_peer) and peers:
self._delete_vserver_peer(peer_vserver, vserver)
def manage_server(self, context, share_server, identifier, driver_options):
"""Manages a vserver by renaming it and returning backend_details."""
new_vserver_name = self._get_vserver_name(share_server['id'])
@ -454,3 +530,26 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
if not self._client.vserver_exists(identifier):
return self._get_vserver_name(identifier)
return identifier
def _get_snapmirrors(self, vserver, peer_vserver):
return self._client.get_snapmirrors(
source_vserver=vserver, source_volume=None,
destination_vserver=peer_vserver, destination_volume=None)
def _get_vservers_from_replicas(self, context, replica_list, new_replica):
active_replica = self.find_active_replica(replica_list)
dm_session = data_motion.DataMotionSession()
vserver = dm_session.get_vserver_from_share(active_replica)
peer_vserver = dm_session.get_vserver_from_share(new_replica)
return vserver, peer_vserver
def _get_vserver_peers(self, vserver=None, peer_vserver=None):
return self._client.get_vserver_peers(vserver, peer_vserver)
def _create_vserver_peer(self, context, vserver, peer_vserver):
self._client.create_vserver_peer(vserver, peer_vserver)
def _delete_vserver_peer(self, vserver, peer_vserver):
self._client.delete_vserver_peer(vserver, peer_vserver)


@ -39,6 +39,7 @@ NODE_NAME = 'fake_node1'
NODE_NAMES = ('fake_node1', 'fake_node2')
VSERVER_NAME = 'fake_vserver'
VSERVER_NAME_2 = 'fake_vserver_2'
VSERVER_PEER_NAME = 'fake_vserver_peer'
ADMIN_VSERVER_NAME = 'fake_admin_vserver'
NODE_VSERVER_NAME = 'fake_node_vserver'
NFS_VERSIONS = ['nfs3', 'nfs4.0']
@ -2249,6 +2250,23 @@ CLUSTER_PEER_POLICY_GET_RESPONSE = etree.XML("""
</results>
""")
CLUSTER_GET_CLUSTER_NAME = etree.XML("""
<results status="passed">
<attributes>
<cluster-identity-info>
<cluster-contact />
<cluster-location>-</cluster-location>
<cluster-name>%(cluster_name)s</cluster-name>
<cluster-serial-number>1-80-000000</cluster-serial-number>
<cluster-uuid>fake_uuid</cluster-uuid>
<rdb-uuid>fake_rdb</rdb-uuid>
</cluster-identity-info>
</attributes>
</results>
""" % {
'cluster_name': CLUSTER_NAME,
})
VSERVER_PEER_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>


@ -5451,7 +5451,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
'passphrase': 'fake_passphrase',
}
self.client.send_request.assert_has_calls([
mock.call('cluster-peer-create', cluster_peer_create_args)])
mock.call('cluster-peer-create', cluster_peer_create_args,
enable_tunneling=False)])
def test_get_cluster_peers(self):
@ -5524,7 +5525,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
cluster_peer_delete_args = {'cluster-name': fake.CLUSTER_NAME}
self.client.send_request.assert_has_calls([
mock.call('cluster-peer-delete', cluster_peer_delete_args)])
mock.call('cluster-peer-delete', cluster_peer_delete_args,
enable_tunneling=False)])
def test_get_cluster_peer_policy(self):
@ -5585,21 +5587,28 @@ class NetAppClientCmodeTestCase(test.TestCase):
mock.call('cluster-peer-policy-modify',
cluster_peer_policy_modify_args)])
def test_create_vserver_peer(self):
@ddt.data(None, 'cluster_name')
def test_create_vserver_peer(self, cluster_name):
self.mock_object(self.client, 'send_request')
self.client.create_vserver_peer('fake_vserver', 'fake_vserver_peer')
self.client.create_vserver_peer(fake.VSERVER_NAME,
fake.VSERVER_PEER_NAME,
peer_cluster_name=cluster_name)
vserver_peer_create_args = {
'vserver': 'fake_vserver',
'peer-vserver': 'fake_vserver_peer',
'vserver': fake.VSERVER_NAME,
'peer-vserver': fake.VSERVER_PEER_NAME,
'applications': [
{'vserver-peer-application': 'snapmirror'},
],
}
if cluster_name:
vserver_peer_create_args['peer-cluster'] = cluster_name
self.client.send_request.assert_has_calls([
mock.call('vserver-peer-create', vserver_peer_create_args)])
mock.call('vserver-peer-create', vserver_peer_create_args,
enable_tunneling=False)])
def test_delete_vserver_peer(self):
@ -5612,7 +5621,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
'peer-vserver': 'fake_vserver_peer',
}
self.client.send_request.assert_has_calls([
mock.call('vserver-peer-delete', vserver_peer_delete_args)])
mock.call('vserver-peer-delete', vserver_peer_delete_args,
enable_tunneling=False)])
def test_accept_vserver_peer(self):
@ -5625,7 +5635,8 @@ class NetAppClientCmodeTestCase(test.TestCase):
'peer-vserver': 'fake_vserver_peer',
}
self.client.send_request.assert_has_calls([
mock.call('vserver-peer-accept', vserver_peer_accept_args)])
mock.call('vserver-peer-accept', vserver_peer_accept_args,
enable_tunneling=False)])
def test_get_vserver_peers(self):
@ -6637,3 +6648,22 @@ class NetAppClientCmodeTestCase(test.TestCase):
'qos-policy-group-delete-iter',
qos_policy_group_delete_iter_args, False)
self.assertIs(failed, client_cmode.LOG.debug.called)
def test_get_cluster_name(self):
api_response = netapp_api.NaElement(
fake.CLUSTER_GET_CLUSTER_NAME)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
api_args = {
'desired-attributes': {
'cluster-identity-info': {
'cluster-name': None,
}
}
}
result = self.client.get_cluster_name()
self.assertEqual(fake.CLUSTER_NAME, result)
self.client.send_request.assert_called_once_with(
'cluster-identity-get', api_args, enable_tunneling=False)


@ -476,6 +476,56 @@ class NetAppCDOTDataMotionSessionTestCase(test.TestCase):
self.fake_dest_vol_name
)
def test_change_snapmirror_source_dhss_true(self):
fake_new_src_share = copy.deepcopy(self.fake_src_share)
fake_new_src_share['id'] = 'd02d497a-236c-4852-812a-0d39373e312a'
fake_new_src_share_name = 'share_d02d497a_236c_4852_812a_0d39373e312a'
fake_new_src_share_server = fake_new_src_share['share_server']
fake_new_src_ss_name = (
fake_new_src_share_server['backend_details']['vserver_name'])
self.mock_object(self.dm_session, 'delete_snapmirror')
self.mock_object(data_motion, 'get_client_for_backend',
mock.Mock(side_effect=[self.mock_dest_client,
self.mock_src_client]))
mock_backend_config = na_fakes.create_configuration()
mock_backend_config.driver_handles_share_servers = True
self.mock_object(data_motion, 'get_backend_configuration',
mock.Mock(return_value=mock_backend_config))
self.mock_object(self.mock_dest_client, 'get_vserver_peers',
mock.Mock(return_value=[]))
peer_cluster_name = 'new_src_cluster_name'
self.mock_object(self.mock_src_client, 'get_cluster_name',
mock.Mock(return_value=peer_cluster_name))
self.dm_session.change_snapmirror_source(
self.fake_dest_share, self.fake_src_share, fake_new_src_share,
[self.fake_dest_share, self.fake_src_share, fake_new_src_share])
self.assertEqual(4, self.dm_session.delete_snapmirror.call_count)
self.mock_dest_client.get_vserver_peers.assert_called_once_with(
self.dest_vserver, fake_new_src_ss_name
)
self.assertTrue(self.mock_src_client.get_cluster_name.called)
self.mock_dest_client.create_vserver_peer.assert_called_once_with(
self.dest_vserver, fake_new_src_ss_name,
peer_cluster_name=peer_cluster_name
)
self.mock_src_client.accept_vserver_peer.assert_called_once_with(
fake_new_src_ss_name, self.dest_vserver
)
self.dm_session.delete_snapmirror.assert_called_with(
mock.ANY, mock.ANY, release=False
)
self.mock_dest_client.create_snapmirror.assert_called_once_with(
mock.ANY, fake_new_src_share_name, mock.ANY,
self.fake_dest_vol_name, schedule='hourly'
)
self.mock_dest_client.resync_snapmirror.assert_called_once_with(
mock.ANY, fake_new_src_share_name, mock.ANY,
self.fake_dest_vol_name
)
def test_get_snapmirrors(self):
self.mock_object(self.mock_dest_client, 'get_snapmirrors')


@ -22,12 +22,15 @@ import mock
from oslo_log import log
from oslo_serialization import jsonutils
from manila.common import constants
from manila import context
from manila import exception
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_multi_svm
from manila.share.drivers.netapp import utils as na_utils
from manila.share import utils as share_utils
from manila import test
from manila.tests.share.drivers.netapp.dataontap.client import fakes as c_fake
from manila.tests.share.drivers.netapp.dataontap import fakes as fake
@ -61,6 +64,26 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.library._client = mock.Mock()
self.library._client.get_ontapi_version.return_value = (1, 21)
self.client = self.library._client
self.fake_new_replica = copy.deepcopy(fake.SHARE)
self.fake_new_ss = copy.deepcopy(fake.SHARE_SERVER)
self.fake_new_vserver_name = 'fake_new_vserver'
self.fake_new_ss['backend_details']['vserver_name'] = (
self.fake_new_vserver_name
)
self.fake_new_replica['share_server'] = self.fake_new_ss
self.fake_new_replica_host = 'fake_new_host'
self.fake_replica = copy.deepcopy(fake.SHARE)
self.fake_replica['id'] = fake.SHARE_ID2
fake_ss = copy.deepcopy(fake.SHARE_SERVER)
self.fake_vserver = 'fake_vserver'
fake_ss['backend_details']['vserver_name'] = (
self.fake_vserver
)
self.fake_replica['share_server'] = fake_ss
self.fake_replica_host = 'fake_host'
self.fake_new_client = mock.Mock()
self.fake_client = mock.Mock()
def test_check_for_setup_error_cluster_creds_no_vserver(self):
self.library._have_cluster_creds = True
@ -793,7 +816,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.assertFalse(mock_delete_vserver.called)
self.assertTrue(lib_multi_svm.LOG.warning.called)
def test_delete_vserver_no_ipspace(self):
@ddt.data(True, False)
def test_delete_vserver_no_ipspace(self, lock):
self.mock_object(self.library._client,
'get_vserver_ipspace',
@ -812,19 +836,24 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
'get_network_interfaces',
mock.Mock(return_value=net_interfaces))
security_services = fake.NETWORK_INFO['security_services']
self.mock_object(self.library, '_delete_vserver_peers')
self.library._delete_vserver(fake.VSERVER1,
security_services=security_services)
security_services=security_services,
needs_lock=lock)
self.library._client.get_vserver_ipspace.assert_called_once_with(
fake.VSERVER1)
self.library._delete_vserver_peers.assert_called_once_with(
fake.VSERVER1)
self.library._client.delete_vserver.assert_called_once_with(
fake.VSERVER1, vserver_client, security_services=security_services)
self.assertFalse(self.library._client.delete_ipspace.called)
mock_delete_vserver_vlans.assert_called_once_with(
net_interfaces_with_vlans)
def test_delete_vserver_ipspace_has_data_vservers(self):
@ddt.data(True, False)
def test_delete_vserver_ipspace_has_data_vservers(self, lock):
self.mock_object(self.library._client,
'get_vserver_ipspace',
@ -838,18 +867,22 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock.Mock(return_value=True))
mock_delete_vserver_vlans = self.mock_object(self.library,
'_delete_vserver_vlans')
self.mock_object(self.library, '_delete_vserver_peers')
self.mock_object(
vserver_client, 'get_network_interfaces',
mock.Mock(return_value=c_fake.NETWORK_INTERFACES_MULTIPLE))
security_services = fake.NETWORK_INFO['security_services']
self.library._delete_vserver(fake.VSERVER1,
security_services=security_services)
security_services=security_services,
needs_lock=lock)
self.library._client.get_vserver_ipspace.assert_called_once_with(
fake.VSERVER1)
self.library._client.delete_vserver.assert_called_once_with(
fake.VSERVER1, vserver_client, security_services=security_services)
self.library._delete_vserver_peers.assert_called_once_with(
fake.VSERVER1)
self.assertFalse(self.library._client.delete_ipspace.called)
mock_delete_vserver_vlans.assert_called_once_with(
[c_fake.NETWORK_INTERFACES_MULTIPLE[0]])
@ -869,6 +902,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
mock.Mock(return_value=False))
mock_delete_vserver_vlans = self.mock_object(self.library,
'_delete_vserver_vlans')
self.mock_object(self.library, '_delete_vserver_peers')
self.mock_object(vserver_client,
'get_network_interfaces',
mock.Mock(return_value=interfaces))
@ -877,7 +911,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.library._delete_vserver(fake.VSERVER1,
security_services=security_services)
self.library._delete_vserver_peers.assert_called_once_with(
fake.VSERVER1
)
self.library._client.get_vserver_ipspace.assert_called_once_with(
fake.VSERVER1)
self.library._client.delete_vserver.assert_called_once_with(
@ -886,6 +922,23 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
fake.IPSPACE)
mock_delete_vserver_vlans.assert_called_once_with(interfaces)
def test__delete_vserver_peers(self):
self.mock_object(self.library,
'_get_vserver_peers',
mock.Mock(return_value=fake.VSERVER_PEER))
self.mock_object(self.library, '_delete_vserver_peer')
self.library._delete_vserver_peers(fake.VSERVER1)
self.library._get_vserver_peers.assert_called_once_with(
vserver=fake.VSERVER1
)
self.library._delete_vserver_peer.assert_called_once_with(
fake.VSERVER_PEER[0]['vserver'],
fake.VSERVER_PEER[0]['peer-vserver']
)
def test_delete_vserver_vlans(self):
self.library._delete_vserver_vlans(c_fake.NETWORK_INTERFACES)
@ -912,3 +965,146 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
self.library._client.delete_vlan.assert_called_once_with(
node, port, vlan)
self.assertEqual(1, mock_exception_log.call_count)
@ddt.data([], [{'vserver': c_fake.VSERVER_NAME,
'peer-vserver': c_fake.VSERVER_PEER_NAME,
'applications': [
{'vserver-peer-application': 'snapmirror'}]
}])
def test_create_replica(self, vserver_peers):
fake_cluster_name = 'fake_cluster'
self.mock_object(self.library, '_get_vservers_from_replicas',
mock.Mock(return_value=(self.fake_vserver,
self.fake_new_vserver_name)))
self.mock_object(self.library, 'find_active_replica',
mock.Mock(return_value=self.fake_replica))
self.mock_object(share_utils, 'extract_host',
mock.Mock(side_effect=[self.fake_new_replica_host,
self.fake_replica_host]))
self.mock_object(data_motion, 'get_client_for_backend',
mock.Mock(side_effect=[self.fake_new_client,
self.fake_client]))
self.mock_object(self.library, '_get_vserver_peers',
mock.Mock(return_value=vserver_peers))
self.mock_object(self.fake_new_client, 'get_cluster_name',
mock.Mock(return_value=fake_cluster_name))
self.mock_object(self.fake_client, 'create_vserver_peer')
self.mock_object(self.fake_new_client, 'accept_vserver_peer')
lib_base_model_update = {
'export_locations': [],
'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC,
'access_rules_status': constants.STATUS_ACTIVE,
}
self.mock_object(lib_base.NetAppCmodeFileStorageLibrary,
'create_replica',
mock.Mock(return_value=lib_base_model_update))
model_update = self.library.create_replica(
None, [self.fake_replica], self.fake_new_replica, [], [],
share_server=None)
self.assertDictMatch(lib_base_model_update, model_update)
self.library._get_vservers_from_replicas.assert_called_once_with(
None, [self.fake_replica], self.fake_new_replica
)
self.library.find_active_replica.assert_called_once_with(
[self.fake_replica]
)
self.assertEqual(2, share_utils.extract_host.call_count)
self.assertEqual(2, data_motion.get_client_for_backend.call_count)
self.library._get_vserver_peers.assert_called_once_with(
self.fake_new_vserver_name, self.fake_vserver
)
self.fake_new_client.get_cluster_name.assert_called_once_with()
if not vserver_peers:
self.fake_client.create_vserver_peer.assert_called_once_with(
self.fake_new_vserver_name, self.fake_vserver,
peer_cluster_name=fake_cluster_name
)
self.fake_new_client.accept_vserver_peer.assert_called_once_with(
self.fake_vserver, self.fake_new_vserver_name
)
base_class = lib_base.NetAppCmodeFileStorageLibrary
base_class.create_replica.assert_called_once_with(
None, [self.fake_replica], self.fake_new_replica, [], []
)
def test_delete_replica(self):
base_class = lib_base.NetAppCmodeFileStorageLibrary
vserver_peers = copy.deepcopy(fake.VSERVER_PEER)
vserver_peers[0]['vserver'] = self.fake_vserver
vserver_peers[0]['peer-vserver'] = self.fake_new_vserver_name
self.mock_object(self.library, '_get_vservers_from_replicas',
mock.Mock(return_value=(self.fake_vserver,
self.fake_new_vserver_name)))
self.mock_object(base_class, 'delete_replica')
self.mock_object(self.library, '_get_snapmirrors',
mock.Mock(return_value=[]))
self.mock_object(self.library, '_get_vserver_peers',
mock.Mock(return_value=vserver_peers))
self.mock_object(self.library, '_delete_vserver_peer')
self.library.delete_replica(None, [self.fake_replica],
self.fake_new_replica, [],
share_server=None)
self.library._get_vservers_from_replicas.assert_called_once_with(
None, [self.fake_replica], self.fake_new_replica
)
base_class.delete_replica.assert_called_once_with(
None, [self.fake_replica], self.fake_new_replica, []
)
self.library._get_snapmirrors.assert_has_calls(
[mock.call(self.fake_vserver, self.fake_new_vserver_name),
mock.call(self.fake_new_vserver_name, self.fake_vserver)]
)
self.library._get_vserver_peers.assert_called_once_with(
self.fake_new_vserver_name, self.fake_vserver
)
self.library._delete_vserver_peer.assert_called_once_with(
self.fake_new_vserver_name, self.fake_vserver
)
def test_get_vservers_from_replicas(self):
self.mock_object(self.library, 'find_active_replica',
mock.Mock(return_value=self.fake_replica))
vserver, peer_vserver = self.library._get_vservers_from_replicas(
None, [self.fake_replica], self.fake_new_replica)
self.library.find_active_replica.assert_called_once_with(
[self.fake_replica]
)
self.assertEqual(self.fake_vserver, vserver)
self.assertEqual(self.fake_new_vserver_name, peer_vserver)
def test_get_vserver_peers(self):
self.mock_object(self.library._client, 'get_vserver_peers')
self.library._get_vserver_peers(
vserver=self.fake_vserver, peer_vserver=self.fake_new_vserver_name)
self.library._client.get_vserver_peers.assert_called_once_with(
self.fake_vserver, self.fake_new_vserver_name
)
def test_create_vserver_peer(self):
self.mock_object(self.library._client, 'create_vserver_peer')
self.library._create_vserver_peer(
None, vserver=self.fake_vserver,
peer_vserver=self.fake_new_vserver_name)
self.library._client.create_vserver_peer.assert_called_once_with(
self.fake_vserver, self.fake_new_vserver_name
)
def test_delete_vserver_peer(self):
self.mock_object(self.library._client, 'delete_vserver_peer')
self.library._delete_vserver_peer(
vserver=self.fake_vserver, peer_vserver=self.fake_new_vserver_name)
self.library._client.delete_vserver_peer.assert_called_once_with(
self.fake_vserver, self.fake_new_vserver_name
)


@ -365,6 +365,13 @@ SHARE_SERVER = {
ADMIN_NETWORK_ALLOCATIONS),
}
VSERVER_PEER = [{
'vserver': VSERVER1,
'peer-vserver': VSERVER2,
'peer-state': 'peered',
'peer-cluster': 'fake_cluster'
}]
SNAPSHOT = {
'id': SNAPSHOT_ID,
'project_id': TENANT_ID,


@ -0,0 +1,8 @@
---
features:
- |
The NetApp driver now supports replication with
``driver_handles_share_servers`` set to True, in addition to the mode where
the driver does not handle the creation and management of share servers.
For replication to work across ONTAP clusters, clusters must be peered in
advance.
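
The cluster peering prerequisite mentioned above can be checked (and, where the admin credentials allow it, established) with the client helpers touched by this patch. The sketch below is illustrative only: ensure_cluster_peering, admin_client, and the argument values are placeholders, get_cluster_peers() is assumed to return an empty list when no matching peer exists, and the corresponding peer must also be created on the remote cluster.

def ensure_cluster_peering(admin_client, remote_cluster_name,
                           remote_intercluster_addresses, passphrase=None):
    # Skip when the remote cluster is already peered with this one.
    if admin_client.get_cluster_peers(
            remote_cluster_name=remote_cluster_name):
        return
    # Otherwise request a peer relationship; the passphrase (or the
    # username/password accepted by create_cluster_peer) must match what
    # the remote cluster uses when creating its side of the peer.
    admin_client.create_cluster_peer(remote_intercluster_addresses,
                                     passphrase=passphrase)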