[NetApp] Fix CIFS promote back issue
This change fixes the NetApp promote back issue when using the CIFS protocol. When promoting a replica, the NetApp ONTAP driver attempts to create a new CIFS share entity (an access point, as defined in [1]) for the new active replica. This fails because the storage detects that a backend CIFS share with the same name already exists, left over from when the referred replica was itself the active one. The issue is addressed by removing the demoted replica's CIFS share entity whenever a promotion takes place.

[1] https://library.netapp.com/ecmdocs/ECMP1401220/html/GUID-1898D717-A510-4B3D-B2E3-CCDDD5BD0089.html

Closes-Bug: #1879368
Change-Id: Id9bdd5df0ff05ea08881dd2c83397f0a367d9945
parent 82f01bd17f
commit c3aed22f94
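To make the failure mode concrete before reading the diff, here is a toy, self-contained model of the promote/promote-back sequence. It is not the driver's code: the classes, method names, and the share name 'share_1' are all invented for illustration.

    # Toy in-memory model of bug 1879368; everything here is illustrative,
    # not the real manila driver or the ONTAP API.
    class FakeCifsBackend:
        """Stands in for the CIFS share table of one backend/SVM."""
        def __init__(self):
            self.shares = set()

        def create_share(self, name):
            if name in self.shares:
                # ONTAP rejects creating a CIFS share whose name exists.
                raise RuntimeError('duplicate CIFS share: %s' % name)
            self.shares.add(name)

        def remove_share(self, name):
            self.shares.discard(name)

    def promote(new_active, demoted, share_name, cleanup_demoted=True):
        # Every promotion sends a create-share request to the new active
        # backend (this is what the commit message describes).
        new_active.create_share(share_name)
        if cleanup_demoted:
            # The fix: drop the now-stale share on the demoted side.
            demoted.remove_share(share_name)

    # Without cleanup, the stale share on A breaks "promote back":
    a, b = FakeCifsBackend(), FakeCifsBackend()
    a.create_share('share_1')                        # replica on A is active
    promote(b, a, 'share_1', cleanup_demoted=False)  # fail over to B
    try:
        promote(a, b, 'share_1', cleanup_demoted=False)  # back to A
    except RuntimeError as exc:
        print('promote back fails:', exc)

    # With cleanup, the round trip succeeds:
    a, b = FakeCifsBackend(), FakeCifsBackend()
    a.create_share('share_1')
    promote(b, a, 'share_1')
    promote(a, b, 'share_1')
    print('promote back succeeds')

The actual patch hangs this cleanup off the per-protocol helpers, as the hunks below show.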
@@ -1960,9 +1960,29 @@ class NetAppCmodeFileStorageLibrary(object):
             replica_list)
 
         new_replica_list.append(r)
 
-        # Unmount the original active replica.
         orig_active_vserver = dm_session.get_vserver_from_share(
             orig_active_replica)
+
+        # Cleanup the original active share if necessary
+        orig_active_replica_backend = (
+            share_utils.extract_host(orig_active_replica['host'],
+                                     level='backend_name'))
+        orig_active_replica_name = self._get_backend_share_name(
+            orig_active_replica['id'])
+        orig_active_vserver_client = data_motion.get_client_for_backend(
+            orig_active_replica_backend, vserver_name=orig_active_vserver)
+
+        orig_active_replica_helper = self._get_helper(orig_active_replica)
+        orig_active_replica_helper.set_client(orig_active_vserver_client)
+
+        try:
+            orig_active_replica_helper.cleanup_demoted_replica(
+                orig_active_replica, orig_active_replica_name)
+        except exception.StorageCommunicationException:
+            LOG.exception("Could not cleanup the original active replica %s.",
+                          orig_active_replica['id'])
+
+        # Unmount the original active replica.
         self._unmount_orig_active_replica(orig_active_replica,
                                           orig_active_vserver)
@@ -71,3 +71,7 @@ class NetAppBaseHelper(object):
     @abc.abstractmethod
     def get_share_name_for_share(self, share):
         """Returns the flexvol name that hosts a share."""
+
+    @abc.abstractmethod
+    def cleanup_demoted_replica(self, share, share_name):
+        """Do some cleanup regarding the former active replica"""
@@ -163,3 +163,13 @@ class NetAppCmodeCIFSHelper(base.NetAppBaseHelper):
             return match.group('host_ip'), match.group('share_name')
         else:
             return '', ''
+
+    @na_utils.trace
+    def cleanup_demoted_replica(self, share, share_name):
+        """Cleans up some things regarding a demoted replica."""
+        # NOTE(carloss): This is necessary due to bug 1879368. If we do not
+        # remove this CIFS share, in case the demoted replica is promoted
+        # back, the promotion will fail due to a duplicated entry for the
+        # share, since a create share request is sent to the backend every
+        # time a promotion occurs.
+        self._client.remove_cifs_share(share_name)
@@ -182,3 +182,7 @@ class NetAppCmodeNFSHelper(base.NetAppBaseHelper):
         else:
             self._client.rename_nfs_export_policy(actual_export_policy,
                                                   expected_export_policy)
+
+    @na_utils.trace
+    def cleanup_demoted_replica(self, share, share_name):
+        return
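Taken together, the hunks above form a small template-method pattern: the library builds a client for the demoted replica's own backend and vserver (which may differ from the one serving the promotion), binds it to the protocol helper, and calls the new cleanup_demoted_replica() hook. The CIFS helper deletes the stale share, while the NFS helper is a no-op, presumably because NFS access is governed by export policies rather than named share entities, so no name collision arises. Below is a condensed, self-contained sketch of that shape; FakeClient, the exception class, and the demo values are stand-ins, not the real manila/NetApp classes.

    import abc
    import logging

    LOG = logging.getLogger(__name__)

    class StorageCommunicationException(Exception):
        """Stand-in for manila's exception of the same name."""

    class BaseHelper(metaclass=abc.ABCMeta):
        def set_client(self, client):
            self._client = client

        @abc.abstractmethod
        def cleanup_demoted_replica(self, share, share_name):
            """Do some cleanup regarding the former active replica."""

    class CifsHelper(BaseHelper):
        def cleanup_demoted_replica(self, share, share_name):
            # CIFS shares are named backend entities; the stale one must go.
            self._client.remove_cifs_share(share_name)

    class NfsHelper(BaseHelper):
        def cleanup_demoted_replica(self, share, share_name):
            # Nothing to clean up for NFS.
            return

    def cleanup_on_promote(helper, client, share, share_name):
        # Mirrors the guarded call in the library hunk: an unreachable
        # demoted backend must not abort the promotion, so communication
        # errors are logged and swallowed.
        helper.set_client(client)
        try:
            helper.cleanup_demoted_replica(share, share_name)
        except StorageCommunicationException:
            LOG.exception('Could not cleanup the original active replica %s.',
                          share['id'])

    class FakeClient:
        def remove_cifs_share(self, share_name):
            print('deleted CIFS share:', share_name)

    cleanup_on_promote(CifsHelper(), FakeClient(),
                       {'id': 'fake-id'}, 'share_fake_id')

The unit-test hunks that follow exercise both paths: the happy path asserts the hook is invoked with the demoted replica, and the error path asserts that a StorageCommunicationException is logged without failing the promotion.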
@@ -3513,9 +3513,10 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
                          '_get_vserver',
                          mock.Mock(return_value=(fake.VSERVER1,
                                                  mock.Mock())))
+        protocol_helper = mock.Mock()
         self.mock_object(self.library,
                          '_get_helper',
-                         mock.Mock(return_value=mock.Mock()))
+                         mock.Mock(return_value=protocol_helper))
         self.mock_object(self.library, '_create_export',
                          mock.Mock(return_value='fake_export_location'))
         self.mock_object(self.library, '_unmount_orig_active_replica')
@@ -3526,6 +3527,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
                          mock.Mock(return_value=mock_dm_session))
         self.mock_object(mock_dm_session, 'get_vserver_from_share',
                          mock.Mock(return_value=fake.VSERVER1))
+        self.mock_object(self.client, 'cleanup_demoted_replica')
 
         replicas = self.library.promote_replica(
             None, [self.fake_replica, self.fake_replica_2],
@@ -3551,6 +3553,44 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
         self.library._unmount_orig_active_replica.assert_called_once_with(
             self.fake_replica, fake.VSERVER1)
         self.library._handle_qos_on_replication_change.assert_called_once()
+        protocol_helper.cleanup_demoted_replica.assert_called_once_with(
+            self.fake_replica, fake.SHARE['name'])
+
+    def test_promote_replica_cleanup_demoted_storage_error(self):
+        self.mock_object(self.library,
+                         '_get_vserver',
+                         mock.Mock(return_value=(fake.VSERVER1,
+                                                 mock.Mock())))
+        protocol_helper = mock.Mock()
+        self.mock_object(self.library,
+                         '_get_helper',
+                         mock.Mock(return_value=protocol_helper))
+        self.mock_object(self.library, '_create_export',
+                         mock.Mock(return_value='fake_export_location'))
+        self.mock_object(self.library, '_unmount_orig_active_replica')
+        self.mock_object(self.library, '_handle_qos_on_replication_change')
+
+        mock_dm_session = mock.Mock()
+        self.mock_object(data_motion, "DataMotionSession",
+                         mock.Mock(return_value=mock_dm_session))
+        self.mock_object(mock_dm_session, 'get_vserver_from_share',
+                         mock.Mock(return_value=fake.VSERVER1))
+        self.mock_object(
+            protocol_helper, 'cleanup_demoted_replica',
+            mock.Mock(side_effect=exception.StorageCommunicationException))
+        mock_log = self.mock_object(lib_base.LOG, 'exception')
+
+        self.library.promote_replica(
+            None, [self.fake_replica, self.fake_replica_2],
+            self.fake_replica_2, [], share_server=None)
+
+        mock_dm_session.change_snapmirror_source.assert_called_once_with(
+            self.fake_replica, self.fake_replica, self.fake_replica_2,
+            mock.ANY
+        )
+        protocol_helper.cleanup_demoted_replica.assert_called_once_with(
+            self.fake_replica, fake.SHARE['name'])
+        mock_log.assert_called_once()
 
     def test_promote_replica_destination_unreachable(self):
         self.mock_object(self.library,
@@ -214,3 +214,9 @@ class NetAppClusteredCIFSHelperTestCase(test.TestCase):
 
         self.assertEqual(ip, result_ip)
         self.assertEqual(share_name, result_share_name)
+
+    def test_cleanup_demoted_replica(self):
+        self.helper.cleanup_demoted_replica(fake.CIFS_SHARE, fake.SHARE_NAME)
+
+        self.mock_client.remove_cifs_share.assert_called_once_with(
+            fake.SHARE_NAME)
@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    Fixed an issue while promoting back share replicas created using CIFS
+    protocol. Please refer to the
+    `Launchpad bug #1879368 <https://bugs.launchpad.net/manila/+bug/1879368>`_
+    for more details.