Merge "NetApp - Fixed detach issue for multi-attached volume"

This commit is contained in:
Zuul
2025-08-08 16:03:06 +00:00
committed by Gerrit Code Review
8 changed files with 235 additions and 13 deletions

View File

@@ -768,6 +768,21 @@ test_volume.qos = {'qos_policy_group': None}
test_volume.host = 'fakehost@backbackend#fakepool'
test_volume.name = 'fakename'
test_volume.size = SIZE
test_volume.multiattach = False
# Minimal fake NVMe namespace volume used by the NVMe library unit tests.
# The class defines __getitem__ so tests and driver code can read fields
# either as attributes (volume.name) or mapping-style (volume['name']),
# matching how the NetApp libraries access cinder volume objects.
class test_namespace_volume(object):
    def __getitem__(self, key):
        # Delegate item lookup to attribute lookup: volume['x'] == volume.x.
        return getattr(self, key)


# Rebind the name to a singleton instance; tests copy.deepcopy() this
# fixture before mutating it so cases stay independent.
test_namespace_volume = test_namespace_volume()
test_namespace_volume.name = NAMESPACE_NAME
test_namespace_volume.size = SIZE
test_namespace_volume.id = VOLUME_ID
test_namespace_volume.host = HOST_STRING
# Starts detached; individual tests flip attachment state as needed.
test_namespace_volume.attach_status = DETACHED
class test_snapshot(object):

View File

@@ -19,7 +19,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""Mock unit tests for the NetApp block storage library"""
from concurrent.futures import ThreadPoolExecutor
import copy
import itertools
from unittest import mock
@@ -544,8 +544,8 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH}
mock_unmap_lun.return_value = None
mock_has_luns_mapped_to_initiators.return_value = True
target_info = self.library.terminate_connection_fc(fake.FC_VOLUME,
volume = copy.deepcopy(fake.test_volume)
target_info = self.library.terminate_connection_fc(volume,
fake.FC_CONNECTOR)
self.assertDictEqual(target_info, fake.FC_TARGET_INFO_EMPTY)
@@ -570,12 +570,91 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
mock_has_luns_mapped_to_initiators.return_value = False
mock_build_initiator_target_map.return_value = (fake.FC_TARGET_WWPNS,
fake.FC_I_T_MAP, 4)
target_info = self.library.terminate_connection_fc(fake.FC_VOLUME,
volume = copy.deepcopy(fake.test_volume)
target_info = self.library.terminate_connection_fc(volume,
fake.FC_CONNECTOR)
self.assertDictEqual(target_info, fake.FC_TARGET_INFO_UNMAP)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
                   '_has_luns_mapped_to_initiators')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
                   '_unmap_lun')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
                   '_get_lun_attr')
def test_terminate_connection_fc_multiattach(
        self,
        mock_get_lun_attr,
        mock_unmap_lun,
        mock_has_luns_mapped_to_initiators):
    """terminate_connection_fc on a volume with two active attachments.

    Expects the LUN to be unmapped exactly once for this connector.

    NOTE(review): na_utils.is_multiattach_to_host is NOT mocked here, so
    the early-return path in terminate_connection_fc depends on whether
    fake.FC_CONNECTOR['host'] equals fake.HOST_NAME -- if they match,
    _unmap_lun would never be called and this assertion would fail.
    Confirm the fake constants.
    """
    volume = copy.deepcopy(fake.test_volume)
    volume.multiattach = True
    # Two ATTACHED records on the same (fake) host.
    volume.volume_attachment = [
        {'attach_status': fake.ATTACHED, 'attached_host': fake.HOST_NAME},
        {'attach_status': fake.ATTACHED, 'attached_host': fake.HOST_NAME},
    ]
    mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH}
    mock_unmap_lun.return_value = None
    mock_has_luns_mapped_to_initiators.return_value = True

    self.library.terminate_connection_fc(volume, fake.FC_CONNECTOR)

    mock_unmap_lun.assert_called_once_with(fake.LUN_PATH,
                                           fake.FC_FORMATTED_INITIATORS)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
                   '_has_luns_mapped_to_initiators')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
                   '_unmap_lun')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
                   '_get_lun_attr')
def test_terminate_connection_fc_last_attachment(
        self,
        mock_get_lun_attr,
        mock_unmap_lun,
        mock_has_luns_mapped_to_initiators):
    """Detaching the last remaining attachment must unmap the LUN.

    With only one ATTACHED record, is_multiattach_to_host cannot see a
    second attachment on the same host, so terminate_connection_fc must
    proceed past its multi-attach early-return and call _unmap_lun.
    """
    volume = copy.deepcopy(fake.test_volume)
    volume.multiattach = True
    # Exactly one active attachment: the one being torn down.
    volume.volume_attachment = [
        {'attach_status': fake.ATTACHED, 'attached_host': fake.HOST_NAME},
    ]
    mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH}
    mock_unmap_lun.return_value = None
    mock_has_luns_mapped_to_initiators.return_value = True

    self.library.terminate_connection_fc(volume, fake.FC_CONNECTOR)

    mock_unmap_lun.assert_called_once_with(fake.LUN_PATH,
                                           fake.FC_FORMATTED_INITIATORS)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
                   '_has_luns_mapped_to_initiators')
@mock.patch.object(block_base.NetAppBlockStorageLibrary, '_unmap_lun')
@mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr')
def test_terminate_connection_fc_multiattach_cleanup(
        self, mock_get_lun_attr, mock_unmap_lun,
        mock_has_luns_mapped_to_initiators):
    """Two concurrent terminate_connection_fc calls each unmap the LUN.

    NOTE(review): terminate_connection_fc is wrapped with
    coordination.synchronized keyed on volume.id (per the library diff),
    so the two workers are expected to serialize; and as in the
    multiattach test above, the expected call count assumes
    is_multiattach_to_host returns False for fake.FC_CONNECTOR --
    confirm the fake connector's host value.
    """
    volume = copy.deepcopy(fake.test_volume)
    volume.multiattach = True
    volume.volume_attachment = [
        {'attach_status': fake.ATTACHED, 'attached_host': fake.HOST_NAME},
        {'attach_status': fake.ATTACHED, 'attached_host': fake.HOST_NAME},
    ]
    connector = fake.FC_CONNECTOR
    mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH}
    mock_unmap_lun.return_value = None
    mock_has_luns_mapped_to_initiators.return_value = True

    def terminate_connection(*args, **kwargs):
        # Both workers detach the same volume/connector pair; the
        # dummy argument from executor.map is ignored.
        self.library.terminate_connection_fc(volume, connector)

    # Run the termination operation in parallel using ThreadPoolExecutor
    with ThreadPoolExecutor(max_workers=2) as executor:
        # list() drains the map iterator so worker exceptions propagate.
        list(executor.map(terminate_connection, range(2)))

    # Ensure that the LUN maps are cleaned up correctly for both
    # parallel operations
    self.assertEqual(mock_unmap_lun.call_count, 2)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_fc_target_wwpns')
def test_build_initiator_target_map_no_lookup_service(

View File

@@ -12,9 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
"""Mock unit tests for the NetApp block storage library"""
from concurrent.futures import ThreadPoolExecutor
import copy
from unittest import mock
from unittest.mock import patch
import uuid
import ddt
@@ -921,10 +922,45 @@ class NetAppNVMeStorageLibraryTestCase(test.TestCase):
self.mock_object(self.library, '_get_namespace_attr',
return_value=fake.NAMESPACE_METADATA)
self.mock_object(self.library, '_unmap_namespace')
self.library.terminate_connection(fake.NAMESPACE_VOLUME, connector)
self.mock_object(na_utils, 'is_multiattach_to_host',
return_value=False)
namespace_volume = copy.deepcopy(fake.test_namespace_volume)
self.library.terminate_connection(namespace_volume, connector)
self.library._get_namespace_attr.assert_called_once_with(
fake.NAMESPACE_NAME, 'metadata')
namespace_volume.name, 'metadata')
host = connector['nqn'] if connector else None
self.library._unmap_namespace(fake.PATH_NAMESPACE, host)
if connector:
na_utils.is_multiattach_to_host.assert_called_once_with(
namespace_volume, connector)
@mock.patch.object(na_utils, 'is_multiattach_to_host',
                   return_value=False)
def test_terminate_connection_parallel(self,
                                       mock_is_multiattach_to_host):
    """Run NVMe terminate_connection concurrently for two connectors.

    NOTE(review): executor.map() is lazy about results -- the iterator
    it returns is never drained here, so any AssertionError raised
    inside execute_terminate_connection is silently discarded. Wrap the
    call in list(...) to actually surface failures.
    """
    def execute_terminate_connection(connector):
        # NOTE(review): 'self.library.LOG' is not a valid patch target
        # ('self' is not an importable module), so patch(...).start()
        # is expected to raise ModuleNotFoundError at runtime. Also,
        # .start() returns the mock, not the patcher, so the
        # mock_log.stop() call below is a no-op on a MagicMock and the
        # patch (were it valid) would never be undone. Verify intent.
        mock_log = patch('self.library.LOG').start()
        self.library.terminate_connection(fake.NAMESPACE_VOLUME,
                                          connector)
        # NOTE(review): these assert_called_once_with checks count
        # calls across BOTH parallel workers on shared mocks -- with
        # two connectors the totals reach 2, so the once-assertions
        # presumably depend on timing; confirm against the fixtures
        # set up elsewhere in this class.
        self.library._get_namespace_attr.assert_called_once_with(
            fake.NAMESPACE_NAME,
            'metadata')
        host = connector['nqn'] if connector else None
        self.library._unmap_namespace.assert_called_once_with(
            fake.PATH_NAMESPACE, host)
        if connector:
            mock_is_multiattach_to_host.assert_called_once_with(
                fake.NAMESPACE_VOLUME, connector)
        else:
            mock_log.debug.assert_called_with('Unmapping namespace '
                                              '%(name)s from all hosts.',
                                              {'name': fake.
                                               NAMESPACE_VOLUME['name']})
        mock_log.stop()

    # One detach with no connector (unmap from all hosts) and one
    # NVMe connector identified by its NQN.
    connector_list = [None, {'nqn': fake.HOST_NQN}]
    with ThreadPoolExecutor(max_workers=2) as executor:
        executor.map(execute_terminate_connection, connector_list)

View File

@@ -30,6 +30,7 @@ from cinder import exception
from cinder.tests.unit import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
fakes as zapi_fakes)
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes
import cinder.tests.unit.volume.drivers.netapp.fakes as fake
from cinder import version
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
@@ -40,7 +41,6 @@ from cinder.volume import volume_types
@ddt.ddt
class NetAppDriverUtilsTestCase(test.TestCase):
@mock.patch.object(na_utils, 'LOG', mock.Mock())
def test_validate_instantiation_proxy(self):
kwargs = {'netapp_mode': 'proxy'}
@@ -840,6 +840,56 @@ class NetAppDriverUtilsTestCase(test.TestCase):
self.assertEqual('QOS_MIN_BLOCK_',
na_utils.qos_min_feature_name(False, None))
def test__is_multiattach_to_host_no_attachments(self):
    """A multiattach volume with an empty attachment list is False."""
    volume = copy.deepcopy(fakes.test_volume)
    volume.multiattach = True
    volume.volume_attachment = []

    connector = {'host': fakes.HOST_NAME}
    self.assertFalse(na_utils.is_multiattach_to_host(volume, connector))
def test__is_multiattach_to_host_multiattach_disabled(self):
    """With multiattach disabled the helper is False for any connector."""
    volume = copy.deepcopy(fakes.test_volume)

    connector = {'host': fakes.HOST_NAME}
    self.assertFalse(na_utils.is_multiattach_to_host(volume, connector))
def test__is_multiattach_to_host_single_attachment(self):
    """A single active attachment does not count as multi-attach."""
    volume = copy.deepcopy(fakes.test_volume)
    volume.multiattach = True
    attachment = {'attach_status': fakes.ATTACHED,
                  'attached_host': fakes.HOST_NAME}
    volume.volume_attachment = [attachment]

    self.assertFalse(
        na_utils.is_multiattach_to_host(volume, fakes.FC_CONNECTOR))
def test__is_multiattach_to_host_on_same_host(self):
    """Two ATTACHED records on the connector's host -> True."""
    volume = copy.deepcopy(fakes.test_volume)
    volume.multiattach = True
    attachment = {'attach_status': fakes.ATTACHED,
                  'attached_host': fakes.HOST_NAME}
    # Two independent dicts describing attachments on the same host.
    volume.volume_attachment = [dict(attachment), dict(attachment)]

    connector = {'host': fakes.HOST_NAME}
    self.assertTrue(na_utils.is_multiattach_to_host(volume, connector))
def test__is_multiattach_to_host_on_different_host(self):
    """Attachments spread across different hosts -> False."""
    volume = copy.deepcopy(fakes.test_volume)
    volume.multiattach = True
    volume.volume_attachment = [
        {'attach_status': fakes.ATTACHED, 'attached_host': host}
        for host in ("fake_host1", "fake_host2")
    ]

    connector = {'host': "fake_host1"}
    self.assertFalse(na_utils.is_multiattach_to_host(volume, connector))
class OpenStackInfoTestCase(test.TestCase):

View File

@@ -34,6 +34,7 @@ from oslo_log import versionutils
from oslo_utils import excutils
from oslo_utils import units
from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
@@ -1089,6 +1090,7 @@ class NetAppBlockStorageLibrary(
return target_info
@coordination.synchronized('netapp-terminate-fc-connection-{volume.id}')
def terminate_connection_fc(self, volume, connector, **kwargs):
"""Disallow connection from connector.
@@ -1102,6 +1104,11 @@ class NetAppBlockStorageLibrary(
an empty dict for the 'data' key
"""
if connector and na_utils.is_multiattach_to_host(
volume,
connector
):
return
name = volume['name']
if connector is None:
initiators = []
@@ -1122,16 +1129,17 @@ class NetAppBlockStorageLibrary(
info = {'driver_volume_type': 'fibre_channel',
'data': {}}
if connector and not self._has_luns_mapped_to_initiators(initiators):
if (connector and
not self._has_luns_mapped_to_initiators(initiators)):
# No more exports for this host, so tear down zone.
LOG.info("Need to remove FC Zone, building initiator target map")
LOG.info("Need to remove FC Zone, "
"building initiator target map")
target_wwpns, initiator_target_map, num_paths = (
self._build_initiator_target_map(connector))
info['data'] = {'target_wwn': target_wwpns,
'initiator_target_map': initiator_target_map}
return info
def _build_initiator_target_map(self, connector):

View File

@@ -21,6 +21,7 @@ from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
@@ -749,6 +750,7 @@ class NetAppNVMeStorageLibrary(
for _path, _subsystem in namespace_unmap_list:
self.client.unmap_namespace(_path, _subsystem)
@coordination.synchronized('netapp-terminate-nvme-connection-{volume.id}')
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to unattach a volume from an instance.
@@ -756,6 +758,11 @@ class NetAppNVMeStorageLibrary(
no longer access it.
"""
if connector and na_utils.is_multiattach_to_host(
volume,
connector
):
return
name = volume['name']
host_nqn = None
if connector is None:

View File

@@ -34,6 +34,7 @@ from oslo_utils import netutils
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder.objects import fields
from cinder import utils
from cinder import version
from cinder.volume import qos_specs
@@ -561,6 +562,24 @@ def qos_min_feature_name(is_nfs, node_name):
return 'QOS_MIN_BLOCK_' + node_name
def is_multiattach_to_host(volume, connector):
    """Return True if *volume* must stay mapped to the connector's host.

    With multi-attach enabled, a single volume can be attached to multiple
    instances. If multiple instances are running on the same nova host, the
    volume should remain attached to the nova host until it is detached
    from the last instance on that host. That is the case exactly when
    more than one ATTACHED record points at the connector's host.
    """
    if not volume.multiattach or not volume.volume_attachment:
        return False

    # Collect the active attachments that live on the connector's host.
    host = connector.get('host')
    attached = fields.VolumeAttachStatus.ATTACHED
    attachment = []
    for attach_info in volume.volume_attachment:
        if (attach_info['attach_status'] == attached and
                attach_info['attached_host'] == host):
            attachment.append(attach_info)
    LOG.debug('is_multiattach_to_host: attachment %s.', attachment)
    return len(attachment) > 1
class hashabledict(dict):
"""A hashable dictionary that is comparable (i.e. in unit tests, etc.)"""
def __hash__(self):

View File

@@ -0,0 +1,8 @@
---
fixes:
- |
Volumes with multi-attach type can be connected to multiple instances.
Additional logic has been implemented for FCP/NVMe protocols to handle
the removal of cinder volumes from multiple instances. For more details,
please check
`Launchpad bug #2110274 <https://bugs.launchpad.net/cinder/+bug/2110274>`_.