lvm: Only use initiators when comparing connector dicts

Ib5aa1b7578f7d3200185566ff5f8634dd519d020 previously fixed the premature
termination of multi attached iSCSI/LVM volumes by comparing the
provided connector dict against that stored with any remaining active
attachments, only allowing the termination to proceed if a single active
attachment remained using the provided connector.

However this check fails if two instances on the same host are connected
to the same multi attach volume using different mountpoints as these are
now stored within the connector. This behaviour was introduced during
Queens by If3afe8d8bd6b8c327ccc7d1140053bccaf7e1ad7 and
I11ba269c3f7a2e7707b2b7e27d26eb7a2c948a82 to workaround differences
between the cinderv2 and cinderv3 APIs.

This change corrects this by only comparing the initiator key of the
connectors ensuring terminate connection is only called when a single
attachment remains on a given host using a specific initiator.

Closes-Bug: #1825957
Change-Id: Icabc4a67d3f5462fe24e4027e84e56a001e1b2b8
(cherry picked from commit c1388ce1c0)
This commit is contained in:
Lee Yarwood 2019-04-18 20:17:54 +01:00 committed by Eric Harney
parent e85c70cc3b
commit 4910ca4d73
2 changed files with 54 additions and 1 deletions

View File

@ -1043,3 +1043,55 @@ class LVMISCSITestCase(test_driver.BaseDriverTestCase):
host2_connector))
mock_term_conn.assert_has_calls([mock.call(vol, host1_connector),
mock.call(vol, host2_connector)])
def test_multiattach_terminate_connection_with_differing_mountpoints(self):
# Ensure that target_driver.terminate_connection is only called when a
# single active volume attachment remains per host for each volume
# regardless of the volumes being attached under differing mountpoints.
# As seen reported in bug #1825957.
connector_mountpoint_1 = {'ip': '10.0.0.2',
'host': 'fakehost1',
'mountpoint': '/dev/vdb',
'initiator': 'iqn.2012-07.org.fake:01'}
connector_mountpoint_2 = {'ip': '10.0.0.2',
'host': 'fakehost1',
'mountpoint': '/dev/vdc',
'initiator': 'iqn.2012-07.org.fake:01'}
attachment1 = fake_volume.volume_attachment_ovo(self.context)
attachment1.connector = connector_mountpoint_1
attachment2 = fake_volume.volume_attachment_ovo(self.context)
attachment2.connector = connector_mountpoint_2
# Create a multiattach volume object with two active attachments on
# the same host using different mountpoints within the connectors
vol = fake_volume.fake_volume_obj(self.context)
vol.multiattach = True
vol.volume_attachment.objects.append(attachment1)
vol.volume_attachment.objects.append(attachment2)
self.configuration = conf.Configuration(None)
vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', False, None,
'default')
lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
db=db, vg_obj=vg_obj)
with mock.patch.object(lvm_driver.target_driver,
'terminate_connection') as mock_term_conn:
# Verify that terminate_connection is not called when there are
# multiple active attachments against the same host even when their
# mountpoints do not match.
self.assertTrue(lvm_driver.terminate_connection(
vol, connector_mountpoint_1))
mock_term_conn.assert_not_called()
# Verify that terminate_connection is called against either host
# when only one active attachment per host is present.
vol.volume_attachment.objects.remove(attachment1)
self.assertFalse(lvm_driver.terminate_connection(
vol, connector_mountpoint_2))
mock_term_conn.assert_called_with(vol, connector_mountpoint_2)

View File

@ -854,7 +854,8 @@ class LVMVolumeDriver(driver.VolumeDriver):
# attachments on the same host are still accessing the volume.
attachments = volume.volume_attachment
if volume.multiattach:
if sum(1 for a in attachments if a.connector == connector) > 1:
if sum(1 for a in attachments if a.connector and
a.connector['initiator'] == connector['initiator']) > 1:
return True
self.target_driver.terminate_connection(volume, connector, **kwargs)