Use os-brick locking for volume attach and detach

Cinder introduced "shared_targets" and "service_uuid" fields in volumes
to allow volume consumers to protect themselves from unintended leftover
devices when handling iSCSI connections with shared targets.
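
As a rough illustration (a sketch with invented values, not Nova
code), a consumer keys its protection on these two fields:

    # Sketch only: the two Cinder-provided fields and how a consumer
    # might key a lock on them (values invented for the example).
    volume = {
        'id': '3f8c9a42-0d76-4f2b-9c1d-1f1f6b1f0a11',
        'shared_targets': True,   # the iSCSI target is shared
        'service_uuid': 'a4a293de-1f49-4b0a-b7a6-7f9de7a1bfb4',
    }

    if volume.get('shared_targets', False):
        # Serialize attach/detach per cinder-volume service.
        lock_name = volume['service_uuid']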

When "shared_targets" is set to True, Nova avoids the races caused by
automatic rescans on iSCSI volumes (for example, detaching a volume
while Cinder is mapping another volume to the same host) by locking,
allowing only one attach or one detach operation per Cinder service to
run at a time on each host.
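
Condensed, that per-service lock (removed in full by the diff below)
follows this pattern; do_detach is a stand-in for Nova's _do_detach,
and oslo.concurrency is what nova.utils.synchronized wraps:

    # Sketch of the locking pattern this patch removes.
    from oslo_concurrency import lockutils

    def locked_detach(volume, do_detach):
        if volume.get('shared_targets', False):
            # One attach/detach at a time for this Cinder service.
            with lockutils.lock(volume['service_uuid']):
                do_detach()
        else:
            do_detach()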

With an up-to-date Open-iSCSI initiator we don't need to use locks,
since automatic LUN scans (which are the real cause of the leftover
devices) can be disabled, and OS-Brick already supports this feature.
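
For reference, recent Open-iSCSI releases expose this as a per-session
scan setting (a sketch, assuming open-iscsi >= 2.0.874; OS-Brick
requests the behaviour programmatically rather than through this file):

    # /etc/iscsi/iscsid.conf -- disable automatic LUN scans so only
    # explicitly requested LUNs are discovered.
    node.session.scan = manual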

Currently Nova blindly locks whenever "shared_targets" is set to True,
even when the iSCSI initiator and OS-Brick are already preventing such
races, which introduces unnecessary locking and needlessly serializes
volume connections.

This patch uses the new context manager introduced in OS-Brick so that
Nova is abstracted from these storage internals and only locks when it
is really necessary.
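
Reduced to its essentials, the resulting call pattern (the diff below
shows the real Nova code) looks like this; do_work stands in for Nova's
_do_attach/_do_detach:

    # Sketch of the new pattern: guard_connection() decides internally
    # whether any lock is needed (e.g. none when automatic LUN scans
    # can be disabled) and otherwise serializes on the volume's service.
    from os_brick.initiator import utils as brick_utils

    def guarded_op(volume, do_work):
        with brick_utils.guard_connection(volume):
            do_work()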

Depends-On: I4970363301d5d1f4e7d0f07e09b34d15ee6884c3
Closes-Bug: #1800515
Change-Id: Ie9106d5832d6a728ea97a8dbb5ddb5dcc17a2ec4
Gorka Eguileor, 2018-10-30 11:51:03 +01:00 (committed by Lee Yarwood)
parent db5caf2ff8
commit 1b597f759f
4 changed files with 15 additions and 34 deletions

@@ -64,7 +64,7 @@ netifaces==0.10.4
 networkx==1.11
 numpy==1.14.2
 openstacksdk==0.35.0
-os-brick==2.6.1
+os-brick==2.6.2
 os-client-config==1.29.0
 os-resource-classes==0.4.0
 os-service-types==1.7.0

@@ -499,17 +499,16 @@ class TestDriverBlockDevice(test.NoDBTestCase):
             mock.patch.object(driver_bdm, '_get_volume', return_value=volume),
             mock.patch.object(self.virt_driver, 'get_volume_connector',
                               return_value=connector),
-            mock.patch('nova.utils.synchronized',
-                       side_effect=lambda a: lambda f: lambda *args: f(*args)),
+            mock.patch('os_brick.initiator.utils.guard_connection'),
             mock.patch.object(self.volume_api, 'attachment_delete'),
-        ) as (mock_get_volume, mock_get_connector, mock_sync, vapi_attach_del):
+        ) as (mock_get_volume, mock_get_connector, mock_guard,
+              vapi_attach_del):
             driver_bdm.detach(elevated_context, instance,
                               self.volume_api, self.virt_driver,
                               attachment_id=attachment_id)
-            if include_shared_targets:
-                mock_sync.assert_called_once_with((uuids.service_uuid))
+            mock_guard.assert_called_once_with(volume)
             vapi_attach_del.assert_called_once_with(elevated_context,
                                                     attachment_id)

@@ -16,6 +16,7 @@ import functools
 import itertools
 
 from os_brick import encryptors
+from os_brick.initiator import utils as brick_utils
 from oslo_log import log as logging
 from oslo_serialization import jsonutils
 from oslo_utils import excutils
@@ -24,7 +25,6 @@ from oslo_utils import excutils
 
 from nova import block_device
 import nova.conf
 from nova import exception
-from nova import utils
 
 CONF = nova.conf.CONF
@@ -430,19 +430,10 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
     def detach(self, context, instance, volume_api, virt_driver,
                attachment_id=None, destroy_bdm=False):
         volume = self._get_volume(context, volume_api, self.volume_id)
-        # Check to see if we need to lock based on the shared_targets value.
-        # Default to False if the volume does not expose that value to maintain
-        # legacy behavior.
-        if volume.get('shared_targets', False):
-            # Lock the detach call using the provided service_uuid.
-            @utils.synchronized(volume['service_uuid'])
-            def _do_locked_detach(*args, **_kwargs):
-                self._do_detach(*args, **_kwargs)
-
-            _do_locked_detach(context, instance, volume_api, virt_driver,
-                              attachment_id, destroy_bdm)
-        else:
-            # We don't need to (or don't know if we need to) lock.
+        # Let OS-Brick handle high level locking that covers the local
+        # os-brick detach and the Cinder call to unmap the volume. Not all
+        # volume backends or hosts require locking.
+        with brick_utils.guard_connection(volume):
             self._do_detach(context, instance, volume_api, virt_driver,
                             attachment_id, destroy_bdm)
@@ -635,19 +626,10 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
         volume = self._get_volume(context, volume_api, self.volume_id)
         volume_api.check_availability_zone(context, volume,
                                            instance=instance)
-        # Check to see if we need to lock based on the shared_targets value.
-        # Default to False if the volume does not expose that value to maintain
-        # legacy behavior.
-        if volume.get('shared_targets', False):
-            # Lock the attach call using the provided service_uuid.
-            @utils.synchronized(volume['service_uuid'])
-            def _do_locked_attach(*args, **_kwargs):
-                self._do_attach(*args, **_kwargs)
-
-            _do_locked_attach(context, instance, volume, volume_api,
-                              virt_driver, do_driver_attach)
-        else:
-            # We don't need to (or don't know if we need to) lock.
+        # Let OS-Brick handle high level locking that covers the call to
+        # Cinder that exports & maps the volume, and for the local os-brick
+        # attach. Not all volume backends or hosts require locking.
+        with brick_utils.guard_connection(volume):
             self._do_attach(context, instance, volume, volume_api,
                             virt_driver, do_driver_attach)

@@ -54,7 +54,7 @@ rfc3986>=1.1.0 # Apache-2.0
 oslo.middleware>=3.31.0 # Apache-2.0
 psutil>=3.2.2 # BSD
 oslo.versionedobjects>=1.35.0 # Apache-2.0
-os-brick>=2.6.1 # Apache-2.0
+os-brick>=2.6.2 # Apache-2.0
 os-resource-classes>=0.4.0 # Apache-2.0
 os-traits>=0.16.0 # Apache-2.0
 os-vif>=1.14.0 # Apache-2.0