Merge "Use volume shared_targets to lock during attach/detach"

Zuul 2018-01-09 21:33:41 +00:00 committed by Gerrit Code Review
commit 90a92d33ed
4 changed files with 111 additions and 22 deletions
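This merge brings in the change that serializes volume attach and detach when Cinder reports a volume as having shared targets. Starting with Cinder API microversion 3.48 a volume exposes shared_targets and service_uuid; when shared_targets is True, Nova wraps the attach/detach work in a lock named after the backend's service_uuid so concurrent operations against the same backend cannot race. A minimal sketch of the pattern, with an illustrative volume dict and do_work callable that are not part of the change:

    from nova import utils

    def run_with_optional_lock(volume, do_work):
        # Volumes fetched at microversion >= 3.48 carry shared_targets and
        # service_uuid; older volumes do not, so default to the legacy,
        # unlocked path.
        if volume.get('shared_targets', False):
            # Serialize on the backend's service_uuid, as the new attach()
            # and detach() methods do in the last file of this diff.
            @utils.synchronized(volume['service_uuid'])
            def _locked():
                return do_work()
            return _locked()
        return do_work()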

View File

@@ -1325,7 +1325,7 @@ class CinderFixture(fixtures.Fixture):
def setUp(self):
super(CinderFixture, self).setUp()
-def fake_get(self_api, context, volume_id):
+def fake_get(self_api, context, volume_id, microversion=None):
# Check for the special swap volumes.
if volume_id in (CinderFixture.SWAP_OLD_VOL,
CinderFixture.SWAP_ERR_OLD_VOL):
@@ -1499,7 +1499,7 @@ class CinderFixtureNewAttachFlow(fixtures.Fixture):
def setUp(self):
super(CinderFixtureNewAttachFlow, self).setUp()
-def fake_get(self_api, context, volume_id):
+def fake_get(self_api, context, volume_id, microversion=None):
# Check for the special swap volumes.
if volume_id in (CinderFixture.SWAP_OLD_VOL,
CinderFixture.SWAP_ERR_OLD_VOL):

View File

@@ -4834,7 +4834,7 @@ class ComputeTestCase(BaseTestCase,
bdm.create()
# stub out volume attach
-def fake_volume_get(self, context, volume_id):
+def fake_volume_get(self, context, volume_id, microversion=None):
return volume
self.stub_out('nova.volume.cinder.API.get', fake_volume_get)
@@ -10575,7 +10575,8 @@ class ComputeAPITestCase(BaseTestCase):
@mock.patch.object(objects.BlockDeviceMapping,
'get_by_volume_and_instance')
-def test_detach_volume_libvirt_is_down(self, mock_get):
+@mock.patch.object(cinder.API, 'get', return_value={'id': uuids.volume_id})
+def test_detach_volume_libvirt_is_down(self, mock_get_vol, mock_get):
# Ensure rollback during detach if libvirt goes down
called = {}
instance = self._create_fake_instance_obj()
@@ -10624,7 +10625,7 @@ class ComputeAPITestCase(BaseTestCase):
# Stub out fake_volume_get so cinder api does not raise exception
# and manager gets to call bdm.destroy()
-def fake_volume_get(self, context, volume_id):
+def fake_volume_get(self, context, volume_id, microversion=None):
return {'id': volume_id}
self.stub_out('nova.volume.cinder.API.get', fake_volume_get)
@@ -12500,7 +12501,7 @@ class EvacuateHostTestCase(BaseTestCase):
db.block_device_mapping_create(self.context, values)
-def fake_volume_get(self, context, volume):
+def fake_volume_get(self, context, volume, microversion=None):
return {'id': 'fake_volume_id'}
self.stub_out("nova.volume.cinder.API.get", fake_volume_get)

View File

@@ -395,7 +395,7 @@ class TestDriverBlockDevice(test.NoDBTestCase):
def test_call_wait_no_delete_volume(self):
self._test_call_wait_func(False)
-def test_volume_delete_attachment(self):
+def test_volume_delete_attachment(self, include_shared_targets=False):
attachment_id = uuids.attachment
driver_bdm = self.driver_classes['volume'](self.volume_bdm)
driver_bdm['attachment_id'] = attachment_id
@@ -406,25 +406,40 @@ class TestDriverBlockDevice(test.NoDBTestCase):
instance = fake_instance.fake_instance_obj(self.context,
**instance_detail)
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
+volume = {'id': driver_bdm.volume_id,
+'attach_status': 'attached',
+'status': 'in-use'}
+if include_shared_targets:
+volume['shared_targets'] = True
+volume['service_uuid'] = uuids.service_uuid
with test.nested(
+mock.patch.object(driver_bdm, '_get_volume', return_value=volume),
mock.patch.object(self.virt_driver, 'get_volume_connector',
return_value=connector),
+mock.patch('nova.utils.synchronized',
+side_effect=lambda a: lambda f: lambda *args: f(*args)),
mock.patch.object(self.volume_api, 'attachment_delete'),
-) as (_, vapi_attach_del):
+) as (mock_get_volume, mock_get_connector, mock_sync, vapi_attach_del):
driver_bdm.detach(elevated_context, instance,
self.volume_api, self.virt_driver,
attachment_id=attachment_id)
+if include_shared_targets:
+mock_sync.assert_called_once_with((uuids.service_uuid))
vapi_attach_del.assert_called_once_with(elevated_context,
attachment_id)
+def test_volume_delete_attachment_with_shared_targets(self):
+self.test_volume_delete_attachment(include_shared_targets=True)
def _test_volume_attach(self, driver_bdm, bdm_dict,
fake_volume, fail_check_av_zone=False,
driver_attach=False, fail_driver_attach=False,
volume_attach=True, fail_volume_attach=False,
-access_mode='rw', availability_zone=None):
+access_mode='rw', availability_zone=None,
+include_shared_targets=False):
elevated_context = self.context.elevated()
self.stubs.Set(self.context, 'elevated',
lambda: elevated_context)
@@ -440,8 +455,21 @@ class TestDriverBlockDevice(test.NoDBTestCase):
'serial': fake_volume['id']}
enc_data = {'fake': 'enc_data'}
-self.volume_api.get(self.context,
-fake_volume['id']).AndReturn(fake_volume)
+if include_shared_targets:
+fake_volume['shared_targets'] = True
+fake_volume['service_uuid'] = uuids.service_uuid
+self.volume_api.get(
+self.context, fake_volume['id'],
+microversion='3.48').AndReturn(fake_volume)
+else:
+# First call to get() fails because the API isn't new enough.
+self.volume_api.get(
+self.context, fake_volume['id'], microversion='3.48').AndRaise(
+exception.CinderAPIVersionNotAvailable(version='3.48'))
+# So we fall back to the old call.
+self.volume_api.get(self.context,
+fake_volume['id']).AndReturn(fake_volume)
if not fail_check_av_zone:
self.volume_api.check_availability_zone(self.context,
fake_volume,
@@ -533,14 +561,15 @@ class TestDriverBlockDevice(test.NoDBTestCase):
driver_bdm._bdm_obj.save().AndReturn(None)
return instance, expected_conn_info
-def test_volume_attach(self):
+def test_volume_attach(self, include_shared_targets=False):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
-test_bdm, self.volume_bdm, volume)
+test_bdm, self.volume_bdm, volume,
+include_shared_targets=include_shared_targets)
self.mox.ReplayAll()
@@ -549,6 +578,9 @@ class TestDriverBlockDevice(test.NoDBTestCase):
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
+def test_volume_attach_with_shared_targets(self):
+self.test_volume_attach(include_shared_targets=True)
def test_volume_attach_ro(self):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
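The delete-attachment test above patches nova.utils.synchronized with a pass-through side_effect, lambda a: lambda f: lambda *args: f(*args), so the decorated function runs immediately while the mock still records the lock name it was called with. A standalone illustration of that stand-in; the names and values are only for demonstration:

    # Same shape as the side_effect used in the test: given a lock name it
    # returns a decorator that simply invokes the wrapped function, so no
    # real lock is taken.
    fake_synchronized = lambda name: lambda func: lambda *args: func(*args)

    @fake_synchronized('service-uuid-1234')
    def _do_locked_detach(volume_id):
        return 'detached %s' % volume_id

    print(_do_locked_detach('vol-1'))  # prints: detached vol-1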

View File

@@ -24,6 +24,7 @@ from oslo_utils import excutils
from nova import block_device
import nova.conf
from nova import exception
+from nova import utils
CONF = nova.conf.CONF
@@ -311,9 +312,23 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
instance=instance)
volume_api.roll_detaching(context, volume_id)
-def detach(self, context, instance, volume_api, virt_driver,
-attachment_id=None, destroy_bdm=False):
+@staticmethod
+def _get_volume(context, volume_api, volume_id):
+# First try to get the volume at microversion 3.48 so we can get the
+# shared_targets parameter exposed in that version. If that API version
+# is not available, we just fall back.
+try:
+return volume_api.get(context, volume_id, microversion='3.48')
+except exception.CinderAPIVersionNotAvailable:
+return volume_api.get(context, volume_id)
+def _do_detach(self, context, instance, volume_api, virt_driver,
+attachment_id=None, destroy_bdm=False):
+"""Private method that actually does the detach.
+This is separate from the detach() method so the caller can optionally
+lock this call.
+"""
volume_id = self.volume_id
# Only attempt to detach and disconnect from the volume if the instance
@@ -380,6 +395,25 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
else:
volume_api.attachment_delete(context, self['attachment_id'])
+def detach(self, context, instance, volume_api, virt_driver,
+attachment_id=None, destroy_bdm=False):
+volume = self._get_volume(context, volume_api, self.volume_id)
+# Check to see if we need to lock based on the shared_targets value.
+# Default to False if the volume does not expose that value to maintain
+# legacy behavior.
+if volume.get('shared_targets', False):
+# Lock the detach call using the provided service_uuid.
+@utils.synchronized(volume['service_uuid'])
+def _do_locked_detach(*args, **_kwargs):
+self._do_detach(*args, **_kwargs)
+_do_locked_detach(context, instance, volume_api, virt_driver,
+attachment_id, destroy_bdm)
+else:
+# We don't need to (or don't know if we need to) lock.
+self._do_detach(context, instance, volume_api, virt_driver,
+attachment_id, destroy_bdm)
def _legacy_volume_attach(self, context, volume, connector, instance,
volume_api, virt_driver,
do_driver_attach=False):
@@ -522,15 +556,15 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
# Delete the attachment to mark the volume as "available".
volume_api.attachment_delete(context, self['attachment_id'])
-@update_db
-def attach(self, context, instance, volume_api, virt_driver,
-do_driver_attach=False, **kwargs):
-volume = volume_api.get(context, self.volume_id)
-volume_api.check_availability_zone(context, volume,
-instance=instance)
+def _do_attach(self, context, instance, volume, volume_api, virt_driver,
+do_driver_attach):
+"""Private method that actually does the attach.
+This is separate from the attach() method so the caller can optionally
+lock this call.
+"""
context = context.elevated()
connector = virt_driver.get_volume_connector(instance)
if not self['attachment_id']:
self._legacy_volume_attach(context, volume, connector, instance,
volume_api, virt_driver,
@@ -541,6 +575,28 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
self['attachment_id'],
do_driver_attach)
+@update_db
+def attach(self, context, instance, volume_api, virt_driver,
+do_driver_attach=False, **kwargs):
+volume = self._get_volume(context, volume_api, self.volume_id)
+volume_api.check_availability_zone(context, volume,
+instance=instance)
+# Check to see if we need to lock based on the shared_targets value.
+# Default to False if the volume does not expose that value to maintain
+# legacy behavior.
+if volume.get('shared_targets', False):
+# Lock the attach call using the provided service_uuid.
+@utils.synchronized(volume['service_uuid'])
+def _do_locked_attach(*args, **_kwargs):
+self._do_attach(*args, **_kwargs)
+_do_locked_attach(context, instance, volume, volume_api,
+virt_driver, do_driver_attach)
+else:
+# We don't need to (or don't know if we need to) lock.
+self._do_attach(context, instance, volume, volume_api,
+virt_driver, do_driver_attach)
@update_db
def refresh_connection_info(self, context, instance,
volume_api, virt_driver):
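The _get_volume() helper added above first asks Cinder for the volume at microversion 3.48, the version that exposes shared_targets and service_uuid, and falls back to the unversioned call if that microversion is not available. A rough sketch of the resulting behavior against an older Cinder, using a hypothetical stand-in for the volume API (FakeOldCinderAPI is not part of the change):

    from nova import exception

    class FakeOldCinderAPI(object):
        """Hypothetical Cinder API stand-in that predates microversion 3.48."""

        def get(self, context, volume_id, microversion=None):
            if microversion == '3.48':
                # Deployment is too old: the requested microversion is
                # not available.
                raise exception.CinderAPIVersionNotAvailable(version='3.48')
            # Legacy response without shared_targets/service_uuid.
            return {'id': volume_id, 'status': 'available'}

    # DriverVolumeBlockDevice._get_volume() catches the exception and returns
    # the legacy dict; attach() and detach() then skip the lock because
    # volume.get('shared_targets', False) is False.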