Block swap volume on volumes with >1 rw attachment
If we're swapping from a multiattach volume that has more than one
read/write attachment, another server could be writing to the volume through
the secondary attachment while the data is being copied to the new volume.
Those writes would not be copied, so data could be lost during the swap.

This change counts the read/write attachments on the volume we're swapping
from, and if there is more than one, the swap volume operation fails with a
400 BadRequest error.

Depends-On: https://review.openstack.org/573025/
Closes-Bug: #1775418
Change-Id: Icd7fcb87a09c35a13e4e14235feb30a289d22778
commit 5a1d159d14
parent 2e85453879
@@ -177,6 +177,9 @@ Policy defaults enable only users with the administrative role to perform
 this operation. Cloud providers can change these permissions through the
 ``policy.json`` file.
 
+Updating, or what is commonly referred to as "swapping", volume attachments
+with volumes that have more than one read/write attachment, is not supported.
+
 Normal response codes: 202
 
 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409)
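For context, the call being documented here is the volume attachment update ("swap volume") request. Below is a minimal client-side sketch of a request that this change now rejects; the endpoint path and body shape follow the test added later in this commit, while the compute URL, token and UUIDs are placeholders.

# Hypothetical client call; the compute endpoint, token and UUIDs are
# placeholders, not values taken from this change.
import requests

compute = 'http://controller:8774/v2.1'    # assumed Nova API endpoint
server_id = 'SERVER_UUID'
old_volume_id = 'OLD_VOLUME_UUID'          # multiattach, >1 read/write attachment
body = {'volumeAttachment': {'volumeId': 'NEW_VOLUME_UUID'}}

resp = requests.put(
    '%s/servers/%s/os-volume_attachments/%s' % (compute, server_id,
                                                old_volume_id),
    json=body, headers={'X-Auth-Token': 'TOKEN'})
# With this change the request fails fast instead of risking data loss
# during the volume copy.
assert resp.status_code == 400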
@@ -403,7 +403,8 @@ class VolumeAttachmentController(wsgi.Controller):
                                          new_volume)
         except exception.VolumeBDMNotFound as e:
             raise exc.HTTPNotFound(explanation=e.format_message())
-        except exception.InvalidVolume as e:
+        except (exception.InvalidVolume,
+                exception.MultiattachSwapVolumeNotSupported) as e:
             raise exc.HTTPBadRequest(explanation=e.format_message())
         except exception.InstanceIsLocked as e:
             raise exc.HTTPConflict(explanation=e.format_message())
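The controller change only widens the tuple of exceptions that are translated into a 400. As a standalone illustration (not nova code), webob's HTTPBadRequest is itself a WSGI response object, so raising it with an explanation is what carries the status and message back to the API client:

# Minimal webob sketch; the explanation string mirrors the new exception's
# message, but this snippet is illustrative, not part of the change.
import webob.exc

err = webob.exc.HTTPBadRequest(
    explanation='Swapping multi-attach volumes with more than one '
                'read/write attachment is not supported.')
print(err.status_int)    # 400
print(err.explanation)   # message returned to the client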
@@ -4378,6 +4378,52 @@ class API(base.Base):
         else:
             self._detach_volume(context, instance, volume)
 
+    def _count_attachments_for_swap(self, ctxt, volume):
+        """Counts the number of attachments for a swap-related volume.
+
+        Attempts to only count read/write attachments if the volume attachment
+        records exist, otherwise simply just counts the number of attachments
+        regardless of attach mode.
+
+        :param ctxt: nova.context.RequestContext - user request context
+        :param volume: nova-translated volume dict from nova.volume.cinder.
+        :returns: count of attachments for the volume
+        """
+        # This is a dict, keyed by server ID, to a dict of attachment_id and
+        # mountpoint.
+        attachments = volume.get('attachments', {})
+        # Multiattach volumes can have more than one attachment, so if there
+        # is more than one attachment, attempt to count the read/write
+        # attachments.
+        if len(attachments) > 1:
+            count = 0
+            for attachment in attachments.values():
+                attachment_id = attachment['attachment_id']
+                # Get the attachment record for this attachment so we can
+                # get the attach_mode.
+                # TODO(mriedem): This could be optimized if we had
+                # GET /attachments/detail?volume_id=volume['id'] in Cinder.
+                try:
+                    attachment_record = self.volume_api.attachment_get(
+                        ctxt, attachment_id)
+                    # Note that the attachment record from Cinder has
+                    # attach_mode in the top-level of the resource but the
+                    # nova.volume.cinder code translates it and puts the
+                    # attach_mode in the connection_info for some legacy
+                    # reason...
+                    if attachment_record.get(
+                            'connection_info', {}).get(
+                            # attachments are read/write by default
+                            'attach_mode', 'rw') == 'rw':
+                        count += 1
+                except exception.VolumeAttachmentNotFound:
+                    # attachments are read/write by default so count it
+                    count += 1
+        else:
+            count = len(attachments)
+
+        return count
+
     @check_instance_lock
     @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                     vm_states.RESIZED])
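The method above operates on the nova-translated volume dict, where 'attachments' is keyed by server ID, matching the fixtures in the unit tests added later in this commit. A sketch of the input it inspects (UUIDs are placeholders): with two attachments whose Cinder attachment records are read/write, or missing entirely, the count is 2 and the swap is refused.

# Illustrative input only; UUIDs are placeholders. If both attachment
# records report attach_mode 'rw' (or cannot be found, which counts as
# read/write by default), _count_attachments_for_swap returns 2.
volume = {
    'id': 'OLD_VOLUME_UUID',
    'multiattach': True,
    'attachments': {
        'SERVER1_UUID': {'attachment_id': 'ATTACHMENT1_UUID',
                         'mountpoint': '/dev/vdb'},
        'SERVER2_UUID': {'attachment_id': 'ATTACHMENT2_UUID',
                         'mountpoint': '/dev/vdb'},
    },
}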
@@ -4401,6 +4447,20 @@ class API(base.Base):
         except exception.InvalidInput as exc:
             raise exception.InvalidVolume(reason=exc.format_message())
 
+        # Disallow swapping from multiattach volumes that have more than one
+        # read/write attachment. We know the old_volume has at least one
+        # attachment since it's attached to this server. The new_volume
+        # can't have any attachments because of the attach_status check above.
+        # We do this count after calling "begin_detaching" to lock against
+        # concurrent attachments being made while we're counting.
+        try:
+            if self._count_attachments_for_swap(context, old_volume) > 1:
+                raise exception.MultiattachSwapVolumeNotSupported()
+        except Exception:  # This is generic to handle failures while counting
+            # We need to reset the detaching status before raising.
+            with excutils.save_and_reraise_exception():
+                self.volume_api.roll_detaching(context, old_volume['id'])
+
         # Get the BDM for the attached (old) volume so we can tell if it was
         # attached with the new-style Cinder 3.44 API.
         bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
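The try/except wrapped around the count uses oslo.utils' save_and_reraise_exception so that any failure while counting both resets the volume out of the 'detaching' state and still propagates the original error. A condensed sketch of that pattern follows; the volume_api object and the check callable are stand-ins, only excutils is the real library.

# Condensed rollback pattern; volume_api and check are hypothetical
# stand-ins for the nova calls, excutils is from oslo.utils.
from oslo_utils import excutils

def guarded_check(volume_api, ctxt, old_volume, check):
    """Run ``check`` after begin_detaching; roll back on any failure."""
    volume_api.begin_detaching(ctxt, old_volume['id'])
    try:
        check(old_volume)  # e.g. raise if there is >1 read/write attachment
    except Exception:
        with excutils.save_and_reraise_exception():
            # Reset the 'detaching' state; the original exception is
            # re-raised when the with-block exits.
            volume_api.roll_detaching(ctxt, old_volume['id'])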
@@ -308,6 +308,11 @@ class MultiattachToShelvedNotSupported(Invalid):
                 "shelved-offloaded instances.")
 
 
+class MultiattachSwapVolumeNotSupported(Invalid):
+    msg_fmt = _('Swapping multi-attach volumes with more than one read/write '
+                'attachment is not supported.')
+
+
 class VolumeNotCreated(NovaException):
     msg_fmt = _("Volume %(volume_id)s did not finish being created"
                 " even after we waited %(seconds)s seconds or %(attempts)s"
@@ -972,6 +972,81 @@ class VolumeAttachTestsV260(test.NoDBTestCase):
                       'shelved-offloaded instances.', six.text_type(ex))
 
 
+class SwapVolumeMultiattachTestCase(test.NoDBTestCase):
+
+    @mock.patch('nova.api.openstack.common.get_instance')
+    @mock.patch('nova.volume.cinder.API.begin_detaching')
+    @mock.patch('nova.volume.cinder.API.roll_detaching')
+    def test_swap_multiattach_multiple_readonly_attachments_fails(
+            self, mock_roll_detaching, mock_begin_detaching,
+            mock_get_instance):
+        """Tests that trying to swap from a multiattach volume with
+        multiple read/write attachments will return an error.
+        """
+
+        def fake_volume_get(_context, volume_id):
+            if volume_id == uuids.old_vol_id:
+                return {
+                    'id': volume_id,
+                    'size': 1,
+                    'multiattach': True,
+                    'attachments': {
+                        uuids.server1: {
+                            'attachment_id': uuids.attachment_id1,
+                            'mountpoint': '/dev/vdb'
+                        },
+                        uuids.server2: {
+                            'attachment_id': uuids.attachment_id2,
+                            'mountpoint': '/dev/vdb'
+                        }
+                    }
+                }
+            if volume_id == uuids.new_vol_id:
+                return {
+                    'id': volume_id,
+                    'size': 1,
+                    'attach_status': 'detached'
+                }
+            raise exception.VolumeNotFound(volume_id=volume_id)
+
+        def fake_attachment_get(_context, attachment_id):
+            return {'connection_info': {'attach_mode': 'rw'}}
+
+        ctxt = context.get_admin_context()
+        instance = fake_instance.fake_instance_obj(
+            ctxt, uuid=uuids.server1, vm_state=vm_states.ACTIVE,
+            task_state=None, launched_at=datetime.datetime(2018, 6, 6))
+        mock_get_instance.return_value = instance
+        controller = volumes_v21.VolumeAttachmentController()
+        with test.nested(
+                mock.patch.object(controller.volume_api, 'get',
+                                  side_effect=fake_volume_get),
+                mock.patch.object(controller.compute_api.volume_api,
+                                  'attachment_get',
+                                  side_effect=fake_attachment_get)) as (
+            mock_volume_get, mock_attachment_get
+        ):
+            req = fakes.HTTPRequest.blank(
+                '/servers/%s/os-volume_attachments/%s' %
+                (uuids.server1, uuids.old_vol_id))
+            req.headers['content-type'] = 'application/json'
+            req.environ['nova.context'] = ctxt
+            body = {
+                'volumeAttachment': {
+                    'volumeId': uuids.new_vol_id
+                }
+            }
+            ex = self.assertRaises(
+                webob.exc.HTTPBadRequest, controller.update, req,
+                uuids.server1, uuids.old_vol_id, body=body)
+        self.assertIn('Swapping multi-attach volumes with more than one ',
+                      six.text_type(ex))
+        mock_attachment_get.assert_has_calls([
+            mock.call(ctxt, uuids.attachment_id1),
+            mock.call(ctxt, uuids.attachment_id2)], any_order=True)
+        mock_roll_detaching.assert_called_once_with(ctxt, uuids.old_vol_id)
+
+
 class CommonBadRequestTestCase(object):
 
     resource = None
@@ -2858,6 +2858,58 @@ class _ComputeAPIUnitTestMixIn(object):
 
         _do_test()
 
+    def test_count_attachments_for_swap_not_found_and_readonly(self):
+        """Tests that attachment records that aren't found are considered
+        read/write by default. Also tests that read-only attachments are
+        not counted.
+        """
+        ctxt = context.get_admin_context()
+        volume = {
+            'attachments': {
+                uuids.server1: {
+                    'attachment_id': uuids.attachment1
+                },
+                uuids.server2: {
+                    'attachment_id': uuids.attachment2
+                }
+            }
+        }
+
+        def fake_attachment_get(_context, attachment_id):
+            if attachment_id == uuids.attachment1:
+                raise exception.VolumeAttachmentNotFound(
+                    attachment_id=attachment_id)
+            return {'connection_info': {'attach_mode': 'ro'}}
+
+        with mock.patch.object(self.compute_api.volume_api, 'attachment_get',
+                               side_effect=fake_attachment_get) as mock_get:
+            self.assertEqual(
+                1, self.compute_api._count_attachments_for_swap(ctxt, volume))
+        mock_get.assert_has_calls([
+            mock.call(ctxt, uuids.attachment1),
+            mock.call(ctxt, uuids.attachment2)], any_order=True)
+
+    @mock.patch('nova.volume.cinder.API.attachment_get',
+                new_callable=mock.NonCallableMock)  # asserts not called
+    def test_count_attachments_for_swap_no_query(self, mock_attachment_get):
+        """Tests that if the volume has <2 attachments, we don't query
+        the attachments for their attach_mode value.
+        """
+        volume = {}
+        self.assertEqual(
+            0, self.compute_api._count_attachments_for_swap(
+                mock.sentinel.context, volume))
+        volume = {
+            'attachments': {
+                uuids.server: {
+                    'attachment_id': uuids.attach1
+                }
+            }
+        }
+        self.assertEqual(
+            1, self.compute_api._count_attachments_for_swap(
+                mock.sentinel.context, volume))
+
     @mock.patch.object(compute_utils, 'is_volume_backed_instance')
     @mock.patch.object(objects.Instance, 'save')
     @mock.patch.object(image_api.API, 'create')
releasenotes/notes/bug-1775418-754fc50261f5d7c3.yaml (new file, 10 lines added)
@@ -0,0 +1,10 @@
+---
+fixes:
+  - |
+    The `os-volume_attachments`_ update API, commonly referred to as the swap
+    volume API, will now return a ``400`` (BadRequest) error when attempting
+    to swap from a multi-attached volume with more than one active read/write
+    attachment, resolving `bug #1775418`_.
+
+    .. _os-volume_attachments: https://developer.openstack.org/api-ref/compute/?expanded=update-a-volume-attachment-detail
+    .. _bug #1775418: https://launchpad.net/bugs/1775418