Check for outstanding attachments during reserve
There are a number of places on the Nova side where we need to manipulate volume connections, for example removing an attachment and immediately reserving a new one. This introduces a potential race in which the volume can end up in a bad state. To deal with that, we want to create the new attachment (the reserve) BEFORE we delete the current attachment. The problem is that the current attachment code toggles the volume status and attach_status on reserve, so the existing attachment can no longer be removed cleanly and the volume's true state is no longer reflected properly.

This patch adds a check on reserve for additional attachments: if the volume already has attachments that mark it as in-use, we don't clear that status during the new reserve call.

Change-Id: I8062897bd3d4fe20de9a5660eac6fec856cc3c1e
Closes-Bug: #1717564
This commit is contained in: parent a0f8690b87, commit 64612cdbfb
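
The reserve-side check described in the commit message is not part of the hunks shown below, but the idea can be sketched as follows. This is an illustrative sketch only: the method name _attachment_reserve and the exact object calls are assumptions, not a verbatim copy of the patch.

    def _attachment_reserve(self, ctxt, vref, instance_uuid=None):
        # Sketch: look at the attachments the volume already has. If any of
        # them marks the volume as attached/in-use, the new reserve must not
        # clobber that status.
        existing = objects.VolumeAttachmentList.get_all_by_volume_id(
            ctxt, vref.id)
        already_in_use = any(a.attach_status == 'attached'
                             for a in existing)

        values = {'volume_id': vref.id,
                  'volume_status': vref.status,
                  'attach_status': 'reserved',
                  'instance_uuid': instance_uuid}
        attachment = objects.VolumeAttachment(context=ctxt, **values)
        attachment.create()

        if not already_in_use:
            # Only flip the volume to 'reserved' when no other attachment
            # currently has it in-use.
            vref.status = 'reserved'
            vref.save()
        return attachment
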
@@ -178,3 +178,73 @@ class AttachmentManagerTestCase(test.TestCase):
        vref = objects.Volume.get_by_id(self.context,
                                        vref.id)
        self.assertEqual(2, len(vref.volume_attachment))

    @mock.patch('cinder.volume.api.check_policy')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update')
    def test_attachment_create_reserve_delete(
            self,
            mock_rpc_attachment_update,
            mock_policy):
        volume_params = {'status': 'available'}
        connector = {
            "initiator": "iqn.1993-08.org.debian:01:cad181614cec",
            "ip": "192.168.1.20",
            "platform": "x86_64",
            "host": "tempest-1",
            "os_type": "linux2",
            "multipath": False}

        connection_info = {'fake_key': 'fake_value',
                           'fake_key2': ['fake_value1', 'fake_value2']}
        mock_rpc_attachment_update.return_value = connection_info

        vref = tests_utils.create_volume(self.context, **volume_params)
        aref = self.volume_api.attachment_create(self.context,
                                                 vref,
                                                 fake.UUID2,
                                                 connector=connector)
        vref = objects.Volume.get_by_id(self.context,
                                        vref.id)
        # Need to set the status here because our mock isn't doing it for us
        vref.status = 'in-use'
        vref.save()

        # Now a second attachment acting as a reserve
        self.volume_api.attachment_create(self.context,
                                          vref,
                                          fake.UUID2)

        # We should now be able to delete the original attachment that gave us
        # 'in-use' status, and in turn we should revert to the outstanding
        # attachments reserve
        self.volume_api.attachment_delete(self.context,
                                          aref)
        vref = objects.Volume.get_by_id(self.context,
                                        vref.id)
        self.assertEqual('reserved', vref.status)

    @mock.patch('cinder.volume.api.check_policy')
    def test_reserve_reserve_delete(self, mock_policy):
        """Test that we keep reserved status across multiple reserves."""
        volume_params = {'status': 'available'}

        vref = tests_utils.create_volume(self.context, **volume_params)
        aref = self.volume_api.attachment_create(self.context,
                                                 vref,
                                                 fake.UUID2)
        vref = objects.Volume.get_by_id(self.context,
                                        vref.id)
        self.assertEqual('reserved', vref.status)

        self.volume_api.attachment_create(self.context,
                                          vref,
                                          fake.UUID2)
        vref = objects.Volume.get_by_id(self.context,
                                        vref.id)
        self.assertEqual('reserved', vref.status)
        self.volume_api.attachment_delete(self.context,
                                          aref)
        vref = objects.Volume.get_by_id(self.context,
                                        vref.id)
        self.assertEqual('reserved', vref.status)
        self.assertEqual(1, len(vref.volume_attachment))
@@ -2043,19 +2043,37 @@ class API(base.Base):
        self.volume_rpcapi.attachment_delete(ctxt,
                                             attachment.id,
                                             volume)
        status_updates = {'status': 'available',
                          'attach_status': 'detached'}
        remaining_attachments = AO_LIST.get_all_by_volume_id(ctxt, volume.id)

        # TODO(jdg): Make this check attachments_by_volume_id when we
        # implement multi-attach for real
        if len(remaining_attachments) < 1:
            volume.status = 'available'
            volume.attach_status = 'detached'
            volume.save()
        # NOTE(jdg): Try and figure out the state we have left and set that;
        # priority is: attached > attaching > detaching > reserved
        pending_status_list = []
        for attachment in remaining_attachments:
            pending_status_list.append(attachment.attach_status)
        if 'attached' in pending_status_list:
            status_updates['status'] = 'in-use'
            status_updates['attach_status'] = 'attached'
        elif 'attaching' in pending_status_list:
            status_updates['status'] = 'attaching'
            status_updates['attach_status'] = 'attaching'
        elif 'detaching' in pending_status_list:
            status_updates['status'] = 'detaching'
            status_updates['attach_status'] = 'detaching'
        elif 'reserved' in pending_status_list:
            status_updates['status'] = 'reserved'
            status_updates['attach_status'] = 'reserved'

        volume.status = status_updates['status']
        volume.attach_status = status_updates['attach_status']
        volume.save()
        return remaining_attachments


class HostAPI(base.Base):
    """Sub-set of the Volume Manager API for managing host operations."""

    def set_host_enabled(self, context, host, enabled):
        """Sets the specified host's ability to accept new volumes."""
        raise NotImplementedError()
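
For readers skimming the second hunk: after an attachment is deleted, the volume's state is re-derived from whatever attachments remain, with the fixed priority attached > attaching > detaching > reserved, falling back to available/detached when nothing is left. A minimal standalone sketch of that resolution (the helper name resolve_volume_state is hypothetical, not part of the patch):

    def resolve_volume_state(remaining_statuses):
        # Map the attach_status values of the remaining attachments to the
        # (status, attach_status) pair the volume should carry.
        priority = [('attached', ('in-use', 'attached')),
                    ('attaching', ('attaching', 'attaching')),
                    ('detaching', ('detaching', 'detaching')),
                    ('reserved', ('reserved', 'reserved'))]
        for attach_status, result in priority:
            if attach_status in remaining_statuses:
                return result
        # No attachments left: the volume is free again.
        return ('available', 'detached')

    # Example: one 'reserved' attachment remains after deleting the attached
    # one, which is the situation test_attachment_create_reserve_delete checks.
    assert resolve_volume_state(['reserved']) == ('reserved', 'reserved')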