diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py
index d70b12d69..7053a4615 100644
--- a/cinder/db/sqlalchemy/api.py
+++ b/cinder/db/sqlalchemy/api.py
@@ -1608,44 +1608,50 @@ def volume_detached(context, volume_id, attachment_id):
     """
     session = get_session()
     with session.begin():
-        attachment = None
         try:
             attachment = volume_attachment_get(context, attachment_id,
                                                session=session)
         except exception.VolumeAttachmentNotFound:
-            pass
+            attachment_updates = None
+            attachment = None
 
-        # If this is already detached, attachment will be None
         if attachment:
             now = timeutils.utcnow()
-            attachment['attach_status'] = fields.VolumeAttachStatus.DETACHED
-            attachment['detach_time'] = now
-            attachment['deleted'] = True
-            attachment['deleted_at'] = now
+            attachment_updates = {
+                'attach_status': fields.VolumeAttachStatus.DETACHED,
+                'detach_time': now,
+                'deleted': True,
+                'deleted_at': now,
+                'updated_at':
+                    literal_column('updated_at'),
+            }
+            attachment.update(attachment_updates)
             attachment.save(session=session)
+            del attachment_updates['updated_at']
 
-        attachment_list = volume_attachment_get_all_by_volume_id(
-            context, volume_id, session=session)
-        remain_attachment = False
-        if attachment_list and len(attachment_list) > 0:
-            remain_attachment = True
-
-        volume_ref = _volume_get(context, volume_id, session=session)
-        if not remain_attachment:
+        volume_ref = _volume_get(context, volume_id,
+                                 session=session)
+        volume_updates = {'updated_at': literal_column('updated_at')}
+        if not volume_ref.volume_attachment:
             # Hide status update from user if we're performing volume migration
             # or uploading it to image
-            if ((not volume_ref['migration_status'] and
-                    not (volume_ref['status'] == 'uploading')) or
-                    volume_ref['migration_status'] in ('success', 'error')):
-                volume_ref['status'] = 'available'
+            if ((not volume_ref.migration_status and
+                    not (volume_ref.status == 'uploading')) or
+                    volume_ref.migration_status in ('success', 'error')):
+                volume_updates['status'] = 'available'
 
-            volume_ref['attach_status'] = fields.VolumeAttachStatus.DETACHED
-            volume_ref.save(session=session)
+            volume_updates['attach_status'] = (
+                fields.VolumeAttachStatus.DETACHED)
         else:
             # Volume is still attached
-            volume_ref['status'] = 'in-use'
-            volume_ref['attach_status'] = fields.VolumeAttachStatus.ATTACHED
-            volume_ref.save(session=session)
+            volume_updates['status'] = 'in-use'
+            volume_updates['attach_status'] = (
+                fields.VolumeAttachStatus.ATTACHED)
+
+        volume_ref.update(volume_updates)
+        volume_ref.save(session=session)
+        del volume_updates['updated_at']
+        return (volume_updates, attachment_updates)
 
 
 @require_context
diff --git a/cinder/objects/volume.py b/cinder/objects/volume.py
index d8d681706..c49956245 100644
--- a/cinder/objects/volume.py
+++ b/cinder/objects/volume.py
@@ -523,6 +523,25 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,
             self.save()
         return attachment
 
+    def finish_detach(self, attachment_id):
+        with self.obj_as_admin():
+            volume_updates, attachment_updates = (
+                db.volume_detached(self._context, self.id, attachment_id))
+            db.volume_admin_metadata_delete(self._context, self.id,
+                                            'attached_mode')
+        self.admin_metadata.pop('attached_mode', None)
+        # Remove attachment in volume only when this field is loaded.
+        if attachment_updates and self.obj_attr_is_set('volume_attachment'):
+            for i, attachment in enumerate(self.volume_attachment):
+                if attachment.id == attachment_id:
+                    del self.volume_attachment.objects[i]
+                    break
+
+        self.update(volume_updates)
+        self.obj_reset_changes(
+            list(volume_updates.keys()) +
+            ['volume_attachment', 'admin_metadata'])
+
 
 @base.CinderObjectRegistry.register
 class VolumeList(base.ObjectListBase, base.CinderObject):
diff --git a/cinder/tests/unit/objects/test_volume.py b/cinder/tests/unit/objects/test_volume.py
index 194feb7fd..1190a3a4d 100644
--- a/cinder/tests/unit/objects/test_volume.py
+++ b/cinder/tests/unit/objects/test_volume.py
@@ -446,6 +446,73 @@ class TestVolume(test_objects.BaseObjectsTestCase):
                          True)
         self.assertEqual('rw', volume.admin_metadata['attached_mode'])
 
+    @mock.patch('cinder.db.volume_admin_metadata_delete')
+    @mock.patch('cinder.db.sqlalchemy.api.volume_detached')
+    @mock.patch('cinder.objects.volume_attachment.VolumeAttachmentList.'
+                'get_all_by_volume_id')
+    def test_volume_detached_with_attachment(
+            self, volume_attachment_get,
+            volume_detached,
+            metadata_delete):
+        va_objs = [objects.VolumeAttachment(context=self.context, id=i)
+                   for i in [fake.OBJECT_ID, fake.OBJECT2_ID, fake.OBJECT3_ID]]
+        # As changes are not saved, we need to reset them here. Later
+        # changes will be checked.
+        for obj in va_objs:
+            obj.obj_reset_changes()
+        va_list = objects.VolumeAttachmentList(context=self.context,
+                                               objects=va_objs)
+        va_list.obj_reset_changes()
+        volume_attachment_get.return_value = va_list
+        admin_context = context.get_admin_context()
+        volume = fake_volume.fake_volume_obj(
+            admin_context,
+            volume_attachment=va_list,
+            volume_admin_metadata=[{'key': 'attached_mode',
+                                    'value': 'rw'}])
+        self.assertEqual(3, len(volume.volume_attachment))
+        volume_detached.return_value = ({'status': 'in-use'},
+                                        {'attached_mode': 'rw'})
+        with mock.patch.object(admin_context, 'elevated') as mock_elevated:
+            mock_elevated.return_value = admin_context
+            volume.finish_detach(fake.OBJECT_ID)
+            volume_detached.assert_called_once_with(admin_context,
+                                                    volume.id,
+                                                    fake.OBJECT_ID)
+            metadata_delete.assert_called_once_with(admin_context,
+                                                    volume.id,
+                                                    'attached_mode')
+            self.assertEqual('in-use', volume.status)
+            self.assertEqual({}, volume.cinder_obj_get_changes())
+            self.assertEqual(2, len(volume.volume_attachment))
+            self.assertIsNone(volume.admin_metadata.get('attached_mode'))
+
+    @mock.patch('cinder.db.volume_admin_metadata_delete')
+    @mock.patch('cinder.db.sqlalchemy.api.volume_detached')
+    @mock.patch('cinder.objects.volume_attachment.VolumeAttachmentList.'
+                'get_all_by_volume_id')
+    def test_volume_detached_without_attachment(
+            self, volume_attachment_get, volume_detached, metadata_delete):
+        admin_context = context.get_admin_context()
+        volume = fake_volume.fake_volume_obj(
+            admin_context,
+            volume_admin_metadata=[{'key': 'attached_mode',
+                                    'value': 'rw'}])
+        self.assertFalse(volume.obj_attr_is_set('volume_attachment'))
+        volume_detached.return_value = ({'status': 'in-use'}, None)
+        with mock.patch.object(admin_context, 'elevated') as mock_elevated:
+            mock_elevated.return_value = admin_context
+            volume.finish_detach(fake.OBJECT_ID)
+            metadata_delete.assert_called_once_with(admin_context,
+                                                    volume.id,
+                                                    'attached_mode')
+            volume_detached.assert_called_once_with(admin_context,
+                                                    volume.id,
+                                                    fake.OBJECT_ID)
+            self.assertEqual('in-use', volume.status)
+            self.assertEqual({}, volume.cinder_obj_get_changes())
+            self.assertFalse(volume_attachment_get.called)
+
 
 class TestVolumeList(test_objects.BaseObjectsTestCase):
     @mock.patch('cinder.db.volume_get_all')
diff --git a/cinder/tests/unit/test_db_api.py b/cinder/tests/unit/test_db_api.py
index e29ff9c07..fd5b2ad7f 100644
--- a/cinder/tests/unit/test_db_api.py
+++ b/cinder/tests/unit/test_db_api.py
@@ -387,33 +387,101 @@
                   'instance_uuid': instance_uuid,
                   'attach_status': fields.VolumeAttachStatus.ATTACHING, }
         attachment = db.volume_attach(self.ctxt, values)
-        db.volume_attached(self.ctxt, attachment['id'],
+        db.volume_attached(self.ctxt, attachment.id,
                            instance_uuid,
                            None, '/tmp')
-        db.volume_detached(self.ctxt, volume['id'], attachment['id'])
-        volume = db.volume_get(self.ctxt, volume['id'])
+        volume_updates, attachment_updates = (
+            db.volume_detached(self.ctxt, volume.id, attachment.id))
+        expected_attachment = {
+            'attach_status': fields.VolumeAttachStatus.DETACHED,
+            'detach_time': mock.ANY,
+            'deleted': True,
+            'deleted_at': mock.ANY, }
+        self.assertDictEqual(expected_attachment, attachment_updates)
+        expected_volume = {
+            'status': 'available',
+            'attach_status': fields.VolumeAttachStatus.DETACHED, }
+        self.assertDictEqual(expected_volume, volume_updates)
+        volume = db.volume_get(self.ctxt, volume.id)
         self.assertRaises(exception.VolumeAttachmentNotFound,
                           db.volume_attachment_get,
                           self.ctxt,
-                          attachment['id'])
-        self.assertEqual('available', volume['status'])
+                          attachment.id)
+        self.assertEqual('available', volume.status)
+
+    def test_volume_detached_two_attachments(self):
+        volume = db.volume_create(self.ctxt, {})
+        instance_uuid = fake.INSTANCE_ID
+        values = {'volume_id': volume.id,
+                  'instance_uuid': instance_uuid,
+                  'attach_status': fields.VolumeAttachStatus.ATTACHING, }
+        attachment = db.volume_attach(self.ctxt, values)
+        values2 = {'volume_id': volume.id,
+                   'instance_uuid': fake.OBJECT_ID,
+                   'attach_status': fields.VolumeAttachStatus.ATTACHING, }
+        db.volume_attach(self.ctxt, values2)
+        db.volume_attached(self.ctxt, attachment.id,
+                           instance_uuid,
+                           None, '/tmp')
+        volume_updates, attachment_updates = (
+            db.volume_detached(self.ctxt, volume.id, attachment.id))
+        expected_attachment = {
+            'attach_status': fields.VolumeAttachStatus.DETACHED,
+            'detach_time': mock.ANY,
+            'deleted': True,
+            'deleted_at': mock.ANY, }
+        self.assertDictEqual(expected_attachment, attachment_updates)
+        expected_volume = {
+            'status': 'in-use',
+            'attach_status': fields.VolumeAttachStatus.ATTACHED, }
+        self.assertDictEqual(expected_volume, volume_updates)
+        volume = db.volume_get(self.ctxt, volume.id)
+        self.assertRaises(exception.VolumeAttachmentNotFound,
+                          db.volume_attachment_get,
+                          self.ctxt,
+                          attachment.id)
+        self.assertEqual('in-use', volume.status)
+
+    def test_volume_detached_invalid_attachment(self):
+        volume = db.volume_create(self.ctxt, {})
+        # detach it again
+        volume_updates, attachment_updates = (
+            db.volume_detached(self.ctxt, volume.id, fake.ATTACHMENT_ID))
+        self.assertIsNone(attachment_updates)
+        expected_volume = {
+            'status': 'available',
+            'attach_status': fields.VolumeAttachStatus.DETACHED, }
+        self.assertDictEqual(expected_volume, volume_updates)
+        volume = db.volume_get(self.ctxt, volume.id)
+        self.assertEqual('available', volume.status)
 
     def test_volume_detached_from_host(self):
         volume = db.volume_create(self.ctxt, {})
         host_name = 'fake_host'
-        values = {'volume_id': volume['id'],
+        values = {'volume_id': volume.id,
                   'attach_host': host_name,
                   'attach_status': fields.VolumeAttachStatus.ATTACHING, }
         attachment = db.volume_attach(self.ctxt, values)
-        db.volume_attached(self.ctxt, attachment['id'],
+        db.volume_attached(self.ctxt, attachment.id,
                            None, host_name, '/tmp')
-        db.volume_detached(self.ctxt, volume['id'], attachment['id'])
-        volume = db.volume_get(self.ctxt, volume['id'])
+        volume_updates, attachment_updates = (
+            db.volume_detached(self.ctxt, volume.id, attachment.id))
+        expected_attachment = {
+            'attach_status': fields.VolumeAttachStatus.DETACHED,
+            'detach_time': mock.ANY,
+            'deleted': True,
+            'deleted_at': mock.ANY}
+        self.assertDictEqual(expected_attachment, attachment_updates)
+        expected_volume = {
+            'status': 'available',
+            'attach_status': fields.VolumeAttachStatus.DETACHED, }
+        self.assertDictEqual(expected_volume, volume_updates)
+        volume = db.volume_get(self.ctxt, volume.id)
         self.assertRaises(exception.VolumeAttachmentNotFound,
                           db.volume_attachment_get,
                           self.ctxt,
-                          attachment['id'])
-        self.assertEqual('available', volume['status'])
+                          attachment.id)
+        self.assertEqual('available', volume.status)
 
     def test_volume_get(self):
         volume = db.volume_create(self.ctxt, {})
diff --git a/cinder/tests/unit/test_volume.py b/cinder/tests/unit/test_volume.py
index 489390056..80de4f06d 100644
--- a/cinder/tests/unit/test_volume.py
+++ b/cinder/tests/unit/test_volume.py
@@ -2087,7 +2087,7 @@ class VolumeTestCase(base.BaseVolumeTestCase):
         with volume.obj_as_admin():
             volume.admin_metadata['readonly'] = True
             volume.save()
-        volume_id = volume['id']
+        volume_id = volume.id
         self.volume.create_volume(self.user_context, volume=volume)
 
         volume_passed = volume if volume_object else None
@@ -2120,10 +2120,12 @@
         self.assertRaises(exception.VolumeAttached,
                           self.volume.delete_volume,
                           self.context,
-                          volume)
-        self.volume.detach_volume(self.context, volume_id, attachment['id'])
-        vol = db.volume_get(self.context, volume_id)
-        self.assertEqual('available', vol['status'])
+                          volume=volume)
+        self.volume.detach_volume(self.context, volume_id,
+                                  attachment.id,
+                                  volume=volume_passed)
+        vol = objects.Volume.get_by_id(self.context, volume_id)
+        self.assertEqual('available', vol.status)
 
         self.volume.delete_volume(self.context, volume)
         self.assertRaises(exception.VolumeNotFound,
diff --git a/cinder/tests/unit/test_volume_rpcapi.py b/cinder/tests/unit/test_volume_rpcapi.py
index a5ddedfc0..beabca17c 100644
--- a/cinder/tests/unit/test_volume_rpcapi.py
+++ b/cinder/tests/unit/test_volume_rpcapi.py
@@ -439,20 +439,26 @@ class VolumeRpcAPITestCase(test.TestCase):
                               mode='rw',
                               version=version)
 
-    def test_detach_volume(self):
+    @ddt.data('3.0', '3.4')
+    @mock.patch('oslo_messaging.RPCClient.can_send_version')
+    def test_detach_volume(self, version, can_send_version):
+        can_send_version.return_value = (version == '3.4')
         self._test_volume_api('detach_volume',
                               rpc_method='call',
                               volume=self.fake_volume_obj,
                               attachment_id='fake_uuid',
-                              version="3.0")
+                              version=version)
 
-    def test_detach_volume_cluster(self):
+    @ddt.data('3.0', '3.4')
+    @mock.patch('oslo_messaging.RPCClient.can_send_version')
+    def test_detach_volume_cluster(self, version, can_send_version):
+        can_send_version.return_value = (version == '3.4')
         self._set_cluster()
         self._test_volume_api('detach_volume',
                               rpc_method='call',
                               volume=self.fake_volume_obj,
                               attachment_id='fake_uuid',
-                              version="3.0")
+                              version=version)
 
     def test_copy_volume_to_image(self):
         self._test_volume_api('copy_volume_to_image',
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index 9049076bc..d31755635 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -1022,29 +1022,33 @@ class VolumeManager(manager.CleanableManager,
         return attachment
 
     @coordination.synchronized('{volume_id}-{f_name}')
-    def detach_volume(self, context, volume_id, attachment_id=None):
+    def detach_volume(self, context, volume_id, attachment_id=None,
+                      volume=None):
         """Updates db to show volume is detached."""
         # TODO(vish): refactor this into a more general "unreserve"
-        volume = self.db.volume_get(context, volume_id)
-        attachment = None
+        # FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
+        if volume is None:
+            # For older clients, mimic the old behavior and look up the volume
+            # by its volume_id.
+            volume = objects.Volume.get_by_id(context, volume_id)
+
         if attachment_id:
             try:
-                attachment = self.db.volume_attachment_get(context,
-                                                           attachment_id)
+                attachment = objects.VolumeAttachment.get_by_id(context,
+                                                                attachment_id)
             except exception.VolumeAttachmentNotFound:
                 LOG.info(_LI("Volume detach called, but volume not attached."),
                          resource=volume)
                 # We need to make sure the volume status is set to the correct
                 # status. It could be in detaching status now, and we don't
                 # want to leave it there.
-                self.db.volume_detached(context, volume_id, attachment_id)
+                volume.finish_detach(attachment_id)
                 return
         else:
             # We can try and degrade gracefully here by trying to detach
             # a volume without the attachment_id here if the volume only has
             # one attachment. This is for backwards compatibility.
-            attachments = self.db.volume_attachment_get_all_by_volume_id(
-                context, volume_id)
+            attachments = volume.volume_attachment
             if len(attachments) > 1:
                 # There are more than 1 attachments for this volume
                 # we have to have an attachment id.
@@ -1059,10 +1063,9 @@
             # so set the status to available and move on.
LOG.info(_LI("Volume detach called, but volume not attached."), resource=volume) - self.db.volume_update( - context, volume_id, { - 'status': 'available', - 'attach_status': fields.VolumeAttachStatus.DETACHED}) + volume.status = 'available' + volume.attach_status = fields.VolumeAttachStatus.DETACHED + volume.save() return self._notify_about_volume_usage(context, volume, "detach.start") @@ -1092,7 +1095,6 @@ class VolumeManager(manager.CleanableManager, # We're going to remove the export here # (delete the iscsi target) - volume = self.db.volume_get(context, volume_id) try: utils.require_driver_initialized(self.driver) self.driver.remove_export(context.elevated(), volume) @@ -1108,11 +1110,7 @@ class VolumeManager(manager.CleanableManager, raise exception.RemoveExportException(volume=volume_id, reason=six.text_type(ex)) - self.db.volume_detached(context.elevated(), volume_id, - attachment.get('id')) - self.db.volume_admin_metadata_delete(context.elevated(), volume_id, - 'attached_mode') - + volume.finish_detach(attachment.id) self._notify_about_volume_usage(context, volume, "detach.end") LOG.info(_LI("Detach volume completed successfully."), resource=volume) diff --git a/cinder/volume/rpcapi.py b/cinder/volume/rpcapi.py index 21655384a..889bf55f9 100644 --- a/cinder/volume/rpcapi.py +++ b/cinder/volume/rpcapi.py @@ -114,9 +114,10 @@ class VolumeAPI(rpc.RPCAPI): 3.2 - Adds support for sending objects over RPC in get_backup_device(). 3.3 - Adds support for sending objects over RPC in attach_volume(). + 3.4 - Adds support for sending objects over RPC in detach_volume(). """ - RPC_API_VERSION = '3.3' + RPC_API_VERSION = '3.4' RPC_DEFAULT_VERSION = '3.0' TOPIC = constants.VOLUME_TOPIC BINARY = 'cinder-volume' @@ -201,9 +202,13 @@ class VolumeAPI(rpc.RPCAPI): return cctxt.call(ctxt, 'attach_volume', **msg_args) def detach_volume(self, ctxt, volume, attachment_id): - cctxt = self._get_cctxt(volume.service_topic_queue) - return cctxt.call(ctxt, 'detach_volume', volume_id=volume.id, - attachment_id=attachment_id) + msg_args = {'volume_id': volume.id, + 'attachment_id': attachment_id, + 'volume': volume} + cctxt = self._get_cctxt(volume.service_topic_queue, ('3.4', '3.0')) + if not self.client.can_send_version('3.4'): + msg_args.pop('volume') + return cctxt.call(ctxt, 'detach_volume', **msg_args) def copy_volume_to_image(self, ctxt, volume, image_meta): cctxt = self._get_cctxt(volume['host'])