Delete volumes with snapshots

This adds the 'cascade' parameter to volume delete,
which deletes a volume's snapshots along with the
volume in a single call.

This is implemented in the volume manager rather than
in a driver-optimized way; driver-level optimization
is left as a later improvement.

Blueprint: del-vols-with-snaps
Change-Id: I33d15b76d4bd0de14c635d404b2c97096c977a58
Eric Harney 2016-01-07 16:25:52 -05:00
parent d46fbf0e13
commit bf2b6b9d90
10 changed files with 153 additions and 17 deletions
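
As a usage sketch only: the controller change below reads 'cascade' from the
request's query parameters, so a caller would pass it on the DELETE request
roughly as follows. The endpoint URL, project id, volume id, and token are
placeholders, not values taken from this change.

    # Hedged sketch: cascade delete via the v2 volumes API.
    # Endpoint, <project_id>, <volume_id>, and token are placeholders.
    import requests

    CINDER_ENDPOINT = 'http://cinder-api:8776/v2/<project_id>'  # placeholder
    TOKEN = '<keystone-token>'                                   # placeholder

    resp = requests.delete(
        '%s/volumes/%s' % (CINDER_ENDPOINT, '<volume_id>'),
        params={'cascade': 'True'},   # parsed by get_bool_param() in the controller
        headers={'X-Auth-Token': TOKEN})
    assert resp.status_code == 202    # accepted; deletion happens asynchronously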


@@ -186,11 +186,13 @@ class VolumeController(wsgi.Controller):
"""Delete a volume."""
context = req.environ['cinder.context']
cascade = utils.get_bool_param('cascade', req.params)
LOG.info(_LI("Delete volume with id: %s"), id, context=context)
try:
volume = self.volume_api.get(context, id)
self.volume_api.delete(context, volume)
self.volume_api.delete(context, volume, cascade=cascade)
except exception.VolumeNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
return webob.Response(status_int=202)


@@ -255,6 +255,10 @@ def volume_has_snapshots_filter():
return IMPL.volume_has_snapshots_filter()
def volume_has_undeletable_snapshots_filter():
return IMPL.volume_has_undeletable_snapshots_filter()
def volume_has_attachments_filter():
return IMPL.volume_has_attachments_filter()


@@ -1831,6 +1831,15 @@ def volume_has_snapshots_filter():
~models.Snapshot.deleted))
def volume_has_undeletable_snapshots_filter():
deletable_statuses = ['available', 'error']
return sql.exists().where(
and_(models.Volume.id == models.Snapshot.volume_id,
~models.Snapshot.deleted,
or_(models.Snapshot.cgsnapshot_id != None, # noqa: != None
models.Snapshot.status.notin_(deletable_statuses))))
def volume_has_attachments_filter():
return sql.exists().where(
and_(models.Volume.id == models.VolumeAttachment.volume_id,


@@ -65,7 +65,7 @@ class VolumeUnmanageTest(test.TestCase):
res = self._get_resp(vol.id)
self.assertEqual(202, res.status_int, res)
mock_rpcapi.assert_called_once_with(self.ctxt, mock.ANY, True)
mock_rpcapi.assert_called_once_with(self.ctxt, mock.ANY, True, False)
vol = objects.volume.Volume.get_by_id(self.ctxt, vol.id)
self.assertEqual('deleting', vol.status)
db.volume_destroy(self.ctxt, vol.id)


@@ -1283,7 +1283,8 @@ class VolumeApiTest(test.TestCase):
self.assertEqual(202, resp.status_int)
def test_volume_delete_attached(self):
def stub_volume_attached(self, context, volume, force=False):
def stub_volume_attached(self, context, volume,
force=False, cascade=False):
raise exception.VolumeAttached(volume_id=volume['id'])
self.stubs.Set(volume_api.API, "delete", stub_volume_attached)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)


@@ -4298,6 +4298,49 @@ class VolumeTestCase(BaseVolumeTestCase):
None,
connector)
def test_cascade_delete_volume_with_snapshots(self):
"""Test volume deletion with dependent snapshots."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
snapshot = self._create_snapshot(volume['id'], size=volume['size'])
self.volume.create_snapshot(self.context, volume['id'], snapshot)
self.assertEqual(
snapshot.id, objects.Snapshot.get_by_id(self.context,
snapshot.id).id)
volume['status'] = 'available'
volume['host'] = 'fakehost'
volume_api = cinder.volume.api.API()
volume_api.delete(self.context,
volume,
cascade=True)
def test_cascade_delete_volume_with_snapshots_error(self):
"""Test volume deletion with dependent snapshots."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
snapshot = self._create_snapshot(volume['id'], size=volume['size'])
self.volume.create_snapshot(self.context, volume['id'], snapshot)
self.assertEqual(
snapshot.id, objects.Snapshot.get_by_id(self.context,
snapshot.id).id)
snapshot.update({'status': 'in-use'})
snapshot.save()
volume['status'] = 'available'
volume['host'] = 'fakehost'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.delete,
self.context,
volume,
cascade=True)
@ddt.ddt
class VolumeMigrationTestCase(VolumeTestCase):


@@ -291,8 +291,9 @@ class VolumeRpcAPITestCase(test.TestCase):
rpc_method='cast',
volume=self.fake_volume_obj,
unmanage_only=False,
version='1.33')
can_send_version.assert_called_once_with('1.33')
cascade=False,
version='1.40')
can_send_version.assert_any_call('1.40')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=False)
@@ -302,7 +303,19 @@
volume=self.fake_volume_obj,
unmanage_only=False,
version='1.15')
can_send_version.assert_called_once_with('1.33')
can_send_version.assert_any_call('1.33')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=True)
def test_delete_volume_cascade(self, can_send_version):
self._test_volume_api('delete_volume',
rpc_method='cast',
volume=self.fake_volume_obj,
unmanage_only=False,
cascade=True,
version='1.40')
can_send_version.assert_any_call('1.33')
can_send_version.assert_any_call('1.40')
def test_create_snapshot(self):
self._test_volume_api('create_snapshot',


@@ -329,7 +329,10 @@ class API(base.Base):
return vref
@wrap_check_policy
def delete(self, context, volume, force=False, unmanage_only=False):
def delete(self, context, volume,
force=False,
unmanage_only=False,
cascade=False):
if context.is_admin and context.project_id != volume.project_id:
project_id = volume.project_id
else:
@@ -374,8 +377,12 @@
expected['status'] = ('available', 'error', 'error_restoring',
'error_extending')
# Volume cannot have snapshots if we want to delete it
filters = [~db.volume_has_snapshots_filter()]
if cascade:
# Allow deletion if all snapshots are in an expected state
filters = [~db.volume_has_undeletable_snapshots_filter()]
else:
# Don't allow deletion of volume with snapshots
filters = [~db.volume_has_snapshots_filter()]
values = {'status': 'deleting', 'terminated_at': timeutils.utcnow()}
result = volume.conditional_update(values, expected, filters)
@@ -388,6 +395,22 @@
LOG.info(msg)
raise exception.InvalidVolume(reason=msg)
if cascade:
values = {'status': 'deleting'}
expected = {'status': ('available', 'error', 'deleting'),
'cgsnapshot_id': None}
snapshots = objects.snapshot.SnapshotList.get_all_for_volume(
context, volume.id)
for s in snapshots:
result = s.conditional_update(values, expected, filters)
if not result:
volume.update({'status': 'error_deleting'})
volume.save()
msg = _('Failed to update snapshot.')
raise exception.InvalidVolume(reason=msg)
cache = image_cache.ImageVolumeCache(self.db, self)
entry = cache.get_by_image_volume(context, volume.id)
if entry:
@@ -404,7 +427,10 @@
msg = _("Unable to delete encrypted volume: %s.") % e.msg
raise exception.InvalidVolume(reason=msg)
self.volume_rpcapi.delete_volume(context, volume, unmanage_only)
self.volume_rpcapi.delete_volume(context,
volume,
unmanage_only,
cascade)
LOG.info(_LI("Delete volume request issued successfully."),
resource=volume)
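
For in-process callers, a minimal sketch of the same path at the Python API
layer, mirroring the new unit tests above; 'ctxt' and 'volume' are assumed to
be an existing request context and a volume in a deletable state, not objects
defined here.

    # Sketch of the cinder.volume.api.API.delete() call exercised by the tests.
    import cinder.volume.api

    volume_api = cinder.volume.api.API()

    # Default behaviour is unchanged: with snapshots present and cascade=False,
    # the conditional update fails and InvalidVolume is raised.
    # volume_api.delete(ctxt, volume)

    # With cascade=True, deletable snapshots are moved to 'deleting' and the
    # delete_volume RPC is cast with cascade so the manager removes them too.
    volume_api.delete(ctxt, volume, cascade=True)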


@@ -204,7 +204,7 @@ def locked_snapshot_operation(f):
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = '1.39'
RPC_API_VERSION = '1.40'
target = messaging.Target(version=RPC_API_VERSION)
@@ -636,8 +636,10 @@ class VolumeManager(manager.SchedulerDependentManager):
return vol_ref.id
@locked_volume_operation
def delete_volume(self, context, volume_id, unmanage_only=False,
volume=None):
def delete_volume(self, context, volume_id,
unmanage_only=False,
volume=None,
cascade=False):
"""Deletes and unexports volume.
1. Delete a volume(normal case)
@@ -675,6 +677,13 @@
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
if unmanage_only and cascade:
# This could be done, but is ruled out for now just
# for simplicity.
raise exception.Invalid(
reason=_("Unmanage and cascade delete options "
"are mutually exclusive."))
# The status 'deleting' is not included, because it only applies to
# the source volume to be deleted after a migration. No quota
# needs to be handled for it.
@@ -693,6 +702,25 @@
self.driver.remove_export(context, volume)
if unmanage_only:
self.driver.unmanage(volume)
elif cascade:
LOG.debug('Performing cascade delete.')
snapshots = objects.SnapshotList.get_all_for_volume(context,
volume.id)
for s in snapshots:
if s.status != 'deleting':
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
msg = (_("Snapshot %(id)s was found in state "
"%(state)s rather than 'deleting' during "
"cascade delete.") % {'id': s.id,
'state': s.status})
raise exception.InvalidSnapshot(reason=msg)
self.delete_snapshot(context, s)
LOG.debug('Snapshots deleted, issuing volume delete')
self.driver.delete_volume(volume)
else:
self.driver.delete_volume(volume)
except exception.VolumeIsBusy:


@@ -92,9 +92,10 @@ class VolumeAPI(rpc.RPCAPI):
1.38 - Scaling backup service, add get_backup_device() and
secure_file_operations_enabled()
1.39 - Update replication methods to reflect new backend rep strategy
1.40 - Add cascade option to delete_volume().
"""
RPC_API_VERSION = '1.39'
RPC_API_VERSION = '1.40'
TOPIC = CONF.volume_topic
BINARY = 'cinder-volume'
@@ -152,13 +153,22 @@ class VolumeAPI(rpc.RPCAPI):
request_spec_p = jsonutils.to_primitive(request_spec)
cctxt.cast(ctxt, 'create_volume', **msg_args)
def delete_volume(self, ctxt, volume, unmanage_only=False):
def delete_volume(self, ctxt, volume, unmanage_only=False, cascade=False):
msg_args = {'volume_id': volume.id, 'unmanage_only': unmanage_only}
version = '1.15'
if self.client.can_send_version('1.33'):
version = '1.33'
msg_args['volume'] = volume
else:
version = '1.15'
if self.client.can_send_version('1.40'):
version = '1.40'
if cascade:
msg_args['cascade'] = cascade
elif cascade:
msg = _('Cascade option is not supported.')
raise exception.Invalid(reason=msg)
cctxt = self._get_cctxt(volume.host, version)
cctxt.cast(ctxt, 'delete_volume', **msg_args)