Merge "Remove API races from delete methods"

Jenkins, 2016-01-08 08:18:43 +00:00 (committed by Gerrit Code Review)
9 changed files with 135 additions and 245 deletions
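
The races removed here are check-then-act windows: the old delete paths read a volume or snapshot, validated its status in Python, and only then wrote the new status in a separate save, leaving room for a concurrent attach, migration, or snapshot to land in between. The fix pushes those checks into the UPDATE itself via conditional updates. A minimal, illustrative sketch of the pattern (not the Cinder implementation; the table and session names are assumptions):

    # Illustrative compare-and-swap style transition: the WHERE clause
    # re-checks the preconditions at write time, so the row only moves to
    # 'deleting' if nothing changed since it was last read.
    from sqlalchemy import update

    def try_mark_deleting(session, volumes_table, volume_id):
        """Return True if this caller won the transition to 'deleting'."""
        stmt = (update(volumes_table)
                .where(volumes_table.c.id == volume_id)
                .where(volumes_table.c.status.in_(('available', 'error')))
                .where(volumes_table.c.attach_status != 'attached')
                .values(status='deleting'))
        return session.execute(stmt).rowcount == 1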

View File

@@ -19,7 +19,7 @@ from webob import exc
 from cinder.api import extensions
 from cinder.api.openstack import wsgi
 from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _LI
 from cinder import volume

 LOG = logging.getLogger(__name__)
@@ -58,9 +58,6 @@ class VolumeUnmanageController(wsgi.Controller):
             self.volume_api.delete(context, vol, unmanage_only=True)
         except exception.VolumeNotFound as error:
             raise exc.HTTPNotFound(explanation=error.msg)
-        except exception.VolumeAttached:
-            msg = _("Volume cannot be deleted while in attached state")
-            raise exc.HTTPBadRequest(explanation=msg)
         return webob.Response(status_int=202)

View File

@@ -204,9 +204,6 @@ class VolumeController(wsgi.Controller):
             self.volume_api.delete(context, volume)
         except exception.VolumeNotFound as error:
             raise exc.HTTPNotFound(explanation=error.msg)
-        except exception.VolumeAttached:
-            msg = _("Volume cannot be deleted while in attached state")
-            raise exc.HTTPBadRequest(explanation=msg)
         return webob.Response(status_int=202)

     @wsgi.serializers(xml=VolumesTemplate)

View File

@@ -275,6 +275,10 @@ def volume_update_status_based_on_attachment(context, volume_id):
     return IMPL.volume_update_status_based_on_attachment(context, volume_id)
+
+
+def volume_has_snapshots_filter():
+    return IMPL.volume_has_snapshots_filter()


 def volume_has_attachments_filter():
     return IMPL.volume_has_attachments_filter()

View File

@@ -1833,6 +1833,12 @@ def volume_update_status_based_on_attachment(context, volume_id):
     return volume_ref
+
+
+def volume_has_snapshots_filter():
+    return sql.exists().where(
+        and_(models.Volume.id == models.Snapshot.volume_id,
+             ~models.Snapshot.deleted))


 def volume_has_attachments_filter():
     return sql.exists().where(
         and_(models.Volume.id == models.VolumeAttachment.volume_id,
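
The new filters return bare EXISTS clauses, so a caller can negate them and fold the "has no undeleted snapshots" check into the same UPDATE that flips the status, rather than querying snapshots first and updating afterwards. A rough sketch of that composition (the session and models here are assumed stand-ins, not the exact helper Cinder uses):

    # Rough sketch, assuming a SQLAlchemy session and ORM models shaped like
    # cinder.db.sqlalchemy.models. The negated EXISTS is evaluated by the
    # database inside the UPDATE, not by Python beforehand.
    from sqlalchemy import and_, sql

    def volume_has_snapshots_filter(models):
        return sql.exists().where(
            and_(models.Volume.id == models.Snapshot.volume_id,
                 ~models.Snapshot.deleted))

    def mark_deleting_if_no_snapshots(session, models, volume_id):
        rows = (session.query(models.Volume).
                filter(models.Volume.id == volume_id).
                filter(~volume_has_snapshots_filter(models)).
                update({'status': 'deleting'}, synchronize_session=False))
        return rows == 1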

View File

@@ -84,21 +84,23 @@ class SnapshotUnmanageTest(test.TestCase):
         res = req.get_response(app())
         return res

+    @mock.patch('cinder.db.conditional_update', return_value=1)
     @mock.patch('cinder.db.snapshot_update')
-    @mock.patch('cinder.objects.volume.Volume.get_by_id')
-    @mock.patch('cinder.db.volume_get')
+    @mock.patch('cinder.objects.Volume.get_by_id')
     @mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_snapshot')
-    def test_unmanage_snapshot_ok(self, mock_rpcapi, mock_db,
-                                  mock_volume_get_by_id, mock_db_update):
+    def test_unmanage_snapshot_ok(self, mock_rpcapi, mock_volume_get_by_id,
+                                  mock_db_update, mock_conditional_update):
         """Return success for valid and unattached volume."""
         ctxt = context.RequestContext('admin', 'fake', True)
         volume = fake_volume.fake_volume_obj(ctxt, id='fake_volume_id')
         mock_volume_get_by_id.return_value = volume
         res = self._get_resp(snapshot_id)

-        self.assertEqual(1, mock_db.call_count)
-        self.assertEqual(2, len(mock_db.call_args[0]), mock_db.call_args)
-        self.assertEqual('fake_volume_id', mock_db.call_args[0][1])
+        self.assertEqual(1, mock_volume_get_by_id.call_count)
+        self.assertEqual(2, len(mock_volume_get_by_id.call_args[0]),
+                         mock_volume_get_by_id.call_args)
+        self.assertEqual('fake_volume_id',
+                         mock_volume_get_by_id.call_args[0][1])

         self.assertEqual(1, mock_rpcapi.call_count)
         self.assertEqual(3, len(mock_rpcapi.call_args[0]))

View File

@@ -17,90 +17,13 @@ from oslo_serialization import jsonutils
 import webob

 from cinder import context
-from cinder import exception
+from cinder import db
+from cinder import objects
 from cinder import test
 from cinder.tests.unit.api import fakes
-from cinder.tests.unit import fake_snapshot
-from cinder.tests.unit import fake_volume
+from cinder.tests.unit import utils

-# This list of fake volumes is used by our tests. Each is configured in a
-# slightly different way, and includes only the properties that are required
-# for these particular tests to function correctly.
-snapshot_vol_id = 'ffffffff-0000-ffff-0000-fffffffffffd'
-detached_vol_id = 'ffffffff-0000-ffff-0000-fffffffffffe'
-attached_vol_id = 'ffffffff-0000-ffff-0000-ffffffffffff'
-bad_vol_id = 'ffffffff-0000-ffff-0000-fffffffffff0'
-
-vols = {snapshot_vol_id: {'id': snapshot_vol_id,
-                          'status': 'available',
-                          'attach_status': 'detached',
-                          'host': 'fake_host',
-                          'project_id': 'fake_project',
-                          'migration_status': None,
-                          'consistencygroup_id': None,
-                          'encryption_key_id': None},
-        detached_vol_id: {'id': detached_vol_id,
-                          'status': 'available',
-                          'attach_status': 'detached',
-                          'host': 'fake_host',
-                          'project_id': 'fake_project',
-                          'migration_status': None,
-                          'consistencygroup_id': None,
-                          'encryption_key_id': None},
-        attached_vol_id: {'id': attached_vol_id,
-                          'status': 'available',
-                          'attach_status': 'attached',
-                          'host': 'fake_host',
-                          'project_id': 'fake_project',
-                          'migration_status': None,
-                          'consistencygroup_id': None,
-                          'encryption_key_id': None}
-        }
-
-
-def app():
-    # no auth, just let environ['cinder.context'] pass through
-    api = fakes.router.APIRouter()
-    mapper = fakes.urlmap.URLMap()
-    mapper['/v2'] = api
-    return mapper
-
-
-def api_get(self, context, volume_id):
-    """Replacement for cinder.volume.api.API.get.
-
-    We stub the cinder.volume.api.API.get method to check for the existence
-    of volume_id in our list of fake volumes and raise an exception if the
-    specified volume ID is not in our list.
-    """
-    vol = vols.get(volume_id, None)
-    if not vol:
-        raise exception.VolumeNotFound(volume_id)
-    return fake_volume.fake_volume_obj(context, **vol)
-
-
-def db_snapshot_get_all_for_volume(context, volume_id):
-    """Replacement for cinder.db.snapshot_get_all_for_volume.
-
-    We stub the cinder.db.snapshot_get_all_for_volume method because when we
-    go to unmanage a volume, the code checks for snapshots and won't unmanage
-    volumes with snapshots. For these tests, only the snapshot_vol_id reports
-    any snapshots. The delete code just checks for array length, doesn't
-    inspect the contents.
-    """
-    if volume_id == snapshot_vol_id:
-        db_snapshot = {'volume_id': volume_id}
-        snapshot = fake_snapshot.fake_db_snapshot(**db_snapshot)
-        return [snapshot]
-    return []
-
-
-@mock.patch('cinder.volume.api.API.get', api_get)
-@mock.patch('cinder.db.snapshot_get_all_for_volume',
-            db_snapshot_get_all_for_volume)
 class VolumeUnmanageTest(test.TestCase):
     """Test cases for cinder/api/contrib/volume_unmanage.py
@@ -117,50 +40,54 @@ class VolumeUnmanageTest(test.TestCase):
     def setUp(self):
         super(VolumeUnmanageTest, self).setUp()
+        self.ctxt = context.RequestContext('admin', 'fake_project', True)
+
+        api = fakes.router.APIRouter()
+        self.app = fakes.urlmap.URLMap()
+        self.app['/v2'] = api

     def _get_resp(self, volume_id):
         """Helper to build an os-unmanage req for the specified volume_id."""
-        req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume_id)
+        req = webob.Request.blank('/v2/%s/volumes/%s/action' %
+                                  (self.ctxt.project_id, volume_id))
         req.method = 'POST'
         req.headers['Content-Type'] = 'application/json'
-        req.environ['cinder.context'] = context.RequestContext('admin',
-                                                               'fake',
-                                                               True)
+        req.environ['cinder.context'] = self.ctxt
         body = {'os-unmanage': ''}
         req.body = jsonutils.dump_as_bytes(body)
-        res = req.get_response(app())
+        res = req.get_response(self.app)
         return res

-    @mock.patch('cinder.db.volume_update')
     @mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_volume')
-    def test_unmanage_volume_ok(self, mock_rpcapi, mock_db):
+    def test_unmanage_volume_ok(self, mock_rpcapi):
         """Return success for valid and unattached volume."""
-        res = self._get_resp(detached_vol_id)
-
-        # volume_update is (context, id, new_data)
-        self.assertEqual(1, mock_db.call_count)
-        self.assertEqual(3, len(mock_db.call_args[0]), mock_db.call_args)
-        self.assertEqual(detached_vol_id, mock_db.call_args[0][1])
-
-        # delete_volume is (context, status, unmanageOnly)
-        self.assertEqual(1, mock_rpcapi.call_count)
-        self.assertEqual(3, len(mock_rpcapi.call_args[0]))
-        self.assertTrue(mock_rpcapi.call_args[0][2])
+        vol = utils.create_volume(self.ctxt)
+        res = self._get_resp(vol.id)

         self.assertEqual(202, res.status_int, res)
+        mock_rpcapi.called_once_with(self.ctxt, mock.ANY, unmanage_only=True)
+        vol = objects.volume.Volume.get_by_id(self.ctxt, vol.id)
+        self.assertEqual('deleting', vol.status)
+        db.volume_destroy(self.ctxt, vol.id)

     def test_unmanage_volume_bad_volume_id(self):
         """Return 404 if the volume does not exist."""
-        res = self._get_resp(bad_vol_id)
+        res = self._get_resp('nonexistent-volume-id')
         self.assertEqual(404, res.status_int, res)

-    def test_unmanage_volume_attached_(self):
+    def test_unmanage_volume_attached(self):
         """Return 400 if the volume exists but is attached."""
-        res = self._get_resp(attached_vol_id)
+        vol = utils.create_volume(self.ctxt, status='in-use',
+                                  attach_status='attached')
+        res = self._get_resp(vol.id)
         self.assertEqual(400, res.status_int, res)
+        db.volume_destroy(self.ctxt, vol.id)

-    @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
-    def test_unmanage_volume_with_snapshots(self, metadata_get):
+    def test_unmanage_volume_with_snapshots(self):
         """Return 400 if the volume exists but has snapshots."""
-        res = self._get_resp(snapshot_vol_id)
+        vol = utils.create_volume(self.ctxt)
+        snap = utils.create_snapshot(self.ctxt, vol.id)
+        res = self._get_resp(vol.id)
         self.assertEqual(400, res.status_int, res)
+        db.volume_destroy(self.ctxt, vol.id)
+        db.snapshot_destroy(self.ctxt, snap.id)

View File

@@ -1289,10 +1289,10 @@ class VolumeApiTest(test.TestCase):
         self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
         req = fakes.HTTPRequest.blank('/v2/volumes/1')
-        exp = self.assertRaises(webob.exc.HTTPBadRequest,
+        exp = self.assertRaises(exception.VolumeAttached,
                                 self.controller.delete,
                                 req, 1)
-        expect_msg = "Volume cannot be deleted while in attached state"
+        expect_msg = "Volume 1 is still attached, detach volume first."
         self.assertEqual(expect_msg, six.text_type(exp))

     def test_volume_delete_no_volume(self):

View File

@@ -134,6 +134,7 @@ class BaseVolumeTestCase(test.TestCase):
                       'pools': {}}
         # keep ordered record of what we execute
         self.called = []
+        self.volume_api = cinder.volume.api.API()

     def _cleanup(self):
         try:
@@ -937,25 +938,17 @@ class VolumeTestCase(BaseVolumeTestCase):
     @mock.patch.object(keymgr, 'API', new=fake_keymgr.fake_api)
     def test_create_delete_volume_with_encrypted_volume_type(self):
-        ctxt = context.get_admin_context()
-
-        db.volume_type_create(ctxt,
-                              {'id': '61298380-0c12-11e3-bfd6-4b48424183be',
-                               'name': 'LUKS'})
+        db_vol_type = db.volume_type_create(self.context,
+                                            {'id': 'type-id', 'name': 'LUKS'})
         db.volume_type_encryption_create(
-            ctxt,
-            '61298380-0c12-11e3-bfd6-4b48424183be',
+            self.context, 'type-id',
             {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER})
-
-        volume_api = cinder.volume.api.API()
-
-        db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS')
-
-        volume = volume_api.create(self.context,
-                                   1,
-                                   'name',
-                                   'description',
-                                   volume_type=db_vol_type)
+        volume = self.volume_api.create(self.context,
+                                        1,
+                                        'name',
+                                        'description',
+                                        volume_type=db_vol_type)

         self.assertIsNotNone(volume.get('encryption_key_id', None))
         self.assertEqual(db_vol_type.get('id'), volume['volume_type_id'])
@@ -963,7 +956,8 @@ class VolumeTestCase(BaseVolumeTestCase):
         volume['host'] = 'fake_host'
         volume['status'] = 'available'

-        volume_api.delete(self.context, volume)
+        db.volume_update(self.context, volume['id'], {'status': 'available'})
+        self.volume_api.delete(self.context, volume)
         volume = db.volume_get(self.context, volume['id'])
         self.assertEqual('deleting', volume['status'])
@@ -2990,10 +2984,11 @@ class VolumeTestCase(BaseVolumeTestCase):
         pass

     @staticmethod
-    def _create_snapshot(volume_id, size=1, metadata=None):
+    def _create_snapshot(volume_id, size=1, metadata=None, ctxt=None,
+                         **kwargs):
         """Create a snapshot object."""
         metadata = metadata or {}
-        snap = objects.Snapshot(context.get_admin_context())
+        snap = objects.Snapshot(ctxt or context.get_admin_context())
         snap.volume_size = size
         snap.user_id = 'fake'
         snap.project_id = 'fake'
@@ -3001,6 +2996,8 @@ class VolumeTestCase(BaseVolumeTestCase):
         snap.status = "creating"
         if metadata is not None:
             snap.metadata = metadata
+        snap.update(kwargs)
+
         snap.create()
         return snap
@@ -3172,16 +3169,12 @@ class VolumeTestCase(BaseVolumeTestCase):
     def _test_cannot_delete_volume(self, status):
         """Test volume can't be deleted in invalid stats."""
         # create a volume and assign to host
-        volume = tests_utils.create_volume(self.context, **self.volume_params)
-        self.volume.create_volume(self.context, volume['id'])
-        volume['status'] = status
-        volume['host'] = 'fakehost'
-
-        volume_api = cinder.volume.api.API()
+        volume = tests_utils.create_volume(self.context, CONF.host,
+                                           status=status)

         # 'in-use' status raises InvalidVolume
         self.assertRaises(exception.InvalidVolume,
-                          volume_api.delete,
+                          self.volume_api.delete,
                           self.context,
                           volume)
@@ -3191,20 +3184,17 @@ class VolumeTestCase(BaseVolumeTestCase):
     def test_force_delete_volume(self):
         """Test volume can be forced to delete."""
         # create a volume and assign to host
+        self.volume_params['status'] = 'error_deleting'
         volume = tests_utils.create_volume(self.context, **self.volume_params)
-        self.volume.create_volume(self.context, volume['id'])
-
-        volume['status'] = 'error_deleting'
-
-        volume_api = cinder.volume.api.API()

         # 'error_deleting' volumes can't be deleted
         self.assertRaises(exception.InvalidVolume,
-                          volume_api.delete,
+                          self.volume_api.delete,
                           self.context,
                           volume)

         # delete with force
-        volume_api.delete(self.context, volume, force=True)
+        self.volume_api.delete(self.context, volume, force=True)

         # status is deleting
         volume = objects.Volume.get_by_id(context.get_admin_context(),
@@ -3216,21 +3206,17 @@ class VolumeTestCase(BaseVolumeTestCase):
     def test_cannot_force_delete_attached_volume(self):
         """Test volume can't be force delete in attached state."""
-        volume = tests_utils.create_volume(self.context, **self.volume_params)
-        self.volume.create_volume(self.context, volume['id'])
-        volume['status'] = 'in-use'
-        volume['attach_status'] = 'attached'
-        volume['host'] = 'fakehost'
-
-        volume_api = cinder.volume.api.API()
-
-        self.assertRaises(exception.VolumeAttached,
-                          volume_api.delete,
+        volume = tests_utils.create_volume(self.context, CONF.host,
+                                           status='in-use',
+                                           attach_status='attached')
+
+        self.assertRaises(exception.InvalidVolume,
+                          self.volume_api.delete,
                           self.context,
                           volume,
                           force=True)

-        self.volume.delete_volume(self.context, volume['id'])
+        db.volume_destroy(self.context, volume.id)

     def test_cannot_delete_volume_with_snapshots(self):
         """Test volume can't be deleted with dependent snapshots."""
@@ -3256,22 +3242,20 @@ class VolumeTestCase(BaseVolumeTestCase):
     def test_can_delete_errored_snapshot(self):
         """Test snapshot can be created and deleted."""
-        volume = tests_utils.create_volume(self.context, **self.volume_params)
-        self.volume.create_volume(self.context, volume['id'])
-        snapshot = self._create_snapshot(volume['id'], size=volume['size'])
-        self.volume.create_snapshot(self.context, volume['id'], snapshot)
-
-        volume_api = cinder.volume.api.API()
-
-        snapshot.status = 'badstatus'
+        volume = tests_utils.create_volume(self.context, CONF.host)
+        snapshot = self._create_snapshot(volume.id, size=volume['size'],
+                                         ctxt=self.context, status='bad')
+
         self.assertRaises(exception.InvalidSnapshot,
-                          volume_api.delete_snapshot,
+                          self.volume_api.delete_snapshot,
                           self.context,
                           snapshot)

         snapshot.status = 'error'
-        self.volume.delete_snapshot(self.context, snapshot)
-        self.volume.delete_volume(self.context, volume['id'])
+        snapshot.save()
+
+        self.volume_api.delete_snapshot(self.context, snapshot)
+        self.assertEqual('deleting', snapshot.status)
+        self.volume.delete_volume(self.context, volume.id)

     def test_create_snapshot_force(self):
         """Test snapshot in use can be created forcibly."""
@@ -4971,12 +4955,11 @@ class VolumeMigrationTestCase(VolumeTestCase):
 class ConsistencyGroupTestCase(BaseVolumeTestCase):
     def test_delete_volume_in_consistency_group(self):
         """Test deleting a volume that's tied to a consistency group fails."""
-        volume_api = cinder.volume.api.API()
-        volume = tests_utils.create_volume(self.context, **self.volume_params)
         consistencygroup_id = '12345678-1234-5678-1234-567812345678'
-        volume = db.volume_update(self.context, volume['id'],
-                                  {'status': 'available',
-                                   'consistencygroup_id': consistencygroup_id})
+        volume_api = cinder.volume.api.API()
+        self.volume_params.update({'status': 'available',
+                                   'consistencygroup_id': consistencygroup_id})
+        volume = tests_utils.create_volume(self.context, **self.volume_params)
         self.assertRaises(exception.InvalidVolume,
                           volume_api.delete, self.context, volume)

View File

@@ -352,22 +352,21 @@ class API(base.Base):
     @wrap_check_policy
     def delete(self, context, volume, force=False, unmanage_only=False):
-        if context.is_admin and context.project_id != volume['project_id']:
-            project_id = volume['project_id']
+        if context.is_admin and context.project_id != volume.project_id:
+            project_id = volume.project_id
         else:
             project_id = context.project_id

-        volume_id = volume['id']
-        if not volume['host']:
+        if not volume.host:
             volume_utils.notify_about_volume_usage(context,
                                                    volume, "delete.start")
             # NOTE(vish): scheduling failed, so delete it
             # Note(zhiteng): update volume quota reservation
             try:
-                reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
+                reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
                 QUOTAS.add_volume_type_opts(context,
                                             reserve_opts,
-                                            volume['volume_type_id'])
+                                            volume.volume_type_id)
                 reservations = QUOTAS.reserve(context,
                                               project_id=project_id,
                                               **reserve_opts)
@@ -384,51 +383,35 @@ class API(base.Base):
volume, "delete.end") volume, "delete.end")
LOG.info(_LI("Delete volume request issued successfully."), LOG.info(_LI("Delete volume request issued successfully."),
resource={'type': 'volume', resource={'type': 'volume',
'id': volume_id}) 'id': volume.id})
return return
if volume['attach_status'] == "attached":
# Volume is still attached, need to detach first
LOG.info(_LI('Unable to delete volume: %s, '
'volume is attached.'), volume['id'])
raise exception.VolumeAttached(volume_id=volume_id)
if not force and volume['status'] not in ["available", "error", # Build required conditions for conditional update
"error_restoring", expected = {'attach_status': db.Not('attached'),
"error_extending"]: 'migration_status': self.AVAILABLE_MIGRATION_STATUS,
msg = _("Volume status must be available or error, " 'consistencygroup_id': None}
"but current status is: %s.") % volume['status']
LOG.info(_LI('Unable to delete volume: %(vol_id)s, '
'volume must be available or '
'error, but is %(vol_status)s.'),
{'vol_id': volume['id'],
'vol_status': volume['status']})
raise exception.InvalidVolume(reason=msg)
if self._is_volume_migrating(volume): # If not force deleting we have status conditions
# Volume is migrating, wait until done if not force:
LOG.info(_LI('Unable to delete volume: %s, ' expected['status'] = ('available', 'error', 'error_restoring',
'volume is currently migrating.'), volume['id']) 'error_extending')
msg = _("Volume cannot be deleted while migrating")
raise exception.InvalidVolume(reason=msg)
if volume['consistencygroup_id'] is not None: # Volume cannot have snapshots if we want to delete it
msg = _("Volume cannot be deleted while in a consistency group.") filters = [~db.volume_has_snapshots_filter()]
LOG.info(_LI('Unable to delete volume: %s, ' values = {'status': 'deleting', 'terminated_at': timeutils.utcnow()}
'volume is currently part of a '
'consistency group.'), volume['id'])
raise exception.InvalidVolume(reason=msg)
snapshots = objects.SnapshotList.get_all_for_volume(context, result = volume.conditional_update(values, expected, filters)
volume_id)
if len(snapshots): if not result:
LOG.info(_LI('Unable to delete volume: %s, ' status = utils.build_or_str(expected.get('status'),
'volume currently has snapshots.'), volume['id']) _(' status must be %s and '))
msg = _("Volume still has %d dependent " msg = _('Volume%s must not be migrating, attached, belong to a '
"snapshots.") % len(snapshots) 'consistency group or have snapshots.') % status
LOG.info(msg)
raise exception.InvalidVolume(reason=msg) raise exception.InvalidVolume(reason=msg)
cache = image_cache.ImageVolumeCache(self.db, self) cache = image_cache.ImageVolumeCache(self.db, self)
entry = cache.get_by_image_volume(context, volume_id) entry = cache.get_by_image_volume(context, volume.id)
if entry: if entry:
cache.evict(context, entry) cache.evict(context, entry)
@@ -443,10 +426,6 @@ class API(base.Base):
msg = _("Unable to delete encrypted volume: %s.") % e.msg msg = _("Unable to delete encrypted volume: %s.") % e.msg
raise exception.InvalidVolume(reason=msg) raise exception.InvalidVolume(reason=msg)
volume.status = 'deleting'
volume.terminated_at = timeutils.utcnow()
volume.save()
self.volume_rpcapi.delete_volume(context, volume, unmanage_only) self.volume_rpcapi.delete_volume(context, volume, unmanage_only)
LOG.info(_LI("Delete volume request issued successfully."), LOG.info(_LI("Delete volume request issued successfully."),
resource=volume) resource=volume)
@@ -949,29 +928,24 @@ class API(base.Base):
     @wrap_check_policy
     def delete_snapshot(self, context, snapshot, force=False,
                         unmanage_only=False):
-        if not force and snapshot.status not in ["available", "error"]:
-            LOG.error(_LE('Unable to delete snapshot: %(snap_id)s, '
-                          'due to invalid status. '
-                          'Status must be available or '
-                          'error, not %(snap_status)s.'),
-                      {'snap_id': snapshot.id,
-                       'snap_status': snapshot.status})
-            msg = _("Volume Snapshot status must be available or error.")
-            raise exception.InvalidSnapshot(reason=msg)
-        cgsnapshot_id = snapshot.cgsnapshot_id
-        if cgsnapshot_id:
-            msg = _('Unable to delete snapshot %s because it is part of a '
-                    'consistency group.') % snapshot.id
+        # Build required conditions for conditional update
+        expected = {'cgsnapshot_id': None}
+        # If not force deleting we have status conditions
+        if not force:
+            expected['status'] = ('available', 'error')
+
+        result = snapshot.conditional_update({'status': 'deleting'}, expected)
+        if not result:
+            status = utils.build_or_str(expected.get('status'),
+                                        _(' status must be %s and '))
+            msg = (_('Snapshot%s must not be part of a consistency group.') %
+                   status)
             LOG.error(msg)
             raise exception.InvalidSnapshot(reason=msg)

-        snapshot_obj = self.get_snapshot(context, snapshot.id)
-        snapshot_obj.status = 'deleting'
-        snapshot_obj.save()
-
-        volume = self.db.volume_get(context, snapshot_obj.volume_id)
-        self.volume_rpcapi.delete_snapshot(context, snapshot_obj,
-                                           volume['host'],
+        # Make RPC call to the right host
+        volume = objects.Volume.get_by_id(context, snapshot.volume_id)
+        self.volume_rpcapi.delete_snapshot(context, snapshot, volume.host,
                                            unmanage_only=unmanage_only)
         LOG.info(_LI("Snapshot delete request issued successfully."),
                  resource=snapshot)
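
For reference, the object-level call used above is a single atomic attempt: conditional_update() applies the new values only if every expected condition still holds at write time and returns a falsy result otherwise, which is what closes the race. A hedged usage sketch mirroring the snapshot path (the wrapper function is hypothetical; the call itself comes from the diff):

    # Hypothetical caller around the conditional_update() shown above: the
    # status flip and its preconditions are evaluated in one statement.
    def try_mark_snapshot_deleting(snapshot, force=False):
        expected = {'cgsnapshot_id': None}
        if not force:
            expected['status'] = ('available', 'error')
        # Truthy only if the row still matched 'expected' when the UPDATE ran.
        return bool(snapshot.conditional_update({'status': 'deleting'},
                                                expected))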