Merge "Add 'unmanaging' state to volumes and snapshots"
This commit is contained in:
commit
f40c86ba51
cinder/objects/base.py

@@ -122,6 +122,7 @@ OBJ_VERSIONS.add('1.14', {'VolumeAttachmentList': '1.1'})
 OBJ_VERSIONS.add('1.15', {'Volume': '1.6', 'Snapshot': '1.2'})
 OBJ_VERSIONS.add('1.16', {'BackupDeviceInfo': '1.0'})
 OBJ_VERSIONS.add('1.17', {'VolumeAttachment': '1.1'})
+OBJ_VERSIONS.add('1.18', {'Snapshot': '1.3'})
 
 
 class CinderObjectRegistry(base.VersionedObjectRegistry):
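The new history entry means a service pinned to object history 1.17 or earlier keeps serializing Snapshot at 1.2 during a rolling upgrade, so old peers never receive the new status. A minimal standalone sketch of that lookup (not Cinder code; `pin_to` is a hypothetical helper and the mapping is abbreviated from the diff):

```python
# Sketch: how a pinned history entry selects the object version to emit.
OBJ_VERSIONS = {
    '1.17': {'Volume': '1.6', 'Snapshot': '1.2', 'VolumeAttachment': '1.1'},
    '1.18': {'Volume': '1.6', 'Snapshot': '1.3', 'VolumeAttachment': '1.1'},
}


def pin_to(history_version, obj_name):
    """Return the object version a service pinned to this history emits."""
    return OBJ_VERSIONS[history_version].get(obj_name)


print(pin_to('1.17', 'Snapshot'))  # 1.2 -> old peers never see 'unmanaging'
print(pin_to('1.18', 'Snapshot'))  # 1.3 -> the new status is understood
```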
cinder/objects/fields.py

@@ -105,9 +105,10 @@ class SnapshotStatus(BaseCinderEnum):
     DELETED = 'deleted'
     UPDATING = 'updating'
     ERROR_DELETING = 'error_deleting'
+    UNMANAGING = 'unmanaging'
 
     ALL = (ERROR, AVAILABLE, CREATING, DELETING, DELETED,
-           UPDATING, ERROR_DELETING)
+           UPDATING, ERROR_DELETING, UNMANAGING)
 
 
 class SnapshotStatusField(BaseEnumField):
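Adding UNMANAGING to ALL is what makes the value legal on the versioned object: the enum field validates assignments against that tuple. A toy illustration of the contract (assumed names mirroring the diff, not the oslo.versionedobjects implementation):

```python
# Sketch: values outside ALL are rejected at assignment time, so extending
# ALL is the step that legalizes the new status.
class SnapshotStatus(object):
    DELETING = 'deleting'
    UNMANAGING = 'unmanaging'
    ALL = ('error', 'available', 'creating', 'deleting', 'deleted',
           'updating', 'error_deleting', 'unmanaging')


def validate(status):
    """Mimic the enum field's coercion check."""
    if status not in SnapshotStatus.ALL:
        raise ValueError('%s is not a valid SnapshotStatus' % status)
    return status


validate('unmanaging')   # accepted after this commit
# validate('bogus')      # would raise ValueError
```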
cinder/objects/snapshot.py

@@ -34,7 +34,8 @@ class Snapshot(cleanable.CinderCleanableObject, base.CinderObject,
     # Version 1.0: Initial version
     # Version 1.1: Changed 'status' field to use SnapshotStatusField
     # Version 1.2: This object is now cleanable (adds rows to workers table)
-    VERSION = '1.2'
+    # Version 1.3: SnapshotStatusField now includes "unmanaging"
+    VERSION = '1.3'
 
     # NOTE(thangp): OPTIONAL_FIELDS are fields that would be lazy-loaded. They
     # are typically the relationship in the sqlalchemy object.
@@ -116,6 +117,10 @@ class Snapshot(cleanable.CinderCleanableObject, base.CinderObject,
         super(Snapshot, self).obj_make_compatible(primitive, target_version)
         target_version = versionutils.convert_version_to_tuple(target_version)
 
+        if target_version < (1, 3):
+            if primitive.get('status') == c_fields.SnapshotStatus.UNMANAGING:
+                primitive['status'] = c_fields.SnapshotStatus.DELETING
+
     @classmethod
     def _from_db_object(cls, context, snapshot, db_snapshot,
                         expected_attrs=None):
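The downgrade rule keeps older services functional: a pre-1.3 consumer has no "unmanaging" state, and "deleting" is the closest status it knows how to act on. The same rule as a standalone function (sketch only; plain version strings stand in for the real versionutils tuple handling):

```python
# Sketch of the backlevel rule added above, outside the Cinder object model.
UNMANAGING = 'unmanaging'
DELETING = 'deleting'


def make_compatible(primitive, target_version):
    """Rewrite 'unmanaging' to 'deleting' for peers older than 1.3."""
    major, minor = (int(p) for p in target_version.split('.'))
    if (major, minor) < (1, 3) and primitive.get('status') == UNMANAGING:
        primitive['status'] = DELETING
    return primitive


assert make_compatible({'status': 'unmanaging'}, '1.1')['status'] == 'deleting'
assert make_compatible({'status': 'unmanaging'}, '1.3')['status'] == 'unmanaging'
```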
cinder/tests/unit/api/contrib/test_volume_unmanage.py

@@ -69,7 +69,7 @@ class VolumeUnmanageTest(test.TestCase):
 
         mock_rpcapi.assert_called_once_with(self.ctxt, mock.ANY, True, False)
         vol = objects.volume.Volume.get_by_id(self.ctxt, vol.id)
-        self.assertEqual('deleting', vol.status)
+        self.assertEqual('unmanaging', vol.status)
         db.volume_destroy(self.ctxt, vol.id)
 
     def test_unmanage_volume_bad_volume_id(self):
cinder/tests/unit/objects/test_objects.py

@@ -39,7 +39,7 @@ object_data = {
     'RequestSpec': '1.1-b0bd1a28d191d75648901fa853e8a733',
     'Service': '1.4-c7d011989d1718ca0496ccf640b42712',
     'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
-    'Snapshot': '1.2-d6a9d58f627bb2a5cf804b0dd7a12bc7',
+    'Snapshot': '1.3-69dfbe3244992478a0174cb512cd7f27',
     'SnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
     'Volume': '1.6-8a56256db74c0642dca1a30739d88074',
     'VolumeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
cinder/tests/unit/objects/test_snapshot.py

@@ -13,6 +13,8 @@
 # under the License.
 
 import copy
+
+import ddt
 import mock
 from oslo_utils import timeutils
 import pytz
@@ -47,6 +49,7 @@ fake_snapshot_obj = {
 }
 
 
+@ddt.ddt
 class TestSnapshot(test_objects.BaseObjectsTestCase):
 
     @mock.patch('cinder.db.get_by_id', return_value=fake_db_snapshot)
@@ -209,6 +212,18 @@ class TestSnapshot(test_objects.BaseObjectsTestCase):
                               mock.call(self.context,
                                         fake.SNAPSHOT_ID)])
 
+    @ddt.data('1.1', '1.3')
+    def test_obj_make_compatible(self, version):
+        snapshot = objects.Snapshot(context=self.context)
+        snapshot.status = 'unmanaging'
+        primitive = snapshot.obj_to_primitive(version)
+        snapshot = objects.Snapshot.obj_from_primitive(primitive)
+        if version == '1.3':
+            status = fields.SnapshotStatus.UNMANAGING
+        else:
+            status = fields.SnapshotStatus.DELETING
+        self.assertEqual(status, snapshot.status)
+
 
 class TestSnapshotList(test_objects.BaseObjectsTestCase):
     @mock.patch('cinder.objects.volume.Volume.get_by_id')
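ddt expands each @ddt.data value into its own generated test method, so the compatibility check above runs once per target version. A self-contained toy showing the same mechanism (hypothetical test, same decorators):

```python
import unittest

import ddt


@ddt.ddt
class ExampleTest(unittest.TestCase):
    @ddt.data('1.1', '1.3')
    def test_versions(self, version):
        # ddt calls this once per data value, as separate test methods.
        self.assertIn(version, ('1.1', '1.3'))


if __name__ == '__main__':
    unittest.main()
```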
cinder/volume/api.py

@@ -412,6 +412,8 @@ class API(base.Base):
         # Don't allow deletion of volume with snapshots
         filters = [~db.volume_has_snapshots_filter()]
         values = {'status': 'deleting', 'terminated_at': timeutils.utcnow()}
+        if unmanage_only is True:
+            values['status'] = 'unmanaging'
         if volume.status == 'error_managing':
             values['status'] = 'error_managing_deleting'
 
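The ordering of these assignments matters: unmanage_only overrides the default 'deleting', and an 'error_managing' volume overrides both. The precedence as a pure function (sketch mirroring the assignments above, not Cinder code):

```python
def target_status(unmanage_only, current_status):
    """Status written by the API delete path, per the precedence above."""
    status = 'deleting'
    if unmanage_only is True:
        status = 'unmanaging'
    if current_status == 'error_managing':
        status = 'error_managing_deleting'
    return status


assert target_status(False, 'available') == 'deleting'
assert target_status(True, 'available') == 'unmanaging'
assert target_status(True, 'error_managing') == 'error_managing_deleting'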
@@ -985,8 +987,10 @@ class API(base.Base):
         expected['status'] = (fields.SnapshotStatus.AVAILABLE,
                               fields.SnapshotStatus.ERROR)
 
-        result = snapshot.conditional_update(
-            {'status': fields.SnapshotStatus.DELETING}, expected)
+        values = {'status': fields.SnapshotStatus.DELETING}
+        if unmanage_only is True:
+            values['status'] = fields.SnapshotStatus.UNMANAGING
+        result = snapshot.conditional_update(values, expected)
         if not result:
             status = utils.build_or_str(expected.get('status'),
                                         _('status must be %s and'))
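conditional_update applies the transition atomically: the status flips to DELETING or UNMANAGING only if the current status still matches one of the expected values, so concurrent delete/unmanage requests cannot both win. A rough in-memory model of those semantics (illustrative only; Cinder implements this as a single UPDATE ... WHERE in the db layer):

```python
def conditional_update(row, values, expected):
    """Apply 'values' only if every 'expected' condition holds."""
    for key, allowed in expected.items():
        allowed = allowed if isinstance(allowed, tuple) else (allowed,)
        if row.get(key) not in allowed:
            return False  # no rows matched; the caller raises an error
    row.update(values)
    return True


row = {'status': 'available'}
assert conditional_update(row, {'status': 'unmanaging'},
                          {'status': ('available', 'error')})
assert row['status'] == 'unmanaging'
```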
cinder/volume/manager.py

@@ -706,7 +706,10 @@ class VolumeManager(manager.CleanableManager,
         is_migrating_dest = (is_migrating and
                              volume.migration_status.startswith(
                                  'target:'))
-        self._notify_about_volume_usage(context, volume, "delete.start")
+        notification = "delete.start"
+        if unmanage_only:
+            notification = "unmanage.start"
+        self._notify_about_volume_usage(context, volume, notification)
         try:
             # NOTE(flaper87): Verify the driver is enabled
             # before going forward. The exception will be caught
@@ -749,8 +752,12 @@ class VolumeManager(manager.CleanableManager,
                 with excutils.save_and_reraise_exception():
                     # If this is a destination volume, we have to clear the
                     # database record to avoid user confusion.
+                    new_status = 'error_deleting'
+                    if unmanage_only is True:
+                        new_status = 'error_unmanaging'
+
                     self._clear_db(context, is_migrating_dest, volume,
-                                   'error_deleting')
+                                   new_status)
 
         # If deleting source/destination volume in a migration, we should
         # skip quotas.
@@ -779,7 +786,10 @@ class VolumeManager(manager.CleanableManager,
         # If deleting source/destination volume in a migration, we should
         # skip quotas.
         if not is_migrating:
-            self._notify_about_volume_usage(context, volume, "delete.end")
+            notification = "delete.end"
+            if unmanage_only:
+                notification = "unmanage.end"
+            self._notify_about_volume_usage(context, volume, notification)
 
             # Commit the reservations
             if reservations:
@@ -801,7 +811,10 @@ class VolumeManager(manager.CleanableManager,
 
         self.publish_service_capabilities(context)
 
-        LOG.info(_LI("Deleted volume successfully."), resource=volume)
+        msg = _LI("Deleted volume successfully.")
+        if unmanage_only:
+            msg = _LI("Unmanaged volume successfully.")
+        LOG.info(msg, resource=volume)
 
     def _clear_db(self, context, is_migrating_dest, volume_ref, status):
         # This method is called when driver.unmanage() or
@@ -936,8 +949,11 @@ class VolumeManager(manager.CleanableManager,
         # Commit the reservations
         if reservations:
             QUOTAS.commit(context, reservations, project_id=project_id)
-        LOG.info(_LI("Delete snapshot completed successfully"),
-                 resource=snapshot)
+
+        msg = _LI("Delete snapshot completed successfully.")
+        if unmanage_only:
+            msg = _LI("Unmanage snapshot completed successfully.")
+        LOG.info(msg, resource=snapshot)
 
     @coordination.synchronized('{volume_id}')
     def attach_volume(self, context, volume_id, instance_uuid, host_name,
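The unmanage_only switch selects notification names and log wording in four places across delete_volume and delete_snapshot. One compact way to express the volume-side variants (hypothetical helper, not part of the commit):

```python
def _event_names(unmanage_only):
    """Return (start_event, end_event, success_message) for the delete path."""
    if unmanage_only:
        return ('unmanage.start', 'unmanage.end',
                'Unmanaged volume successfully.')
    return ('delete.start', 'delete.end',
            'Deleted volume successfully.')


print(_event_names(True)[0])   # unmanage.start
print(_event_names(False)[2])  # Deleted volume successfully.
```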