Don't change volume's status when creating backups from snapshots

When a user creates a backup from a snapshot, the volume related to
the snapshot is set to 'backing-up' for the duration of the operation
and can't be used for anything else. Creating a backup from a large
snapshot (for example, larger than 1 TB) generally takes a few hours,
so it is a real problem that the volume is unavailable for such a
long time.

If a snapshot is provided, we now change the status of the snapshot;
otherwise, we change the status of the volume as before.

This patch also adds a "backing-up" status for snapshots.
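
A rough illustration of the intended behaviour (simplified, hypothetical
helper; not the literal patch code, which is in the backup API hunk below):
only the backup source has its status flipped while the backup runs.

    # Sketch of the status selection introduced by this change.
    def mark_backup_source(db, context, volume_id, snapshot_id,
                           previous_status):
        if snapshot_id:
            # Backup taken from a snapshot: only the snapshot is marked;
            # the volume stays 'available' and remains usable.
            db.snapshot_update(context, snapshot_id,
                               {'status': 'backing-up'})
        else:
            # Backup taken directly from the volume: unchanged behaviour.
            db.volume_update(context, volume_id,
                             {'status': 'backing-up',
                              'previous_status': previous_status})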

DocImpact

Change-Id: I86d34c470fabbf4132b5e004d9f368e751c893a5
Closes-bug: #1670541
Author: wangxiyuan
Date:   2017-03-07 10:42:02 +08:00
Commit: d304f621c2 (parent: debc2b8fa8)
9 changed files with 109 additions and 14 deletions


@@ -291,7 +291,10 @@ class API(base.Base):
         if snapshot_id:
             snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
             data_timestamp = snapshot.created_at
-        self.db.volume_update(context, volume_id,
-                              {'status': 'backing-up',
-                               'previous_status': previous_status})
+            self.db.snapshot_update(
+                context, snapshot_id,
+                {'status': fields.SnapshotStatus.BACKING_UP})
+        else:
+            self.db.volume_update(context, volume_id,
+                                  {'status': 'backing-up',
+                                   'previous_status': previous_status})


@@ -355,7 +355,10 @@ class BackupManager(manager.ThreadPoolManager):
     def create_backup(self, context, backup):
         """Create volume backups using configured backup service."""
         volume_id = backup.volume_id
+        snapshot_id = backup.snapshot_id
         volume = objects.Volume.get_by_id(context, volume_id)
+        snapshot = objects.Snapshot.get_by_id(
+            context, snapshot_id) if snapshot_id else None
         previous_status = volume.get('previous_status', None)
         LOG.info('Create backup started, backup: %(backup_id)s '
                  'volume: %(volume_id)s.',
@@ -368,7 +371,7 @@ class BackupManager(manager.ThreadPoolManager):
         backup.availability_zone = self.az
         backup.save()

-        expected_status = 'backing-up'
+        expected_status = 'available' if snapshot_id else "backing-up"
         actual_status = volume['status']
         if actual_status != expected_status:
             err = _('Create backup aborted, expected volume status '
@@ -379,6 +382,18 @@ class BackupManager(manager.ThreadPoolManager):
             self._update_backup_error(backup, err)
             raise exception.InvalidVolume(reason=err)

+        if snapshot_id:
+            expected_status = fields.SnapshotStatus.BACKING_UP
+            actual_status = snapshot['status']
+            if actual_status != expected_status:
+                err = _('Create backup aborted, expected snapshot status '
+                        '%(expected_status)s but got %(actual_status)s.') % {
+                    'expected_status': expected_status,
+                    'actual_status': actual_status,
+                }
+                self._update_backup_error(backup, err)
+                raise exception.InvalidSnapshot(reason=err)
+
         expected_status = fields.BackupStatus.CREATING
         actual_status = backup.status
         if actual_status != expected_status:
@@ -401,6 +416,10 @@ class BackupManager(manager.ThreadPoolManager):
                 self._update_backup_error(backup, six.text_type(err))
                 # Restore the original status.
-                self.db.volume_update(context, volume_id,
-                                      {'status': previous_status,
-                                       'previous_status': 'backing-up'})
+                if snapshot_id:
+                    self.db.snapshot_update(context, snapshot_id,
+                        {'status': fields.BackupStatus.AVAILABLE})
+                else:
+                    self.db.volume_update(context, volume_id,
+                                          {'status': previous_status,
+                                           'previous_status': 'backing-up'})


@@ -129,6 +129,7 @@ OBJ_VERSIONS.add('1.21', {'ManageableSnapshot': '1.0',
                           'ManageableVolume': '1.0',
                           'ManageableVolumeList': '1.0',
                           'ManageableSnapshotList': '1.0'})
+OBJ_VERSIONS.add('1.22', {'Snapshot': '1.4'})


 class CinderObjectRegistry(base.VersionedObjectRegistry):


@@ -123,9 +123,10 @@ class SnapshotStatus(BaseCinderEnum):
     UPDATING = 'updating'
     ERROR_DELETING = 'error_deleting'
     UNMANAGING = 'unmanaging'
+    BACKING_UP = 'backing-up'

     ALL = (ERROR, AVAILABLE, CREATING, DELETING, DELETED,
-           UPDATING, ERROR_DELETING, UNMANAGING)
+           UPDATING, ERROR_DELETING, UNMANAGING, BACKING_UP)


 class SnapshotStatusField(BaseEnumField):
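
For context, the Snapshot object's status field only accepts values listed in
SnapshotStatus.ALL, which is why BACKING_UP also has to be appended to that
tuple. A minimal standalone sketch of that validation behaviour (assumed,
simplified names; not cinder code):

    # Enum-backed versioned-object fields coerce only values present in the
    # valid-values tuple; anything else raises ValueError.
    from oslo_versionedobjects import fields as ovo_fields

    BACKING_UP = 'backing-up'
    ALL = ('available', 'creating', 'deleting', BACKING_UP)

    status_field = ovo_fields.EnumField(valid_values=ALL, nullable=False)
    print(status_field.coerce(None, 'status', BACKING_UP))  # -> 'backing-up'
    # If BACKING_UP were missing from ALL, the same call would raise
    # ValueError instead.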


@@ -36,7 +36,8 @@ class Snapshot(cleanable.CinderCleanableObject, base.CinderObject,
     # Version 1.1: Changed 'status' field to use SnapshotStatusField
     # Version 1.2: This object is now cleanable (adds rows to workers table)
     # Version 1.3: SnapshotStatusField now includes "unmanaging"
-    VERSION = '1.3'
+    # Version 1.4: SnapshotStatusField now includes "backing-up"
+    VERSION = '1.4'

     # NOTE(thangp): OPTIONAL_FIELDS are fields that would be lazy-loaded. They
     # are typically the relationship in the sqlalchemy object.
@@ -121,6 +122,9 @@ class Snapshot(cleanable.CinderCleanableObject, base.CinderObject,
         if target_version < (1, 3):
             if primitive.get('status') == c_fields.SnapshotStatus.UNMANAGING:
                 primitive['status'] = c_fields.SnapshotStatus.DELETING
+        if target_version < (1, 4):
+            if primitive.get('status') == c_fields.SnapshotStatus.BACKING_UP:
+                primitive['status'] = c_fields.SnapshotStatus.AVAILABLE

     @classmethod
     def _from_db_object(cls, context, snapshot, db_snapshot,
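
For context, obj_make_compatible is what keeps rolling upgrades safe: when a
version 1.4 Snapshot is backported for a service pinned to an older object
version, the new 'backing-up' status must not leak out, so it is downgraded to
'available'. A small usage sketch (assumed setup code; it mirrors the new unit
test further below):

    from cinder import context
    from cinder import objects
    from cinder.objects import fields as c_fields

    objects.register_all()
    ctxt = context.get_admin_context()

    snapshot = objects.Snapshot(context=ctxt)
    snapshot.status = c_fields.SnapshotStatus.BACKING_UP

    # Backport to the previous version, as the RPC layer would do for an
    # older peer; 'backing-up' is mapped back to 'available'.
    primitive = snapshot.obj_to_primitive(target_version='1.3')
    downgraded = objects.Snapshot.obj_from_primitive(primitive)
    assert downgraded.status == c_fields.SnapshotStatus.AVAILABLE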


@@ -78,7 +78,8 @@ class BaseBackupTest(test.TestCase):
                                  project_id=str(uuid.uuid4()),
                                  service=None,
                                  temp_volume_id=None,
-                                 temp_snapshot_id=None):
+                                 temp_snapshot_id=None,
+                                 snapshot_id=None):
         """Create a backup entry in the DB.

         Return the entry ID
@@ -96,7 +97,7 @@ class BaseBackupTest(test.TestCase):
         kwargs['status'] = status
         kwargs['fail_reason'] = ''
         kwargs['service'] = service or CONF.backup_driver
-        kwargs['snapshot'] = False
+        kwargs['snapshot_id'] = snapshot_id
         kwargs['parent_id'] = None
         kwargs['size'] = size
         kwargs['object_count'] = object_count
@@ -618,6 +619,28 @@ class BackupTestCase(BaseBackupTest):
         self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
         self.assertEqual(vol_size, backup['size'])

+    @mock.patch('cinder.backup.manager.BackupManager._run_backup')
+    @ddt.data(fields.SnapshotStatus.BACKING_UP,
+              fields.SnapshotStatus.AVAILABLE)
+    def test_create_backup_with_snapshot(self, snapshot_status,
+                                         mock_run_backup):
+        vol_id = self._create_volume_db_entry(status='available')
+        snapshot = self._create_snapshot_db_entry(volume_id=vol_id,
+                                                  status=snapshot_status)
+        backup = self._create_backup_db_entry(volume_id=vol_id,
+                                              snapshot_id=snapshot.id)
+        if snapshot_status == fields.SnapshotStatus.BACKING_UP:
+            self.backup_mgr.create_backup(self.ctxt, backup)
+            vol = objects.Volume.get_by_id(self.ctxt, vol_id)
+            snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id)
+
+            self.assertEqual('available', vol.status)
+            self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot.status)
+        else:
+            self.assertRaises(exception.InvalidSnapshot,
+                              self.backup_mgr.create_backup, self.ctxt, backup)
+
     @mock.patch('cinder.utils.brick_get_connector_properties')
     @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
     @mock.patch('cinder.utils.temporary_chown')
@@ -1495,6 +1518,32 @@ class BackupAPITestCase(BaseBackupTest):
         backup = self.api.create(self.ctxt, None, None, volume_id, None)
         self.assertEqual('testhost', backup.host)

+    @mock.patch.object(api.API, '_get_available_backup_service_host',
+                       return_value='fake_host')
+    @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup')
+    @ddt.data(True, False)
+    def test_create_backup_resource_status(self, is_snapshot, mock_create,
+                                           mock_get_service):
+        self.ctxt.user_id = 'fake_user'
+        self.ctxt.project_id = 'fake_project'
+        volume_id = self._create_volume_db_entry(status='available')
+        snapshot = self._create_snapshot_db_entry(volume_id=volume_id)
+        if is_snapshot:
+            self.api.create(self.ctxt, None, None, volume_id, None,
+                            snapshot_id=snapshot.id)
+            volume = objects.Volume.get_by_id(self.ctxt, volume_id)
+            snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id)
+
+            self.assertEqual('backing-up', snapshot.status)
+            self.assertEqual('available', volume.status)
+        else:
+            self.api.create(self.ctxt, None, None, volume_id, None)
+            volume = objects.Volume.get_by_id(self.ctxt, volume_id)
+            snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id)
+
+            self.assertEqual('available', snapshot.status)
+            self.assertEqual('backing-up', volume.status)
+
     @mock.patch('cinder.backup.api.API._get_available_backup_service_host')
     @mock.patch('cinder.backup.rpcapi.BackupAPI.restore_backup')
     def test_restore_volume(self,


@@ -43,7 +43,7 @@ object_data = {
     'RequestSpec': '1.1-b0bd1a28d191d75648901fa853e8a733',
     'Service': '1.4-c7d011989d1718ca0496ccf640b42712',
     'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
-    'Snapshot': '1.3-69dfbe3244992478a0174cb512cd7f27',
+    'Snapshot': '1.4-b7aa184837ccff570b8443bfd1773017',
     'SnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
     'Volume': '1.6-7d3bc8577839d5725670d55e480fe95f',
     'VolumeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',


@@ -231,7 +231,7 @@ class TestSnapshot(test_objects.BaseObjectsTestCase):
                                           fake.SNAPSHOT_ID)])

     @ddt.data('1.1', '1.3')
-    def test_obj_make_compatible(self, version):
+    def test_obj_make_compatible_1_3(self, version):
         snapshot = objects.Snapshot(context=self.context)
         snapshot.status = 'unmanaging'
         primitive = snapshot.obj_to_primitive(version)
@@ -242,6 +242,18 @@ class TestSnapshot(test_objects.BaseObjectsTestCase):
             status = fields.SnapshotStatus.DELETING
         self.assertEqual(status, snapshot.status)

+    @ddt.data('1.3', '1.4')
+    def test_obj_make_compatible_1_4(self, version):
+        snapshot = objects.Snapshot(context=self.context)
+        snapshot.status = fields.SnapshotStatus.BACKING_UP
+        primitive = snapshot.obj_to_primitive(version)
+        snapshot = objects.Snapshot.obj_from_primitive(primitive)
+        if version == '1.4':
+            status = fields.SnapshotStatus.BACKING_UP
+        else:
+            status = fields.SnapshotStatus.AVAILABLE
+        self.assertEqual(status, snapshot.status)
+

 class TestSnapshotList(test_objects.BaseObjectsTestCase):
     @mock.patch('cinder.objects.volume.Volume.get_by_id')


@@ -0,0 +1,6 @@
+---
+fixes:
+  - Fix the unreasonable status change for volumes and snapshots when creating
+    backups from snapshots.
+upgrade:
+  - The "backing-up" status is added to the snapshot's status matrix.