Fix cinder quota-usage error

When backing up a snapshot or an 'in-use' volume,
cinder creates a temporary volume from the
snapshot or 'in-use' volume and then backs up
that temporary volume. Creating the temporary
volume does not consume volume quota, but
deleting it when the backup finishes decrements
the quota, which results in incorrect quota
usage. Fix this by adding admin volume metadata
when creating the temporary volume, using that
metadata to detect temporary volumes, and
skipping quota handling when deleting them.

Change-Id: If100a678cf9062c4078f850d8edc001b6b0705f4
Closes-Bug: #1670636
lihaijing 2018-01-18 13:14:45 +08:00
parent 59bfe72bd5
commit c755e29126
4 changed files with 34 additions and 12 deletions
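
For readers skimming the diff below, here is a minimal standalone sketch of the idea (illustrative only, not cinder's actual code; the function names and dict-based volumes are invented for this note): the temporary volume is tagged via admin metadata at creation time, and deletion uses that tag to decide whether quota should be touched.

# Illustrative sketch only -- plain dicts stand in for volume objects.

def create_temp_volume(source_volume):
    # The temporary copy never consumed quota, so it is tagged with
    # admin metadata {'temporary': 'True'} at creation time.
    return {
        'size': source_volume['size'],
        'volume_type_id': source_volume.get('volume_type_id'),
        'admin_metadata': {'temporary': 'True'},
    }

def should_handle_quota(volume, is_migrating=False):
    # Quota is only adjusted for regular user volumes; migration copies
    # and temporary backup volumes are skipped.
    is_temp = volume.get('admin_metadata', {}).get('temporary', 'False') == 'True'
    return not (is_migrating or is_temp)

# Example: the temp volume created for a backup is excluded from quota handling.
temp = create_temp_volume({'size': 10})
assert should_handle_quota(temp) is False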

View File

@@ -64,6 +64,7 @@ def create_volume(ctxt,
testcase_instance=None,
id=None,
metadata=None,
admin_metadata=None,
**kwargs):
"""Create a volume object in the DB."""
vol = {'size': size,
@@ -79,6 +80,9 @@ def create_volume(ctxt,
if metadata:
vol['metadata'] = metadata
if admin_metadata:
vol['admin_metadata'] = admin_metadata
ctxt = ctxt.elevated()
for key in kwargs:
vol[key] = kwargs[key]
vol['replication_status'] = replication_status

View File

@@ -222,6 +222,7 @@ class GenericVolumeDriverTestCase(BaseDriverTestCase):
(backup_device, is_snapshot) = self.volume.driver.get_backup_device(
self.context, backup_obj)
volume = objects.Volume.get_by_id(self.context, vol.id)
self.assertNotIn('temporary', backup_device.admin_metadata.keys())
self.assertEqual(volume, backup_device)
self.assertFalse(is_snapshot)
backup_obj.refresh()
@@ -231,7 +232,9 @@ class GenericVolumeDriverTestCase(BaseDriverTestCase):
vol = tests_utils.create_volume(self.context,
status='backing-up',
previous_status='in-use')
temp_vol = tests_utils.create_volume(self.context)
admin_meta = {'temporary': 'True'}
temp_vol = tests_utils.create_volume(self.context,
admin_metadata=admin_meta)
self.context.user_id = fake.USER_ID
self.context.project_id = fake.PROJECT_ID
backup_obj = tests_utils.create_backup(self.context,
@@ -248,7 +251,7 @@ class GenericVolumeDriverTestCase(BaseDriverTestCase):
backup_obj.refresh()
self.assertEqual(temp_vol.id, backup_obj.temp_volume_id)
def test__create_temp_volume_from_snapshot(self):
def test_create_temp_volume_from_snapshot(self):
volume_dict = {'id': fake.SNAPSHOT_ID,
'host': 'fakehost',
'cluster_name': 'fakecluster',
@@ -346,8 +349,8 @@ class GenericVolumeDriverTestCase(BaseDriverTestCase):
@mock.patch(driver_name + '.initialize_connection')
@mock.patch(driver_name + '.create_export', return_value=None)
@mock.patch(driver_name + '._connect_device')
def test__attach_volume_encrypted(self, connect_mock, export_mock,
initialize_mock):
def test_attach_volume_encrypted(self, connect_mock, export_mock,
initialize_mock):
properties = {'host': 'myhost', 'ip': '192.168.1.43',
'initiator': u'iqn.1994-05.com.redhat:d9be887375',
'multipath': False, 'os_type': 'linux2',

View File

@@ -1273,9 +1273,10 @@ class BaseVD(object):
'attach_status': fields.VolumeAttachStatus.DETACHED,
'availability_zone': volume.availability_zone,
'volume_type_id': volume.volume_type_id,
'admin_metadata': {'temporary': 'True'},
}
kwargs.update(volume_options or {})
temp_vol_ref = objects.Volume(context=context, **kwargs)
temp_vol_ref = objects.Volume(context=context.elevated(), **kwargs)
temp_vol_ref.create()
return temp_vol_ref

View File

@@ -723,6 +723,10 @@ class VolumeManager(manager.CleanableManager,
2. Delete a migration volume
If deleting the volume in a migration, we want to skip
quotas but we need database updates for the volume.
3. Delete a temp volume for backup
If deleting the temp volume for backup, we want to skip
quotas but we need database updates for the volume.
"""
context = context.elevated()
@@ -757,6 +761,14 @@
reason=_("Unmanage and cascade delete options "
"are mutually exclusive."))
# To back up a snapshot or an 'in-use' volume, create a temp volume
# from the snapshot or in-use volume, and back it up.
# Get admin_metadata to detect such a temporary volume.
is_temp_vol = False
if volume.admin_metadata.get('temporary', 'False') == 'True':
is_temp_vol = True
LOG.info("Trying to delete temp volume: %s", volume.id)
# The status 'deleting' is not included, because it only applies to
# the source volume to be deleted after a migration. No quota
# needs to be handled for it.
@@ -768,7 +780,8 @@
notification = "delete.start"
if unmanage_only:
notification = "unmanage.start"
self._notify_about_volume_usage(context, volume, notification)
if not is_temp_vol:
self._notify_about_volume_usage(context, volume, notification)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
@@ -818,9 +831,10 @@
self._clear_db(context, is_migrating_dest, volume,
new_status)
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
# If deleting source/destination volume in a migration or a temp
# volume for backup, we should skip quotas.
skip_quota = is_migrating or is_temp_vol
if not skip_quota:
# Get reservations
try:
reservations = None
@@ -842,9 +856,9 @@
volume.destroy()
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
# If deleting source/destination volume in a migration or a temp
# volume for backup, we should skip quotas.
if not skip_quota:
notification = "delete.end"
if unmanage_only:
notification = "unmanage.end"
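
Taken together, the manager.py hunks above boil down to the following control flow (a condensed paraphrase for clarity, not the full delete_volume method; the notify and handle_quota parameters are stand-ins for the real notification and reservation/commit calls):

# Condensed paraphrase of the new delete_volume quota/notification handling.

def delete_volume_sketch(volume, is_migrating, notify, handle_quota):
    is_temp_vol = volume.get('admin_metadata', {}).get('temporary', 'False') == 'True'

    # Temp backup volumes never emitted a create notification, so the
    # delete.start notification is skipped for them.
    if not is_temp_vol:
        notify('delete.start')

    # ... driver delete and database cleanup happen here ...

    # Quota handling and the end notification are skipped both for
    # migration source/destination volumes and for temp backup volumes.
    skip_quota = is_migrating or is_temp_vol
    if not skip_quota:
        handle_quota(volume)   # reserve and commit the usage decrease
        notify('delete.end')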