Change snapshots type with volume retype
Both volumes and snapshots have a volume_type_id field in the DB, but when we retype a volume we leave its snapshots with the old type. This can only happen for retypes that don't do a migration, since we cannot do a retype with migration if the volume has snapshots. Leaving the snapshots with the old type makes a mess of the quota usage when we do the retype, as well as when we delete the snapshots. This patch fixes the quota issue by making sure the snapshots are retyped as well. This means that we now check quotas for the snapshots when retyping a volume that has them, and we properly reserve and set the quota on retype.

Closes-Bug: #1877164
Change-Id: I90e9f85d192e1f2fee4ec8615a5bc95851a90f8e
parent d1665ea99e
commit b77fb72aa9
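In outline, the fix pairs a positive per-type quota reservation for the new type with a matching negative reservation for the old type, and moves the volume's snapshots to the new type before committing both. A minimal sketch of that flow; get_reservation and commit are simplified stand-ins for quota_utils.get_volume_type_reservation and QUOTAS.commit, not the real Cinder signatures:

def retype_quota_flow(ctxt, volume, old_type_id, new_type_id,
                      get_reservation, commit):
    # Reserve per-type usage for the new type; with this patch the
    # reservation also counts the volume's snapshots.
    new_reservations = get_reservation(ctxt, volume, new_type_id,
                                       reserve_vol_type_only=True)
    # Mirror reservation that releases the same amounts from the old type.
    old_reservations = get_reservation(ctxt, volume, old_type_id,
                                       reserve_vol_type_only=True,
                                       negative=True)
    # After the driver retype succeeds, point the snapshots at the new
    # type so their volume_type_id matches the volume's.
    for snap in volume.snapshots:
        snap.volume_type_id = new_type_id
        snap.save()
    # Committing both reservations shifts usage from the old type to the
    # new one without touching the global counters.
    commit(ctxt, old_reservations)
    commit(ctxt, new_reservations)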
--- a/cinder/quota_utils.py
+++ b/cinder/quota_utils.py
@@ -45,12 +45,20 @@ class GenericProjectInfo(object):
 def get_volume_type_reservation(ctxt, volume, type_id,
-                                reserve_vol_type_only=False):
+                                reserve_vol_type_only=False,
+                                negative=False):
     from cinder import quota
     QUOTAS = quota.QUOTAS
     # Reserve quotas for the given volume type
     try:
         reserve_opts = {'volumes': 1, 'gigabytes': volume['size']}
+        # When retyping a volume it may contain snapshots (if we are not
+        # migrating it) and we need to account for its snapshots' size
+        if volume.snapshots:
+            reserve_opts['snapshots'] = len(volume.snapshots)
+            if not CONF.no_snapshot_gb_quota:
+                reserve_opts['gigabytes'] += sum(snap.volume_size
+                                                 for snap in volume.snapshots)
         QUOTAS.add_volume_type_opts(ctxt,
                                     reserve_opts,
                                     type_id)
@@ -59,6 +67,12 @@ def get_volume_type_reservation(ctxt, volume, type_id,
         if reserve_vol_type_only:
             reserve_opts.pop('volumes')
             reserve_opts.pop('gigabytes')
+            reserve_opts.pop('snapshots', None)
+
+        if negative:
+            for key, value in reserve_opts.items():
+                reserve_opts[key] = -value
+
         # Note that usually the project_id on the volume will be the same as
         # the project_id in the context. But, if they are different then the
         # reservations must be recorded against the project_id that owns the
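To see what the new accounting produces, here is a small self-contained sketch of the untyped part of reserve_opts for a volume with snapshots; it mimics the logic above but deliberately omits the per-volume-type keys that QUOTAS.add_volume_type_opts mixes in, and fakes CONF with a plain argument:

def build_reserve_opts(size, snapshot_sizes, no_snapshot_gb_quota,
                       negative=False):
    # Mirrors the untyped reserve_opts built above; snapshot_sizes is a
    # list with one entry per snapshot of the volume.
    opts = {'volumes': 1, 'gigabytes': size}
    if snapshot_sizes:
        opts['snapshots'] = len(snapshot_sizes)
        if not no_snapshot_gb_quota:
            opts['gigabytes'] += sum(snapshot_sizes)
    if negative:
        opts = {key: -value for key, value in opts.items()}
    return opts

# A 1 GB volume with 1 GB and 2 GB snapshots, counting snapshot space:
assert build_reserve_opts(1, [1, 2], False) == \
    {'volumes': 1, 'gigabytes': 4, 'snapshots': 2}
# The same amounts negated, as reserved against the old type on retype:
assert build_reserve_opts(1, [1, 2], False, negative=True) == \
    {'volumes': -1, 'gigabytes': -4, 'snapshots': -2}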
--- a/cinder/tests/unit/test_quota.py
+++ b/cinder/tests/unit/test_quota.py
@@ -18,6 +18,7 @@
 import datetime
 from unittest import mock
 
+import ddt
 from oslo_config import cfg
 from oslo_utils import timeutils
 
@@ -34,6 +35,8 @@ from cinder import quota
 from cinder import quota_utils
 from cinder.scheduler import rpcapi as scheduler_rpcapi
+from cinder.tests.unit import fake_constants as fake
+from cinder.tests.unit import fake_snapshot
 from cinder.tests.unit import fake_volume
 import cinder.tests.unit.image.fake
 from cinder.tests.unit import test
 from cinder.tests.unit import utils as tests_utils
@@ -1916,6 +1919,7 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
                                       delta=-2 * 1024), ])
 
 
+@ddt.ddt
 class QuotaVolumeTypeReservationTestCase(test.TestCase):
 
     def setUp(self):
@@ -1932,32 +1936,32 @@ class QuotaVolumeTypeReservationTestCase(test.TestCase):
                                        mock_add_volume_type_opts,
                                        mock_reserve):
         my_context = FakeContext('MyProject', None)
-        volume = {'name': 'my_vol_name',
-                  'id': 'my_vol_id',
-                  'size': '1',
-                  'project_id': 'vol_project_id',
-                  }
-        reserve_opts = {'volumes': 1, 'gigabytes': volume['size']}
+        volume = fake_volume.fake_volume_obj(my_context,
+                                             name='my_vol_name',
+                                             id='my_vol_id',
+                                             size=1,
+                                             project_id='vol_project_id')
         quota_utils.get_volume_type_reservation(my_context,
                                                 volume,
                                                 self.volume_type['id'])
+        reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
         mock_add_volume_type_opts.assert_called_once_with(
             my_context,
             reserve_opts,
             self.volume_type['id'])
         mock_reserve.assert_called_once_with(my_context,
                                              project_id='vol_project_id',
-                                             gigabytes='1',
+                                             gigabytes=1,
                                              volumes=1)
 
     @mock.patch.object(quota.QUOTAS, 'reserve')
     def test_volume_type_reservation_with_type_only(self, mock_reserve):
         my_context = FakeContext('MyProject', None)
-        volume = {'name': 'my_vol_name',
-                  'id': 'my_vol_id',
-                  'size': '1',
-                  'project_id': 'vol_project_id',
-                  }
+        volume = fake_volume.fake_volume_obj(my_context,
+                                             name='my_vol_name',
+                                             id='my_vol_id',
+                                             size=1,
+                                             project_id='vol_project_id')
         quota_utils.get_volume_type_reservation(my_context,
                                                 volume,
                                                 self.volume_type['id'],
@@ -1965,7 +1969,58 @@ class QuotaVolumeTypeReservationTestCase(test.TestCase):
         vtype_volume_quota = "%s_%s" % ('volumes', self.volume_type['name'])
         vtype_size_quota = "%s_%s" % ('gigabytes', self.volume_type['name'])
         reserve_opts = {vtype_volume_quota: 1,
-                        vtype_size_quota: volume['size']}
+                        vtype_size_quota: volume.size}
         mock_reserve.assert_called_once_with(my_context,
                                              project_id='vol_project_id',
                                              **reserve_opts)
 
+    @ddt.data({'count_snaps': True, 'negative': True},
+              {'count_snaps': True, 'negative': False},
+              {'count_snaps': False, 'negative': True},
+              {'count_snaps': False, 'negative': False})
+    @ddt.unpack
+    @mock.patch.object(quota.QUOTAS, 'reserve')
+    def test_volume_type_reservation_snapshots_with_type_only(self,
+                                                              mock_reserve,
+                                                              count_snaps,
+                                                              negative):
+        """Volume type reservations on volume with snapshots
+
+        Test that when the volume has snapshots it takes them into account,
+        and even calculates the quota correctly depending on the
+        no_snapshot_gb_quota configuration option.
+
+        It should work for negative and positive quotas.
+        """
+        self.override_config('no_snapshot_gb_quota', not count_snaps)
+        my_context = FakeContext('MyProject', None)
+        snaps = [fake_snapshot.fake_db_snapshot(volume_size=1),
+                 fake_snapshot.fake_db_snapshot(volume_size=2)]
+        volume = fake_volume.fake_volume_obj(my_context,
+                                             expected_attrs=['snapshots'],
+                                             name='my_vol_name',
+                                             id=fake.VOLUME_ID,
+                                             size=1,
+                                             project_id=fake.PROJECT_ID,
+                                             snapshots=snaps)
+        quota_utils.get_volume_type_reservation(my_context,
+                                                volume,
+                                                self.volume_type['id'],
+                                                reserve_vol_type_only=True,
+                                                negative=negative)
+
+        factor = -1 if negative else 1
+        if count_snaps:
+            snaps_size = (snaps[0]['volume_size'] + snaps[1]['volume_size'])
+        else:
+            snaps_size = 0
+        vtype_volume_quota = "volumes_%s" % self.volume_type['name']
+        vtype_snapshot_quota = "snapshots_%s" % self.volume_type['name']
+        vtype_size_quota = "%s_%s" % ('gigabytes', self.volume_type['name'])
+        reserve_opts = {vtype_volume_quota: factor * 1,
+                        vtype_snapshot_quota: factor * 2,
+                        vtype_size_quota: factor * (volume['size'] +
+                                                    snaps_size)}
+        mock_reserve.assert_called_once_with(my_context,
+                                             project_id=fake.PROJECT_ID,
+                                             **reserve_opts)
--- a/cinder/tests/unit/volume/test_volume_migration.py
+++ b/cinder/tests/unit/volume/test_volume_migration.py
@@ -31,6 +31,7 @@ from cinder import exception
 from cinder import objects
 from cinder.objects import fields
 from cinder import quota
+from cinder import quota_utils
 from cinder.tests.unit.api import fakes
 from cinder.tests.unit import fake_constants as fake
 from cinder.tests.unit import fake_volume
@@ -783,41 +784,23 @@ class VolumeMigrationTestCase(base.BaseVolumeTestCase):
         volume.previous_status = 'available'
         volume.save()
         if snap:
-            create_snapshot(volume.id, size=volume.size,
-                            user_id=self.user_context.user_id,
-                            project_id=self.user_context.project_id,
-                            ctxt=self.user_context)
+            snapshot = create_snapshot(volume.id, size=volume.size,
+                                       user_id=self.user_context.user_id,
+                                       project_id=self.user_context.project_id,
+                                       volume_type_id=volume.volume_type_id,
+                                       ctxt=self.user_context)
         if driver or diff_equal:
             host_obj = {'host': CONF.host, 'capabilities': {}}
         else:
             host_obj = {'host': 'newhost', 'capabilities': {}}
 
-        reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
-        QUOTAS.add_volume_type_opts(self.context,
-                                    reserve_opts,
-                                    vol_type['id'])
-        if reserve_vol_type_only:
-            reserve_opts.pop('volumes')
-            reserve_opts.pop('gigabytes')
-        try:
-            usage = db.quota_usage_get(elevated, project_id, 'volumes')
-            total_volumes_in_use = usage.in_use
-            usage = db.quota_usage_get(elevated, project_id, 'gigabytes')
-            total_gigabytes_in_use = usage.in_use
-        except exception.QuotaUsageNotFound:
-            total_volumes_in_use = 0
-            total_gigabytes_in_use = 0
-        reservations = QUOTAS.reserve(self.context,
-                                      project_id=project_id,
-                                      **reserve_opts)
+        reservations = quota_utils.get_volume_type_reservation(
+            self.context, volume, new_type.id, reserve_vol_type_only)
+        old_reservations = quota_utils.get_volume_type_reservation(
+            self.context, volume, old_vol_type['id'], reserve_vol_type_only,
+            negative=True)
 
-        old_reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
-        QUOTAS.add_volume_type_opts(self.context,
-                                    old_reserve_opts,
-                                    old_vol_type['id'])
-        old_reservations = QUOTAS.reserve(self.context,
-                                          project_id=project_id,
-                                          **old_reserve_opts)
+        old_usage = db.quota_usage_get_all_by_project(elevated, project_id)
 
         with mock.patch.object(self.volume.driver, 'retype') as _retype,\
                 mock.patch.object(volume_types, 'volume_types_diff') as _diff,\
@@ -874,17 +857,22 @@ class VolumeMigrationTestCase(base.BaseVolumeTestCase):
 
         # Get new in_use after retype, it should not be changed.
         if reserve_vol_type_only:
-            try:
-                usage = db.quota_usage_get(elevated, project_id, 'volumes')
-                new_total_volumes_in_use = usage.in_use
-                usage = db.quota_usage_get(elevated, project_id, 'gigabytes')
-                new_total_gigabytes_in_use = usage.in_use
-            except exception.QuotaUsageNotFound:
-                new_total_volumes_in_use = 0
-                new_total_gigabytes_in_use = 0
-            self.assertEqual(total_volumes_in_use, new_total_volumes_in_use)
-            self.assertEqual(total_gigabytes_in_use,
-                             new_total_gigabytes_in_use)
+            new_usage = db.quota_usage_get_all_by_project(elevated, project_id)
+            for resource in ('volumes', 'gigabytes', 'snapshots'):
+                empty = {'in_use': 0, 'reserved': 0}
+                # Global resource hasn't changed
+                self.assertEqual(old_usage.get(resource, empty)['in_use'],
+                                 new_usage.get(resource, empty)['in_use'])
+                # The new type was empty before
+                self.assertEqual(
+                    0, old_usage.get(resource + '_new', empty)['in_use'])
+                # Old type resources have been moved to the new one
+                self.assertEqual(
+                    old_usage.get(resource + '_old', empty)['in_use'],
+                    new_usage.get(resource + '_new', empty)['in_use'])
+                # The old type is empty now
+                self.assertEqual(
+                    0, new_usage.get(resource + '_old', empty)['in_use'])
 
         # check properties
         if driver or diff_equal:
@@ -903,6 +891,12 @@ class VolumeMigrationTestCase(base.BaseVolumeTestCase):
                                              new_type,
                                              returned_diff,
                                              host_obj)
+            # When retyping a volume with snapshots the snapshots should be
+            # retyped as well
+            if snap:
+                snapshot.refresh()
+                self.assertEqual(new_type.id, snapshot.volume_type_id)
+
         elif not exc:
             self.assertEqual(old_vol_type['id'], volume.volume_type_id)
             self.assertEqual('retyping', volume.status)
@@ -951,6 +945,10 @@ class VolumeMigrationTestCase(base.BaseVolumeTestCase):
     def test_retype_volume_migration_equal_types(self):
         self._retype_volume_exec(False, diff_equal=True)
 
+    def test_retype_volume_migration_equal_types_snaps(self):
+        self._retype_volume_exec(False, snap=True, diff_equal=True,
+                                 reserve_vol_type_only=True)
+
     def test_retype_volume_with_type_only(self):
         self._retype_volume_exec(True, reserve_vol_type_only=True)
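To make the new assertions concrete: for a hypothetical 1 GB volume with one 1 GB snapshot retyped from a type named 'old' to one named 'new', the usage the test inspects would move as below (illustrative numbers only; the real rows returned by db.quota_usage_get_all_by_project are dicts with in_use and reserved fields):

before = {'volumes': 1, 'gigabytes': 2, 'snapshots': 1,
          'volumes_old': 1, 'gigabytes_old': 2, 'snapshots_old': 1,
          'volumes_new': 0, 'gigabytes_new': 0, 'snapshots_new': 0}
after = {'volumes': 1, 'gigabytes': 2, 'snapshots': 1,
         'volumes_old': 0, 'gigabytes_old': 0, 'snapshots_old': 0,
         'volumes_new': 1, 'gigabytes_new': 2, 'snapshots_new': 1}
# Global usage is unchanged; everything under the old type moved over.
for resource in ('volumes', 'gigabytes', 'snapshots'):
    assert before[resource] == after[resource]
    assert before[resource + '_old'] == after[resource + '_new']
    assert after[resource + '_old'] == 0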
--- a/cinder/volume/api.py
+++ b/cinder/volume/api.py
@@ -1690,18 +1690,10 @@ class API(base.Base):
 
         # Get old reservations
         try:
-            reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
-            QUOTAS.add_volume_type_opts(context,
-                                        reserve_opts,
-                                        volume.volume_type_id)
-            # NOTE(wanghao): We don't need to reserve volumes and gigabytes
-            # quota for retyping operation since they didn't changed, just
-            # reserve volume_type and type gigabytes is fine.
-            reserve_opts.pop('volumes')
-            reserve_opts.pop('gigabytes')
-            old_reservations = QUOTAS.reserve(context,
-                                              project_id=volume.project_id,
-                                              **reserve_opts)
+            old_reservations = quota_utils.get_volume_type_reservation(
+                context, volume, volume.volume_type_id,
+                reserve_vol_type_only=True, negative=True)
+
         except Exception:
             volume.status = volume.previous_status
             volume.save()
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -200,7 +200,7 @@ class VolumeManager(manager.CleanableManager,
     _VOLUME_CLONE_SKIP_PROPERTIES = {
         'id', '_name_id', 'name_id', 'name', 'status',
         'attach_status', 'migration_status', 'volume_type',
-        'consistencygroup', 'volume_attachment', 'group'}
+        'consistencygroup', 'volume_attachment', 'group', 'snapshots'}
 
     def _get_service(self,
                      host: str = None,
@@ -3048,6 +3048,11 @@ class VolumeManager(manager.CleanableManager,
             volume.update(model_update)
             volume.save()
 
+        # We need to make the snapshots match the volume's type
+        for snap in volume.snapshots:
+            snap.volume_type_id = new_type_id
+            snap.save()
+
         if old_reservations:
             QUOTAS.commit(context, old_reservations, project_id=project_id)
         if new_reservations:
@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    `Bug #1877164 <https://bugs.launchpad.net/cinder/+bug/1877164>`_: Fix
+    retyping a volume with snapshots leaving the snapshots with the old
+    type, which made the quotas wrong immediately for snapshots and broke
+    them even more after those snapshots were deleted.