Fix automatic quota sync for migrating volumes
When using the automatic quota refresh via `until_refresh` and `max_age` configuration options the calculated quota usage by the refresh will not be correct if there are volumes that are migrating, since source and destination volumes are counted in the refresh. Normal quota usage calculation does not work like this; it only counts the volume once, and so should the automatic quota calculation. This patch fixes this by adding a filter to the query that skips migration destination volumes. It also updates the DB implementation of volume_data_get_for_project that didn't match the signature in cinder.db.api Closes-Bug: #1917450 Change-Id: Ifff092917abe07726367a953f5ed420626c53bb9
This commit is contained in:
parent
a66f688992
commit
e55043ff00
@ -1579,15 +1579,25 @@ def volume_data_get_for_host(context, host, count_only=False):
|
||||
|
||||
@require_admin_context
|
||||
def _volume_data_get_for_project(context, project_id, volume_type_id=None,
|
||||
session=None, host=None):
|
||||
session=None, host=None, skip_internal=True):
|
||||
model = models.Volume
|
||||
query = model_query(context,
|
||||
func.count(models.Volume.id),
|
||||
func.sum(models.Volume.size),
|
||||
func.count(model.id),
|
||||
func.sum(model.size),
|
||||
read_deleted="no",
|
||||
session=session).\
|
||||
filter_by(project_id=project_id)
|
||||
|
||||
# When calling the method for quotas we don't count volumes that are the
|
||||
# destination of a migration since they were not accounted for quotas or
|
||||
# reservations in the first place.
|
||||
if skip_internal:
|
||||
query = query.filter(
|
||||
or_(model.migration_status.is_(None),
|
||||
~model.migration_status.startswith('target:')))
|
||||
|
||||
if host:
|
||||
query = query.filter(_filter_host(models.Volume.host, host))
|
||||
query = query.filter(_filter_host(model.host, host))
|
||||
|
||||
if volume_type_id:
|
||||
query = query.filter_by(volume_type_id=volume_type_id)
|
||||
@ -1618,10 +1628,9 @@ def _backup_data_get_for_project(context, project_id, volume_type_id=None,
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def volume_data_get_for_project(context, project_id,
|
||||
volume_type_id=None, host=None):
|
||||
return _volume_data_get_for_project(context, project_id,
|
||||
volume_type_id, host=host)
|
||||
def volume_data_get_for_project(context, project_id, host=None):
|
||||
return _volume_data_get_for_project(context, project_id, host=host,
|
||||
skip_internal=False)
|
||||
|
||||
|
||||
VOLUME_DEPENDENT_MODELS = frozenset([models.VolumeMetadata,
|
||||
|
@ -513,6 +513,47 @@ class DBAPIVolumeTestCase(BaseTest):
|
||||
db.volume_data_get_for_project(
|
||||
self.ctxt, 'p%d' % i))
|
||||
|
||||
@mock.patch.object(sqlalchemy_api, '_volume_data_get_for_project')
|
||||
def test_volume_data_get_for_project_migrating(self, mock_vol_data):
|
||||
expected = (mock.sentinel.count, mock.sentinel.gb)
|
||||
mock_vol_data.return_value = expected
|
||||
res = db.volume_data_get_for_project(self.ctxt,
|
||||
mock.sentinel.project_id,
|
||||
mock.sentinel.host)
|
||||
self.assertEqual(expected, res)
|
||||
mock_vol_data.assert_called_once_with(self.ctxt,
|
||||
mock.sentinel.project_id,
|
||||
host=mock.sentinel.host,
|
||||
skip_internal=False)
|
||||
|
||||
@ddt.data((True, THREE_HUNDREDS, THREE),
|
||||
(False, THREE_HUNDREDS + ONE_HUNDREDS, THREE + 1))
|
||||
@ddt.unpack
|
||||
def test__volume_data_get_for_project_migrating(self, skip_internal,
|
||||
gigabytes, count):
|
||||
for i in range(2):
|
||||
db.volume_create(self.ctxt,
|
||||
{'project_id': 'project',
|
||||
'size': ONE_HUNDREDS,
|
||||
'host': 'h-%d' % i,
|
||||
'volume_type_id': fake.VOLUME_TYPE_ID})
|
||||
# This volume is migrating and will be counted
|
||||
db.volume_create(self.ctxt, {'project_id': 'project',
|
||||
'size': ONE_HUNDREDS,
|
||||
'host': 'h-%d' % i,
|
||||
'volume_type_id': fake.VOLUME_TYPE_ID,
|
||||
'migration_status': 'migrating'})
|
||||
# This one will not be counted
|
||||
db.volume_create(self.ctxt, {'project_id': 'project',
|
||||
'size': ONE_HUNDREDS,
|
||||
'host': 'h-%d' % i,
|
||||
'volume_type_id': fake.VOLUME_TYPE_ID,
|
||||
'migration_status': 'target:vol-id'})
|
||||
|
||||
result = sqlalchemy_api._volume_data_get_for_project(
|
||||
self.ctxt, 'project', skip_internal=skip_internal)
|
||||
self.assertEqual((count, gigabytes), result)
|
||||
|
||||
def test_volume_data_get_for_project_with_host(self):
|
||||
|
||||
db.volume_create(self.ctxt, {'project_id': fake.PROJECT_ID,
|
||||
|
@ -0,0 +1,7 @@
|
||||
---
|
||||
fixes:
|
||||
- |
|
||||
`Bug #1917450 <https://bugs.launchpad.net/cinder/+bug/1917450>`_: Fix
|
||||
automatic quota refresh to correctly account for migrating volumes. During
|
||||
volume migration we'll have 2 volumes in cinder and only one will be
|
||||
accounted for in quota usage.
|
Loading…
Reference in New Issue
Block a user