From 95fcb50a422fbd1949bd99b8cf1276a8de1f2556 Mon Sep 17 00:00:00 2001
From: Gorka Eguileor
Date: Thu, 11 Mar 2021 09:49:32 +0100
Subject: [PATCH] Fix quota usage duplicate entries

Our current quota system has a race condition on reservations that only
happens when we are creating new entries in the quota_usages table.

We normally lock quota_usages rows using a SELECT ... FOR UPDATE query,
but that is only effective when the entries already exist; current code
just creates them and proceeds without holding a lock on them.

This, together with the table not having a unique constraint, means that
we can get duplicated entries and that one entry can overwrite the data
written by another request.

The quota_usages table does soft deletes, so the project_id and resource
fields are not enough for a unique constraint. We therefore add a new
column called race_preventer so we can have a unique constraint over the
3 fields.

Additionally, we have to make sure that we acquire the locks before doing
the reservation calculations or the syncs, so once we create any missing
entries we close the session and try to get the locks again.

With these 2 changes we avoid duplicated entries and prevent our quota
usage from getting out of sync right from the start.

For the unique constraint part of the code there were 2 alternatives (one
was even used in an earlier patchset):

- Create a virtual/computed column for the table that is set to a fixed
  value when deleted_at is NULL and to NULL in any other case, then use
  this virtual/computed column together with the project_id and resource
  fields for a unique constraint.

  This was my preferred solution, but it requires bumping the SQLAlchemy
  version to 1.3.11, where the feature was added as computed columns [1],
  and some DB engines only support it in relatively new versions; for
  example, PostgreSQL only supports it in version 12 or later.

- Set deleted_at to a non-NULL value by default on creation, and make
  sure our code always uses the deleted field to filter values.

  This is a bit nasty, but it has the advantage of requiring no new DB
  fields and no DB data migrations for existing entries, and it is easy
  to roll back once we figure out the underlying issue (although it may
  require a DB data migration on rollback if we want to leave the
  deleted_at entry at NULL).

The decision to add a new field was made because one of the alternatives
is kind of hacky and the other depends on specific DBMS versions and
requires a SQLAlchemy version bump.
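For illustration only (not part of this patch), the discarded
computed-column alternative could look roughly like the following minimal
sketch; it assumes SQLAlchemy >= 1.3.11 and a DBMS with stored generated
column support, the table definition is trimmed, and the not_deleted
column name is made up for the example:

    from sqlalchemy import (Column, Computed, DateTime, Integer, MetaData,
                            String, Table, UniqueConstraint)

    metadata = MetaData()

    quota_usages = Table(
        'quota_usages', metadata,
        Column('id', Integer, primary_key=True),
        Column('project_id', String(255)),
        Column('resource', String(300)),
        Column('deleted_at', DateTime),
        # Fixed value while the row is live, NULL once soft deleted, so
        # the unique constraint below only applies to non-deleted rows.
        Column('not_deleted', Integer,
               Computed('CASE WHEN deleted_at IS NULL '
                        'THEN 1 ELSE NULL END')),
        UniqueConstraint('project_id', 'resource', 'not_deleted'),
    )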
[1]: https://docs.sqlalchemy.org/en/13/core/defaults.html#computed-generated-always-as-columns

Closes-Bug: #1484343
Change-Id: I9000c16c5b3e6f313f02256a10cb4bc0a26379f7
(cherry picked from commit 1fb0767d8874f993e4ac6d9fda92933f1c5b6d3f)
---
 cinder/db/sqlalchemy/api.py                   | 162 +++++++++++-------
 .../141_add_quota_usage_unique_constraint.py  |  37 ++++
 cinder/db/sqlalchemy/models.py                |  16 ++
 cinder/tests/unit/db/test_purge.py            |  72 ++++----
 cinder/tests/unit/test_db_api.py              |  68 +++++++-
 cinder/tests/unit/test_quota.py               |  86 +++++++---
 ...ota-usage-duplicates-c00725089da7bbd8.yaml |   5 +
 7 files changed, 317 insertions(+), 129 deletions(-)
 create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/141_add_quota_usage_unique_constraint.py
 create mode 100644 releasenotes/notes/quota-usage-duplicates-c00725089da7bbd8.yaml

diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py
index e83dd3753fe..ae2c6671e17 100644
--- a/cinder/db/sqlalchemy/api.py
+++ b/cinder/db/sqlalchemy/api.py
@@ -1052,7 +1052,6 @@ def quota_usage_get_all_by_project(context, project_id):
 @require_admin_context
 def _quota_usage_create(context, project_id, resource, in_use, reserved,
                         until_refresh, session=None):
-
     quota_usage_ref = models.QuotaUsage()
     quota_usage_ref.project_id = project_id
     quota_usage_ref.resource = resource
@@ -1124,36 +1123,93 @@ def quota_usage_update_resource(context, old_res, new_res):
             usage.until_refresh = 1


+def _is_duplicate(exc):
+    """Check if an exception is caused by a unique constraint failure."""
+    return isinstance(exc, db_exc.DBDuplicateEntry)
+
+
+def _get_sync_updates(ctxt, project_id, session, resources, resource_name):
+    """Return usage for a specific resource.
+
+    Resources are volumes, gigabytes, backups, snapshots, and also
+    volumes_<volume type name> and snapshots_<volume type name> for each
+    volume type.
+    """
+    # Grab the sync routine
+    sync = QUOTA_SYNC_FUNCTIONS[resources[resource_name].sync]
+    # VolumeTypeResource includes the id and name of the resource.
+    volume_type_id = getattr(resources[resource_name],
+                             'volume_type_id', None)
+    volume_type_name = getattr(resources[resource_name],
+                               'volume_type_name', None)
+    updates = sync(ctxt, project_id,
+                   volume_type_id=volume_type_id,
+                   volume_type_name=volume_type_name,
+                   session=session)
+    return updates
+
+
 @require_context
-@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
+                           exception_checker=_is_duplicate)
 def quota_reserve(context, resources, quotas, deltas, expire,
                   until_refresh, max_age, project_id=None):
     elevated = context.elevated()
     session = get_session()
-    with session.begin():
+
+    # We don't use begin as a context manager because there are cases
+    # where we want to finish a transaction and begin a new one.
+    session.begin()
+    try:
         if project_id is None:
             project_id = context.project_id

-        # Get the current usages
-        usages = _get_quota_usages(context, session, project_id,
-                                   resources=deltas.keys())
+        # Loop until we can lock all the resource rows we'll be modifying
+        while True:
+            # Get the current usages and lock existing rows
+            usages = _get_quota_usages(context, session, project_id,
+                                       resources=deltas.keys())
+            missing = [res for res in deltas if res not in usages]
+            # If we have successfully locked all the rows we can continue.
+            # SELECT ... FOR UPDATE used in _get_quota_usages cannot lock
+            # non-existing rows, so there can be races with other requests
+            # trying to create those rows.
+            if not missing:
+                break
+
+            # Create missing rows calculating current values instead of
+            # assuming there are no used resources, as admins may have
+            # been using this mechanism to force quota usage refresh.
+            for resource in missing:
+                updates = _get_sync_updates(elevated, project_id, session,
+                                            resources, resource)
+                _quota_usage_create(elevated, project_id, resource,
+                                    updates[resource], 0,
+                                    until_refresh or None, session=session)
+
+            # NOTE: When doing the commit there can be a race condition
+            # with other service instances or threads that are also
+            # creating the same rows, and in that case this will raise
+            # either a Deadlock exception (when multiple transactions were
+            # creating the same rows and the DB failed to acquire the row
+            # lock on the non-first transaction) or a DBDuplicateEntry
+            # exception if some other transaction created the row between
+            # us doing the _get_quota_usages and here.  In both cases this
+            # transaction will be rolled back and the wrap_db_retry
+            # decorator will retry.
+
+            # Commit new rows to the DB.
+            session.commit()
+
+            # Start a new session before trying to lock all the rows
+            # again.  By trying to get all the locks in a loop we can
+            # protect ourselves against admins directly deleting DB rows.
+            session.begin()

         # Handle usage refresh
-        work = set(deltas.keys())
-        while work:
-            resource = work.pop()
-
+        for resource in deltas.keys():
             # Do we need to refresh the usage?
             refresh = False
-            if resource not in usages:
-                usages[resource] = _quota_usage_create(elevated,
-                                                       project_id,
-                                                       resource,
-                                                       0, 0,
-                                                       until_refresh or None,
-                                                       session=session)
-                refresh = True
-            elif usages[resource].in_use < 0:
+            if usages[resource].in_use < 0:
+                # If we created the entry right now we want to refresh.
                 # Negative in_use count indicates a desync, so try to
                 # heal from that...
                 refresh = True
@@ -1168,43 +1224,12 @@ def quota_reserve(context, resources, quotas, deltas, expire,

             # OK, refresh the usage
             if refresh:
-                # Grab the sync routine
-                sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync]
-                volume_type_id = getattr(resources[resource],
-                                         'volume_type_id', None)
-                volume_type_name = getattr(resources[resource],
-                                           'volume_type_name', None)
-                updates = sync(elevated, project_id,
-                               volume_type_id=volume_type_id,
-                               volume_type_name=volume_type_name,
-                               session=session)
-                for res, in_use in updates.items():
-                    # Make sure we have a destination for the usage!
-                    if res not in usages:
-                        usages[res] = _quota_usage_create(
-                            elevated,
-                            project_id,
-                            res,
-                            0, 0,
-                            until_refresh or None,
-                            session=session
-                        )
-
-                    # Update the usage
-                    usages[res].in_use = in_use
-                    usages[res].until_refresh = until_refresh or None
-
-                    # Because more than one resource may be refreshed
-                    # by the call to the sync routine, and we don't
-                    # want to double-sync, we make sure all refreshed
-                    # resources are dropped from the work set.
-                    work.discard(res)
-
-                # NOTE(Vek): We make the assumption that the sync
-                #            routine actually refreshes the
-                #            resources that it is the sync routine
-                #            for.  We don't check, because this is
-                #            a best-effort mechanism.
+                updates = _get_sync_updates(elevated, project_id, session,
+                                            resources, resource)
+                # Updates will always contain a single resource usage
+                # matching the resource variable.
+                usages[resource].in_use = updates[resource]
+                usages[resource].until_refresh = until_refresh or None

         # There are 3 cases where we want to update "until_refresh" in
         # the DB: when we enabled it, when we disabled it, and when we
         # changed it.
@@ -1263,14 +1288,18 @@ def quota_reserve(context, resources, quotas, deltas, expire,
                 if delta > 0:
                     usages[resource].reserved += delta

-    if unders:
-        LOG.warning("Change will make usage less than 0 for the following "
-                    "resources: %s", unders)
-    if overs:
-        usages = {k: dict(in_use=v.in_use, reserved=v.reserved)
-                  for k, v in usages.items()}
-        raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
-                                  usages=usages)
+        if unders:
+            LOG.warning("Change will make usage less than 0 for the "
+                        "following resources: %s", unders)
+        if overs:
+            usages = {k: dict(in_use=v.in_use, reserved=v.reserved)
+                      for k, v in usages.items()}
+            raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
+                                      usages=usages)
+        session.commit()
+    except Exception:
+        session.rollback()
+        raise

     return reservations
@@ -6635,8 +6664,9 @@ def purge_deleted_rows(context, age_in_days):
                         deleted.is_(True),
                         models.QualityOfServiceSpecs.
                         deleted_at < deleted_age)).delete()
                 result = session.execute(
-                    table.delete()
-                    .where(table.c.deleted_at < deleted_age))
+                    table.delete().
+                    where(and_(table.columns.deleted.is_(True),
+                               table.c.deleted_at < deleted_age)))
             except db_exc.DBReferenceError as ex:
                 LOG.error('DBError detected when purging from '
                           '%(tablename)s: %(error)s.',
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/141_add_quota_usage_unique_constraint.py b/cinder/db/sqlalchemy/migrate_repo/versions/141_add_quota_usage_unique_constraint.py
new file mode 100644
index 00000000000..81ed7bb2d50
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/141_add_quota_usage_unique_constraint.py
@@ -0,0 +1,37 @@
+# Copyright 2021 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, MetaData, Table
+from migrate.changeset import constraint
+
+
+def upgrade(migrate_engine):
+    """Update quota_usages table to prevent races on creation.
+
+    Add race_preventer field and a unique constraint to prevent quota
+    usage duplicates and races that mess the quota system when first
+    creating rows.
+    """
+    # There's no need to set the race_preventer field for existing DB
+    # entries, since the race we want to prevent is only on creation.
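+    # Existing rows keep race_preventer at NULL; NULL values never
+    # conflict with each other in a unique index, so the constraint only
+    # guards rows created from now on.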
+    meta = MetaData(bind=migrate_engine)
+    quota_usages = Table('quota_usages', meta, autoload=True)
+
+    if not hasattr(quota_usages.c, 'race_preventer'):
+        quota_usages.create_column(Column('race_preventer', Boolean,
+                                          nullable=True))
+        unique = constraint.UniqueConstraint(
+            'project_id', 'resource', 'race_preventer',
+            table=quota_usages)
+        unique.create(engine=migrate_engine)
diff --git a/cinder/db/sqlalchemy/models.py b/cinder/db/sqlalchemy/models.py
index a55f5d23438..70433bfdd5e 100644
--- a/cinder/db/sqlalchemy/models.py
+++ b/cinder/db/sqlalchemy/models.py
@@ -641,6 +641,13 @@ class QuotaUsage(BASE, CinderBase):
     """Represents the current usage for a given resource."""

     __tablename__ = 'quota_usages'
+    # NOTE: project_id and resource are not enough as a unique constraint
+    # since we do soft deletes and there could be duplicated entries, so
+    # we add the race_preventer field.
+    __table_args__ = (
+        UniqueConstraint('project_id', 'resource', 'race_preventer'),
+        CinderBase.__table_args__)
+
     id = Column(Integer, primary_key=True)

     project_id = Column(String(255), index=True)
@@ -655,6 +662,15 @@ class QuotaUsage(BASE, CinderBase):

     until_refresh = Column(Integer, nullable=True)

+    # To prevent races during creation on the quota_reserve method
+    race_preventer = Column(Boolean, nullable=True, default=True)
+
+    @staticmethod
+    def delete_values():
+        res = CinderBase.delete_values()
+        res['race_preventer'] = None
+        return res
+

 class Reservation(BASE, CinderBase):
     """Represents a resource reservation for quotas."""
diff --git a/cinder/tests/unit/db/test_purge.py b/cinder/tests/unit/db/test_purge.py
index 70ca333187b..3aff747f631 100644
--- a/cinder/tests/unit/db/test_purge.py
+++ b/cinder/tests/unit/db/test_purge.py
@@ -114,107 +114,110 @@ class PurgeDeletedTest(test.TestCase):

         make_vol_now = self.volumes.update().\
             where(self.volumes.c.id.in_(self.uuidstrs[0:1]))\
-            .values(deleted_at=now)
+            .values(deleted_at=now, deleted=True)
         make_vol_old = self.volumes.update().\
             where(self.volumes.c.id.in_(self.uuidstrs[1:3]))\
-            .values(deleted_at=old)
+            .values(deleted_at=old, deleted=True)
         make_vol_older = self.volumes.update().\
             where(self.volumes.c.id.in_(self.uuidstrs[4:6]))\
-            .values(deleted_at=older)
+            .values(deleted_at=older, deleted=True)
         make_vol_meta_now = self.vm.update().\
             where(self.vm.c.volume_id.in_(self.uuidstrs[0:1]))\
-            .values(deleted_at=now)
+            .values(deleted_at=now, deleted=True)
         make_vol_meta_old = self.vm.update().\
             where(self.vm.c.volume_id.in_(self.uuidstrs[1:3]))\
-            .values(deleted_at=old)
+            .values(deleted_at=old, deleted=True)
         make_vol_meta_older = self.vm.update().\
             where(self.vm.c.volume_id.in_(self.uuidstrs[4:6]))\
-            .values(deleted_at=older)
+            .values(deleted_at=older, deleted=True)
         make_vol_types_now = self.vol_types.update().\
             where(self.vol_types.c.id.in_(self.uuidstrs[0:1]))\
-            .values(deleted_at=now)
+            .values(deleted_at=now, deleted=True)
         make_vol_types_old = self.vol_types.update().\
             where(self.vol_types.c.id.in_(self.uuidstrs[1:3]))\
-            .values(deleted_at=old)
+            .values(deleted_at=old, deleted=True)
         make_vol_types_older = self.vol_types.update().\
             where(self.vol_types.c.id.in_(self.uuidstrs[4:6]))\
-            .values(deleted_at=older)
+            .values(deleted_at=older, deleted=True)
         make_vol_type_proj_now = self.vol_type_proj.update().\
             where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[0:1]))\
-            .values(deleted_at=now)
+            .values(deleted_at=now, deleted=True)
         make_vol_type_proj_old = self.vol_type_proj.update().\
             where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[1:3]))\
-            .values(deleted_at=old)
+            .values(deleted_at=old, deleted=True)
         make_vol_type_proj_older = self.vol_type_proj.update().\
             where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[4:6]))\
-            .values(deleted_at=older)
+            .values(deleted_at=older, deleted=True)
         make_snap_now = self.snapshots.update().\
             where(self.snapshots.c.id.in_(self.uuidstrs[0:1]))\
-            .values(deleted_at=now)
+            .values(deleted_at=now, deleted=True)
         make_snap_old = self.snapshots.update().\
             where(self.snapshots.c.id.in_(self.uuidstrs[1:3]))\
-            .values(deleted_at=old)
+            .values(deleted_at=old, deleted=True)
         make_snap_older = self.snapshots.update().\
             where(self.snapshots.c.id.in_(self.uuidstrs[4:6]))\
-            .values(deleted_at=older)
+            .values(deleted_at=older, deleted=True)
         make_snap_meta_now = self.sm.update().\
             where(self.sm.c.snapshot_id.in_(self.uuidstrs[0:1]))\
-            .values(deleted_at=now)
+            .values(deleted_at=now, deleted=True)
         make_snap_meta_old = self.sm.update().\
             where(self.sm.c.snapshot_id.in_(self.uuidstrs[1:3]))\
-            .values(deleted_at=old)
+            .values(deleted_at=old, deleted=True)
         make_snap_meta_older = self.sm.update().\
             where(self.sm.c.snapshot_id.in_(self.uuidstrs[4:6]))\
-            .values(deleted_at=older)
+            .values(deleted_at=older, deleted=True)
         make_vol_glance_meta_now = self.vgm.update().\
             where(self.vgm.c.volume_id.in_(self.uuidstrs[0:1]))\
-            .values(deleted_at=now)
+            .values(deleted_at=now, deleted=True)
         make_vol_glance_meta_old = self.vgm.update().\
             where(self.vgm.c.volume_id.in_(self.uuidstrs[1:3]))\
-            .values(deleted_at=old)
+            .values(deleted_at=old, deleted=True)
         make_vol_glance_meta_older = self.vgm.update().\
             where(self.vgm.c.volume_id.in_(self.uuidstrs[4:6]))\
-            .values(deleted_at=older)
+            .values(deleted_at=older, deleted=True)
         make_snap_glance_meta_now = self.vgm.update().\
             where(self.vgm.c.snapshot_id.in_(self.uuidstrs[0:1]))\
-            .values(deleted_at=now)
+            .values(deleted_at=now, deleted=True)
         make_snap_glance_meta_old = self.vgm.update().\
             where(self.vgm.c.snapshot_id.in_(self.uuidstrs[1:3]))\
-            .values(deleted_at=old)
+            .values(deleted_at=old, deleted=True)
         make_snap_glance_meta_older = self.vgm.update().\
             where(self.vgm.c.snapshot_id.in_(self.uuidstrs[4:6]))\
-            .values(deleted_at=older)
+            .values(deleted_at=older, deleted=True)
         make_qos_now = self.qos.update().where(
-            self.qos.c.id.in_(self.uuidstrs[0:1])).values(deleted_at=now)
+            self.qos.c.id.in_(self.uuidstrs[0:1])).values(deleted_at=now,
+                                                          deleted=True)
         make_qos_old = self.qos.update().where(
-            self.qos.c.id.in_(self.uuidstrs[1:3])).values(deleted_at=old)
+            self.qos.c.id.in_(self.uuidstrs[1:3])).values(deleted_at=old,
+                                                          deleted=True)
         make_qos_older = self.qos.update().where(
-            self.qos.c.id.in_(self.uuidstrs[4:6])).values(deleted_at=older)
+            self.qos.c.id.in_(self.uuidstrs[4:6])).values(deleted_at=older,
+                                                          deleted=True)
         make_qos_child_record_now = self.qos.update().where(
             self.qos.c.specs_id.in_(self.uuidstrs[0:1])).values(
-                deleted_at=now)
+                deleted_at=now, deleted=True)
         make_qos_child_record_old = self.qos.update().where(
             self.qos.c.specs_id.in_(self.uuidstrs[1:3])).values(
-                deleted_at=old)
+                deleted_at=old, deleted=True)
         make_qos_child_record_older = self.qos.update().where(
             self.qos.c.specs_id.in_(self.uuidstrs[4:6])).values(
-                deleted_at=older)
+                deleted_at=older, deleted=True)
         make_vol_types1_now = self.vol_types.update().where(
             self.vol_types.c.qos_specs_id.in_(self.uuidstrs[0:1])).values(
-                deleted_at=now)
+                deleted_at=now, deleted=True)
         make_vol_types1_old = self.vol_types.update().where(
             self.vol_types.c.qos_specs_id.in_(self.uuidstrs[1:3])).values(
-                deleted_at=old)
+                deleted_at=old, deleted=True)
         make_vol_types1_older = self.vol_types.update().where(
             self.vol_types.c.qos_specs_id.in_(self.uuidstrs[4:6])).values(
-                deleted_at=older)
+                deleted_at=older, deleted=True)

         self.conn.execute(make_vol_now)
         self.conn.execute(make_vol_old)
@@ -389,7 +392,8 @@ class PurgeDeletedTest(test.TestCase):
         # set volume record to deleted 20 days ago
         old = timeutils.utcnow() - datetime.timedelta(days=20)
         make_old = self.volumes.update().where(
-            self.volumes.c.id.in_([uuid_str])).values(deleted_at=old)
+            self.volumes.c.id.in_([uuid_str])).values(deleted_at=old,
+                                                      deleted=True)
         self.conn.execute(make_old)

         # Verify that purge_deleted_rows fails due to Foreign Key constraint
diff --git a/cinder/tests/unit/test_db_api.py b/cinder/tests/unit/test_db_api.py
index 7223bafe4e7..27ed9e9b443 100644
--- a/cinder/tests/unit/test_db_api.py
+++ b/cinder/tests/unit/test_db_api.py
@@ -20,6 +20,7 @@ from unittest.mock import call

 import ddt
 from oslo_config import cfg
+import oslo_db
 from oslo_utils import timeutils
 from oslo_utils import uuidutils
 from sqlalchemy.sql import operators
@@ -68,8 +69,8 @@ def _quota_reserve(context, project_id):
         deltas[resource] = i + 1
     return db.quota_reserve(
         context, resources, quotas, deltas,
-        datetime.datetime.utcnow(), datetime.datetime.utcnow(),
-        datetime.timedelta(days=1), project_id
+        datetime.datetime.utcnow(), until_refresh=None,
+        max_age=datetime.timedelta(days=1), project_id=project_id
     )
@@ -2563,6 +2564,46 @@ class DBAPIReservationTestCase(BaseTest):
                          self.ctxt,
                          'project1'))

+    @mock.patch('time.sleep', mock.Mock())
+    def test_quota_reserve_create_usages_race(self):
+        """Test that we retry when there is a race in creation."""
+        def create(*args,
+                   original_create=sqlalchemy_api._quota_usage_create,
+                   **kwargs):
+            # Create the quota usage entry (with values set to 0)
+            session = sqlalchemy_api.get_session()
+            kwargs['session'] = session
+            with session.begin():
+                original_create(*args, **kwargs)
+            # Simulate that there's been a race condition with another
+            # create and that we got the exception
+            raise oslo_db.exception.DBDuplicateEntry
+
+        resources = quota.QUOTAS.resources
+        quotas = {'volumes': 5}
+        deltas = {'volumes': 2}
+        project_id = 'project1'
+        expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
+
+        with mock.patch.object(sqlalchemy_api, '_quota_usage_create',
+                               side_effect=create) as create_mock:
+            sqlalchemy_api.quota_reserve(self.ctxt, resources, quotas,
+                                         deltas, expire, 0, 0,
+                                         project_id=project_id)
+
+        # The create call only happens once, when the race happens,
+        # because on the second try of the quota_reserve call the entry
+        # is already in the DB.
+        create_mock.assert_called_once_with(mock.ANY, 'project1',
+                                            'volumes', 0, 0, None,
+                                            session=mock.ANY)
+
+        # Confirm that regardless of who created the DB entry the values
+        # are updated.
+        usages = sqlalchemy_api.quota_usage_get_all_by_project(self.ctxt,
+                                                               project_id)
+        expected = {'project_id': project_id,
+                    'volumes': {'in_use': 0,
+                                'reserved': deltas['volumes']}}
+        self.assertEqual(expected, usages)
+

 class DBAPIMessageTestCase(BaseTest):
@@ -2822,6 +2863,29 @@ class DBAPIQuotaTestCase(BaseTest):
         self.assertEqual(expected, db.quota_usage_get_all_by_project(
             self.ctxt, 'p1'))

+    def test__quota_usage_create(self):
+        session = sqlalchemy_api.get_session()
+        usage = sqlalchemy_api._quota_usage_create(self.ctxt, 'project1',
+                                                   'resource',
+                                                   in_use=10, reserved=0,
+                                                   until_refresh=None,
+                                                   session=session)
+        self.assertEqual('project1', usage.project_id)
+        self.assertEqual('resource', usage.resource)
+        self.assertEqual(10, usage.in_use)
+        self.assertEqual(0, usage.reserved)
+        self.assertIsNone(usage.until_refresh)
+
+    def test__quota_usage_create_duplicate(self):
+        session = sqlalchemy_api.get_session()
+        kwargs = {'project_id': 'project1', 'resource': 'resource',
+                  'in_use': 10, 'reserved': 0, 'until_refresh': None,
+                  'session': session}
+        sqlalchemy_api._quota_usage_create(self.ctxt, **kwargs)
+        self.assertRaises(oslo_db.exception.DBDuplicateEntry,
+                          sqlalchemy_api._quota_usage_create,
+                          self.ctxt, **kwargs)
+

 class DBAPIBackupTestCase(BaseTest):
diff --git a/cinder/tests/unit/test_quota.py b/cinder/tests/unit/test_quota.py
index eb87c45622e..2c742017cd9 100644
--- a/cinder/tests/unit/test_quota.py
+++ b/cinder/tests/unit/test_quota.py
@@ -15,6 +15,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import collections
 import datetime
 from unittest import mock
@@ -1341,6 +1342,12 @@ class FakeSession(object):
     def query(self, *args, **kwargs):
         pass

+    def rollback(self):
+        pass
+
+    def commit(self):
+        pass
+

 class FakeUsage(sqa_models.QuotaUsage):
     def save(self, *args, **kwargs):
@@ -1497,36 +1504,61 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):

         self.assertEqual(0, len(reservations))

-    def test_quota_reserve_create_usages(self):
-        context = FakeContext('test_project', 'test_class')
-        quotas = dict(volumes=5,
-                      gigabytes=10 * 1024, )
-        deltas = dict(volumes=2,
-                      gigabytes=2 * 1024, )
+    @mock.patch.object(sqa_api, '_reservation_create')
+    @mock.patch.object(sqa_api, '_get_sync_updates')
+    @mock.patch.object(sqa_api, '_quota_usage_create')
+    @mock.patch.object(sqa_api, '_get_quota_usages')
+    def test_quota_reserve_create_usages(self, usages_mock,
+                                         quota_create_mock, sync_mock,
+                                         reserve_mock):
+        project_id = 'test_project'
+        context = FakeContext(project_id, 'test_class')
+        quotas = collections.OrderedDict([('volumes', 5),
+                                          ('gigabytes', 10 * 1024)])
+        deltas = collections.OrderedDict([('volumes', 2),
+                                          ('gigabytes', 2 * 1024)])
+
+        sync_mock.side_effect = [{'volumes': 2}, {'gigabytes': 2 * 1024}]
+        vol_usage = self._make_quota_usage(project_id, 'volumes', 2, 0,
+                                           None, None, None)
+        gb_usage = self._make_quota_usage(project_id, 'gigabytes',
+                                          2 * 1024, 0, None, None, None)
+        usages_mock.side_effect = [
+            {},
+            collections.OrderedDict([('volumes', vol_usage),
+                                     ('gigabytes', gb_usage)])
+        ]
+        reservations = [mock.Mock(), mock.Mock()]
+        reserve_mock.side_effect = reservations
+
         result = sqa_api.quota_reserve(context, self.resources, quotas,
                                        deltas, self.expire, 0, 0)

-        self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called)
-        self.compare_usage(self.usages_created,
-                           [dict(resource='volumes',
-                                 project_id='test_project',
-                                 in_use=0,
-                                 reserved=2,
-                                 until_refresh=None),
-                            dict(resource='gigabytes',
-                                 project_id='test_project',
-                                 in_use=0,
-                                 reserved=2 * 1024,
-                                 until_refresh=None), ])
-        self.compare_reservation(
-            result,
-            [dict(resource='volumes',
-                  usage_id=self.usages_created['volumes'],
-                  project_id='test_project',
-                  delta=2),
-             dict(resource='gigabytes',
-                  usage_id=self.usages_created['gigabytes'],
-                  delta=2 * 1024), ])
+        self.assertEqual([r.uuid for r in reservations], result)
+
+        usages_mock.assert_has_calls([
+            mock.call(mock.ANY, mock.ANY, project_id,
+                      resources=deltas.keys()),
+            mock.call(mock.ANY, mock.ANY, project_id,
+                      resources=deltas.keys())
+        ])
+
+        sync_mock.assert_has_calls([
+            mock.call(mock.ANY, project_id, mock.ANY, self.resources,
+                      'volumes'),
+            mock.call(mock.ANY, project_id, mock.ANY, self.resources,
+                      'gigabytes')])
+
+        quota_create_mock.assert_has_calls([
+            mock.call(mock.ANY, project_id, 'volumes', 2, 0, None,
+                      session=mock.ANY),
+            mock.call(mock.ANY, project_id, 'gigabytes', 2 * 1024, 0, None,
+                      session=mock.ANY)
+        ])
+
+        reserve_mock.assert_has_calls([
+            mock.call(mock.ANY, mock.ANY, vol_usage, project_id,
+                      'volumes', 2, mock.ANY, session=mock.ANY),
+            mock.call(mock.ANY, mock.ANY, gb_usage, project_id,
+                      'gigabytes', 2 * 1024, mock.ANY, session=mock.ANY),
+        ])

     def test_quota_reserve_negative_in_use(self):
         self.init_usage('test_project', 'volumes', -1, 0, until_refresh=1)
diff --git a/releasenotes/notes/quota-usage-duplicates-c00725089da7bbd8.yaml b/releasenotes/notes/quota-usage-duplicates-c00725089da7bbd8.yaml
new file mode 100644
index 00000000000..ab1bc817b22
--- /dev/null
+++ b/releasenotes/notes/quota-usage-duplicates-c00725089da7bbd8.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    `Bug #1484343 <https://bugs.launchpad.net/cinder/+bug/1484343>`_: Fix
+    creation of duplicated quota usage entries in the DB.
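
To make the soft-delete interaction concrete, here is a standalone sketch
(not part of the patch; plain sqlite3 used purely for illustration) of why
a NULLable race_preventer column yields a unique constraint that only
applies to live rows:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('''CREATE TABLE quota_usages (
                        id INTEGER PRIMARY KEY,
                        project_id TEXT,
                        resource TEXT,
                        race_preventer BOOLEAN,
                        UNIQUE (project_id, resource, race_preventer))''')
    # A live row has race_preventer set (default=True in the model).
    conn.execute("INSERT INTO quota_usages VALUES (1, 'p1', 'volumes', 1)")
    # Soft delete: delete_values() nulls race_preventer, freeing the slot,
    # because NULLs never collide with each other in a unique index.
    conn.execute("UPDATE quota_usages SET race_preventer = NULL "
                 "WHERE id = 1")
    # A replacement live row is accepted...
    conn.execute("INSERT INTO quota_usages VALUES (2, 'p1', 'volumes', 1)")
    # ...but a duplicate live row raises, which quota_reserve retries on.
    try:
        conn.execute(
            "INSERT INTO quota_usages VALUES (3, 'p1', 'volumes', 1)")
    except sqlite3.IntegrityError as exc:
        print('duplicate rejected:', exc)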