Merge "Fix quota usage duplicate entries"
This commit is contained in:
commit
6d015b4557
|
cinder/db/sqlalchemy/api.py:

@@ -1052,7 +1052,6 @@ def quota_usage_get_all_by_project(context, project_id):
 @require_admin_context
 def _quota_usage_create(context, project_id, resource, in_use, reserved,
                         until_refresh, session=None):
-
     quota_usage_ref = models.QuotaUsage()
     quota_usage_ref.project_id = project_id
     quota_usage_ref.resource = resource
@@ -1124,36 +1123,93 @@ def quota_usage_update_resource(context, old_res, new_res):
             usage.until_refresh = 1


+def _is_duplicate(exc):
+    """Check if an exception is caused by a unique constraint failure."""
+    return isinstance(exc, db_exc.DBDuplicateEntry)
+
+
+def _get_sync_updates(ctxt, project_id, session, resources, resource_name):
+    """Return usage for a specific resource.
+
+    Resources are volumes, gigabytes, backups, snapshots, and also
+    volumes_<type_name> and snapshots_<type_name> for each volume type.
+    """
+    # Grab the sync routine
+    sync = QUOTA_SYNC_FUNCTIONS[resources[resource_name].sync]
+    # VolumeTypeResource includes the id and name of the resource.
+    volume_type_id = getattr(resources[resource_name],
+                             'volume_type_id', None)
+    volume_type_name = getattr(resources[resource_name],
+                               'volume_type_name', None)
+    updates = sync(ctxt, project_id,
+                   volume_type_id=volume_type_id,
+                   volume_type_name=volume_type_name,
+                   session=session)
+    return updates
+
+
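The new _is_duplicate helper exists to be plugged into oslo.db's wrap_db_retry as its exception checker (see the decorator change just below): when the decorated function raises an exception the checker recognizes, the whole call is retried. A minimal sketch of those semantics with a toy decorator, purely illustrative and not oslo.db's actual implementation:

    import time


    class DBDuplicateEntry(Exception):
        """Stand-in for oslo_db.exception.DBDuplicateEntry."""


    def wrap_db_retry(max_retries, exception_checker):
        """Toy version of the retry decorator's contract."""
        def decorator(fn):
            def wrapper(*args, **kwargs):
                for attempt in range(max_retries + 1):
                    try:
                        return fn(*args, **kwargs)
                    except Exception as exc:
                        # Re-raise when out of retries, or when the checker
                        # does not recognize the exception.
                        if attempt == max_retries or not exception_checker(exc):
                            raise
                        time.sleep(0.01)
            return wrapper
        return decorator


    def _is_duplicate(exc):
        return isinstance(exc, DBDuplicateEntry)


    attempts = []


    @wrap_db_retry(max_retries=5, exception_checker=_is_duplicate)
    def reserve():
        attempts.append(1)
        if len(attempts) == 1:      # lose the creation race exactly once
            raise DBDuplicateEntry()
        return 'reserved'


    print(reserve())                # 'reserved', after one retry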
 @require_context
-@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
+                           exception_checker=_is_duplicate)
 def quota_reserve(context, resources, quotas, deltas, expire,
                   until_refresh, max_age, project_id=None):
     elevated = context.elevated()
     session = get_session()
-    with session.begin():
+    # We don't use begin as a context manager because there are cases where we
+    # want to finish a transaction and begin a new one.
+    session.begin()
+    try:
         if project_id is None:
             project_id = context.project_id

-        # Get the current usages
-        usages = _get_quota_usages(context, session, project_id,
-                                   resources=deltas.keys())
+        # Loop until we can lock all the resource rows we'll be modifying
+        while True:
+            # Get the current usages and lock existing rows
+            usages = _get_quota_usages(context, session, project_id,
+                                       resources=deltas.keys())
+            missing = [res for res in deltas if res not in usages]
+            # If we have successfully locked all the rows we can continue.
+            # SELECT ... FOR UPDATE used in _get_quota_usages cannot lock
+            # non-existing rows, so there can be races with other requests
+            # trying to create those rows.
+            if not missing:
+                break
+
+            # Create missing rows calculating current values instead of
+            # assuming there are no used resources as admins may have been
+            # using this mechanism to force quota usage refresh.
+            for resource in missing:
+                updates = _get_sync_updates(elevated, project_id, session,
+                                            resources, resource)
+                _quota_usage_create(elevated, project_id, resource,
+                                    updates[resource], 0,
+                                    until_refresh or None, session=session)
+
+            # NOTE: When doing the commit there can be a race condition with
+            # other service instances or threads that are also creating the
+            # same rows and in that case this will raise either a Deadlock
+            # exception (when multiple transactions were creating the same
+            # rows and the DB failed to acquire the row lock on the non-first
+            # transaction) or a DBDuplicateEntry exception if some other
+            # transaction created the row between us doing the
+            # _get_quota_usages and here. In both cases this transaction will
+            # be rolled back and the wrap_db_retry decorator will retry.
+
+            # Commit new rows to the DB.
+            session.commit()
+
+            # Start a new session before trying to lock all the rows again.
+            # By trying to get all the locks in a loop we can protect us
+            # against admins directly deleting DB rows.
+            session.begin()
+
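The reason this has to be a loop: SELECT ... FOR UPDATE only locks rows that already exist, so two concurrent requests can both see a usage row as missing and both try to insert it. The loop's shape, reduced to a runnable toy where an in-memory dict stands in for the table (the helper names here are illustrative, not Cinder's):

    store = {}                              # (project_id, resource) -> row
    deltas = {'volumes': 2, 'gigabytes': 2048}


    def get_locked_usages(project_id, resources):
        # FOR UPDATE can only lock rows that exist; missing ones stay racy.
        return {r: store[(project_id, r)]
                for r in resources if (project_id, r) in store}


    def create_usage_row(project_id, resource):
        key = (project_id, resource)
        if key in store:                    # unique-constraint stand-in
            raise RuntimeError('DBDuplicateEntry')
        store[key] = {'in_use': 0, 'reserved': 0}


    project_id = 'p1'
    while True:
        usages = get_locked_usages(project_id, deltas.keys())
        missing = [r for r in deltas if r not in usages]
        if not missing:
            break                           # everything exists and is "locked"
        for r in missing:
            create_usage_row(project_id, r)
        # In the real code a commit happens here; the loser of a race gets
        # DBDuplicateEntry and wrap_db_retry restarts quota_reserve.

    print(sorted(usages))                   # ['gigabytes', 'volumes']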
         # Handle usage refresh
-        work = set(deltas.keys())
-        while work:
-            resource = work.pop()
-
+        for resource in deltas.keys():
             # Do we need to refresh the usage?
             refresh = False
-            if resource not in usages:
-                usages[resource] = _quota_usage_create(elevated,
-                                                       project_id,
-                                                       resource,
-                                                       0, 0,
-                                                       until_refresh or None,
-                                                       session=session)
-                refresh = True
-            elif usages[resource].in_use < 0:
+            if usages[resource].in_use < 0:
+                # If we created the entry right now we want to refresh.
                 # Negative in_use count indicates a desync, so try to
                 # heal from that...
                 refresh = True
@@ -1168,43 +1224,12 @@ def quota_reserve(context, resources, quotas, deltas, expire,

             # OK, refresh the usage
             if refresh:
-                # Grab the sync routine
-                sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync]
-                volume_type_id = getattr(resources[resource],
-                                         'volume_type_id', None)
-                volume_type_name = getattr(resources[resource],
-                                           'volume_type_name', None)
-                updates = sync(elevated, project_id,
-                               volume_type_id=volume_type_id,
-                               volume_type_name=volume_type_name,
-                               session=session)
-                for res, in_use in updates.items():
-                    # Make sure we have a destination for the usage!
-                    if res not in usages:
-                        usages[res] = _quota_usage_create(
-                            elevated,
-                            project_id,
-                            res,
-                            0, 0,
-                            until_refresh or None,
-                            session=session
-                        )
-
-                    # Update the usage
-                    usages[res].in_use = in_use
-                    usages[res].until_refresh = until_refresh or None
-
-                    # Because more than one resource may be refreshed
-                    # by the call to the sync routine, and we don't
-                    # want to double-sync, we make sure all refreshed
-                    # resources are dropped from the work set.
-                    work.discard(res)
-
-                    # NOTE(Vek): We make the assumption that the sync
-                    #            routine actually refreshes the
-                    #            resources that it is the sync routine
-                    #            for. We don't check, because this is
-                    #            a best-effort mechanism.
+                updates = _get_sync_updates(elevated, project_id, session,
+                                            resources, resource)
+                # Updates will always contain a single resource usage matching
+                # the resource variable.
+                usages[resource].in_use = updates[resource]
+                usages[resource].until_refresh = until_refresh or None

             # There are 3 cases where we want to update "until_refresh" in the
             # DB: when we enabled it, when we disabled it, and when we changed
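The payoff of _get_sync_updates is visible in this hunk: each sync call now reports usage for exactly one resource, which is what lets the old work-set bookkeeping (work.discard and the NOTE(Vek) caveat) collapse into a plain for loop. Distilled into a runnable sketch with stand-in sync functions, not Cinder's QUOTA_SYNC_FUNCTIONS:

    # Each stand-in returns usage for a single resource, keyed by its name.
    syncs = {
        'volumes': lambda project_id: {'volumes': 2},
        'gigabytes': lambda project_id: {'gigabytes': 2048},
    }

    deltas = {'volumes': 2, 'gigabytes': 2048}
    usages = {}
    for resource in deltas:             # no while/work.pop() dance needed
        updates = syncs[resource]('p1')
        # Updates contain exactly the resource we asked about.
        usages[resource] = updates[resource]

    print(usages == {'volumes': 2, 'gigabytes': 2048})   # True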
@@ -1263,14 +1288,18 @@ def quota_reserve(context, resources, quotas, deltas, expire,
             if delta > 0:
                 usages[resource].reserved += delta

         if unders:
             LOG.warning("Change will make usage less than 0 for the following "
                         "resources: %s", unders)
         if overs:
             usages = {k: dict(in_use=v.in_use, reserved=v.reserved)
                       for k, v in usages.items()}
             raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
                                       usages=usages)
+        session.commit()
+    except Exception:
+        session.rollback()
+        raise

     return reservations
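quota_reserve now manages the transaction by hand because it must commit newly created rows mid-flight and immediately open a fresh transaction, something a single `with session.begin():` block cannot express. The control flow in isolation, using a minimal stand-in session rather than SQLAlchemy:

    class FakeSession:
        """Minimal stand-in for a DB session, just to show the control flow."""
        def begin(self):
            print('BEGIN')

        def commit(self):
            print('COMMIT')

        def rollback(self):
            print('ROLLBACK')


    def reserve(session, fail=False):
        session.begin()
        try:
            session.commit()        # publish rows created so far...
            session.begin()         # ...and keep working in a new transaction
            if fail:
                raise RuntimeError('over quota, deadlock, ...')
            session.commit()
        except Exception:
            session.rollback()      # never leave a transaction dangling
            raise


    reserve(FakeSession())          # BEGIN COMMIT BEGIN COMMIT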
@@ -6635,8 +6664,9 @@ def purge_deleted_rows(context, age_in_days):
                     deleted.is_(True), models.QualityOfServiceSpecs.
                     deleted_at < deleted_age)).delete()
             result = session.execute(
-                table.delete()
-                .where(table.c.deleted_at < deleted_age))
+                table.delete().
+                where(and_(table.columns.deleted.is_(True),
+                           table.c.deleted_at < deleted_age)))
         except db_exc.DBReferenceError as ex:
             LOG.error('DBError detected when purging from '
                       '%(tablename)s: %(error)s.',
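The purge fix above matters because rows were previously deleted purely on deleted_at age, which could purge rows that carry an old deleted_at but are not actually soft-deleted. The and_() condition restricts the purge to rows with deleted=True. A runnable SQLAlchemy Core illustration of the new filter, using a toy table rather than Cinder's schema:

    import datetime

    from sqlalchemy import (Boolean, Column, DateTime, Integer, MetaData,
                            Table, and_, create_engine, select)

    engine = create_engine('sqlite://')
    meta = MetaData()
    vols = Table('volumes', meta,
                 Column('id', Integer, primary_key=True),
                 Column('deleted', Boolean),
                 Column('deleted_at', DateTime))
    meta.create_all(engine)

    now = datetime.datetime.utcnow()
    month_ago = now - datetime.timedelta(days=30)
    deleted_age = now - datetime.timedelta(days=10)

    with engine.begin() as conn:
        conn.execute(vols.insert(), [
            {'id': 1, 'deleted': True, 'deleted_at': month_ago},   # purged
            {'id': 2, 'deleted': False, 'deleted_at': month_ago},  # kept: live
            {'id': 3, 'deleted': True, 'deleted_at': now},         # kept: young
        ])
        conn.execute(vols.delete().where(and_(vols.c.deleted.is_(True),
                                              vols.c.deleted_at < deleted_age)))
        remaining = [row.id for row in conn.execute(select(vols.c.id))]

    print(remaining)    # [2, 3]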
New file: DB migration adding race_preventer to quota_usages (migration version number not shown):

@@ -0,0 +1,37 @@
+# Copyright 2021 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, MetaData, Table
+from migrate.changeset import constraint
+
+
+def upgrade(migrate_engine):
+    """Update quota_usages table to prevent races on creation.
+
+    Add race_preventer field and a unique constraint to prevent quota usage
+    duplicates and races that mess the quota system when first creating rows.
+    """
+    # There's no need to set the race_preventer field for existing DB entries,
+    # since the race we want to prevent is only on creation.
+    meta = MetaData(bind=migrate_engine)
+    quota_usages = Table('quota_usages', meta, autoload=True)
+
+    if not hasattr(quota_usages.c, 'race_preventer'):
+        quota_usages.create_column(Column('race_preventer', Boolean,
+                                          nullable=True))
+        unique = constraint.UniqueConstraint(
+            'project_id', 'resource', 'race_preventer',
+            table=quota_usages)
+        unique.create(engine=migrate_engine)
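Why the migration adds a third, seemingly redundant column to the unique constraint: live rows all carry race_preventer=True, so (project_id, resource) is effectively unique among them, while soft delete nulls the column, and SQL treats NULLs as distinct inside unique constraints, so any number of soft-deleted duplicates can coexist. A runnable sqlite3 demonstration of both halves:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE quota_usages ('
                 '    project_id TEXT,'
                 '    resource TEXT,'
                 '    race_preventer BOOLEAN,'
                 '    UNIQUE (project_id, resource, race_preventer))')


    def create_usage(project_id, resource):
        conn.execute('INSERT INTO quota_usages VALUES (?, ?, 1)',
                     (project_id, resource))


    create_usage('p1', 'volumes')
    try:
        create_usage('p1', 'volumes')       # the second racer loses
    except sqlite3.IntegrityError:
        print('duplicate rejected -> caller retries and reuses the row')

    # Soft delete: race_preventer goes to NULL, freeing the unique slot.
    conn.execute("UPDATE quota_usages SET race_preventer = NULL "
                 "WHERE project_id = 'p1' AND resource = 'volumes'")
    create_usage('p1', 'volumes')           # fine: NULLs never collide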
cinder/db/sqlalchemy/models.py:

@@ -641,6 +641,13 @@ class QuotaUsage(BASE, CinderBase):
     """Represents the current usage for a given resource."""

     __tablename__ = 'quota_usages'
+    # NOTE: project_id and resource are not enough as unique constraint since
+    # we do soft deletes and there could be duplicated entries, so we add the
+    # race_preventer field.
+    __table_args__ = (
+        UniqueConstraint('project_id', 'resource', 'race_preventer'),
+        CinderBase.__table_args__)
+
     id = Column(Integer, primary_key=True)

     project_id = Column(String(255), index=True)
@@ -655,6 +662,15 @@ class QuotaUsage(BASE, CinderBase):

     until_refresh = Column(Integer, nullable=True)

+    # To prevent races during creation on quota_reserve method
+    race_preventer = Column(Boolean, nullable=True, default=True)
+
+    @staticmethod
+    def delete_values():
+        res = CinderBase.delete_values()
+        res['race_preventer'] = None
+        return res
+

 class Reservation(BASE, CinderBase):
     """Represents a resource reservation for quotas."""
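delete_values() supplies the UPDATE payload the soft-delete machinery applies when a row is deleted; the override adds race_preventer=None so a deleted row stops occupying the unique-constraint slot. A hedged sketch of the interaction, assuming the base class contributes the usual deleted/deleted_at pair (the typical oslo.db soft-delete shape):

    import datetime


    class CinderBase:
        @staticmethod
        def delete_values():
            # Assumed base payload for a soft delete (flag plus timestamp).
            return {'deleted': True,
                    'deleted_at': datetime.datetime.utcnow()}


    class QuotaUsage(CinderBase):
        @staticmethod
        def delete_values():
            res = CinderBase.delete_values()
            res['race_preventer'] = None    # vacate the unique slot
            return res


    print(sorted(QuotaUsage.delete_values()))
    # ['deleted', 'deleted_at', 'race_preventer']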
DB purge unit tests (PurgeDeletedTest):

@@ -114,107 +114,110 @@ class PurgeDeletedTest(test.TestCase):

         make_vol_now = self.volumes.update().\
             where(self.volumes.c.id.in_(self.uuidstrs[0:1]))\
-            .values(deleted_at=now)
+            .values(deleted_at=now, deleted=True)
         make_vol_old = self.volumes.update().\
             where(self.volumes.c.id.in_(self.uuidstrs[1:3]))\
-            .values(deleted_at=old)
+            .values(deleted_at=old, deleted=True)
         make_vol_older = self.volumes.update().\
             where(self.volumes.c.id.in_(self.uuidstrs[4:6]))\
-            .values(deleted_at=older)
+            .values(deleted_at=older, deleted=True)
         make_vol_meta_now = self.vm.update().\
             where(self.vm.c.volume_id.in_(self.uuidstrs[0:1]))\
-            .values(deleted_at=now)
+            .values(deleted_at=now, deleted=True)
         make_vol_meta_old = self.vm.update().\
             where(self.vm.c.volume_id.in_(self.uuidstrs[1:3]))\
-            .values(deleted_at=old)
+            .values(deleted_at=old, deleted=True)
         make_vol_meta_older = self.vm.update().\
             where(self.vm.c.volume_id.in_(self.uuidstrs[4:6]))\
-            .values(deleted_at=older)
+            .values(deleted_at=older, deleted=True)

         make_vol_types_now = self.vol_types.update().\
             where(self.vol_types.c.id.in_(self.uuidstrs[0:1]))\
-            .values(deleted_at=now)
+            .values(deleted_at=now, deleted=True)
         make_vol_types_old = self.vol_types.update().\
             where(self.vol_types.c.id.in_(self.uuidstrs[1:3]))\
-            .values(deleted_at=old)
+            .values(deleted_at=old, deleted=True)
         make_vol_types_older = self.vol_types.update().\
             where(self.vol_types.c.id.in_(self.uuidstrs[4:6]))\
-            .values(deleted_at=older)
+            .values(deleted_at=older, deleted=True)
         make_vol_type_proj_now = self.vol_type_proj.update().\
             where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[0:1]))\
-            .values(deleted_at=now)
+            .values(deleted_at=now, deleted=True)
         make_vol_type_proj_old = self.vol_type_proj.update().\
             where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[1:3]))\
-            .values(deleted_at=old)
+            .values(deleted_at=old, deleted=True)
         make_vol_type_proj_older = self.vol_type_proj.update().\
             where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[4:6]))\
-            .values(deleted_at=older)
+            .values(deleted_at=older, deleted=True)

         make_snap_now = self.snapshots.update().\
             where(self.snapshots.c.id.in_(self.uuidstrs[0:1]))\
-            .values(deleted_at=now)
+            .values(deleted_at=now, deleted=True)
         make_snap_old = self.snapshots.update().\
             where(self.snapshots.c.id.in_(self.uuidstrs[1:3]))\
-            .values(deleted_at=old)
+            .values(deleted_at=old, deleted=True)
         make_snap_older = self.snapshots.update().\
             where(self.snapshots.c.id.in_(self.uuidstrs[4:6]))\
-            .values(deleted_at=older)
+            .values(deleted_at=older, deleted=True)

         make_snap_meta_now = self.sm.update().\
             where(self.sm.c.snapshot_id.in_(self.uuidstrs[0:1]))\
-            .values(deleted_at=now)
+            .values(deleted_at=now, deleted=True)
         make_snap_meta_old = self.sm.update().\
             where(self.sm.c.snapshot_id.in_(self.uuidstrs[1:3]))\
-            .values(deleted_at=old)
+            .values(deleted_at=old, deleted=True)
         make_snap_meta_older = self.sm.update().\
             where(self.sm.c.snapshot_id.in_(self.uuidstrs[4:6]))\
-            .values(deleted_at=older)
+            .values(deleted_at=older, deleted=True)

         make_vol_glance_meta_now = self.vgm.update().\
             where(self.vgm.c.volume_id.in_(self.uuidstrs[0:1]))\
-            .values(deleted_at=now)
+            .values(deleted_at=now, deleted=True)
         make_vol_glance_meta_old = self.vgm.update().\
             where(self.vgm.c.volume_id.in_(self.uuidstrs[1:3]))\
-            .values(deleted_at=old)
+            .values(deleted_at=old, deleted=True)
         make_vol_glance_meta_older = self.vgm.update().\
             where(self.vgm.c.volume_id.in_(self.uuidstrs[4:6]))\
-            .values(deleted_at=older)
+            .values(deleted_at=older, deleted=True)
         make_snap_glance_meta_now = self.vgm.update().\
             where(self.vgm.c.snapshot_id.in_(self.uuidstrs[0:1]))\
-            .values(deleted_at=now)
+            .values(deleted_at=now, deleted=True)
         make_snap_glance_meta_old = self.vgm.update().\
             where(self.vgm.c.snapshot_id.in_(self.uuidstrs[1:3]))\
-            .values(deleted_at=old)
+            .values(deleted_at=old, deleted=True)
         make_snap_glance_meta_older = self.vgm.update().\
             where(self.vgm.c.snapshot_id.in_(self.uuidstrs[4:6]))\
-            .values(deleted_at=older)
+            .values(deleted_at=older, deleted=True)

         make_qos_now = self.qos.update().where(
-            self.qos.c.id.in_(self.uuidstrs[0:1])).values(deleted_at=now)
+            self.qos.c.id.in_(self.uuidstrs[0:1])).values(deleted_at=now,
+                                                          deleted=True)
         make_qos_old = self.qos.update().where(
-            self.qos.c.id.in_(self.uuidstrs[1:3])).values(deleted_at=old)
+            self.qos.c.id.in_(self.uuidstrs[1:3])).values(deleted_at=old,
+                                                          deleted=True)
         make_qos_older = self.qos.update().where(
-            self.qos.c.id.in_(self.uuidstrs[4:6])).values(deleted_at=older)
+            self.qos.c.id.in_(self.uuidstrs[4:6])).values(deleted_at=older,
+                                                          deleted=True)

         make_qos_child_record_now = self.qos.update().where(
             self.qos.c.specs_id.in_(self.uuidstrs[0:1])).values(
-            deleted_at=now)
+            deleted_at=now, deleted=True)
         make_qos_child_record_old = self.qos.update().where(
             self.qos.c.specs_id.in_(self.uuidstrs[1:3])).values(
-            deleted_at=old)
+            deleted_at=old, deleted=True)
         make_qos_child_record_older = self.qos.update().where(
             self.qos.c.specs_id.in_(self.uuidstrs[4:6])).values(
-            deleted_at=older)
+            deleted_at=older, deleted=True)

         make_vol_types1_now = self.vol_types.update().where(
             self.vol_types.c.qos_specs_id.in_(self.uuidstrs[0:1])).values(
-            deleted_at=now)
+            deleted_at=now, deleted=True)
         make_vol_types1_old = self.vol_types.update().where(
             self.vol_types.c.qos_specs_id.in_(self.uuidstrs[1:3])).values(
-            deleted_at=old)
+            deleted_at=old, deleted=True)
         make_vol_types1_older = self.vol_types.update().where(
             self.vol_types.c.qos_specs_id.in_(self.uuidstrs[4:6])).values(
-            deleted_at=older)
+            deleted_at=older, deleted=True)

         self.conn.execute(make_vol_now)
         self.conn.execute(make_vol_old)
@@ -389,7 +392,8 @@ class PurgeDeletedTest(test.TestCase):
         # set volume record to deleted 20 days ago
         old = timeutils.utcnow() - datetime.timedelta(days=20)
         make_old = self.volumes.update().where(
-            self.volumes.c.id.in_([uuid_str])).values(deleted_at=old)
+            self.volumes.c.id.in_([uuid_str])).values(deleted_at=old,
+                                                      deleted=True)
         self.conn.execute(make_old)

         # Verify that purge_deleted_rows fails due to Foreign Key constraint
DB API unit tests:

@@ -20,6 +20,7 @@ from unittest.mock import call

 import ddt
 from oslo_config import cfg
+import oslo_db
 from oslo_utils import timeutils
 from oslo_utils import uuidutils
 from sqlalchemy.sql import operators
@@ -68,8 +69,8 @@ def _quota_reserve(context, project_id):
         deltas[resource] = i + 1
     return db.quota_reserve(
         context, resources, quotas, deltas,
-        datetime.datetime.utcnow(), datetime.datetime.utcnow(),
-        datetime.timedelta(days=1), project_id
+        datetime.datetime.utcnow(), until_refresh=None,
+        max_age=datetime.timedelta(days=1), project_id=project_id
     )
@@ -2563,6 +2564,46 @@ class DBAPIReservationTestCase(BaseTest):
                           self.ctxt,
                           'project1'))

+    @mock.patch('time.sleep', mock.Mock())
+    def test_quota_reserve_create_usages_race(self):
+        """Test we retry when there is a race in creation."""
+        def create(*args, original_create=sqlalchemy_api._quota_usage_create,
+                   **kwargs):
+            # Create the quota usage entry (with values set to 0)
+            session = sqlalchemy_api.get_session()
+            kwargs['session'] = session
+            with session.begin():
+                original_create(*args, **kwargs)
+            # Simulate that there's been a race condition with another create
+            # and that we got the exception
+            raise oslo_db.exception.DBDuplicateEntry
+
+        resources = quota.QUOTAS.resources
+        quotas = {'volumes': 5}
+        deltas = {'volumes': 2}
+        project_id = 'project1'
+        expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
+
+        with mock.patch.object(sqlalchemy_api, '_quota_usage_create',
+                               side_effect=create) as create_mock:
+            sqlalchemy_api.quota_reserve(self.ctxt, resources, quotas, deltas,
+                                         expire, 0, 0, project_id=project_id)
+
+        # The create call only happens once, when the race happens, because
+        # on the second try of the quota_reserve call the entry is already
+        # in the DB.
+        create_mock.assert_called_once_with(mock.ANY, 'project1',
+                                            'volumes', 0, 0, None,
+                                            session=mock.ANY)
+
+        # Confirm that regardless of who created the DB entry the values are
+        # updated
+        usages = sqlalchemy_api.quota_usage_get_all_by_project(self.ctxt,
+                                                               project_id)
+        expected = {'project_id': project_id,
+                    'volumes': {'in_use': 0, 'reserved': deltas['volumes']}}
+        self.assertEqual(expected, usages)
+

 class DBAPIMessageTestCase(BaseTest):
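The race test above leans on a side_effect that performs the real work and then raises, so quota_reserve observes exactly one lost race while the row still lands in the DB. The trick in isolation, with toy names rather than Cinder's (run as a script so __main__ holds the patched name):

    from unittest import mock

    rows = []


    def real_create(resource):
        rows.append(resource)


    def racing_create(resource, original=real_create):
        original(resource)              # the row really is created...
        raise RuntimeError('DBDuplicateEntry')  # ...but the caller sees a race


    with mock.patch('__main__.real_create', side_effect=racing_create) as m:
        try:
            real_create('volumes')      # name lookup resolves to the mock
        except RuntimeError:
            pass

    print(rows)                         # ['volumes']: the work happened anyway
    m.assert_called_once_with('volumes')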
@@ -2822,6 +2863,29 @@ class DBAPIQuotaTestCase(BaseTest):
         self.assertEqual(expected, db.quota_usage_get_all_by_project(
                          self.ctxt, 'p1'))

+    def test__quota_usage_create(self):
+        session = sqlalchemy_api.get_session()
+        usage = sqlalchemy_api._quota_usage_create(self.ctxt, 'project1',
+                                                   'resource',
+                                                   in_use=10, reserved=0,
+                                                   until_refresh=None,
+                                                   session=session)
+        self.assertEqual('project1', usage.project_id)
+        self.assertEqual('resource', usage.resource)
+        self.assertEqual(10, usage.in_use)
+        self.assertEqual(0, usage.reserved)
+        self.assertIsNone(usage.until_refresh)
+
+    def test__quota_usage_create_duplicate(self):
+        session = sqlalchemy_api.get_session()
+        kwargs = {'project_id': 'project1', 'resource': 'resource',
+                  'in_use': 10, 'reserved': 0, 'until_refresh': None,
+                  'session': session}
+        sqlalchemy_api._quota_usage_create(self.ctxt, **kwargs)
+        self.assertRaises(oslo_db.exception.DBDuplicateEntry,
+                          sqlalchemy_api._quota_usage_create,
+                          self.ctxt, **kwargs)
+

 class DBAPIBackupTestCase(BaseTest):
Quota unit tests:

@@ -15,6 +15,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import collections
 import datetime
 from unittest import mock
@@ -1341,6 +1342,12 @@ class FakeSession(object):
     def query(self, *args, **kwargs):
         pass

+    def rollback(self):
+        pass
+
+    def commit(self):
+        pass
+

 class FakeUsage(sqa_models.QuotaUsage):
     def save(self, *args, **kwargs):
@@ -1497,36 +1504,61 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):

         self.assertEqual(0, len(reservations))

-    def test_quota_reserve_create_usages(self):
-        context = FakeContext('test_project', 'test_class')
-        quotas = dict(volumes=5,
-                      gigabytes=10 * 1024, )
-        deltas = dict(volumes=2,
-                      gigabytes=2 * 1024, )
+    @mock.patch.object(sqa_api, '_reservation_create')
+    @mock.patch.object(sqa_api, '_get_sync_updates')
+    @mock.patch.object(sqa_api, '_quota_usage_create')
+    @mock.patch.object(sqa_api, '_get_quota_usages')
+    def test_quota_reserve_create_usages(self, usages_mock, quota_create_mock,
+                                         sync_mock, reserve_mock):
+        project_id = 'test_project'
+        context = FakeContext(project_id, 'test_class')
+        quotas = collections.OrderedDict([('volumes', 5),
+                                          ('gigabytes', 10 * 1024)])
+        deltas = collections.OrderedDict([('volumes', 2),
+                                          ('gigabytes', 2 * 1024)])
+
+        sync_mock.side_effect = [{'volumes': 2}, {'gigabytes': 2 * 1024}]
+        vol_usage = self._make_quota_usage(project_id, 'volumes', 2, 0,
+                                           None, None, None)
+        gb_usage = self._make_quota_usage(project_id, 'gigabytes', 2 * 1024, 0,
+                                          None, None, None)
+        usages_mock.side_effect = [
+            {},
+            collections.OrderedDict([('volumes', vol_usage),
+                                     ('gigabytes', gb_usage)])
+        ]
+        reservations = [mock.Mock(), mock.Mock()]
+        reserve_mock.side_effect = reservations
+
         result = sqa_api.quota_reserve(context, self.resources, quotas,
                                        deltas, self.expire, 0, 0)

-        self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called)
-        self.compare_usage(self.usages_created,
-                           [dict(resource='volumes',
-                                 project_id='test_project',
-                                 in_use=0,
-                                 reserved=2,
-                                 until_refresh=None),
-                            dict(resource='gigabytes',
-                                 project_id='test_project',
-                                 in_use=0,
-                                 reserved=2 * 1024,
-                                 until_refresh=None), ])
-        self.compare_reservation(
-            result,
-            [dict(resource='volumes',
-                  usage_id=self.usages_created['volumes'],
-                  project_id='test_project',
-                  delta=2),
-             dict(resource='gigabytes',
-                  usage_id=self.usages_created['gigabytes'],
-                  delta=2 * 1024), ])
+        self.assertEqual([r.uuid for r in reservations], result)
+
+        usages_mock.assert_has_calls([
+            mock.call(mock.ANY, mock.ANY, project_id, resources=deltas.keys()),
+            mock.call(mock.ANY, mock.ANY, project_id, resources=deltas.keys())
+        ])
+
+        sync_mock.assert_has_calls([
+            mock.call(mock.ANY, project_id, mock.ANY, self.resources,
+                      'volumes'),
+            mock.call(mock.ANY, project_id, mock.ANY, self.resources,
+                      'gigabytes')])
+
+        quota_create_mock.assert_has_calls([
+            mock.call(mock.ANY, project_id, 'volumes', 2, 0, None,
+                      session=mock.ANY),
+            mock.call(mock.ANY, project_id, 'gigabytes', 2 * 1024, 0, None,
+                      session=mock.ANY)
+        ])
+
+        reserve_mock.assert_has_calls([
+            mock.call(mock.ANY, mock.ANY, vol_usage, project_id, 'volumes',
+                      2, mock.ANY, session=mock.ANY),
+            mock.call(mock.ANY, mock.ANY, gb_usage, project_id, 'gigabytes',
+                      2 * 1024, mock.ANY, session=mock.ANY),
+        ])

     def test_quota_reserve_negative_in_use(self):
         self.init_usage('test_project', 'volumes', -1, 0, until_refresh=1)
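A note on the OrderedDict switch in the rewritten test: assert_has_calls verifies an ordered sequence of calls, so the test pins the iteration order of quotas and deltas explicitly instead of relying on plain dict semantics. A minimal illustration of why the order must be deterministic:

    import collections
    from unittest import mock

    m = mock.Mock()
    deltas = collections.OrderedDict([('volumes', 2), ('gigabytes', 2048)])

    for resource in deltas:
        m(resource)

    # Passes only because the iteration order is deterministic.
    m.assert_has_calls([mock.call('volumes'), mock.call('gigabytes')])
    print(m.call_count)     # 2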
New file: release note.

@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    `Bug #1484343 <https://bugs.launchpad.net/cinder/+bug/1484343>`_: Fix
+    creation of duplicated quota usage entries in DB.