Re-enable -1 child limits for nested quotas

Add back support for -1 limits of child projects. The way that we
support the -1 child limits requires the following changes:
  * Continue quota validation up the hierarchy if the current limit is
    -1, until we hit a hard limit or run out of parents, and update
    any relevant parents' allocated values along the way
  * When updating limits, special care needs to be taken when updating
    child limit to be -1, or when changing from a -1 limit
  * Enable support for creating reservations for "allocated" values
    to support the scenario that:
      - a volume is created on a project with a limit of -1
      - the parent's allocated value has been updated appropriately
      - the volume create fails and the child's in_use quota rolls back
      - now we must also rollback the parent's allocated value

NOTE: There is a race condition between validating the NestedQuotas
driver and the moment the driver is switched into use; if -1 quotas are
used, the validation could be out of date by then. We will look into
better support for switching on NestedQuotas in a live deployment with
-1 limits, which would likely leverage the "allocated" reservation
system.

Closes-Bug: #1548645
Closes-Bug: #1544774
Closes-Bug: #1537189
Change-Id: I2d1dba87baf3595cc8f48574e0281ac17509fe7d
This commit is contained in:
Ryan McNair
2016-02-16 17:12:53 +00:00
parent d3fd2828f6
commit c02336e4dd
12 changed files with 748 additions and 258 deletions

View File

@@ -22,6 +22,7 @@ from keystoneclient import client
from keystoneclient import exceptions
from keystoneclient import session
from cinder import db
from cinder import exception
from cinder.i18n import _, _LW
@@ -102,7 +103,8 @@ def get_volume_type_reservation(ctxt, volume, type_id,
return reservations
def get_project_hierarchy(context, project_id, subtree_as_ids=False):
def get_project_hierarchy(context, project_id, subtree_as_ids=False,
parents_as_ids=False):
"""A Helper method to get the project hierarchy.
Along with hierarchical multitenancy in keystone API v3, projects can be
@@ -114,10 +116,13 @@ def get_project_hierarchy(context, project_id, subtree_as_ids=False):
generic_project = GenericProjectInfo(project_id, keystone.version)
if keystone.version == 'v3':
project = keystone.projects.get(project_id,
subtree_as_ids=subtree_as_ids)
subtree_as_ids=subtree_as_ids,
parents_as_ids=parents_as_ids)
generic_project.parent_id = project.parent_id
generic_project.subtree = (
project.subtree if subtree_as_ids else None)
generic_project.parents = (
project.parents if parents_as_ids else None)
except exceptions.NotFound:
msg = (_("Tenant ID: %s does not exist.") % project_id)
raise webob.exc.HTTPNotFound(explanation=msg)
@@ -145,6 +150,35 @@ def get_all_root_project_ids(context):
return project_roots
def update_alloc_to_next_hard_limit(context, resources, deltas, res,
                                    expire, project_id):
    """Create "allocated" reservations up the project hierarchy.

    Walks from *project_id*'s immediate parent toward the root project,
    creating an allocated-value reservation for *res* on each ancestor.
    The walk stops after the first ancestor whose effective limit is a
    hard limit (anything other than -1), or when there are no more
    parents.  If any reservation fails with OverQuota, every reservation
    made so far is rolled back before the exception is re-raised.

    :returns: the list of reservation ids created along the way
    """
    # Imported here (not at module level) to avoid a circular import.
    from cinder import quota
    quotas_engine = quota.QUOTAS

    created = []
    # ``parents`` is a nested dict of the form
    # {parent_id: {grandparent_id: {... : None}}} — single chain upward.
    ancestors = get_project_hierarchy(context, project_id,
                                      parents_as_ids=True).parents
    reached_hard_limit = False
    while ancestors and not reached_hard_limit:
        # Each level has exactly one key: the next ancestor's id.
        parent_id = next(iter(ancestors))
        ancestors = ancestors[parent_id]
        limit = quotas_engine.get_by_project_or_default(
            context, parent_id, res)
        reached_hard_limit = limit != -1
        try:
            created += db.quota_reserve(
                context, resources, {res: limit}, {res: deltas[res]},
                expire, CONF.until_refresh, CONF.max_age, parent_id,
                is_allocated_reserve=True)
        except exception.OverQuota:
            # Undo the partially-applied allocated values before
            # propagating the failure to the caller.
            db.reservation_rollback(context, created)
            raise
    return created
def validate_setup_for_nested_quota_use(ctxt, resources,
nested_quota_driver,
fix_allocated_quotas=False):