
Merge "Remove NestedQuotaDriver"

changes/06/763306/9
Zuul, 6 months ago (committed by Gerrit Code Review)
parent commit 6bc06f1dbd
  1. api-ref/source/v3/parameters.yaml (41 lines changed)
  2. api-ref/source/v3/quota-sets.inc (30 lines changed)
  3. cinder/api/contrib/quotas.py (250 lines changed)
  4. cinder/cmd/status.py (17 lines changed)
  5. cinder/exception.py (5 lines changed)
  6. cinder/policies/quotas.py (12 lines changed)
  7. cinder/quota.py (212 lines changed)
  8. cinder/quota_utils.py (103 lines changed)
  9. cinder/tests/functional/test_quotas.py (170 lines changed)
  10. cinder/tests/unit/api/contrib/test_quotas.py (817 lines changed)
  11. cinder/tests/unit/cmd/test_status.py (2 lines changed)
  12. cinder/tests/unit/test_quota.py (265 lines changed)
  13. cinder/tests/unit/test_quota_utils.py (18 lines changed)
  14. doc/source/configuration/block-storage/nested-quota.rst (172 lines changed)
  15. doc/source/configuration/index.rst (1 line changed)
  16. releasenotes/notes/remove-nested-quota-driver-8b56f03694e3a694.yaml (6 lines changed)

api-ref/source/v3/parameters.yaml (41 lines changed)

@ -234,14 +234,6 @@ filter_updated_at:
required: false
type: string
min_version: 3.60
fix_allocated_quotas:
description: |
Whether to fix all the non-leaf projects' ``allocation``
attribute or raise 400 error if ``allocation`` doesn't match
the current quota usage information. Default is ``false``.
in: query
required: false
type: boolean
force_del_qos:
description: |
To delete a QoS specification even if it is in-
@ -649,8 +641,7 @@ backup_gigabytes:
backup_gigabytes_usage:
description: |
The size (GB) usage information of backup for this project, including ``in_use``,
``limit``, ``reserved`` and ``allocated`` attributes.
Note: ``allocated`` attribute is available only when nested quota is enabled.
``limit`` and ``reserved`` attributes.
in: body
required: true
type: object
@ -695,8 +686,7 @@ backups_number:
backups_number_usage:
description: |
The backup usage information for this project, including ``in_use``,
``limit``, ``reserved`` and ``allocated`` attributes.
Note: ``allocated`` attribute is available only when nested quota is enabled.
``limit`` and ``reserved`` attributes.
in: body
required: true
type: object
@ -1222,17 +1212,15 @@ gigabytes_for_type:
gigabytes_for_type_usage:
description: |
The size (GB) usage information of volumes and snapshots for this project
and this volume type, including ``in_use``, ``limit``, ``reserved`` and
``allocated`` attributes. Note: ``allocated`` attribute is available only
when nested quota is enabled.
and this volume type, including ``in_use``, ``limit`` and ``reserved``
attributes.
in: body
required: true
type: object
gigabytes_usage:
description: |
The size (GB) usage information of volumes and snapshots for this project,
including ``in_use``, ``limit``, ``reserved`` and ``allocated`` attributes.
Note: ``allocated`` attribute is available only when nested quota is enabled.
including ``in_use``, ``limit`` and ``reserved`` attributes.
in: body
required: true
type: object
@ -1337,8 +1325,7 @@ groups_number:
groups_number_usage:
description: |
The group usage information for this project, including ``in_use``,
``limit``, ``reserved`` and ``allocated`` attributes.
Note: ``allocated`` attribute is available only when nested quota is enabled.
``limit`` and ``reserved`` attributes.
in: body
required: true
type: object
@ -2218,9 +2205,7 @@ per_volume_gigabytes:
per_volume_gigabytes_usage:
description: |
The size (GB) usage information for each volume, including ``in_use``,
``limit``, ``reserved`` and ``allocated`` attributes.
Note: ``allocated`` attribute is available only when nested quota
is enabled and only ``limit`` is meaningful here.
``limit`` and ``reserved`` attributes.
in: body
required: true
type: object
@ -2672,16 +2657,14 @@ snapshots_number_for_type:
snapshots_number_for_type_usage:
description: |
The snapshot usage information for this project and this volume type,
including ``in_use``, ``limit``, ``reserved`` and ``allocated`` attributes.
Note: ``allocated`` attribute is available only when nested quota is enabled.
including ``in_use``, ``limit`` and ``reserved`` attributes.
in: body
required: true
type: object
snapshots_number_usage:
description: |
The snapshot usage information for this project, including ``in_use``,
``limit``, ``reserved`` and ``allocated`` attributes.
Note: ``allocated`` attribute is available only when nested quota is enabled.
``limit`` and ``reserved`` attributes.
in: body
required: true
type: object
@ -3186,16 +3169,14 @@ volumes_number_for_type:
volumes_number_for_type_usage:
description: |
The volume usage information for this project and this volume type,
including ``in_use``, ``limit``, ``reserved`` and ``allocated`` attributes.
Note: ``allocated`` attribute is available only when nested quota is enabled.
including ``in_use``, ``limit`` and ``reserved`` attributes.
in: body
required: true
type: object
volumes_number_usage:
description: |
The volume usage information for this project, including ``in_use``,
``limit``, ``reserved`` and ``allocated`` attributes.
Note: ``allocated`` attribute is available only when nested quota is enabled.
``limit`` and ``reserved`` attributes.
in: body
required: true
type: object
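For reference, a minimal sketch of the usage-entry shape these descriptions now document: only ``in_use``, ``limit`` and ``reserved`` remain, and the values below are made up for illustration.

    # Illustrative shape of one detailed-usage entry after this change.
    volumes_usage = {
        "in_use": 2,     # volumes the project is currently consuming
        "limit": 10,     # hard limit for the project (-1 means unlimited)
        "reserved": 0,   # amount held by in-flight reservations
    }
    # The former "allocated" key disappears together with the nested quota driver.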

api-ref/source/v3/quota-sets.inc (30 lines changed)

@ -250,33 +250,3 @@ Response Example
.. literalinclude:: ./samples/quota_sets/quotas-show-defaults-response.json
:language: javascript
Validate setup for nested quota
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
GET /v3/{admin_project_id}/os-quota-sets/validate_setup_for_nested_quota_use
Validate the setup for nested quotas. The administrator should ensure that
Keystone v3 or greater is being used.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- admin_project_id: admin_project_id
- fix_allocated_quotas: fix_allocated_quotas
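For context, a hedged sketch of how the removed endpoint was typically invoked. The path and the fix_allocated_quotas query parameter come from the removed api-ref text above; the base URL, project id and token are placeholders.

    import requests  # any HTTP client works; this is only an illustration

    base = "http://cinder.example.com/v3"    # placeholder endpoint
    admin_project_id = "ADMIN_PROJECT_ID"    # placeholder admin project
    token = "KEYSTONE_TOKEN"                 # placeholder admin token

    # Removed by this patch: validate (and optionally repair) the nested quota setup.
    resp = requests.get(
        base + "/" + admin_project_id
        + "/os-quota-sets/validate_setup_for_nested_quota_use",
        params={"fix_allocated_quotas": "true"},  # fix parents' "allocated" totals
        headers={"X-Auth-Token": token},
    )
    print(resp.status_code)  # 200 on success, 400 on an invalid setup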

cinder/api/contrib/quotas.py (250 lines changed)

@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils
import webob
from cinder.api import extensions
@ -25,7 +24,6 @@ from cinder import exception
from cinder.i18n import _
from cinder.policies import quotas as policy
from cinder import quota
from cinder import quota_utils
from cinder import utils
QUOTAS = quota.QUOTAS
@ -48,8 +46,6 @@ class QuotaSetsController(wsgi.Controller):
return
v = quota_values.get(key, {})
used = (v.get('in_use', 0) + v.get('reserved', 0))
if QUOTAS.using_nested_quotas():
used += v.get('allocated', 0)
if value < used:
msg = (_("Quota %(key)s limit must be equal or greater than "
"existing resources. Current usage is %(usage)s "
@ -70,95 +66,9 @@ class QuotaSetsController(wsgi.Controller):
else:
return {k: v['limit'] for k, v in values.items()}
def _authorize_update_or_delete(self, context_project,
target_project_id,
parent_id):
"""Checks if update or delete are allowed in the current hierarchy.
With hierarchical projects, only the admin of the parent or the root
project has privilege to perform quota update and delete operations.
:param context_project: The project in which the user is scoped to.
:param target_project_id: The id of the project in which the
user want to perform an update or
delete operation.
:param parent_id: The parent id of the project in which the user
want to perform an update or delete operation.
"""
if context_project.is_admin_project:
# The calling project has admin privileges and should be able
# to operate on all quotas.
return
if context_project.parent_id and parent_id != context_project.id:
msg = _("Update and delete quota operations can only be made "
"by an admin of immediate parent or by the CLOUD admin.")
raise webob.exc.HTTPForbidden(explanation=msg)
if context_project.id != target_project_id:
if not self._is_descendant(target_project_id,
context_project.subtree):
msg = _("Update and delete quota operations can only be made "
"to projects in the same hierarchy of the project in "
"which users are scoped to.")
raise webob.exc.HTTPForbidden(explanation=msg)
else:
msg = _("Update and delete quota operations can only be made "
"by an admin of immediate parent or by the CLOUD admin.")
raise webob.exc.HTTPForbidden(explanation=msg)
def _authorize_show(self, context_project, target_project):
"""Checks if show is allowed in the current hierarchy.
With hierarchical projects, users are allowed to perform a quota show
operation if they have the cloud admin role or if they belong to at
least one of the following projects: the target project, its immediate
parent project, or the root project of its hierarchy.
:param context_project: The project in which the user
is scoped to.
:param target_project: The project in which the user wants
to perform a show operation.
"""
if context_project.is_admin_project:
# The calling project has admin privileges and should be able
# to view all quotas.
return
if target_project.parent_id:
if target_project.id != context_project.id:
if not self._is_descendant(target_project.id,
context_project.subtree):
msg = _("Show operations can only be made to projects in "
"the same hierarchy of the project in which users "
"are scoped to.")
raise webob.exc.HTTPForbidden(explanation=msg)
if context_project.id != target_project.parent_id:
if context_project.parent_id:
msg = _("Only users with token scoped to immediate "
"parents or root projects are allowed to see "
"its children quotas.")
raise webob.exc.HTTPForbidden(explanation=msg)
elif context_project.parent_id:
msg = _("An user with a token scoped to a subproject is not "
"allowed to see the quota of its parents.")
raise webob.exc.HTTPForbidden(explanation=msg)
def _is_descendant(self, target_project_id, subtree):
if subtree is not None:
for key, value in subtree.items():
if key == target_project_id:
return True
if self._is_descendant(target_project_id, value):
return True
return False
def show(self, req, id):
"""Show quota for a particular tenant
This works for hierarchical and non-hierarchical projects. For
hierarchical projects admin of current project, immediate
parent of the project or the CLOUD admin are able to perform
a show.
:param req: request
:param id: target project id that needs to be shown
"""
@ -173,18 +83,6 @@ class QuotaSetsController(wsgi.Controller):
else:
usage = False
if QUOTAS.using_nested_quotas():
# With hierarchical projects, only the admin of the current project
# or the root project has privilege to perform quota show
# operations.
target_project = quota_utils.get_project_hierarchy(
context, target_project_id)
context_project = quota_utils.get_project_hierarchy(
context, context.project_id, subtree_as_ids=True,
is_admin_project=context.is_admin)
self._authorize_show(context_project, target_project)
quotas = self._get_quotas(context, target_project_id, usage)
return self._format_quota_set(target_project_id, quotas)
@ -192,10 +90,6 @@ class QuotaSetsController(wsgi.Controller):
def update(self, req, id, body):
"""Update Quota for a particular tenant
This works for hierarchical and non-hierarchical projects. For
hierarchical projects only immediate parent admin or the
CLOUD admin are able to perform an update.
:param req: request
:param id: target project id that needs to be updated
:param body: key, value pair that will be applied to
@ -208,25 +102,6 @@ class QuotaSetsController(wsgi.Controller):
self.validate_string_length(id, 'quota_set_name',
min_length=1, max_length=255)
# Saving off this value since we need to use it multiple times
use_nested_quotas = QUOTAS.using_nested_quotas()
if use_nested_quotas:
# Get the parent_id of the target project to verify whether we are
# dealing with hierarchical namespace or non-hierarchical namespace
target_project = quota_utils.get_project_hierarchy(
context, target_project_id, parents_as_ids=True)
parent_id = target_project.parent_id
if parent_id:
# Get the children of the project which the token is scoped to
# in order to know if the target_project is in its hierarchy.
context_project = quota_utils.get_project_hierarchy(
context, context.project_id, subtree_as_ids=True,
is_admin_project=context.is_admin)
self._authorize_update_or_delete(context_project,
target_project.id,
parent_id)
# NOTE(ankit): Pass #1 - In this loop for body['quota_set'].keys(),
# we validate the quota limits to ensure that we can bail out if
# any of the items in the set is bad. Meanwhile we validate value
@ -246,16 +121,6 @@ class QuotaSetsController(wsgi.Controller):
self._validate_existing_resource(key, body['quota_set'][key],
quota_values)
if use_nested_quotas:
try:
reservations += self._update_nested_quota_allocated(
context, target_project, quota_values, key,
body['quota_set'][key])
except exception.OverQuota as e:
if reservations:
db.reservation_rollback(context, reservations)
raise webob.exc.HTTPBadRequest(explanation=e.msg)
valid_quotas[key] = body['quota_set'][key]
# NOTE(ankit): Pass #2 - At this point we know that all the keys and
@ -278,34 +143,6 @@ class QuotaSetsController(wsgi.Controller):
return (quota_obj.get('in_use', 0) + quota_obj.get('allocated', 0) +
quota_obj.get('reserved', 0))
def _update_nested_quota_allocated(self, ctxt, target_project,
target_project_quotas, res, new_limit):
reservations = []
# per_volume_gigabytes doesn't make sense to nest
if res == "per_volume_gigabytes":
return reservations
quota_for_res = target_project_quotas.get(res, {})
orig_quota_from_target_proj = quota_for_res.get('limit', 0)
# If limit was -1, we were "taking" current child's usage from parent
if orig_quota_from_target_proj == -1:
orig_quota_from_target_proj = self._get_quota_usage(quota_for_res)
new_quota_from_target_proj = new_limit
# If we set limit to -1, we will "take" the current usage from parent
if new_limit == -1:
new_quota_from_target_proj = self._get_quota_usage(quota_for_res)
res_change = new_quota_from_target_proj - orig_quota_from_target_proj
if res_change != 0:
deltas = {res: res_change}
resources = QUOTAS.resources
resources.update(GROUP_QUOTAS.resources)
reservations += quota_utils.update_alloc_to_next_hard_limit(
ctxt, resources, deltas, res, None, target_project.id)
return reservations
def defaults(self, req, id):
context = req.environ['cinder.context']
context.authorize(policy.SHOW_POLICY, target={'project_id': id})
@ -317,95 +154,13 @@ class QuotaSetsController(wsgi.Controller):
def delete(self, req, id):
"""Delete Quota for a particular tenant.
This works for hierarchical and non-hierarchical projects. For
hierarchical projects only immediate parent admin or the
CLOUD admin are able to perform a delete.
:param req: request
:param id: target project id that needs to be deleted
"""
context = req.environ['cinder.context']
context.authorize(policy.DELETE_POLICY, target={'project_id': id})
if QUOTAS.using_nested_quotas():
self._delete_nested_quota(context, id)
else:
db.quota_destroy_by_project(context, id)
def _delete_nested_quota(self, ctxt, proj_id):
# Get the parent_id of the target project to verify whether we are
# dealing with hierarchical namespace or non-hierarchical
# namespace.
try:
project_quotas = QUOTAS.get_project_quotas(
ctxt, proj_id, usages=True, defaults=False)
project_group_quotas = GROUP_QUOTAS.get_project_quotas(
ctxt, proj_id, usages=True, defaults=False)
project_quotas.update(project_group_quotas)
except exception.NotAuthorized:
raise webob.exc.HTTPForbidden()
target_project = quota_utils.get_project_hierarchy(
ctxt, proj_id)
parent_id = target_project.parent_id
if parent_id:
# Get the children of the project which the token is scoped to
# in order to know if the target_project is in its hierarchy.
context_project = quota_utils.get_project_hierarchy(
ctxt, ctxt.project_id, subtree_as_ids=True)
self._authorize_update_or_delete(context_project,
target_project.id,
parent_id)
defaults = QUOTAS.get_defaults(ctxt, proj_id)
defaults.update(GROUP_QUOTAS.get_defaults(ctxt, proj_id))
# If the project which is being deleted has allocated part of its
# quota to its subprojects, then subprojects' quotas should be
# deleted first.
for res, value in project_quotas.items():
if 'allocated' in project_quotas[res].keys():
if project_quotas[res]['allocated'] > 0:
msg = _("About to delete child projects having "
"non-zero quota. This should not be performed")
raise webob.exc.HTTPBadRequest(explanation=msg)
# Ensure quota usage wouldn't exceed limit on a delete
self._validate_existing_resource(
res, defaults[res], project_quotas)
db.quota_destroy_by_project(ctxt, target_project.id)
for res, limit in project_quotas.items():
# Update child limit to 0 so the parent hierarchy gets its
# allocated values updated properly
self._update_nested_quota_allocated(
ctxt, target_project, project_quotas, res, 0)
def validate_setup_for_nested_quota_use(self, req):
"""Validates that the setup supports using nested quotas.
Ensures that Keystone v3 or greater is being used, and that the
existing quotas make sense to nest in the current hierarchy (e.g. that
no child quota would be larger than its parent).
"""
ctxt = req.environ['cinder.context']
ctxt.authorize(policy.VALIDATE_NESTED_QUOTA_POLICY)
params = req.params
try:
resources = QUOTAS.resources
resources.update(GROUP_QUOTAS.resources)
allocated = params.get('fix_allocated_quotas', 'False')
try:
fix_allocated = strutils.bool_from_string(allocated,
strict=True)
except ValueError:
msg = _("Invalid param 'fix_allocated_quotas':%s") % allocated
raise webob.exc.HTTPBadRequest(explanation=msg)
quota_utils.validate_setup_for_nested_quota_use(
ctxt, resources, quota.NestedDbQuotaDriver(),
fix_allocated_quotas=fix_allocated)
except exception.InvalidNestedQuotaSetup as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
db.quota_destroy_by_project(context, id)
class Quotas(extensions.ExtensionDescriptor):
@ -420,8 +175,7 @@ class Quotas(extensions.ExtensionDescriptor):
res = extensions.ResourceExtension(
'os-quota-sets', QuotaSetsController(),
member_actions={'defaults': 'GET'},
collection_actions={'validate_setup_for_nested_quota_use': 'GET'})
member_actions={'defaults': 'GET'})
resources.append(res)
return resources
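To make the surviving check concrete, here is a standalone sketch of the limit validation that remains once the nested-quota branches are gone. Names mirror _validate_existing_resource above, but the exception type and the -1 handling are simplified for illustration.

    def validate_existing_resource(key, new_limit, quota_values):
        """Reject a new limit that falls below what the project already uses."""
        if key == 'per_volume_gigabytes' or new_limit == -1:
            # Per-volume size has no usage counter; -1 means unlimited.
            return
        v = quota_values.get(key, {})
        # Only real usage plus reservations count now; the nested driver's
        # extra "allocated" component is no longer added in.
        used = v.get('in_use', 0) + v.get('reserved', 0)
        if new_limit < used:
            raise ValueError(
                "Quota %s limit must be equal or greater than existing "
                "resources. Current usage is %s and the requested limit is %s"
                % (key, used, new_limit))

    # Example: lowering 'volumes' below current consumption is rejected.
    usages = {'volumes': {'in_use': 5, 'reserved': 1, 'limit': 10}}
    try:
        validate_existing_resource('volumes', 4, usages)
    except ValueError as e:
        print(e)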

cinder/cmd/status.py (17 lines changed)

@ -170,21 +170,19 @@ class Checks(uc.UpgradeCommands):
def _check_nested_quota(self):
"""Checks for the use of the nested quota driver.
The NestedDbQuotaDriver is deprecated in the Train release to prepare
for upcoming unified limits changes.
The NestedDbQuotaDriver is deprecated in the Train release and is
removed in the Wallaby release to prepare for upcoming unified limits
changes.
"""
# We import here to avoid conf loading order issues with cinder.service
# above.
import cinder.quota # noqa
quota_driver = CONF.quota_driver
if quota_driver == 'cinder.quota.NestedDbQuotaDriver':
return uc.Result(
WARNING,
'The NestedDbQuotaDriver has been deprecated. It will '
'continue to work in the 15.0.0 (Train) release, but will be '
'removed in 16.0.0')
FAILURE,
'The NestedDbQuotaDriver was deprecated in the Train release '
'and is removed in the Wallaby release.')
return uc.Result(SUCCESS)
def _check_legacy_windows_config(self):
@ -272,9 +270,10 @@ class Checks(uc.UpgradeCommands):
('Removed Drivers', _check_removed_drivers),
# added in Train
('Periodic Interval Use', _check_periodic_interval),
('Use of Nest Quota Driver', _check_nested_quota),
('Service UUIDs', _check_service_uuid),
('Attachment specs', _check_attachment_specs),
# added in Wallaby
('Use of Nested Quota Driver', _check_nested_quota),
)
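A minimal sketch of the decision the updated upgrade check makes. SUCCESS/FAILURE and the returned tuple stand in for oslo.upgradecheck's result codes and Result class, which the real code accesses through uc.

    # Simplified stand-ins for oslo.upgradecheck's result codes.
    SUCCESS, FAILURE = 0, 2

    def check_nested_quota(configured_quota_driver):
        """Fail the upgrade check if the removed driver is still configured."""
        if configured_quota_driver == 'cinder.quota.NestedDbQuotaDriver':
            # Deprecated in Train, removed in Wallaby: the operator must switch
            # back to the default DbQuotaDriver before upgrading.
            return (FAILURE, 'The NestedDbQuotaDriver was deprecated in Train '
                             'and is removed in Wallaby.')
        return (SUCCESS, None)

    # A deployment still pointing at the removed driver now fails instead of warning.
    print(check_nested_quota('cinder.quota.NestedDbQuotaDriver')[0] == FAILURE)  # True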

cinder/exception.py (5 lines changed)

@ -527,11 +527,6 @@ class InvalidQuotaValue(Invalid):
"resources: %(unders)s")
class InvalidNestedQuotaSetup(CinderException):
message = _("Project quotas are not properly setup for nested quotas: "
"%(reason)s.")
class QuotaNotFound(NotFound):
message = _("Quota could not be found")

cinder/policies/quotas.py (12 lines changed)

@ -21,8 +21,6 @@ from cinder.policies import base
SHOW_POLICY = 'volume_extension:quotas:show'
UPDATE_POLICY = 'volume_extension:quotas:update'
DELETE_POLICY = 'volume_extension:quotas:delete'
VALIDATE_NESTED_QUOTA_POLICY = \
'volume_extension:quota_classes:validate_setup_for_nested_quota_use'
quota_policies = [
@ -64,16 +62,6 @@ quota_policies = [
'path': '/os-quota-sets/{project_id}'
}
]),
policy.DocumentedRuleDefault(
name=VALIDATE_NESTED_QUOTA_POLICY,
check_str=base.RULE_ADMIN_API,
description="Validate setup for nested quota.",
operations=[
{
'method': 'GET',
'path': '/os-quota-sets/validate_setup_for_nested_quota_use'
}
]),
]

cinder/quota.py (212 lines changed)

@ -16,7 +16,6 @@
"""Quotas for volumes."""
from collections import deque
import datetime
from oslo_config import cfg
@ -30,7 +29,6 @@ from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import quota_utils
LOG = logging.getLogger(__name__)
@ -452,212 +450,6 @@ class DbQuotaDriver(object):
db.reservation_expire(context)
class NestedDbQuotaDriver(DbQuotaDriver):
def __init__(self, *args, **kwargs):
super(NestedDbQuotaDriver, self).__init__(*args, **kwargs)
LOG.warning('The NestedDbQuotaDriver is deprecated and will be '
'removed in the "U" release.')
def validate_nested_setup(self, ctxt, resources, project_tree,
fix_allocated_quotas=False):
"""Ensures project_tree has quotas that make sense as nested quotas.
Validates the following:
* No parent project has child_projects who have more combined quota
than the parent's quota limit
* No child quota has a larger in-use value than its current limit
(could happen before because child default values weren't enforced)
* All parent projects' "allocated" quotas match the sum of the limits
of its children projects
TODO(mc_nair): need a better way to "flip the switch" to use nested
quotas to make this less race-ee
"""
self._allocated = {}
project_queue = deque(project_tree.items())
borked_allocated_quotas = {}
while project_queue:
# Tuple of (current root node, subtree)
cur_proj_id, project_subtree = project_queue.popleft()
# If we're on a leaf node, no need to do validation on it, and in
# order to avoid complication trying to get its children, skip it.
if not project_subtree:
continue
cur_project_quotas = self.get_project_quotas(
ctxt, resources, cur_proj_id)
# Validate each resource when compared to its child quotas
for resource in cur_project_quotas:
parent_quota = cur_project_quotas[resource]
parent_limit = parent_quota['limit']
parent_usage = (parent_quota['in_use'] +
parent_quota['reserved'])
cur_parent_allocated = parent_quota.get('allocated', 0)
calc_parent_allocated = self._get_cur_project_allocated(
ctxt, resources[resource], {cur_proj_id: project_subtree})
if parent_limit > 0:
parent_free_quota = parent_limit - parent_usage
if parent_free_quota < calc_parent_allocated:
msg = _("Sum of child usage '%(sum)s' is greater "
"than free quota of '%(free)s' for project "
"'%(proj)s' for resource '%(res)s'. Please "
"lower the limit or usage for one or more of "
"the following projects: '%(child_ids)s'") % {
'sum': calc_parent_allocated,
'free': parent_free_quota,
'proj': cur_proj_id, 'res': resource,
'child_ids': ', '.join(project_subtree.keys())
}
raise exception.InvalidNestedQuotaSetup(reason=msg)
# If "allocated" value wasn't right either err or fix DB
if calc_parent_allocated != cur_parent_allocated:
if fix_allocated_quotas:
try:
db.quota_allocated_update(ctxt, cur_proj_id,
resource,
calc_parent_allocated)
except exception.ProjectQuotaNotFound:
# If it was default quota create DB entry for it
db.quota_create(
ctxt, cur_proj_id, resource,
parent_limit, allocated=calc_parent_allocated)
else:
if cur_proj_id not in borked_allocated_quotas:
borked_allocated_quotas[cur_proj_id] = {}
borked_allocated_quotas[cur_proj_id][resource] = {
'db_allocated_quota': cur_parent_allocated,
'expected_allocated_quota': calc_parent_allocated}
project_queue.extend(project_subtree.items())
if borked_allocated_quotas:
msg = _("Invalid allocated quotas defined for the following "
"project quotas: %s") % borked_allocated_quotas
raise exception.InvalidNestedQuotaSetup(message=msg)
def _get_cur_project_allocated(self, ctxt, resource, project_tree):
"""Recursively calculates the allocated value of a project
:param ctxt: context used to retrieve DB values
:param resource: the resource to calculate allocated value for
:param project_tree: the project tree used to calculate allocated
e.g. {'A': {'B': {'D': None}, 'C': None}}
A project's "allocated" value depends on:
1) the quota limits which have been "given" to its children, in
the case those limits are not unlimited (-1)
2) the current quota being used by a child plus whatever the child
has given to its children, in the case of unlimited (-1) limits
Scenario #2 requires recursively calculating allocated, and in order
to efficiently calculate things we will save off any previously
calculated allocated values.
NOTE: this currently leaves a race condition when a project's allocated
value has been calculated (with a -1 limit), but then a child project
gets a volume created, thus changing the in-use value and messing up
the child's allocated value. We should look into updating the allocated
values as we're going along and switching to NestedQuotaDriver with
flip of a switch.
"""
# Grab the current node
cur_project_id = list(project_tree)[0]
project_subtree = project_tree[cur_project_id]
res_name = resource.name
if cur_project_id not in self._allocated:
self._allocated[cur_project_id] = {}
if res_name not in self._allocated[cur_project_id]:
# Calculate the allocated value for this resource since haven't yet
cur_project_allocated = 0
child_proj_ids = project_subtree.keys() if project_subtree else {}
res_dict = {res_name: resource}
child_project_quotas = {child_id: self.get_project_quotas(
ctxt, res_dict, child_id) for child_id in child_proj_ids}
for child_id, child_quota in child_project_quotas.items():
child_limit = child_quota[res_name]['limit']
# Non-unlimited quota is easy, anything explicitly given to a
# child project gets added into allocated value
if child_limit != -1:
if child_quota[res_name].get('in_use', 0) > child_limit:
msg = _("Quota limit invalid for project '%(proj)s' "
"for resource '%(res)s': limit of %(limit)d "
"is less than in-use value of %(used)d") % {
'proj': child_id, 'res': res_name,
'limit': child_limit,
'used': child_quota[res_name]['in_use']
}
raise exception.InvalidNestedQuotaSetup(reason=msg)
cur_project_allocated += child_limit
# For -1, take any quota being eaten up by child, as well as
# what the child itself has given up to its children
else:
child_in_use = child_quota[res_name].get('in_use', 0)
# Recursively calculate child's allocated
child_alloc = self._get_cur_project_allocated(
ctxt, resource, {child_id: project_subtree[child_id]})
cur_project_allocated += child_in_use + child_alloc
self._allocated[cur_project_id][res_name] = cur_project_allocated
return self._allocated[cur_project_id][res_name]
def get_default(self, context, resource, project_id):
"""Get a specific default quota for a resource."""
resource = super(NestedDbQuotaDriver, self).get_default(
context, resource, project_id)
return 0 if quota_utils.get_parent_project_id(
context, project_id) else resource.default
def get_defaults(self, context, resources, project_id=None):
defaults = super(NestedDbQuotaDriver, self).get_defaults(
context, resources, project_id)
# All defaults are 0 for child project
if quota_utils.get_parent_project_id(context, project_id):
for key in defaults:
defaults[key] = 0
return defaults
def _reserve(self, context, resources, quotas, deltas, expire, project_id):
reserved = []
# As to not change the exception behavior, flag every res that would
# be over instead of failing on first OverQuota
resources_failed_to_update = []
failed_usages = {}
for res in deltas.keys():
try:
reserved += db.quota_reserve(
context, resources, quotas, {res: deltas[res]},
expire, CONF.until_refresh, CONF.max_age, project_id)
if quotas[res] == -1:
reserved += quota_utils.update_alloc_to_next_hard_limit(
context, resources, deltas, res, expire, project_id)
except exception.OverQuota as e:
resources_failed_to_update.append(res)
failed_usages.update(e.kwargs['usages'])
if resources_failed_to_update:
db.reservation_rollback(context, reserved, project_id)
# We change OverQuota to OverVolumeLimit in other places and expect
# to find all of the OverQuota kwargs
raise exception.OverQuota(overs=sorted(resources_failed_to_update),
quotas=quotas, usages=failed_usages)
return reserved
class BaseResource(object):
"""Describe a single resource for quota checking."""
@ -854,10 +646,6 @@ class QuotaEngine(object):
self._driver_class = self._quota_driver_class
return self._driver_class
def using_nested_quotas(self):
"""Returns true if nested quotas are being used"""
return isinstance(self._driver, NestedDbQuotaDriver)
def __contains__(self, resource):
return resource in self.resources
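To illustrate the bookkeeping the removed driver describes above, here is a self-contained sketch of the recursive "allocated" calculation over a plain project-tree dict. The tree shape matches the docstring example; the limits and usage numbers are invented.

    def allocated(subtree, limits, in_use):
        """Sum what a project has handed to its children for one resource."""
        total = 0
        for child_id, child_subtree in (subtree or {}).items():
            child_limit = limits.get(child_id, 0)
            if child_limit != -1:
                # An explicit child limit counts fully against the parent.
                total += child_limit
            else:
                # An unlimited (-1) child instead charges its own usage plus
                # whatever it has handed further down the tree.
                total += in_use.get(child_id, 0) + allocated(
                    child_subtree, limits, in_use)
        return total

    # A's subtree as in the docstring example: {'B': {'D': None}, 'C': None}
    subtree = {'B': {'D': None}, 'C': None}
    limits = {'B': -1, 'C': 3, 'D': 2}
    in_use = {'B': 1, 'C': 0, 'D': 0}
    # C's limit (3) + B's usage (1) + what B gave to D (2) = 6
    print(allocated(subtree, limits, in_use))  # 6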

cinder/quota_utils.py (103 lines changed)

@ -15,13 +15,10 @@
from keystoneauth1 import identity
from keystoneauth1 import loading as ka_loading
from keystoneclient import client
from keystoneclient import exceptions
from oslo_config import cfg
from oslo_log import log as logging
from cinder import db
from cinder import exception
from cinder.i18n import _
CONF = cfg.CONF
CONF.import_group('keystone_authtoken',
@ -96,9 +93,7 @@ def get_project_hierarchy(context, project_id, subtree_as_ids=False,
Along with hierarchical multitenancy in keystone API v3, projects can be
hierarchically organized. Therefore, we need to know the project
hierarchy, if any, in order to do nested quota operations properly.
If the domain is being used as the top most parent, it is filtered out from
the parent tree and parent_id.
hierarchy, if any, in order to do default volume type operations properly.
"""
keystone = _keystone_client(context)
generic_project = GenericProjectInfo(project_id, keystone.version)
@ -125,102 +120,6 @@ def get_project_hierarchy(context, project_id, subtree_as_ids=False,
return generic_project
def get_parent_project_id(context, project_id):
return get_project_hierarchy(context, project_id).parent_id
def get_all_projects(context):
# Right now this would have to be done as cloud admin with Keystone v3
return _keystone_client(context, (3, 0)).projects.list()
def get_all_root_project_ids(context):
project_list = get_all_projects(context)
# Find every project which does not have a parent, meaning it is the
# root of the tree
project_roots = [project.id for project in project_list
if not project.parent_id]
return project_roots
def update_alloc_to_next_hard_limit(context, resources, deltas, res,
expire, project_id):
from cinder import quota
QUOTAS = quota.QUOTAS
GROUP_QUOTAS = quota.GROUP_QUOTAS
reservations = []
projects = get_project_hierarchy(context, project_id,
parents_as_ids=True).parents
hard_limit_found = False
# Update allocated values up the chain til we hit a hard limit or run out
# of parents
while projects and not hard_limit_found:
cur_proj_id = list(projects)[0]
projects = projects[cur_proj_id]
if res == 'groups':
cur_quota_lim = GROUP_QUOTAS.get_by_project_or_default(
context, cur_proj_id, res)
else:
cur_quota_lim = QUOTAS.get_by_project_or_default(
context, cur_proj_id, res)
hard_limit_found = (cur_quota_lim != -1)
cur_quota = {res: cur_quota_lim}
cur_delta = {res: deltas[res]}
try:
reservations += db.quota_reserve(
context, resources, cur_quota, cur_delta, expire,
CONF.until_refresh, CONF.max_age, cur_proj_id,
is_allocated_reserve=True)
except exception.OverQuota:
db.reservation_rollback(context, reservations)
raise
return reservations
def validate_setup_for_nested_quota_use(ctxt, resources,
nested_quota_driver,
fix_allocated_quotas=False):
"""Validates the setup supports using nested quotas.
Ensures that Keystone v3 or greater is being used, that the current
user is of the cloud admin role, and that the existing quotas make sense to
nest in the current hierarchy (e.g. that no child quota would be larger
than its parent).
:param resources: the quota resources to validate
:param nested_quota_driver: nested quota driver used to validate each tree
:param fix_allocated_quotas: if True, parent projects "allocated" total
will be calculated based on the existing child limits and the DB will
be updated. If False, an exception is raised reporting any parent
allocated quotas are currently incorrect.
"""
try:
project_roots = get_all_root_project_ids(ctxt)
# Now that we've got the roots of each tree, validate the trees
# to ensure that each is setup logically for nested quotas
for root in project_roots:
root_proj = get_project_hierarchy(ctxt, root,
subtree_as_ids=True)
nested_quota_driver.validate_nested_setup(
ctxt,
resources,
{root_proj.id: root_proj.subtree},
fix_allocated_quotas=fix_allocated_quotas
)
except exceptions.VersionNotAvailable:
msg = _("Keystone version 3 or greater must be used to get nested "
"quota support.")
raise exception.CinderException(message=msg)
except exceptions.Forbidden:
msg = _("Must run this command as cloud admin using "
"a Keystone policy.json which allows cloud "
"admin to list and get any project.")
raise exception.CinderException(message=msg)
def _keystone_client(context, version=(3, 0)):
"""Creates and returns an instance of a generic keystone client.

cinder/tests/functional/test_quotas.py (170 lines changed)

@ -1,170 +0,0 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import uuid
from cinder import quota
from cinder.tests.functional.api import client
from cinder.tests.functional import functional_helpers
from cinder.volume import configuration
class NestedQuotasTest(functional_helpers._FunctionalTestBase):
_vol_type_name = 'functional_test_type'
def setUp(self):
super(NestedQuotasTest, self).setUp()
self.api.create_type(self._vol_type_name)
self._create_project_hierarchy()
# Need to mock out Keystone so the functional tests don't require other
# services
_keystone_client = mock.MagicMock()
_keystone_client.version = 'v3'
_keystone_client.projects.get.side_effect = self._get_project
_keystone_client_get = mock.patch(
'cinder.quota_utils._keystone_client',
lambda *args, **kwargs: _keystone_client)
_keystone_client_get.start()
self.addCleanup(_keystone_client_get.stop)
# The QUOTA engine in Cinder is a global variable that lazy loads the
# quota driver, so even if we change the config for the quota driver,
# we won't reliably change the driver being used (or change it back)
# unless the global variables get cleaned up, so using mock instead to
# simulate this change
nested_driver = quota.NestedDbQuotaDriver()
_driver_patcher = mock.patch(
'cinder.quota.QuotaEngine._driver', new=nested_driver)
_driver_patcher.start()
self.addCleanup(_driver_patcher.stop)
# Default to using the top parent in the hierarchy
self._update_project(self.A.id)
def _get_flags(self):
f = super(NestedQuotasTest, self)._get_flags()
f['volume_driver'] = (
{'v': 'cinder.tests.fake_driver.FakeLoggingVolumeDriver',
'g': configuration.SHARED_CONF_GROUP})
f['default_volume_type'] = {'v': self._vol_type_name}
return f
# Currently we use 413 error for over quota
over_quota_exception = client.OpenStackApiException413
def _create_project_hierarchy(self):
r"""Sets up the nested hierarchy show below.
+-----------+
| A |
| / \ |
| B C |
| / |
| D |
+-----------+
"""
self.A = self.FakeProject()
self.B = self.FakeProject(parent_id=self.A.id)
self.C = self.FakeProject(parent_id=self.A.id)
self.D = self.FakeProject(parent_id=self.B.id)
self.B.subtree = {self.D.id: self.D.subtree}
self.A.subtree = {self.B.id: self.B.subtree, self.C.id: self.C.subtree}
self.A.parents = None
self.B.parents = {self.A.id: None}
self.C.parents = {self.A.id: None}
self.D.parents = {self.B.id: self.B.parents}
# project_by_id attribute is used to recover a project based on its id.
self.project_by_id = {self.A.id: self.A, self.B.id: self.B,
self.C.id: self.C, self.D.id: self.D}
class FakeProject(object):
_dom_id = uuid.uuid4().hex
def __init__(self, parent_id=None):
self.id = uuid.uuid4().hex
self.parent_id = parent_id
self.domain_id = self._dom_id
self.subtree = None
self.parents = None
def _get_project(self, project_id, *args, **kwargs):
return self.project_by_id[project_id]
def _create_volume(self):
return self.api.post_volume({'volume': {'size': 1}})
def test_default_quotas_enforced(self):
# Should be able to create volume on parent project by default
created_vol = self._create_volume()
self._poll_volume_while(created_vol['id'], ['creating'], 'available')
self._update_project(self.B.id)
# Shouldn't be able to create volume on child project by default
self.assertRaises(self.over_quota_exception, self._create_volume)
def test_update_child_with_parent_default_quota(self):
# Make sure we can update to a reasonable value
self.api.quota_set(self.B.id, {'volumes': 5})
# Ensure that the update took and we can create a volume
self._poll_volume_while(
self._create_volume()['id'], ['creating'], 'available')
def test_quota_update_child_greater_than_parent(self):
self.assertRaises(
client.OpenStackApiException400,
self.api.quota_set, self.B.id, {'volumes': 11})
def test_child_soft_limit_propagates_to_parent(self):
self.api.quota_set(self.B.id, {'volumes': 0})
self.api.quota_set(self.D.id, {'volumes': -1})
self._update_project(self.D.id)
self.assertRaises(self.over_quota_exception, self._create_volume)
def test_child_quota_hard_limits_affects_parents_allocated(self):
self.api.quota_set(self.B.id, {'volumes': 5})
self.api.quota_set(self.C.id, {'volumes': 3})
alloc = self.api.quota_get(self.A.id)['volumes']['allocated']
self.assertEqual(8, alloc)
self.assertRaises(client.OpenStackApiException400,
self.api.quota_set, self.C.id, {'volumes': 6})
def _update_quota_and_def_type(self, project_id, quota):
self.api.quota_set(project_id, quota)
type_updates = {'%s_%s' % (key, self._vol_type_name): val for key, val
in quota.items() if key != 'per_volume_gigabytes'}
return self.api.quota_set(project_id, type_updates)
def test_grandchild_soft_limit_propagates_up(self):
quota = {'volumes': -1, 'gigabytes': -1, 'per_volume_gigabytes': -1}
self._update_quota_and_def_type(self.B.id, quota)
self._update_quota_and_def_type(self.D.id, quota)
self._update_project(self.D.id)
# Create two volumes in the grandchild project and ensure grandparent's
# allocated is updated accordingly
vol = self._create_volume()
self._create_volume()
self._update_project(self.A.id)
alloc = self.api.quota_get(self.A.id)['volumes']['allocated']
self.assertEqual(2, alloc)
alloc = self.api.quota_get(self.B.id)['volumes']['allocated']
self.assertEqual(2, alloc)
# Ensure delete reduces the quota
self._update_project(self.D.id)
self.api.delete_volume(vol['id'])
self._poll_volume_while(vol['id'], ['deleting'])
self._update_project(self.A.id)
alloc = self.api.quota_get(self.A.id)['volumes']['allocated']
self.assertEqual(1, alloc)
alloc = self.api.quota_get(self.B.id)['volumes']['allocated']
self.assertEqual(1, alloc)

cinder/tests/unit/api/contrib/test_quotas.py (817 lines changed)

@ -19,7 +19,6 @@
from unittest import mock
import uuid
import ddt
from oslo_config import cfg
from oslo_config import fixture as config_fixture
import webob.exc
@ -28,7 +27,6 @@ from cinder.api.contrib import quotas
from cinder import context
from cinder import db
from cinder import exception
from cinder import quota
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test
from cinder.tests.unit import test_db_api
@ -99,57 +97,17 @@ class QuotaSetsControllerTestBase(test.TestCase):
self.req.environ['cinder.context'].is_admin = True
self.req.params = {}
self._create_project_hierarchy()
self.req.environ['cinder.context'].project_id = self.A.id
self.req.environ['cinder.context'].project_id = uuid.uuid4().hex
get_patcher = mock.patch('cinder.quota_utils.get_project_hierarchy',
self._get_project)
get_patcher.start()
self.addCleanup(get_patcher.stop)
def _list_projects(context):
return self.project_by_id.values()
list_patcher = mock.patch('cinder.quota_utils.get_all_projects',
_list_projects)
list_patcher.start()
self.addCleanup(list_patcher.stop)
self.auth_url = 'http://localhost:5000'
self.fixture = self.useFixture(config_fixture.Config(CONF))
self.fixture.config(auth_url=self.auth_url, group='keystone_authtoken')
def _create_project_hierarchy(self):
r"""Sets an environment used for nested quotas tests.
Create a project hierarchy such as follows:
+-----------+
| |
| A |
| / \ |
| B C |
| / |
| D |
+-----------+
"""
self.A = self.FakeProject(id=uuid.uuid4().hex, parent_id=None)
self.B = self.FakeProject(id=uuid.uuid4().hex, parent_id=self.A.id)
self.C = self.FakeProject(id=uuid.uuid4().hex, parent_id=self.A.id)
self.D = self.FakeProject(id=uuid.uuid4().hex, parent_id=self.B.id)
# update projects subtrees
self.B.subtree = {self.D.id: self.D.subtree}
self.A.subtree = {self.B.id: self.B.subtree, self.C.id: self.C.subtree}
self.A.parents = None
self.B.parents = {self.A.id: None}
self.C.parents = {self.A.id: None}
self.D.parents = {self.B.id: self.B.parents}
# project_by_id attribute is used to recover a project based on its id.
self.project_by_id = {self.A.id: self.A, self.B.id: self.B,
self.C.id: self.C, self.D.id: self.D}
def _get_project(self, context, id, subtree_as_ids=False,
parents_as_ids=False, is_admin_project=False):
return self.project_by_id.get(id, self.FakeProject())
@ -208,29 +166,6 @@ class QuotaSetsControllerTest(QuotaSetsControllerTestBase):
result = self.controller.update(self.req, fake.PROJECT_ID, body=body)
self.assertDictEqual(body, result)
def test_update_subproject_not_in_hierarchy_non_nested(self):
# When not using nested quotas, the hierarchy should not be considered
# for an update
E = self.FakeProject(id=uuid.uuid4().hex, parent_id=None)
F = self.FakeProject(id=uuid.uuid4().hex, parent_id=E.id)
E.subtree = {F.id: F.subtree}
self.project_by_id[E.id] = E
self.project_by_id[F.id] = F
# Update the project A quota.
self.req.environ['cinder.context'].project_id = self.A.id
body = make_body(gigabytes=2000, snapshots=15,
volumes=5, backups=5, tenant_id=None)
result = self.controller.update(self.req, self.A.id, body=body)
self.assertDictEqual(body, result)
# Try to update the quota of F, it will be allowed even though
# project E doesn't belong to the project hierarchy of A, because
# we are NOT using the nested quota driver
self.req.environ['cinder.context'].project_id = self.A.id
body = make_body(gigabytes=2000, snapshots=15,
volumes=5, backups=5, tenant_id=None)
self.controller.update(self.req, F.id, body=body)
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_string_length')
def test_update_limit(self, mock_validate):
@ -341,761 +276,31 @@ class QuotaSetsControllerTest(QuotaSetsControllerTestBase):
self.assertDictEqual(result_show, result_show_after)
def test_delete_with_allocated_quota_different_from_zero(self):
self.req.environ['cinder.context'].project_id = self.A.id
project_id_1 = uuid.uuid4().hex
project_id_2 = uuid.uuid4().hex
self.req.environ['cinder.context'].project_id = project_id_1
body = make_body(gigabytes=2000, snapshots=15,
volumes=5, backups=5,
backup_gigabytes=1000, tenant_id=None)
result_update = self.controller.update(self.req, self.A.id, body=body)
result_update = self.controller.update(self.req, project_id_1,
body=body)
self.assertDictEqual(body, result_update)
# Set usage param to True in order to see the allocated values.
self.req.params = {'usage': 'True'}
result_show = self.controller.show(self.req, self.A.id)
result_show = self.controller.show(self.req, project_id_1)
result_update = self.controller.update(self.req, self.B.id, body=body)
result_update = self.controller.update(self.req, project_id_2,
body=body)
self.assertDictEqual(body, result_update)
self.controller.delete(self.req, self.B.id)
self.controller.delete(self.req, project_id_2)
result_show_after = self.controller.show(self.req, self.A.id)
result_show_after = self.controller.show(self.req, project_id_1)
self.assertDictEqual(result_show, result_show_after)
def test_delete_no_admin(self):
self.req.environ['cinder.context'].is_admin = False
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.delete, self.req, fake.PROJECT_ID)
def test_subproject_show_not_using_nested_quotas(self):
# Current roles say for non-nested quotas, an admin should be able to
# see anyone's quota
self.req.environ['cinder.context'].project_id = self.B.id
self.controller.show(self.req, self.C.id)
self.controller.show(self.req, self.A.id)
@ddt.ddt
class QuotaSetControllerValidateNestedQuotaSetup(QuotaSetsControllerTestBase):
"""Validates the setup before using NestedQuota driver.
Test case validates flipping on NestedQuota driver after using the
non-nested quota driver for some time.
"""
def _create_project_hierarchy(self):
r"""Sets an environment used for nested quotas tests.
Create a project hierarchy such as follows:
+-----------------+
| |
| A G E |
| / \ \ |
| B C F |
| / |
| D |
+-----------------+
"""
super(QuotaSetControllerValidateNestedQuotaSetup,
self)._create_project_hierarchy()
# Project A, B, C, D are already defined by parent test class
self.E = self.FakeProject(id=uuid.uuid4().hex, parent_id=None)
self.F = self.FakeProject(id=uuid.uuid4().hex, parent_id=self.E.id)
self.G = self.FakeProject(id=uuid.uuid4().hex, parent_id=None)
self.E.subtree = {self.F.id: self.F.subtree}
self.project_by_id.update({self.E.id: self.E, self.F.id: self.F,
self.G.id: self.G})
@ddt.data({'param': None, 'result': False},
{'param': 'true', 'result': True},
{'param': 'false', 'result': False})
@ddt.unpack
def test_validate_setup_for_nested_quota_use_with_param(self, param,
result):
with mock.patch(
'cinder.quota_utils.validate_setup_for_nested_quota_use') as \
mock_quota_utils:
if param:
self.req.params['fix_allocated_quotas'] = param
self.controller.validate_setup_for_nested_quota_use(self.req)
mock_quota_utils.assert_called_once_with(
self.req.environ['cinder.context'],
mock.ANY, mock.ANY,
fix_allocated_quotas=result)
def test_validate_setup_for_nested_quota_use_with_invalid_param(self):
self.req.params['fix_allocated_quotas'] = 'non_boolean'
self.assertRaises(
webob.exc.HTTPBadRequest,
self.controller.validate_setup_for_nested_quota_use,
self.req)
def test_validate_nested_quotas_no_in_use_vols(self):
# Update the project A quota.
self.req.environ['cinder.context'].project_id = self.A.id
quota = {'volumes': 5}
body = {'quota_set': quota}
self.controller.update(self.req, self.A.id, body=body)
quota['volumes'] = 3
self.controller.update(self.req, self.B.id, body=body)
# Allocated value for quota A is borked, because update was done
# without nested quota driver
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.validate_setup_for_nested_quota_use,
self.req)
# Fix the allocated values in DB
self.req.params['fix_allocated_quotas'] = True
self.controller.validate_setup_for_nested_quota_use(
self.req)
self.req.params['fix_allocated_quotas'] = False
# Ensure that we've properly fixed the allocated quotas
self.controller.validate_setup_for_nested_quota_use(self.req)
# Over-allocate the quotas between children
self.controller.update(self.req, self.C.id, body=body)
# This is where we should fail because the child limits are too big
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.validate_setup_for_nested_quota_use,
self.req)
quota['volumes'] = 1
self.controller.update(self.req, self.C.id, body=body)
# Make sure we're validating all hierarchy trees
self.req.environ['cinder.context'].project_id = self.E.id
quota['volumes'] = 1
self.controller.update(self.req, self.E.id, body=body)
quota['volumes'] = 3
self.controller.update(self.req, self.F.id, body=body)
self.assertRaises(
webob.exc.HTTPBadRequest,
self.controller.validate_setup_for_nested_quota_use,
self.req)
# Put quotas in a good state
quota['volumes'] = 1
self.controller.update(self.req, self.F.id, body=body)
self.req.params['fix_allocated_quotas'] = True
self.controller.validate_setup_for_nested_quota_use(self.req)
@mock.patch('cinder.db.quota_usage_get_all_by_project')
def test_validate_nested_quotas_in_use_vols(self, mock_usage):
self._create_fake_quota_usages(
{self.A.id: 1, self.B.id: 1, self.D.id: 0, self.C.id: 3,
self.E.id: 0, self.F.id: 0, self.G.id: 0})
mock_usage.side_effect = self._fake_quota_usage_get_all_by_project
# Update the project A quota.
self.req.environ['cinder.context'].project_id = self.A.id
quota_limit = {'volumes': 7}
body = {'quota_set': quota_limit}
self.controller.update(self.req, self.A.id, body=body)
quota_limit['volumes'] = 3
self.controller.update(self.req, self.B.id, body=body)
quota_limit['volumes'] = 3
self.controller.update(self.req, self.C.id, body=body)
self.req.params['fix_allocated_quotas'] = True
self.controller.validate_setup_for_nested_quota_use(self.req)
quota_limit['volumes'] = 6
self.controller.update(self.req, self.A.id, body=body)
# Should fail because the one in_use volume of 'A'
self.assertRaises(
webob.exc.HTTPBadRequest,
self.controller.validate_setup_for_nested_quota_use,
self.req)
@mock.patch('cinder.db.quota_usage_get_all_by_project')
def test_validate_nested_quotas_quota_borked(self, mock_usage):