From 06e6542f153a1ba197361f3f8cb0010c7d2d4285 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 26 Apr 2021 07:35:15 -0700 Subject: [PATCH] Add unified quotas infrastructure This adds some infrastructure to be able to query and honor limits declared in keystone. It adds a single initial quota value for the total size of all active images for bootstrapping the tests. Checking these values is controlled by a new configuration option that globally enables and disables the checking, defaulting to False. Related to blueprint glance-unified-quotas Change-Id: I8d8f4aaed465486e80be85bc9a5d2c2be7f1ecad --- glance/common/config.py | 17 +++- glance/quota/keystone.py | 101 +++++++++++++++++++++++ glance/tests/unit/fixtures.py | 34 +++++++- glance/tests/unit/test_quota.py | 137 ++++++++++++++++++++++++++++++++ requirements.txt | 1 + 5 files changed, 288 insertions(+), 2 deletions(-) create mode 100644 glance/quota/keystone.py diff --git a/glance/common/config.py b/glance/common/config.py index 31deb0accd..401751712c 100644 --- a/glance/common/config.py +++ b/glance/common/config.py @@ -441,14 +441,29 @@ TeraBytes respectively. Note that there should not be any space between the value and unit. Value ``0`` signifies no quota enforcement. Negative values are invalid and result in errors. +This has no effect if ``use_keystone_limits`` is enabled. + Possible values: * A string that is a valid concatenation of a non-negative integer representing the storage value and an optional string literal representing storage units as mentioned above. Related options: - * None + * use_keystone_limits +""")), + cfg.BoolOpt('use_keystone_limits', default=False, + help=_(""" +Utilize per-tenant resource limits registered in Keystone. + +Enabling this feature will cause Glance to retrieve limits set in keystone +for resource consumption and enforce them against API users. 
Before turning +this on, the limits need to be registered in Keystone or all quotas will be +considered to be zero, thus rejecting all new resource requests. + +These per-tenant resource limits are independent from the static +global ones configured in this config file. If this is enabled, the +relevant static global limits will be ignored. """)), cfg.HostAddressOpt('pydev_worker_debug_host', sample_default='localhost', diff --git a/glance/quota/keystone.py b/glance/quota/keystone.py new file mode 100644 index 0000000000..29ed3e74b1 --- /dev/null +++ b/glance/quota/keystone.py @@ -0,0 +1,101 @@ +# Copyright 2021 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg +from oslo_limit import exception as ol_exc +from oslo_limit import limit +from oslo_log import log as logging +from oslo_utils import units + +from glance.common import exception +from glance.db.sqlalchemy import api as db +from glance.i18n import _LE + +CONF = cfg.CONF +CONF.import_opt('use_keystone_limits', 'glance.common.config') +LOG = logging.getLogger(__name__) +limit.opts.register_opts(CONF) + +QUOTA_IMAGE_SIZE_TOTAL = 'image_size_total' + + +def _enforce_some(context, project_id, quota_value_fns, deltas): +    """Helper method to enforce a set of quota values. 
+ + :param context: The RequestContext + :param project_id: The project_id of the tenant being checked + :param quota_value_fns: A mapping of quota names to functions that will be + called with no arguments to return the numerical + value representing current usage. + :param deltas: A mapping of quota names to the amount of resource being + requested for each (to be added to the current usage before + determining if over-quota). + :raises: exception.LimitExceeded if the current usage is over the defined + limit. + :returns: None if the tenant is not currently over their quota. + """ + if not CONF.use_keystone_limits: + return + + def callback(project_id, resource_names): + return {name: quota_value_fns[name]() + for name in resource_names} + + enforcer = limit.Enforcer(callback) + try: + enforcer.enforce(project_id, + {quota_name: deltas.get(quota_name, 0) + for quota_name in quota_value_fns}) + except ol_exc.ProjectOverLimit as e: + raise exception.LimitExceeded(body=str(e)) + except ol_exc.SessionInitError as e: + LOG.error(_LE('Failed to initialize oslo_limit, likely due to ' + 'incorrect or insufficient configuration: %(err)s'), + {'err': str(e)}) + # We could just raise LimitExceeded here, but a 500 is + # appropriate for incorrect server-side configuration, so we + # re-raise here after the above error message to make sure we + # are noticed. + raise + + +def _enforce_one(context, project_id, quota_name, get_value_fn, delta=0): + """Helper method to enforce a single named quota value. + + :param context: The RequestContext + :param project_id: The project_id of the tenant being checked + :param quota_name: One of the quota names defined above + :param get_value_fn: A function that will be called with no arguments to + return the numerical value representing current usage. + :param delta: The amount of resource being requested (to be added to the + current usage before determining if over-quota). 
+ :raises: exception.LimitExceeded if the current usage is over the defined + limit. + :returns: None if the tenant is not currently over their quota. + """ + + return _enforce_some(context, project_id, + {quota_name: get_value_fn}, + {quota_name: delta}) + + +def enforce_image_size_total(context, project_id, delta=0): + """Enforce the image_size_total quota. + + This enforces the total image size quota for the supplied project_id. + """ + _enforce_one( + context, project_id, QUOTA_IMAGE_SIZE_TOTAL, + lambda: db.user_get_storage_usage(context, project_id) // units.Mi, + delta=delta) diff --git a/glance/tests/unit/fixtures.py b/glance/tests/unit/fixtures.py index e6e48c32c5..120660fbac 100644 --- a/glance/tests/unit/fixtures.py +++ b/glance/tests/unit/fixtures.py @@ -15,10 +15,13 @@ import logging as std_logging import os +from unittest import mock import warnings import fixtures as pyfixtures - +from openstack.identity.v3 import endpoint +from openstack.identity.v3 import limit as klimit +from oslo_limit import limit _TRUE_VALUES = ('True', 'true', '1', 'yes') @@ -136,3 +139,32 @@ class WarningsFixture(pyfixtures.Fixture): 'error', message="Property '.*' has moved to '.*'") self.addCleanup(warnings.resetwarnings) + + +class KeystoneQuotaFixture(pyfixtures.Fixture): + def __init__(self, **defaults): + self.defaults = defaults + + def setUp(self): + super(KeystoneQuotaFixture, self).setUp() + + self.mock_conn = mock.MagicMock() + limit._SDK_CONNECTION = self.mock_conn + + mock_gem = self.useFixture( + pyfixtures.MockPatch('oslo_limit.limit.Enforcer.' 
+ '_get_enforcement_model')).mock + mock_gem.return_value = 'flat' + + fake_endpoint = endpoint.Endpoint() + fake_endpoint.service_id = "service_id" + fake_endpoint.region_id = "region_id" + self.mock_conn.get_endpoint.return_value = fake_endpoint + + def fake_limits(service_id, region_id, resource_name, project_id): + this_limit = klimit.Limit() + this_limit.resource_name = resource_name + this_limit.resource_limit = self.defaults[resource_name] + return iter([this_limit]) + + self.mock_conn.limits.side_effect = fake_limits diff --git a/glance/tests/unit/test_quota.py b/glance/tests/unit/test_quota.py index 04da4943ba..3420c546df 100644 --- a/glance/tests/unit/test_quota.py +++ b/glance/tests/unit/test_quota.py @@ -13,10 +13,12 @@ # License for the specific language governing permissions and limitations # under the License. import copy +import fixtures from unittest import mock from unittest.mock import patch import uuid +from oslo_limit import exception as ol_exc from oslo_utils import encodeutils from oslo_utils import units @@ -26,6 +28,8 @@ from six.moves import range from glance.common import exception from glance.common import store_utils import glance.quota +from glance.quota import keystone as ks_quota +from glance.tests.unit import fixtures as glance_fixtures from glance.tests.unit import utils as unit_test_utils from glance.tests import utils as test_utils @@ -735,3 +739,136 @@ class TestImageLocationQuotas(test_utils.BaseTestCase): self.config(image_location_quota=0) self.image.locations.remove(location1) self.assertEqual(0, len(self.image.locations)) + + +class TestImageKeystoneQuota(test_utils.BaseTestCase): + def setUp(self): + super(TestImageKeystoneQuota, self).setUp() + + default_limits = { + ks_quota.QUOTA_IMAGE_SIZE_TOTAL: 500, + 'another_limit': 2, + } + ksqf = glance_fixtures.KeystoneQuotaFixture(**default_limits) + + self.useFixture(ksqf) + + self.db_api = unit_test_utils.FakeDB() + 
self.useFixture(fixtures.MockPatch('glance.quota.keystone.db', + self.db_api)) + + def _create_fake_image(self, context, size): + location_count = 2 + locations = [] + for i in range(location_count): + locations.append({'url': 'file:///g/there/it/is%d' % i, + 'status': 'active', + 'metadata': {}}) + image_values = {'id': str(uuid.uuid4()), 'owner': context.owner, + 'status': 'active', 'size': size * units.Mi, + 'locations': locations} + self.db_api.image_create(context, image_values) + + def test_enforce_overquota(self): + # Check that a single large image with multiple locations will + # trip the quota check. + self.config(use_keystone_limits=True) + context = FakeContext() + self._create_fake_image(context, 300) + exc = self.assertRaises(exception.LimitExceeded, + ks_quota.enforce_image_size_total, + context, context.owner) + self.assertIn('image_size_total is over limit of 500', str(exc)) + + def test_enforce_overquota_with_delta(self): + # Check that delta is honored, if used. + self.config(use_keystone_limits=True) + context = FakeContext() + self._create_fake_image(context, 200) + ks_quota.enforce_image_size_total(context, context.owner) + ks_quota.enforce_image_size_total(context, context.owner, + delta=50) + self.assertRaises(exception.LimitExceeded, + ks_quota.enforce_image_size_total, + context, context.owner, delta=200) + + def test_enforce_overquota_disabled(self): + # Just like the overquota case above, but without being enabled, + # so no failure + self.config(use_keystone_limits=False) + context = FakeContext() + self._create_fake_image(context, 300) + # Does not raise because keystone limits are disabled + ks_quota.enforce_image_size_total(context, context.owner) + + def test_enforce_overquota_multiple(self): + # Check that multiple images with a combined amount + # (2*2*150=600) over the quota will trip the quota check. 
+ self.config(use_keystone_limits=True) + context = FakeContext() + self._create_fake_image(context, 150) + self._create_fake_image(context, 150) + exc = self.assertRaises(exception.LimitExceeded, + ks_quota.enforce_image_size_total, + context, context.owner) + self.assertIn('image_size_total is over limit of 500', str(exc)) + + def test_enforce_underquota(self): + self.config(use_keystone_limits=True) + context = FakeContext() + self._create_fake_image(context, 100) + # We are under quota, so no exception expected + ks_quota.enforce_image_size_total(context, context.owner) + + def test_enforce_underquota_with_others_over_quota(self): + self.config(use_keystone_limits=True) + # Put the first tenant over quota + context = FakeContext() + self._create_fake_image(context, 300) + self._create_fake_image(context, 300) + + # Create an image for another tenant that is not over quota + other_context = FakeContext() + other_context.owner = 'someone_else' + self._create_fake_image(other_context, 100) + # This tenant should pass the quota check, because it is under quota, + # even though the other is over. + ks_quota.enforce_image_size_total(other_context, other_context.owner) + + def test_enforce_multiple_limits_under_quota(self): + self.config(use_keystone_limits=True) + context = FakeContext() + # Make sure that we can call the multi-limit handler and pass when + # we are under quota. + ks_quota._enforce_some(context, context.owner, + {ks_quota.QUOTA_IMAGE_SIZE_TOTAL: lambda: 200, + 'another_limit': lambda: 1}, + {'another_limit': 1}) + + def test_enforce_multiple_limits_over_quota(self): + self.config(use_keystone_limits=True) + context = FakeContext() + # Make sure that even if one of a multi-limit call is over + # quota, we get the exception. 
+ self.assertRaises(exception.LimitExceeded, + ks_quota._enforce_some, + context, context.owner, + {ks_quota.QUOTA_IMAGE_SIZE_TOTAL: lambda: 200, + 'another_limit': lambda: 1}, + {'another_limit': 5}) + + @mock.patch('oslo_limit.limit.Enforcer') + @mock.patch.object(ks_quota, 'LOG') + def test_oslo_limit_config_fail(self, mock_LOG, mock_enforcer): + self.config(use_keystone_limits=True) + mock_enforcer.return_value.enforce.side_effect = ( + ol_exc.SessionInitError('test')) + context = FakeContext() + self._create_fake_image(context, 100) + self.assertRaises(ol_exc.SessionInitError, + ks_quota.enforce_image_size_total, + context, context.owner) + mock_LOG.error.assert_called_once_with( + 'Failed to initialize oslo_limit, likely due to ' + 'incorrect or insufficient configuration: %(err)s', + {'err': "Can't initialise OpenStackSDK session: test."}) diff --git a/requirements.txt b/requirements.txt index 0e43a7fb44..4b2ac1746f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -38,6 +38,7 @@ six>=1.11.0 # MIT oslo.db>=5.0.0 # Apache-2.0 oslo.i18n>=5.0.0 # Apache-2.0 +oslo.limit>=1.0.0 # Apache-2.0 oslo.log>=4.3.0 # Apache-2.0 oslo.messaging>=5.29.0,!=9.0.0 # Apache-2.0 oslo.middleware>=3.31.0 # Apache-2.0