Merge "Follow up for unified limits"

Zuul 2022-03-09 10:33:14 +00:00 committed by Gerrit Code Review
commit d1b036fdb1
5 changed files with 55 additions and 13 deletions

View File

@@ -1,6 +1,7 @@
nova/compute/manager.py
nova/crypto.py
nova/limit/local.py
nova/limit/placement.py
nova/network/neutron.py
nova/pci
nova/privsep/path.py

View File

@@ -20,7 +20,6 @@ from oslo_limit import limit
from oslo_log import log as logging
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.limit import utils as nova_limit_utils
from nova import objects
@@ -80,7 +79,9 @@ LEGACY_LIMITS = {
}
def get_in_use(context, project_id):
def get_in_use(
context: 'nova.context.RequestContext', project_id: str
) -> ty.Dict[str, int]:
"""Returns in use counts for each resource, for given project.
This sounds simple but many resources can't be counted per project,
@@ -144,7 +145,7 @@ def enforce_api_limit(entity_type: str, count: int) -> None:
def enforce_db_limit(
context: nova_context.RequestContext,
context: 'nova.context.RequestContext',
entity_type: str,
entity_scope: ty.Any,
delta: int
@@ -192,7 +193,9 @@ def enforce_db_limit(
raise EXCEPTIONS.get(entity_type, exception.OverQuota)(str(e))
def _convert_keys_to_legacy_name(new_dict):
def _convert_keys_to_legacy_name(
new_dict: ty.Dict[str, int]
) -> ty.Dict[str, int]:
legacy = {}
for new_name, old_name in LEGACY_LIMITS.items():
# defensive in case oslo or keystone doesn't give us an answer
@@ -200,7 +203,7 @@ def _convert_keys_to_legacy_name(new_dict):
return legacy
def get_legacy_default_limits():
def get_legacy_default_limits() -> ty.Dict[str, int]:
# TODO(johngarbutt): need oslo.limit API for this, it should do caching
enforcer = limit.Enforcer(lambda: None)
new_limits = enforcer.get_registered_limits(LEGACY_LIMITS.keys())
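
(Reading aid, not part of the diff: a minimal sketch of how the now-typed
enforce_db_limit() above is typically invoked, assuming the KEY_PAIRS
entity-type constant and per-user scoping used for the legacy key pair
quota; the constant name and the concrete exception raised are assumptions.)

# Hypothetical caller sketch: enforce the DB-backed key pair limit before
# creating a keypair. enforce_db_limit() counts current usage via its
# callback and raises the legacy exception mapped in EXCEPTIONS (falling
# back to exception.OverQuota) when delta would exceed the registered limit.
from nova import context as nova_context
from nova.limit import local as local_limit

ctxt = nova_context.get_admin_context()
local_limit.enforce_db_limit(
    ctxt,
    local_limit.KEY_PAIRS,      # entity_type (assumed constant name)
    entity_scope=ctxt.user_id,  # key pairs are counted per user
    delta=1,                    # one keypair about to be created
)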

View File

@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import typing as ty
import os_resource_classes as orc
from oslo_limit import exception as limit_exceptions
@@ -39,17 +40,23 @@ LEGACY_LIMITS = {
}
def _get_placement_usages(context, project_id):
def _get_placement_usages(
context: 'nova.context.RequestContext', project_id: str
) -> ty.Dict[str, int]:
global PLACEMENT_CLIENT
if not PLACEMENT_CLIENT:
PLACEMENT_CLIENT = report.SchedulerReportClient()
return PLACEMENT_CLIENT.get_usages_counts_for_limits(context, project_id)
def _get_usage(context, project_id, resource_names):
def _get_usage(
context: 'nova.context.RequestContext',
project_id: str,
resource_names: ty.List[str],
) -> ty.Dict[str, int]:
"""Called by oslo_limit's enforcer"""
if not limit_utils.use_unified_limits():
raise NotImplementedError("unified limits is disabled")
raise NotImplementedError("Unified limits support is disabled")
count_servers = False
resource_classes = []
@@ -113,7 +120,9 @@ def _get_usage(context, project_id, resource_names):
return resource_counts
def _get_deltas_by_flavor(flavor, is_bfv, count):
def _get_deltas_by_flavor(
flavor: 'objects.Flavor', is_bfv: bool, count: int
) -> ty.Dict[str, int]:
if flavor is None:
raise ValueError("flavor")
if count < 0:
@@ -132,7 +141,9 @@ def _get_deltas_by_flavor(flavor, is_bfv, count):
return deltas
def _get_enforcer(context, project_id):
def _get_enforcer(
context: 'nova.context.RequestContext', project_id: str
) -> limit.Enforcer:
# NOTE(johngarbutt) should we move context arg into oslo.limit?
def callback(project_id, resource_names):
return _get_usage(context, project_id, resource_names)
@@ -140,8 +151,15 @@ def _get_enforcer(context, project_id):
return limit.Enforcer(callback)
def enforce_num_instances_and_flavor(context, project_id, flavor, is_bfvm,
min_count, max_count, enforcer=None):
def enforce_num_instances_and_flavor(
context: 'nova.context.RequestContext',
project_id: str,
flavor: 'objects.Flavor',
is_bfvm: bool,
min_count: int,
max_count: int,
enforcer: ty.Optional[limit.Enforcer] = None
) -> int:
"""Return max instances possible, else raise TooManyInstances exception."""
if not limit_utils.use_unified_limits():
return max_count
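
(Reading aid, not part of the diff: a sketch of the call pattern the new
signature of enforce_num_instances_and_flavor() implies. The context, flavor
and requested count are placeholders; per the early return above, callers
simply get max_count back when unified limits are disabled.)

# Hypothetical caller sketch: check placement-backed limits (instance count,
# VCPU, MEMORY_MB and, for non boot-from-volume servers, DISK_GB) before a
# multi-create request. Returns the largest count that still fits within the
# limits, or raises TooManyInstances.
from nova import context as nova_context
from nova import objects
from nova.limit import placement as placement_limits

ctxt = nova_context.get_admin_context()
flavor = objects.Flavor.get_by_name(ctxt, 'm1.small')  # assumed flavor lookup
allowed = placement_limits.enforce_num_instances_and_flavor(
    ctxt,
    ctxt.project_id,  # unified limits scope quotas to the project
    flavor,
    is_bfvm=False,    # local root disk, so DISK_GB usage is counted
    min_count=1,
    max_count=10,     # number of servers requested
)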

View File

@@ -1243,6 +1243,26 @@ def _server_group_count_members_by_user_legacy(context, group, user_id):
def is_qfd_populated(context):
"""Check if user_id and queued_for_delete fields are populated.
This method is related to counting quota usage from placement. It is not
yet possible to count instances from placement, so in the meantime we can
use instance mappings for counting. This method is used to determine
whether the user_id and queued_for_delete columns are populated in the API
database's instance_mappings table. Instance mapping records are not
deleted from the database until the database is archived, so
queued_for_delete tells us whether or not we should count them for instance
quota usage. The user_id field enables us to scope instance quota usage to
a user (legacy quota).

Scoping instance quota to a user is only possible
when counting quota usage from placement is configured and unified limits
is not configured. When unified limits is configured, quotas are scoped
only to projects.

In the future when it is possible to count instance usage from placement,
this method will no longer be needed.
"""
global UID_QFD_POPULATED_CACHE_ALL
if not UID_QFD_POPULATED_CACHE_ALL:
LOG.debug('Checking whether user_id and queued_for_delete are '
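
(Reading aid, not part of the diff: the docstring above describes the gate
that decides whether instance usage can be counted from placement plus the
API database's instance_mappings table. A rough sketch of that check, with
the wrapper function invented for illustration:)

# Hypothetical sketch: count usage from placement/instance_mappings only when
# the operator opted in and the user_id / queued_for_delete columns are known
# to be populated; otherwise fall back to legacy counting across the cells.
import nova.conf
from nova import quota

CONF = nova.conf.CONF

def _should_count_from_placement(context):
    return (CONF.quota.count_usage_from_placement
            and quota.is_qfd_populated(context))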

View File

@@ -79,7 +79,7 @@ class TestGetUsage(test.NoDBTestCase):
self.flags(driver="nova.quota.NoopQuotaDriver", group="quota")
e = self.assertRaises(NotImplementedError, placement_limits._get_usage,
self.context, uuids.project, [])
self.assertEqual("unified limits is disabled", str(e))
self.assertEqual("Unified limits support is disabled", str(e))
@mock.patch.object(quota, "is_qfd_populated")
@mock.patch.object(objects.InstanceMappingList, "get_counts")