Tell oslo.limit how to count nova resources

A follow-on patch will use this code to enforce the limits; this patch
provides the integration with oslo.limit and a new internal nova API
that is able to enforce those limits.

The first part provides a callback that oslo.limit uses to count the
resources being used. We only count resources grouped by project_id.
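
The callback contract is small: oslo.limit hands back the project_id and
the list of limit names it is checking, and we return a dict of current
usage for each. A condensed sketch, mirroring the _get_enforcer helper
added in nova/limit/placement.py below:

    from oslo_limit import limit

    def _get_enforcer(context, project_id):
        def callback(project_id, resource_names):
            # e.g. returns {"servers": 2, "class:VCPU": 8, ...}
            return _get_usage(context, project_id, resource_names)
        return limit.Enforcer(callback)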

For counting servers, we make use of the instance mappings list in the
API database, just as the existing quota code does. While we do check
that the queued_for_delete data migration has been completed, we simply
error out if it has not, rather than attempting to fall back to any
other counting system. We hope that one day we can count this in
placement using consumer records, or similar.
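
Condensed, the server count is taken straight from the instance mappings
(the helper name here is illustrative; the real logic lives in _get_usage
in nova/limit/placement.py below):

    def _count_servers(context, project_id):
        if not quota.is_qfd_populated(context):
            # every instance mapping must have queued_for_delete populated
            raise ValueError("must first migrate instance mappings")
        mappings = objects.InstanceMappingList.get_counts(context,
                                                          project_id)
        return mappings['project']['instances']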

For counting all other resources, the limit must refer to usage of a
resource class being consumed in placement. This is similar to how the
existing quota code works today when it is configured to count usage
from placement. It is not restricted to RAM and VCPU; any resource
class known to placement can be used.
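
So a registered limit named "class:VCPU" is checked against placement's
VCPU usage, "class:CUSTOM_BAREMETAL" against CUSTOM_BAREMETAL usage, and
so on. Condensed from _get_usage below:

    usages = _get_placement_usages(context, project_id)
    # e.g. {"VCPU": 8, "MEMORY_MB": 4096, "CUSTOM_BAREMETAL": 2}
    for r_class in resource_classes:
        # placement omits classes with zero usage, so default to zero
        resource_counts["class:" + r_class] = usages.get(r_class, 0)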

The second part is the enforcement method, which keeps a similar
signature to the existing enforce_num_instances call that is used to
check quotas with the legacy quota system.
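
For example, the follow-on patch would call it roughly as:

    from nova.limit import placement as placement_limits

    max_allowed = placement_limits.enforce_num_instances_and_flavor(
        context, project_id, flavor, is_bfv, min_count, max_count)

It returns the largest count (up to max_count) that fits within the
limits, or raises TooManyInstances if even min_count does not fit.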

From the flavor we extract the resources the requested instances will
consume. This is considered the simplest first step that helps us
deliver Ironic limits alongside the existing RAM and VCPU limits. At a
later date, we would ideally be passed a more complete view of the
resources being requested from placement.
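
As an example of the deltas derived from a flavor (taken from the unit
tests below), a flavor with 10 VCPUs, 100 MB of RAM, a 5 GB root disk,
2 GB of ephemeral disk and no swap, booted twice, produces:

    {'servers': 2, 'class:VCPU': 20, 'class:MEMORY_MB': 200,
     'class:DISK_GB': 14}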

NOTE: given that the instance object does not exist when enforce is
called, we cannot simply pass the instance in here.

A [workarounds] option is also available for operators who need the
legacy quota usage behavior where VCPU = VCPU + PCPU.
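
That is, a deployment opting in to the legacy behavior would set:

    [quota]
    driver = nova.quota.UnifiedLimitsDriver

    [workarounds]
    unified_limits_count_pcpu_as_vcpu = True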

blueprint unified-limits-nova

Change-Id: I272b59b7bc8975bfd602640789f80d2d5f7ee698
@@ -383,6 +383,24 @@ facilitate a Fast-Forward upgrade where new control services are being started
before compute nodes have been able to update their service record. In an FFU,
the service records in the database will be more than one version old until
the compute nodes start up, but control services need to be online first.
"""),
    cfg.BoolOpt('unified_limits_count_pcpu_as_vcpu',
                default=False,
                help="""
When using unified limits, use VCPU + PCPU for VCPU quota usage.

If the deployment is configured to use unified limits via
``[quota]driver=nova.quota.UnifiedLimitsDriver``, by default VCPU resources are
counted independently from PCPU resources, consistent with how they are
represented in the placement service.

Legacy quota behavior counts PCPU as VCPU and returns the sum of VCPU + PCPU
usage as the usage count for VCPU. Operators relying on the aggregation of
VCPU and PCPU resource usage counts should set this option to True.

Related options:

* :oslo.config:option:`quota.driver`
"""),
]

@@ -0,0 +1,168 @@
# Copyright 2022 StackHPC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os_resource_classes as orc
from oslo_limit import exception as limit_exceptions
from oslo_limit import limit
from oslo_log import log as logging
import nova.conf
from nova import exception
from nova.limit import utils as limit_utils
from nova import objects
from nova import quota
from nova.scheduler.client import report
from nova.scheduler import utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
# Cache to avoid repopulating ksa state
PLACEMENT_CLIENT = None


def _get_placement_usages(context, project_id):
    global PLACEMENT_CLIENT
    if not PLACEMENT_CLIENT:
        PLACEMENT_CLIENT = report.SchedulerReportClient()
    return PLACEMENT_CLIENT.get_usages_counts_for_limits(context, project_id)


def _get_usage(context, project_id, resource_names):
    """Called by oslo_limit's enforcer"""
    if not limit_utils.use_unified_limits():
        raise NotImplementedError("unified limits is disabled")

    count_servers = False
    resource_classes = []

    for resource in resource_names:
        if resource == "servers":
            count_servers = True
            continue

        if not resource.startswith("class:"):
            raise ValueError("Unknown resource type: %s" % resource)

        # Temporarily strip resource class prefix as placement does not use
        # it. Example: limit resource 'class:VCPU' will be returned as 'VCPU'
        # from placement.
        r_class = resource.lstrip("class:")
        if r_class in orc.STANDARDS or orc.is_custom(r_class):
            resource_classes.append(r_class)
        else:
            raise ValueError("Unknown resource class: %s" % r_class)

    if not count_servers and len(resource_classes) == 0:
        raise ValueError("no resources to check")

    resource_counts = {}
    if count_servers:
        # TODO(melwitt): Change this to count servers from placement once
        # nova is using placement consumer types and is able to differentiate
        # between "instance" allocations vs "migration" allocations.
        if not quota.is_qfd_populated(context):
            LOG.error('Must migrate all instance mappings before using '
                      'unified limits')
            raise ValueError("must first migrate instance mappings")
        mappings = objects.InstanceMappingList.get_counts(context, project_id)
        resource_counts['servers'] = mappings['project']['instances']

    try:
        usages = _get_placement_usages(context, project_id)
    except exception.UsagesRetrievalFailed as e:
        msg = ("Failed to retrieve usages from placement while enforcing "
               "%s quota limits." % ", ".join(resource_names))
        LOG.error(msg + " Error: " + str(e))
        raise exception.UsagesRetrievalFailed(msg)

    # Use legacy behavior VCPU = VCPU + PCPU if configured.
    if CONF.workarounds.unified_limits_count_pcpu_as_vcpu:
        # If PCPU is in resource_classes, that means it was specified in the
        # flavor explicitly. In that case, we expect it to have its own limit
        # registered and we should not fold it into VCPU.
        if orc.PCPU in usages and orc.PCPU not in resource_classes:
            usages[orc.VCPU] = (usages.get(orc.VCPU, 0) +
                                usages.get(orc.PCPU, 0))

    for resource_class in resource_classes:
        # Need to add back resource class prefix that was stripped earlier
        resource_name = 'class:' + resource_class
        # Placement doesn't know about classes with zero usage
        # so default to zero to tell oslo.limit usage is zero
        resource_counts[resource_name] = usages.get(resource_class, 0)

    return resource_counts


def _get_deltas_by_flavor(flavor, is_bfv, count):
    if flavor is None:
        raise ValueError("flavor")
    if count < 0:
        raise ValueError("count")

    # NOTE(johngarbutt): this skips bfv, port, and cyborg resources
    # but it still gives us better checks than before unified limits
    # We need an instance in the DB to use the current is_bfv logic
    # which doesn't work well for instances that don't yet have a uuid
    deltas_from_flavor = utils.resources_for_limits(flavor, is_bfv)

    deltas = {"servers": count}
    for resource, amount in deltas_from_flavor.items():
        if amount != 0:
            deltas["class:%s" % resource] = amount * count
    return deltas


def _get_enforcer(context, project_id):
    # NOTE(johngarbutt) should we move context arg into oslo.limit?
    def callback(project_id, resource_names):
        return _get_usage(context, project_id, resource_names)

    return limit.Enforcer(callback)


def enforce_num_instances_and_flavor(context, project_id, flavor, is_bfvm,
                                     min_count, max_count, enforcer=None):
    """Return max instances possible, else raise TooManyInstances exception."""
    if not limit_utils.use_unified_limits():
        return max_count

    # Ensure the recursion will always complete
    if min_count < 0 or min_count > max_count:
        raise ValueError("invalid min_count")
    if max_count < 0:
        raise ValueError("invalid max_count")

    deltas = _get_deltas_by_flavor(flavor, is_bfvm, max_count)
    enforcer = _get_enforcer(context, project_id)
    try:
        enforcer.enforce(project_id, deltas)
    except limit_exceptions.ProjectOverLimit as e:
        # NOTE(johngarbutt) we can do better, but this is very simple
        LOG.debug("Limit check failed with count %s retrying with count %s",
                  max_count, max_count - 1)
        try:
            return enforce_num_instances_and_flavor(context, project_id,
                                                    flavor, is_bfvm, min_count,
                                                    max_count - 1,
                                                    enforcer=enforcer)
        except ValueError:
            # Copy the *original* exception message to a OverQuota to
            # propagate to the API layer
            raise exception.TooManyInstances(str(e))

    # no problems with max_count, so we return max count
    return max_count

@@ -1223,6 +1223,17 @@ def _server_group_count_members_by_user_legacy(context, group, user_id):
    return {'user': {'server_group_members': count}}


def is_qfd_populated(context):
    global UID_QFD_POPULATED_CACHE_ALL
    if not UID_QFD_POPULATED_CACHE_ALL:
        LOG.debug('Checking whether user_id and queued_for_delete are '
                  'populated for all projects')
        UID_QFD_POPULATED_CACHE_ALL = _user_id_queued_for_delete_populated(
            context)

    return UID_QFD_POPULATED_CACHE_ALL


def _server_group_count_members_by_user(context, group, user_id):
    """Get the count of server group members for a group by user.
@@ -1240,14 +1251,7 @@ def _server_group_count_members_by_user(context, group, user_id):
# So, we check whether user_id/queued_for_delete is populated for all
# records and cache the result to prevent unnecessary checking once the
# data migration has been completed.
global UID_QFD_POPULATED_CACHE_ALL
if not UID_QFD_POPULATED_CACHE_ALL:
LOG.debug('Checking whether user_id and queued_for_delete are '
'populated for all projects')
UID_QFD_POPULATED_CACHE_ALL = _user_id_queued_for_delete_populated(
context)
if UID_QFD_POPULATED_CACHE_ALL:
if is_qfd_populated(context):
count = objects.InstanceMappingList.get_count_by_uuids_and_user(
context, group.members, user_id)
return {'user': {'server_group_members': count}}

@@ -2486,6 +2486,30 @@ class SchedulerReportClient(object):
        return self.get(url, version=GET_USAGES_VERSION,
                        global_request_id=context.global_id)

    def get_usages_counts_for_limits(self, context, project_id):
        """Get the usages counts for the purpose of enforcing unified limits

        The response from placement will not contain a resource class if
        there is no usage. i.e. if there is no usage, you get an empty dict.

        Note resources are counted as placement sees them, as such note
        that VCPUs and PCPUs will be counted independently.

        :param context: The request context
        :param project_id: The project_id to count across
        :return: A dict containing the project-scoped counts, for example:
                 {'VCPU': 2, 'MEMORY_MB': 1024}
        :raises: `exception.UsagesRetrievalFailed` if a placement API call
                 fails
        """
        LOG.debug('Getting usages for project_id %s from placement',
                  project_id)
        resp = self._get_usages(context, project_id)
        if resp:
            data = resp.json()
            return data['usages']
        self._handle_usages_error_from_placement(resp, project_id)

    def get_usages_counts_for_quota(self, context, project_id, user_id=None):
        """Get the usages counts for the purpose of counting quota usage.

@@ -615,6 +615,10 @@ def resources_from_flavor(instance, flavor):
    """
    is_bfv = compute_utils.is_volume_backed_instance(instance._context,
                                                     instance)
    return _get_resources(flavor, is_bfv)


def _get_resources(flavor, is_bfv):
    # create a fake RequestSpec as a wrapper to the caller
    req_spec = objects.RequestSpec(flavor=flavor, is_bfv=is_bfv)
@@ -628,6 +632,11 @@ def resources_from_flavor(instance, flavor):
    return res_req.merged_resources()


def resources_for_limits(flavor, is_bfv):
    """Work out what unified limits may be exceeded."""
    return _get_resources(flavor, is_bfv)


def resources_from_request_spec(ctxt, spec_obj, host_manager,
                                enable_pinning_translate=True):
    """Given a RequestSpec object, returns a ResourceRequest of the resources,

@@ -0,0 +1,311 @@
# Copyright 2022 StackHPC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_limit import exception as limit_exceptions
from oslo_limit import limit
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
from nova import exception
from nova.limit import placement as placement_limits
from nova.limit import utils as limit_utils
from nova import objects
from nova import quota
from nova.scheduler.client import report
from nova import test
CONF = cfg.CONF
class TestGetUsage(test.NoDBTestCase):
def setUp(self):
super(TestGetUsage, self).setUp()
self.flags(driver=limit_utils.UNIFIED_LIMITS_DRIVER, group="quota")
self.context = context.RequestContext()
@mock.patch.object(quota, "is_qfd_populated")
@mock.patch.object(objects.InstanceMappingList, "get_counts")
@mock.patch.object(report.SchedulerReportClient,
"get_usages_counts_for_limits")
def test_get_usage(self, mock_placement, mock_inst, mock_qfd):
resources = ["servers", "class:VCPU", "class:MEMORY_MB",
"class:CUSTOM_BAREMETAL"]
mock_qfd.return_value = True
mock_placement.return_value = {"VCPU": 1, "CUSTOM_BAREMETAL": 2}
mock_inst.return_value = {"project": {"instances": 42}}
usage = placement_limits._get_usage(self.context, uuids.project,
resources)
expected = {'class:MEMORY_MB': 0, 'class:VCPU': 1, 'servers': 42,
'class:CUSTOM_BAREMETAL': 2}
self.assertDictEqual(expected, usage)
def test_get_usage_bad_resources(self):
bad_resource = ["unknown_resource"]
self.assertRaises(ValueError, placement_limits._get_usage,
self.context, uuids.project, bad_resource)
bad_class = ["class:UNKNOWN_CLASS"]
self.assertRaises(ValueError, placement_limits._get_usage,
self.context, uuids.project, bad_class)
no_resources = []
self.assertRaises(ValueError, placement_limits._get_usage,
self.context, uuids.project, no_resources)
@mock.patch.object(quota, "is_qfd_populated")
def test_get_usage_bad_qfd(self, mock_qfd):
mock_qfd.return_value = False
resources = ["servers"]
e = self.assertRaises(ValueError, placement_limits._get_usage,
self.context, uuids.project, resources)
self.assertEqual("must first migrate instance mappings", str(e))
def test_get_usage_unified_limits_disabled(self):
self.flags(driver="nova.quota.NoopQuotaDriver", group="quota")
e = self.assertRaises(NotImplementedError, placement_limits._get_usage,
self.context, uuids.project, [])
self.assertEqual("unified limits is disabled", str(e))
@mock.patch.object(quota, "is_qfd_populated")
@mock.patch.object(objects.InstanceMappingList, "get_counts")
@mock.patch.object(report.SchedulerReportClient,
'get_usages_counts_for_limits')
def test_get_usage_placement_fail(self, mock_placement, mock_inst,
mock_qfd):
resources = ["servers", "class:VCPU", "class:MEMORY_MB",
"class:CUSTOM_BAREMETAL"]
mock_qfd.return_value = True
mock_placement.side_effect = exception.UsagesRetrievalFailed(
project_id=uuids.project, user_id=uuids.user)
mock_inst.return_value = {"project": {"instances": 42}}
e = self.assertRaises(
exception.UsagesRetrievalFailed, placement_limits._get_usage,
self.context, uuids.project, resources)
expected = ("Failed to retrieve usages from placement while enforcing "
"%s quota limits." % ", ".join(resources))
self.assertEqual(expected, str(e))
@mock.patch.object(quota, "is_qfd_populated")
@mock.patch.object(objects.InstanceMappingList, "get_counts")
@mock.patch.object(report.SchedulerReportClient,
"get_usages_counts_for_limits")
def test_get_usage_pcpu_as_vcpu(self, mock_placement, mock_inst, mock_qfd):
# Test that when configured, PCPU count is merged into VCPU count when
# appropriate.
self.flags(unified_limits_count_pcpu_as_vcpu=True, group="workarounds")
mock_qfd.return_value = True
mock_inst.return_value = {"project": {"instances": 42}}
# PCPU was not specified in the flavor but usage was found in
# placement. PCPU count should be merged into VCPU count.
resources = ["servers", "class:VCPU", "class:MEMORY_MB"]
mock_placement.return_value = {"VCPU": 1, "PCPU": 2}
usage = placement_limits._get_usage(self.context, uuids.project,
resources)
expected = {'class:MEMORY_MB': 0, 'class:VCPU': 3, 'servers': 42}
self.assertDictEqual(expected, usage)
# PCPU was not specified in the flavor and usage was found in placement
# and there was no VCPU usage in placement. The PCPU count should be
# returned as VCPU count.
resources = ["servers", "class:VCPU", "class:MEMORY_MB"]
mock_placement.return_value = {"PCPU": 1}
usage = placement_limits._get_usage(self.context, uuids.project,
resources)
expected = {'class:MEMORY_MB': 0, 'class:VCPU': 1, 'servers': 42}
self.assertDictEqual(expected, usage)
# PCPU was not specified in the flavor but only VCPU usage was found in
# placement.
resources = ["servers", "class:VCPU", "class:MEMORY_MB"]
mock_placement.return_value = {"VCPU": 1}
usage = placement_limits._get_usage(self.context, uuids.project,
resources)
expected = {'class:MEMORY_MB': 0, 'class:VCPU': 1, 'servers': 42}
self.assertDictEqual(expected, usage)
# PCPU was specified in the flavor, so the counts should be separate.
resources = ["servers", "class:VCPU", "class:MEMORY_MB", "class:PCPU"]
mock_placement.return_value = {"VCPU": 1, "PCPU": 2}
usage = placement_limits._get_usage(self.context, uuids.project,
resources)
expected = {'class:MEMORY_MB': 0, 'class:VCPU': 1, 'servers': 42,
'class:PCPU': 2}
self.assertDictEqual(expected, usage)
class TestGetDeltas(test.NoDBTestCase):
def test_get_deltas(self):
flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
ephemeral_gb=2, root_gb=5)
deltas = placement_limits._get_deltas_by_flavor(flavor, False, 2)
expected = {'servers': 2,
'class:VCPU': 20, 'class:MEMORY_MB': 200,
'class:DISK_GB': 14}
self.assertDictEqual(expected, deltas)
def test_get_deltas_recheck(self):
flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
ephemeral_gb=2, root_gb=5)
deltas = placement_limits._get_deltas_by_flavor(flavor, False, 0)
expected = {'servers': 0,
'class:VCPU': 0, 'class:MEMORY_MB': 0,
'class:DISK_GB': 0}
self.assertDictEqual(expected, deltas)
def test_get_deltas_check_baremetal(self):
extra_specs = {"resources:VCPU": 0, "resources:MEMORY_MB": 0,
"resources:DISK_GB": 0, "resources:CUSTOM_BAREMETAL": 1}
flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
ephemeral_gb=2, root_gb=5,
extra_specs=extra_specs)
deltas = placement_limits._get_deltas_by_flavor(flavor, True, 1)
expected = {'servers': 1, 'class:CUSTOM_BAREMETAL': 1}
self.assertDictEqual(expected, deltas)
def test_get_deltas_check_bfv(self):
flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
ephemeral_gb=2, root_gb=5)
deltas = placement_limits._get_deltas_by_flavor(flavor, True, 2)
expected = {'servers': 2,
'class:VCPU': 20, 'class:MEMORY_MB': 200,
'class:DISK_GB': 4}
self.assertDictEqual(expected, deltas)
class TestEnforce(test.NoDBTestCase):
def setUp(self):
super(TestEnforce, self).setUp()
self.context = context.RequestContext()
self.flags(driver=limit_utils.UNIFIED_LIMITS_DRIVER, group="quota")
placement_limits._ENFORCER = mock.Mock(limit.Enforcer)
self.flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
ephemeral_gb=2, root_gb=5)
def test_enforce_num_instances_and_flavor_disabled(self):
self.flags(driver="nova.quota.NoopQuotaDriver", group="quota")
count = placement_limits.enforce_num_instances_and_flavor(
self.context, uuids.project_id, "flavor", False, 0, 42)
self.assertEqual(42, count)
@mock.patch('oslo_limit.limit.Enforcer')
def test_enforce_num_instances_and_flavor(self, mock_limit):
mock_enforcer = mock.MagicMock()
mock_limit.return_value = mock_enforcer
count = placement_limits.enforce_num_instances_and_flavor(
self.context, uuids.project_id, self.flavor, False, 0, 2)
self.assertEqual(2, count)
mock_limit.assert_called_once_with(mock.ANY)
mock_enforcer.enforce.assert_called_once_with(
uuids.project_id,
{'servers': 2, 'class:VCPU': 20, 'class:MEMORY_MB': 200,
'class:DISK_GB': 14})
@mock.patch('oslo_limit.limit.Enforcer')
def test_enforce_num_instances_and_flavor_recheck(self, mock_limit):
mock_enforcer = mock.MagicMock()
mock_limit.return_value = mock_enforcer
count = placement_limits.enforce_num_instances_and_flavor(
self.context, uuids.project_id, self.flavor, False, 0, 0)
self.assertEqual(0, count)
mock_limit.assert_called_once_with(mock.ANY)
mock_enforcer.enforce.assert_called_once_with(
uuids.project_id,
{'servers': 0, 'class:VCPU': 0, 'class:MEMORY_MB': 0,
'class:DISK_GB': 0})
@mock.patch('oslo_limit.limit.Enforcer')
def test_enforce_num_instances_and_flavor_retry(self, mock_limit):
mock_enforcer = mock.MagicMock()
mock_limit.return_value = mock_enforcer
over_limit_info_list = [
limit_exceptions.OverLimitInfo("class:VCPU", 12, 0, 30)
]
mock_enforcer.enforce.side_effect = [
limit_exceptions.ProjectOverLimit(
uuids.project_id, over_limit_info_list),
None]
count = placement_limits.enforce_num_instances_and_flavor(
self.context, uuids.project_id, self.flavor, True, 0, 3)
self.assertEqual(2, count)
self.assertEqual(2, mock_enforcer.enforce.call_count)
mock_enforcer.enforce.assert_called_with(
uuids.project_id,
{'servers': 2, 'class:VCPU': 20, 'class:MEMORY_MB': 200,
'class:DISK_GB': 4})
@mock.patch('oslo_limit.limit.Enforcer')
def test_enforce_num_instances_and_flavor_fails(self, mock_limit):
mock_enforcer = mock.MagicMock()
mock_limit.return_value = mock_enforcer
over_limit_info_list = [
limit_exceptions.OverLimitInfo("class:VCPU", 12, 0, 20),
limit_exceptions.OverLimitInfo("servers", 2, 1, 2)
]
expected = limit_exceptions.ProjectOverLimit(uuids.project_id,
over_limit_info_list)
mock_enforcer.enforce.side_effect = expected
# Verify that the oslo.limit ProjectOverLimit gets translated to a
# TooManyInstances that the API knows how to handle
e = self.assertRaises(
exception.TooManyInstances,
placement_limits.enforce_num_instances_and_flavor, self.context,
uuids.project_id, self.flavor, True, 2, 4)
self.assertEqual(str(expected), str(e))
self.assertEqual(3, mock_enforcer.enforce.call_count)
@mock.patch('oslo_limit.limit.Enforcer')
def test_enforce_num_instances_and_flavor_placement_fail(self, mock_limit):
mock_enforcer = mock.MagicMock()
mock_limit.return_value = mock_enforcer
mock_enforcer.enforce.side_effect = exception.UsagesRetrievalFailed(
'Failed to retrieve usages')
e = self.assertRaises(
exception.UsagesRetrievalFailed,
placement_limits.enforce_num_instances_and_flavor, self.context,
uuids.project, self.flavor, True, 0, 5)
expected = str(mock_enforcer.enforce.side_effect)
self.assertEqual(expected, str(e))

@@ -4637,3 +4637,31 @@ class TestUsages(SchedulerReportClientTestCase):
expected = {'project': {'cores': 4, 'ram': 0},
'user': {'cores': 4, 'ram': 0}}
self.assertDictEqual(expected, counts)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.get')
def test_get_usages_counts_for_limits(self, mock_get):
fake_responses = fake_requests.FakeResponse(
200,
content=jsonutils.dumps({'usages': {orc.VCPU: 2, orc.PCPU: 2}}))
mock_get.return_value = fake_responses
counts = self.client.get_usages_counts_for_limits(
self.context, 'fake-project')
expected = {orc.VCPU: 2, orc.PCPU: 2}
self.assertDictEqual(expected, counts)
self.assertEqual(1, mock_get.call_count)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.get')
def test_get_usages_counts_for_limits_fails(self, mock_get):
fake_failure_response = fake_requests.FakeResponse(500)
mock_get.side_effect = [ks_exc.ConnectFailure, fake_failure_response]
e = self.assertRaises(exception.UsagesRetrievalFailed,
self.client.get_usages_counts_for_limits,
self.context, 'fake-project')
expected = "Failed to retrieve usages for project 'fake-project' " \
"and user 'N/A'."
self.assertEqual(expected, str(e))
self.assertEqual(2, mock_get.call_count)
