Merge "RequestWideParams and RequestWideSearchContext"

Zuul · 2019-06-26 18:43:14 +00:00 · committed by Gerrit Code Review
commit 0e24af410a
7 changed files with 214 additions and 165 deletions

@@ -270,21 +270,13 @@ def list_allocation_candidates(req):
get_schema = schema.GET_SCHEMA_1_16
util.validate_query_params(req, get_schema)
requests = lib.RequestGroup.dict_from_request(req)
limit = req.GET.getall('limit')
# JSONschema has already confirmed that limit has the form
# of an integer.
if limit:
limit = int(limit[0])
groups = lib.RequestGroup.dict_from_request(req)
rqparams = lib.RequestWideParams.from_request(req)
group_policy = req.GET.getall('group_policy') or None
# Schema ensures we get either "none" or "isolate"
if group_policy:
group_policy = group_policy[0]
else:
if not rqparams.group_policy:
# group_policy is required if more than one numbered request group was
# specified.
if len([rg for rg in requests.values() if rg.use_same_provider]) > 1:
if len([rg for rg in groups.values() if rg.use_same_provider]) > 1:
raise webob.exc.HTTPBadRequest(
'The "group_policy" parameter is required when specifying '
'more than one "resources{N}" parameter.')
@@ -294,8 +286,7 @@ def list_allocation_candidates(req):
try:
cands = ac_obj.AllocationCandidates.get_by_requests(
context, requests, limit=limit, group_policy=group_policy,
nested_aware=nested_aware)
context, groups, rqparams, nested_aware=nested_aware)
except exception.ResourceClassNotFound as exc:
raise webob.exc.HTTPBadRequest(
'Invalid resource class in resources parameter: %(error)s' %
@@ -304,7 +295,7 @@ def list_allocation_candidates(req):
raise webob.exc.HTTPBadRequest(six.text_type(exc))
response = req.response
trx_cands = _transform_allocation_candidates(cands, requests, want_version)
trx_cands = _transform_allocation_candidates(cands, groups, want_version)
json_data = jsonutils.dumps(trx_cands)
response.body = encodeutils.to_utf8(json_data)
response.content_type = 'application/json'
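Taken together, the hunks above shrink the handler: query-string parsing moves into lib.RequestWideParams, and only the cross-group validation stays behind. A minimal, self-contained sketch of that remaining check (this RequestGroup is a stand-in for illustration, not the placement class):

    class RequestGroup(object):
        def __init__(self, use_same_provider=True):
            self.use_same_provider = use_same_provider

    def group_policy_required(groups, group_policy):
        # Policy is only required when more than one numbered
        # (use_same_provider=True) group is present and none was supplied.
        if group_policy:
            return False
        return len([rg for rg in groups.values() if rg.use_same_provider]) > 1

    groups = {'1': RequestGroup(), '2': RequestGroup()}
    assert group_policy_required(groups, None)
    assert not group_policy_required(groups, 'isolate')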

@@ -272,3 +272,45 @@ class RequestGroup(object):
cls._fix_forbidden(by_suffix)
return by_suffix
class RequestWideParams(object):
"""GET /allocation_candidates params that apply to the request as a whole.
This is in contrast with individual request groups (list of RequestGroup
above).
"""
def __init__(self, limit=None, group_policy=None):
"""Create a RequestWideParams.
:param limit: An integer, N, representing the maximum number of
allocation candidates to return. If
CONF.placement.randomize_allocation_candidates is True this
will be a random sampling of N of the available results. If
False then the first N results, in whatever order the database
picked them, will be returned. In either case if there are
fewer than N total results, all the results will be returned.
:param group_policy: String indicating how RequestGroups with
use_same_provider=True should interact with each other. If the
value is "isolate", we will filter out allocation requests
where any such RequestGroups are satisfied by the same RP.
"""
self.limit = limit
self.group_policy = group_policy
@classmethod
def from_request(cls, req):
limit = req.GET.getall('limit')
# JSONschema has already confirmed that limit has the form
# of an integer.
if limit:
limit = int(limit[0])
group_policy = req.GET.getall('group_policy') or None
# Schema ensures we get either "none" or "isolate"
if group_policy:
group_policy = group_policy[0]
return cls(
limit=limit,
group_policy=group_policy)
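Since from_request only touches req.GET, the new class can be exercised directly with a bare WebOb request; a quick sketch (the Request.blank construction is illustrative only, assuming webob is installed):

    import webob

    from placement import lib as placement_lib

    req = webob.Request.blank(
        '/allocation_candidates?limit=5&group_policy=isolate')
    rqparams = placement_lib.RequestWideParams.from_request(req)
    assert rqparams.limit == 5
    assert rqparams.group_policy == 'isolate'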

@@ -13,7 +13,6 @@
import collections
import copy
import itertools
import random
import os_traits
from oslo_log import log as logging
@@ -55,8 +54,7 @@ class AllocationCandidates(object):
self.provider_summaries = provider_summaries
@classmethod
def get_by_requests(cls, context, requests, limit=None, group_policy=None,
nested_aware=True):
def get_by_requests(cls, context, groups, rqparams, nested_aware=True):
"""Returns an AllocationCandidates object containing all resource
providers matching a set of supplied resource constraints, with a set
of allocation requests constructed from that list of resource
@@ -64,21 +62,9 @@
context.config) is True (default is False) then the order of the
allocation requests will be randomized.
:param context: Nova RequestContext.
:param requests: Dict, keyed by suffix, of placement.lib.RequestGroup
:param limit: An integer, N, representing the maximum number of
allocation candidates to return. If
CONF.placement.randomize_allocation_candidates is True
this will be a random sampling of N of the available
results. If False then the first N results, in whatever
order the database picked them, will be returned. In
either case if there are fewer than N total results,
all the results will be returned.
:param group_policy: String indicating how RequestGroups with
use_same_provider=True should interact with each
other. If the value is "isolate", we will filter
out allocation requests where any such
RequestGroups are satisfied by the same RP.
:param context: placement.context.RequestContext object.
:param groups: Dict, keyed by suffix, of placement.lib.RequestGroup
:param rqparams: A RequestWideParams.
:param nested_aware: If False, we are blind to nested architecture and
can't pick resources from multiple providers even
if they come from the same tree.
@@ -87,8 +73,7 @@ class AllocationCandidates(object):
according to `limit`.
"""
alloc_reqs, provider_summaries = cls._get_by_requests(
context, requests, limit=limit, group_policy=group_policy,
nested_aware=nested_aware)
context, groups, rqparams, nested_aware=nested_aware)
return cls(
allocation_requests=alloc_reqs,
provider_summaries=provider_summaries,
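Callers now hand get_by_requests the groups dict plus one RequestWideParams instead of loose limit/group_policy kwargs. A sketch of the new call shape (ctx stands for a placement RequestContext bound to a database, so the actual call is left as a comment):

    from placement import lib as placement_lib

    groups = {'': placement_lib.RequestGroup(
        use_same_provider=False, resources={'VCPU': 1})}
    rqparams = placement_lib.RequestWideParams(limit=2, group_policy=None)
    # With a live context:
    #     ac_obj.AllocationCandidates.get_by_requests(
    #         ctx, groups, rqparams, nested_aware=True)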
@@ -133,31 +118,31 @@ class AllocationCandidates(object):
@classmethod
@db_api.placement_context_manager.reader
def _get_by_requests(cls, context, requests, limit=None,
group_policy=None, nested_aware=True):
has_trees = res_ctx.has_provider_trees(context)
def _get_by_requests(cls, context, groups, rqparams, nested_aware=True):
rw_ctx = res_ctx.RequestWideSearchContext(
context, rqparams, nested_aware)
sharing = res_ctx.get_sharing_providers(context)
candidates = {}
for suffix, request in requests.items():
for suffix, group in groups.items():
try:
rg_ctx = res_ctx.RequestGroupSearchContext(
context, request, has_trees, sharing, suffix)
context, group, rw_ctx.has_trees, sharing, suffix)
except exception.ResourceProviderNotFound:
return [], []
alloc_reqs, summaries = cls._get_by_one_request(rg_ctx)
LOG.debug("%s (suffix '%s') returned %d matches",
str(request), str(suffix), len(alloc_reqs))
str(group), str(suffix), len(alloc_reqs))
if not alloc_reqs:
# Shortcut: If any one request resulted in no candidates, the
# Shortcut: If any one group resulted in no candidates, the
# whole operation is shot.
return [], []
# Mark each allocation request according to whether its
# corresponding RequestGroup required it to be restricted to a
# single provider. We'll need this later to evaluate group_policy.
for areq in alloc_reqs:
areq.use_same_provider = request.use_same_provider
areq.use_same_provider = group.use_same_provider
candidates[suffix] = alloc_reqs, summaries
# At this point, each (alloc_requests, summary_obj) in `candidates` is
@@ -166,49 +151,12 @@ class AllocationCandidates(object):
# `candidates` dict is guaranteed to contain entries for all suffixes,
# or we would have short-circuited above.
alloc_request_objs, summary_objs = _merge_candidates(
candidates, group_policy=group_policy)
candidates, rw_ctx)
if not nested_aware and has_trees:
alloc_request_objs, summary_objs = _exclude_nested_providers(
alloc_request_objs, summary_objs)
alloc_request_objs, summary_objs = rw_ctx.exclude_nested_providers(
alloc_request_objs, summary_objs)
return cls._limit_results(context, alloc_request_objs, summary_objs,
limit)
@staticmethod
def _limit_results(context, alloc_request_objs, summary_objs, limit):
# Limit the number of allocation request objects. We do this after
# creating all of them so that we can do a random slice without
# needing to mess with the complex sql above or add additional
# columns to the DB.
if limit and limit < len(alloc_request_objs):
if context.config.placement.randomize_allocation_candidates:
alloc_request_objs = random.sample(alloc_request_objs, limit)
else:
alloc_request_objs = alloc_request_objs[:limit]
# Limit summaries to only those mentioned in the allocation reqs.
kept_summary_objs = []
alloc_req_root_uuids = set()
# Extract root resource provider uuids from the resource requests.
for aro in alloc_request_objs:
for arr in aro.resource_requests:
alloc_req_root_uuids.add(
arr.resource_provider.root_provider_uuid)
for summary in summary_objs:
rp_root_uuid = summary.resource_provider.root_provider_uuid
# Skip a summary if we are limiting and haven't selected an
# allocation request that uses the resource provider.
if rp_root_uuid not in alloc_req_root_uuids:
continue
kept_summary_objs.append(summary)
summary_objs = kept_summary_objs
LOG.debug('Limiting results yields %d allocation requests and '
'%d provider summaries', len(alloc_request_objs),
len(summary_objs))
elif context.config.placement.randomize_allocation_candidates:
random.shuffle(alloc_request_objs)
return alloc_request_objs, summary_objs
return rw_ctx.limit_results(alloc_request_objs, summary_objs)
class AllocationRequest(object):
@@ -749,7 +697,8 @@ def _exceeds_capacity(areq, psum_res_by_rp_rc):
return False
def _merge_candidates(candidates, group_policy=None):
# TODO(efried): Move _merge_candidates to rw_ctx?
def _merge_candidates(candidates, rw_ctx):
"""Given a dict, keyed by RequestGroup suffix, of tuples of
(allocation_requests, provider_summaries), produce a single tuple of
(allocation_requests, provider_summaries) that appropriately incorporates
@@ -764,10 +713,7 @@ def _merge_candidates(candidates, group_policy=None):
:param candidates: A dict, keyed by integer suffix or '', of tuples of
(allocation_requests, provider_summaries) to be merged.
:param group_policy: String indicating how RequestGroups should interact
with each other. If the value is "isolate", we will filter out
candidates where AllocationRequests that came from RequestGroups
keyed by nonempty suffixes are satisfied by the same provider.
:param rw_ctx: RequestWideSearchContext.
:return: A tuple of (allocation_requests, provider_summaries).
"""
# Build a dict, keyed by anchor root provider UUID, of dicts, keyed by
@@ -836,8 +782,9 @@ def _merge_candidates(candidates, group_policy=None):
# At this point, each AllocationRequest in areq_list is still
# marked as use_same_provider. This is necessary to filter by group
# policy, which enforces how these interact with each other.
# TODO(efried): Move _satisfies_group_policy to rw_ctx?
if not _satisfies_group_policy(
areq_list, group_policy, num_granular_groups):
areq_list, rw_ctx.group_policy, num_granular_groups):
continue
# Now we go from this (where 'arr' is AllocationRequestResource):
# [ areq__B(arrX, arrY, arrZ),
@@ -856,6 +803,7 @@ def _merge_candidates(candidates, group_policy=None):
# *independent* queries, it's possible that the combined result
# now exceeds capacity where amounts of the same RP+RC were
# folded together. So do a final capacity check/filter.
# TODO(efried): Move _exceeds_capacity to rw_ctx?
if _exceeds_capacity(areq, psum_res_by_rp_rc):
continue
areqs.add(areq)
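For orientation, the "isolate" behavior that _satisfies_group_policy enforces can be pictured with stand-in objects (a simplified model of the described rule, not the actual implementation):

    from types import SimpleNamespace as NS

    def isolate_ok(areqs):
        # Under group_policy='isolate', no two use_same_provider allocation
        # requests in a candidate may be satisfied by the same provider.
        rps = [a.rp for a in areqs if a.use_same_provider]
        return len(rps) == len(set(rps))

    assert isolate_ok([NS(rp='rp1', use_same_provider=True),
                       NS(rp='rp2', use_same_provider=True)])
    assert not isolate_ok([NS(rp='rp1', use_same_provider=True),
                           NS(rp='rp1', use_same_provider=True)])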
@@ -929,40 +877,3 @@ def _satisfies_group_policy(areqs, group_policy, num_granular_groups):
'request (%d): %s',
num_granular_groups_in_areqs, num_granular_groups, str(areqs))
return False
def _exclude_nested_providers(allocation_requests, provider_summaries):
"""Exclude allocation requests and provider summaries for old microversions
if they involve more than one provider from the same tree.
"""
# Build a temporary dict, keyed by root RP UUID of sets of UUIDs of all RPs
# in that tree.
tree_rps_by_root = collections.defaultdict(set)
for ps in provider_summaries:
rp_uuid = ps.resource_provider.uuid
root_uuid = ps.resource_provider.root_provider_uuid
tree_rps_by_root[root_uuid].add(rp_uuid)
# We use this to get a list of sets of providers in each tree
tree_sets = list(tree_rps_by_root.values())
for a_req in allocation_requests[:]:
alloc_rp_uuids = set([
arr.resource_provider.uuid for arr in a_req.resource_requests])
# If more than one allocation is provided by the same tree, kill
# that allocation request.
if any(len(tree_set & alloc_rp_uuids) > 1 for tree_set in tree_sets):
allocation_requests.remove(a_req)
# Exclude eliminated providers from the provider summaries.
all_rp_uuids = set()
for a_req in allocation_requests:
all_rp_uuids |= set(
arr.resource_provider.uuid for arr in a_req.resource_requests)
for ps in provider_summaries[:]:
if ps.resource_provider.uuid not in all_rp_uuids:
provider_summaries.remove(ps)
LOG.debug('Excluding nested providers yields %d allocation requests and '
'%d provider summaries', len(allocation_requests),
len(provider_summaries))
return allocation_requests, provider_summaries

@@ -14,6 +14,7 @@
import collections
import os_traits
from oslo_log import log as logging
import random
import sqlalchemy as sa
from sqlalchemy import sql
@@ -48,7 +49,7 @@ class RequestGroupSearchContext(object):
"""An adapter object that represents the search for allocation candidates
for a single request group.
"""
def __init__(self, context, request, has_trees, sharing, suffix=''):
def __init__(self, context, group, has_trees, sharing, suffix=''):
"""Initializes the object retrieving and caching matching providers
for each conditions like resource and aggregates from database.
@@ -65,16 +66,16 @@ class RequestGroupSearchContext(object):
# resource class being requested by the group.
self.resources = {
rc_cache.RC_CACHE.id_from_string(key): value
for key, value in request.resources.items()
for key, value in group.resources.items()
}
# A list of lists of aggregate UUIDs that the providers matching for
# that request group must be members of
self.member_of = request.member_of
self.member_of = group.member_of
# A list of aggregate UUIDs that the providers matching for
# that request group must not be members of
self.forbidden_aggs = request.forbidden_aggs
self.forbidden_aggs = group.forbidden_aggs
# A set of provider ids that matches the requested positive aggregates
self.rps_in_aggs = set()
@@ -88,22 +89,22 @@ class RequestGroupSearchContext(object):
# satisfied by a single resource provider. If False, represents a
# request for resources in any resource provider in the same tree,
# or a sharing provider.
self.use_same_provider = request.use_same_provider
self.use_same_provider = group.use_same_provider
# maps the trait name to the trait internal ID
self.required_trait_map = {}
self.forbidden_trait_map = {}
for trait_map, traits in (
(self.required_trait_map, request.required_traits),
(self.forbidden_trait_map, request.forbidden_traits)):
(self.required_trait_map, group.required_traits),
(self.forbidden_trait_map, group.forbidden_traits)):
if traits:
trait_map.update(trait_obj.ids_from_names(context, traits))
# Internal id of a root provider. If provided, this RequestGroup must
# be satisfied by resource provider(s) under the root provider.
self.tree_root_id = None
if request.in_tree:
tree_ids = provider_ids_from_uuid(context, request.in_tree)
if group.in_tree:
tree_ids = provider_ids_from_uuid(context, group.in_tree)
if tree_ids is None:
raise exception.ResourceProviderNotFound()
self.tree_root_id = tree_ids.root_id
@@ -160,6 +161,101 @@ class RequestGroupSearchContext(object):
return self._rps_with_resource.get(rc_id)
class RequestWideSearchContext(object):
"""An adapter object that represents the search for allocation candidates
for request-wide parameters.
"""
def __init__(self, context, rqparams, nested_aware):
"""Create a RequestWideSearchContext.
:param context: placement.context.RequestContext object
:param rqparams: A RequestWideParams.
:param nested_aware: Boolean, True if we are at a microversion that
supports trees; False otherwise.
"""
self._ctx = context
self._limit = rqparams.limit
self.group_policy = rqparams.group_policy
self._nested_aware = nested_aware
self.has_trees = _has_provider_trees(context)
def exclude_nested_providers(
self, allocation_requests, provider_summaries):
"""Exclude allocation requests and provider summaries for old
microversions if they involve more than one provider from the same
tree.
"""
if self._nested_aware or not self.has_trees:
return allocation_requests, provider_summaries
# Build a temporary dict, keyed by root RP UUID of sets of UUIDs of all
# RPs in that tree.
tree_rps_by_root = collections.defaultdict(set)
for ps in provider_summaries:
rp_uuid = ps.resource_provider.uuid
root_uuid = ps.resource_provider.root_provider_uuid
tree_rps_by_root[root_uuid].add(rp_uuid)
# We use this to get a list of sets of providers in each tree
tree_sets = list(tree_rps_by_root.values())
for a_req in allocation_requests[:]:
alloc_rp_uuids = set([
arr.resource_provider.uuid for arr in a_req.resource_requests])
# If more than one allocation is provided by the same tree, kill
# that allocation request.
if any(len(tree_set & alloc_rp_uuids) > 1 for tree_set in
tree_sets):
allocation_requests.remove(a_req)
# Exclude eliminated providers from the provider summaries.
all_rp_uuids = set()
for a_req in allocation_requests:
all_rp_uuids |= set(
arr.resource_provider.uuid for arr in a_req.resource_requests)
for ps in provider_summaries[:]:
if ps.resource_provider.uuid not in all_rp_uuids:
provider_summaries.remove(ps)
LOG.debug(
'Excluding nested providers yields %d allocation requests and '
'%d provider summaries', len(allocation_requests),
len(provider_summaries))
return allocation_requests, provider_summaries
def limit_results(self, alloc_request_objs, summary_objs):
# Limit the number of allocation request objects. We do this after
# creating all of them so that we can do a random slice without
# needing to mess with complex sql or add additional columns to the DB.
if self._limit and self._limit < len(alloc_request_objs):
if self._ctx.config.placement.randomize_allocation_candidates:
alloc_request_objs = random.sample(
alloc_request_objs, self._limit)
else:
alloc_request_objs = alloc_request_objs[:self._limit]
# Limit summaries to only those mentioned in the allocation reqs.
kept_summary_objs = []
alloc_req_root_uuids = set()
# Extract root resource provider uuids from the resource requests.
for aro in alloc_request_objs:
for arr in aro.resource_requests:
alloc_req_root_uuids.add(
arr.resource_provider.root_provider_uuid)
for summary in summary_objs:
rp_root_uuid = summary.resource_provider.root_provider_uuid
# Skip a summary if we are limiting and haven't selected an
# allocation request that uses the resource provider.
if rp_root_uuid not in alloc_req_root_uuids:
continue
kept_summary_objs.append(summary)
summary_objs = kept_summary_objs
LOG.debug('Limiting results yields %d allocation requests and '
'%d provider summaries', len(alloc_request_objs),
len(summary_objs))
elif self._ctx.config.placement.randomize_allocation_candidates:
random.shuffle(alloc_request_objs)
return alloc_request_objs, summary_objs
def provider_ids_from_rp_ids(context, rp_ids):
"""Given an iterable of internal resource provider IDs, returns a dict,
keyed by internal provider Id, of ProviderIds namedtuples describing those
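Because has_trees is computed in the constructor, RequestWideSearchContext can be driven without a database by patching _has_provider_trees, just as the unit test at the end of this commit does. A small sketch of the nested_aware short-circuit in exclude_nested_providers (mocks throughout; no real placement data involved):

    import mock

    from placement import lib as placement_lib
    from placement.objects import research_context as res_ctx

    with mock.patch(
            'placement.objects.research_context._has_provider_trees',
            return_value=True):
        rw_ctx = res_ctx.RequestWideSearchContext(
            mock.Mock(), placement_lib.RequestWideParams(), True)
    # nested_aware=True means the inputs pass through untouched.
    reqs, sums = rw_ctx.exclude_nested_providers(['areq'], ['summary'])
    assert (reqs, sums) == (['areq'], ['summary'])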
@@ -1007,7 +1103,7 @@ def anchors_for_sharing_providers(context, rp_ids):
@db_api.placement_context_manager.reader
def has_provider_trees(ctx):
def _has_provider_trees(ctx):
"""Simple method that returns whether provider trees (i.e. nested resource
providers) are in use in the deployment at all. This information is used to
switch code paths when attempting to retrieve allocation candidate

@@ -41,7 +41,7 @@ def _req_group_search_context(context, **kwargs):
forbidden_aggs=kwargs.get('forbidden_aggs', []),
in_tree=kwargs.get('in_tree', None),
)
has_trees = res_ctx.has_provider_trees(context)
has_trees = res_ctx._has_provider_trees(context)
sharing = res_ctx.get_sharing_providers(context)
rg_ctx = res_ctx.RequestGroupSearchContext(
context, request, has_trees, sharing)
@@ -930,14 +930,15 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# _validate_allocation_requests to make failure results more readable.
self.rp_uuid_to_name = {}
def _get_allocation_candidates(self, requests=None, limit=None,
group_policy=None):
if requests is None:
requests = {'': placement_lib.RequestGroup(
def _get_allocation_candidates(self, groups=None, rqparams=None):
if groups is None:
groups = {'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources)}
return ac_obj.AllocationCandidates.get_by_requests(self.ctx, requests,
limit, group_policy)
if rqparams is None:
rqparams = placement_lib.RequestWideParams()
return ac_obj.AllocationCandidates.get_by_requests(
self.ctx, groups, rqparams)
def _validate_allocation_requests(self, expected, candidates,
expect_suffix=False):
@@ -1044,9 +1045,10 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
requests = {'': placement_lib.RequestGroup(
use_same_provider=False, resources=self.requested_resources,
required_traits=missing)}
self.assertRaises(exception.TraitNotFound,
ac_obj.AllocationCandidates.get_by_requests,
self.ctx, requests)
self.assertRaises(
exception.TraitNotFound,
ac_obj.AllocationCandidates.get_by_requests,
self.ctx, requests, placement_lib.RequestWideParams())
def test_allc_req_and_prov_summary(self):
"""Simply test with one resource provider that the allocation
@@ -1221,7 +1223,8 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# Ask for just one candidate.
limit = 1
alloc_cands = self._get_allocation_candidates(limit=limit)
alloc_cands = self._get_allocation_candidates(
rqparams=placement_lib.RequestWideParams(limit=limit))
allocation_requests = alloc_cands.allocation_requests
self.assertEqual(limit, len(allocation_requests))
@@ -1235,7 +1238,8 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# Ask for two candidates.
limit = 2
alloc_cands = self._get_allocation_candidates(limit=limit)
alloc_cands = self._get_allocation_candidates(
rqparams=placement_lib.RequestWideParams(limit=limit))
allocation_requests = alloc_cands.allocation_requests
self.assertEqual(limit, len(allocation_requests))
@@ -1246,7 +1250,8 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
limit = 5
# We still only expect 2 because cn3 does not match default requests.
expected_length = 2
alloc_cands = self._get_allocation_candidates(limit=limit)
alloc_cands = self._get_allocation_candidates(
rqparams=placement_lib.RequestWideParams(limit=limit))
allocation_requests = alloc_cands.allocation_requests
self.assertEqual(expected_length, len(allocation_requests))
@@ -1327,7 +1332,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# #1705071, this resulted in a KeyError
alloc_cands = self._get_allocation_candidates(
requests={'': placement_lib.RequestGroup(
groups={'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'DISK_GB': 10,
@@ -1506,7 +1511,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
}
alloc_cands = self._get_allocation_candidates(
requests={'': placement_lib.RequestGroup(
groups={'': placement_lib.RequestGroup(
use_same_provider=False, resources=requested_resources)})
# Verify the allocation requests that are returned. There should be 2
@@ -1628,7 +1633,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
tb.set_traits(cn, os_traits.HW_CPU_X86_AVX2)
alloc_cands = self._get_allocation_candidates(
requests={'': placement_lib.RequestGroup(
groups={'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
@@ -1949,7 +1954,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
tb.add_inventory(ss2, orc.SRIOV_NET_VF, 16)
tb.add_inventory(ss2, orc.DISK_GB, 1600)
alloc_cands = self._get_allocation_candidates(requests={
alloc_cands = self._get_allocation_candidates(groups={
'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
@@ -2973,7 +2978,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
resources={
orc.SRIOV_NET_VF: 1,
}),
}, group_policy='none')
}, rqparams=placement_lib.RequestWideParams(group_policy='none'))
# 4 VF providers each providing 2, 1, or 0 inventory makes 6
# different combinations, plus two more that are effectively
# the same but satisfying different suffix mappings.
@@ -3002,7 +3007,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
resources={
orc.SRIOV_NET_VF: 2,
}),
}, group_policy='isolate')
}, rqparams=placement_lib.RequestWideParams(group_policy='isolate'))
self.assertEqual(4, len(alloc_cands.allocation_requests))
def test_nested_result_suffix_mappings(self):
@@ -3027,7 +3032,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
resources={
orc.SRIOV_NET_VF: 1,
}),
}, group_policy='isolate')
}, rqparams=placement_lib.RequestWideParams(group_policy='isolate'))
expected = [
[('cn1', orc.VCPU, 2, ''),

@@ -314,19 +314,19 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase):
root_rp.destroy()
def test_has_provider_trees(self):
"""The has_provider_trees() helper method should return False unless
"""The _has_provider_trees() helper method should return False unless
there is a resource provider that is a parent.
"""
self.assertFalse(res_ctx.has_provider_trees(self.ctx))
self.assertFalse(res_ctx._has_provider_trees(self.ctx))
self._create_provider('cn')
# No parents yet. Should still be False.
self.assertFalse(res_ctx.has_provider_trees(self.ctx))
self.assertFalse(res_ctx._has_provider_trees(self.ctx))
self._create_provider('numa0', parent=uuidsentinel.cn)
# OK, now we've got a parent, so should be True
self.assertTrue(res_ctx.has_provider_trees(self.ctx))
self.assertTrue(res_ctx._has_provider_trees(self.ctx))
def test_destroy_resource_provider(self):
created_resource_provider = self._create_provider(
@@ -1120,7 +1120,7 @@ class SharedProviderTestCase(tb.PlacementDbBaseTestCase):
resources={orc.VCPU: 2,
orc.MEMORY_MB: 256,
orc.DISK_GB: 1500})
has_trees = res_ctx.has_provider_trees(self.ctx)
has_trees = res_ctx._has_provider_trees(self.ctx)
sharing = res_ctx.get_sharing_providers(self.ctx)
rg_ctx = res_ctx.RequestGroupSearchContext(
self.ctx, request, has_trees, sharing)

@@ -12,11 +12,14 @@
import mock
from placement.objects import allocation_candidate
from placement import lib as placement_lib
from placement.objects import research_context as res_ctx
from placement.tests.unit.objects import base
class TestAllocationCandidatesNoDB(base.TestCase):
@mock.patch('placement.objects.research_context._has_provider_trees',
new=mock.Mock(return_value=True))
def test_limit_results(self):
# Results are limited based on their root provider uuid, not uuid.
# For a more "real" test of this functionality, one that exercises
@@ -47,7 +50,8 @@ class TestAllocationCandidatesNoDB(base.TestCase):
sum7 = mock.Mock(resource_provider=mock.Mock(root_provider_uuid=7))
sum6 = mock.Mock(resource_provider=mock.Mock(root_provider_uuid=6))
sum_in = [sum1, sum0, sum4, sum8, sum5, sum7, sum6]
aro, sum = allocation_candidate.AllocationCandidates._limit_results(
self.context, aro_in, sum_in, 2)
rw_ctx = res_ctx.RequestWideSearchContext(
self.context, placement_lib.RequestWideParams(limit=2), True)
aro, sum = rw_ctx.limit_results(aro_in, sum_in)
self.assertEqual(aro_in[:2], aro)
self.assertEqual(set([sum1, sum0, sum4, sum8, sum5]), set(sum))
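A follow-on sketch for the shuffle branch of limit_results, under the same patching assumption as the test above: with randomize_allocation_candidates set and no limit, requests are shuffled in place and nothing is dropped (ctx and its config are mocks):

    import mock

    from placement import lib as placement_lib
    from placement.objects import research_context as res_ctx

    ctx = mock.Mock()
    ctx.config.placement.randomize_allocation_candidates = True
    with mock.patch(
            'placement.objects.research_context._has_provider_trees',
            return_value=False):
        rw_ctx = res_ctx.RequestWideSearchContext(
            ctx, placement_lib.RequestWideParams(), True)
    aro, sums = rw_ctx.limit_results([1, 2, 3], ['s1'])
    assert sorted(aro) == [1, 2, 3] and sums == ['s1']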