Merge "Add any-traits support for allocation candidates"

This commit is contained in:
Zuul 2022-02-22 19:40:27 +00:00 committed by Gerrit Code Review
commit 75a4af0409
6 changed files with 624 additions and 163 deletions

@ -64,7 +64,8 @@ class RequestGroup(object):
provider. If False, represents a request for resources and traits
in any resource provider in the same tree, or a sharing provider.
:param resources: A dict of { resource_class: amount, ... }
:param required_traits: A set of { trait_name, ... }
:param required_traits: A list of set of trait names. E.g.:
[{T1, T2}, {T3}] means ((T1 or T2) and T3)
:param forbidden_traits: A set of { trait_name, ... }
:param member_of: A list of [ [aggregate_UUID],
[aggregate_UUID, aggregate_UUID] ... ]
@ -73,7 +74,7 @@ class RequestGroup(object):
"""
self.use_same_provider = use_same_provider
self.resources = resources or {}
self.required_traits = required_traits or set()
self.required_traits = required_traits or []
self.forbidden_traits = forbidden_traits or set()
self.member_of = member_of or []
self.in_tree = in_tree
@ -84,11 +85,26 @@ class RequestGroup(object):
ret += ', resources={%s}' % ', '.join(
'%s:%d' % (rc, amount)
for rc, amount in sorted(list(self.resources.items())))
ret += ', traits=[%s]' % ', '.join(
sorted(self.required_traits) +
['!%s' % ft for ft in self.forbidden_traits])
all_traits = set()
fragments = []
for any_traits in self.required_traits:
if len(any_traits) == 1:
all_traits.add(list(any_traits)[0])
else:
fragments.append('(' + ' or '.join(sorted(any_traits)) + ')')
if all_traits:
fragments.append(', '.join(trait for trait in sorted(all_traits)))
if self.forbidden_traits:
fragments.append(
', '.join(
'!' + trait for trait in sorted(self.forbidden_traits)))
ret += ', traits=(%s)' % ' and '.join(fragments)
ret += ', aggregates=[%s]' % ', '.join(
sorted('[%s]' % ', '.join(agglist)
sorted('[%s]' % ', '.join(sorted(agglist))
for agglist in sorted(self.member_of)))
ret += ')'
return ret
@ -113,11 +129,10 @@ class RequestGroup(object):
request_group.resources = util.normalize_resources_qs_param(
val)
elif prefix == _QS_REQUIRED:
# TODO(gibi): switch this to normalize_traits_qs_params when
# the data model can handle nested required_traits structure
# as part of the any-traits feature
request_group.required_traits = (
util.normalize_traits_qs_params_legacy(req, suffix))
(
request_group.required_traits,
request_group.forbidden_traits,
) = util.normalize_traits_qs_params(req, suffix)
elif prefix == _QS_MEMBER_OF:
# special handling of member_of qparam since we allow multiple
# member_of params at microversion 1.24.
@ -196,21 +211,26 @@ class RequestGroup(object):
raise webob.exc.HTTPBadRequest(msg)
@staticmethod
def _fix_forbidden(by_suffix):
def _check_forbidden(by_suffix):
conflicting_traits = []
for suff, group in by_suffix.items():
group.required_traits, group.forbidden_traits, conflicts = (
_fix_one_forbidden(group.required_traits))
if conflicts:
conflicting_traits.append('required%s: (%s)'
% (suff, ', '.join(conflicts)))
for any_traits in group.required_traits:
if all(
trait in group.forbidden_traits
for trait in any_traits
):
conflicting_traits.append(
'required%s: (%s)' %
(suff, ', '.join(sorted(any_traits))))
if conflicting_traits:
msg = (
'Conflicting required and forbidden traits found in the '
'following traits keys: %s')
# TODO(efried): comment=errors.QUERYPARAM_BAD_VALUE
raise webob.exc.HTTPBadRequest(
msg % ', '.join(conflicting_traits))
msg % ', '.join(sorted(conflicting_traits)))
@classmethod
def dict_from_request(cls, req, rqparams):
@ -226,6 +246,7 @@ class RequestGroup(object):
&required1=$TRAIT_NAME,$TRAIT_NAME&member_of1=$AGG_UUID
&resources2=$RESOURCE_CLASS_NAME:$AMOUNT,RESOURCE_CLASS_NAME:$AMOUNT
&required2=$TRAIT_NAME,$TRAIT_NAME&member_of2=$AGG_UUID
&required2=in:$TRAIT_NAME,$TRAIT_NAME
These are parsed in groups according to the arbitrary suffix of the key.
For each group, a RequestGroup instance is created containing that
@ -238,6 +259,9 @@ class RequestGroup(object):
providers in the group. That is, the trait is forbidden. Forbidden
traits are processed only if the microversion supports.
If the value of a `required*` is prefixed with 'in:' then the traits in
the value are ORred together.
The return is a dict, keyed by the suffix of these RequestGroup
instances (or the empty string for the unidentified group).
@ -252,6 +276,8 @@ class RequestGroup(object):
&required1=CUSTOM_PHYSNET_PUBLIC,CUSTOM_SWITCH_A
&resources2=SRIOV_NET_VF:1
&required2=!CUSTOM_PHYSNET_PUBLIC
&required2=CUSTOM_GOLD
&required2=in:CUSTOM_FOO,CUSTOM_BAR
...the return value will be:
@ -263,8 +289,8 @@ class RequestGroup(object):
"DISK_GB" 50,
},
required_traits=[
"HW_CPU_X86_VMX",
"CUSTOM_STORAGE_RAID",
{"HW_CPU_X86_VMX"},
{"CUSTOM_STORAGE_RAID"},
],
member_of=[
[9323b2b1-82c9-4e91-bdff-e95e808ef954],
@ -279,8 +305,8 @@ class RequestGroup(object):
"SRIOV_NET_VF": 2,
},
required_traits=[
"CUSTOM_PHYSNET_PUBLIC",
"CUSTOM_SWITCH_A",
{"CUSTOM_PHYSNET_PUBLIC"},
{"CUSTOM_SWITCH_A"},
],
),
'2': RequestGroup(
@ -288,6 +314,9 @@ class RequestGroup(object):
resources={
"SRIOV_NET_VF": 1,
},
required_traits=[
{"CUSTOM_GOLD"},
{"CUSTOM_FOO", "CUSTOM_BAR"},
],
forbidden_traits=[
"CUSTOM_PHYSNET_PUBLIC",
],
@ -321,10 +350,9 @@ class RequestGroup(object):
else:
cls._check_for_orphans(by_suffix)
# Make adjustments for forbidden traits by stripping forbidden out
# of required.
# check conflicting traits in the request
if allow_forbidden:
cls._fix_forbidden(by_suffix)
cls._check_forbidden(by_suffix)
return by_suffix
@ -353,7 +381,9 @@ class RequestWideParams(object):
where any such RequestGroups are satisfied by the same RP.
:param anchor_required_traits: Set of trait names which the anchor of
each returned allocation candidate must possess, regardless of
any RequestGroup filters.
any RequestGroup filters. Note that anchor_required_traits
does not support the any-trait format that
RequestGroup.required_traits does.
:param anchor_forbidden_traits: Set of trait names which the anchor of
each returned allocation candidate must NOT possess, regardless
of any RequestGroup filters.

@ -96,13 +96,24 @@ class AllocationCandidates(object):
# each resource class requested.
# If there aren't any providers that have any of the
# required traits, just exit early...
if rg_ctx.required_trait_map:
if rg_ctx.required_traits:
# TODO(cdent): Now that there is also a forbidden_trait_map
# it should be possible to further optimize this attempt at
# a quick return, but we leave that to future patches for
# now.
# NOTE(gibi): this optimization works by flattening the
# required_trait nested list. So if the request contains
# ((A or B) and C) trait request then we check if there is any
# RP with either A, B or C. If none then we know that there is
# no RP that can satisfy the original query either.
trait_rps = res_ctx.get_provider_ids_having_any_trait(
rg_ctx.context, rg_ctx.required_trait_map.values())
rg_ctx.context,
{
trait
for any_traits in rg_ctx.required_traits
for trait in any_traits
},
)
if not trait_rps:
return set()
rp_candidates = res_ctx.get_trees_matching_all(rg_ctx, rw_ctx)
@ -360,8 +371,8 @@ def _alloc_candidates_multiple_providers(rg_ctx, rw_ctx, rp_candidates):
for res_requests in itertools.product(*request_groups):
if not _check_traits_for_alloc_request(
res_requests, rw_ctx.summaries_by_id,
rg_ctx.required_trait_map,
rg_ctx.forbidden_trait_map):
rg_ctx.required_trait_names,
rg_ctx.forbidden_traits.keys()):
# This combination doesn't satisfy trait constraints
continue
@ -585,13 +596,13 @@ def _check_traits_for_alloc_request(res_requests, summaries, required_traits,
:param summaries: dict, keyed by resource provider id, of ProviderSummary
objects containing usage and trait information for
resource providers involved in the overall request
:param required_traits: A map, keyed by trait string name, of required
trait internal IDs that each *allocation request's
set of providers* must *collectively* have
associated with them
:param forbidden_traits: A map, keyed by trait string name, of trait
internal IDs that a resource provider must
not have.
:param required_traits: A list of set of trait names where traits
in the sets are in OR relationship while traits in
two different sets are in AND relationship. Each
*allocation request's set of providers* must
*collectively* fulfill this trait expression.
:param forbidden_traits: A set of trait names that a resource provider must
not have.
"""
all_prov_ids = []
all_traits = set()
@ -611,12 +622,27 @@ def _check_traits_for_alloc_request(res_requests, summaries, required_traits,
all_prov_ids.append(rp_id)
all_traits |= rp_traits
# Check if there are missing traits
missing_traits = set(required_traits) - all_traits
if missing_traits:
LOG.debug('Excluding a set of allocation candidate %s : '
'missing traits %s are not satisfied.',
all_prov_ids, ','.join(missing_traits))
# We need a match for *all* the items from the outer list of the
# required_traits as that describes AND relationship, and we need at least
# *one match* per nested trait set as that set describes OR relationship
# so collect all the matches with the nested sets
trait_matches = [
any_traits.intersection(all_traits) for any_traits in required_traits]
# if some internal sets do not match to the provided traits then we have
# missing trait (trait set)
if not all(trait_matches):
missing_traits = [
'(' + ' or '.join(any_traits) + ')'
for any_traits, match in zip(required_traits, trait_matches)
if not match
]
LOG.debug(
'Excluding a set of allocation candidate %s : '
'missing traits %s are not satisfied.',
all_prov_ids,
' and '.join(any_traits for any_traits in missing_traits))
return []
return all_prov_ids

@ -92,14 +92,24 @@ class RequestGroupSearchContext(object):
# or a sharing provider.
self.use_same_provider = group.use_same_provider
# maps the trait name to the trait internal ID
self.required_trait_map = {}
self.forbidden_trait_map = {}
for trait_map, traits in (
(self.required_trait_map, group.required_traits),
(self.forbidden_trait_map, group.forbidden_traits)):
if traits:
trait_map.update(trait_obj.ids_from_names(context, traits))
# Both required_trait_names and required_traits expresses the same
# request with the same nested list of sets structure but
# required_trait_names contains trait names while required_traits
# contains trait internal IDs
self.required_trait_names = group.required_traits
# let's map the trait names to internal IDs; this is useful for DB calls
# expecting trait IDs. The structure of this field is the same as the
# required_trait_names field.
self.required_traits = []
# forbidden_traits is a dict mapping trait names to trait internal IDs
self.forbidden_traits = {}
for any_traits in group.required_traits:
self.required_traits.append(
set(trait_obj.ids_from_names(context, any_traits).values()))
if group.forbidden_traits:
self.forbidden_traits = trait_obj.ids_from_names(
context, group.forbidden_traits)
# Internal id of a root provider. If provided, this RequestGroup must
# be satisfied by resource provider(s) under the root provider.
@ -716,7 +726,7 @@ def get_trees_matching_all(rg_ctx, rw_ctx):
if not provs_with_inv:
return rp_candidates.RPCandidateList()
if (not rg_ctx.required_trait_map and not rg_ctx.forbidden_trait_map) or (
if (not rg_ctx.required_traits and not rg_ctx.forbidden_traits) or (
rg_ctx.exists_sharing):
# If there were no traits required, there's no difference in how we
# calculate allocation requests between nested and non-nested
@ -729,14 +739,14 @@ def get_trees_matching_all(rg_ctx, rw_ctx):
# capacity and that set of providers (grouped by their tree) have all
# of the required traits and none of the forbidden traits
rp_tuples_with_trait = _get_trees_with_traits(
rg_ctx.context, provs_with_inv.rps, rg_ctx.required_trait_map.values(),
rg_ctx.forbidden_trait_map.values())
rg_ctx.context, provs_with_inv.rps, rg_ctx.required_traits,
rg_ctx.forbidden_traits)
provs_with_inv.filter_by_rp(rp_tuples_with_trait)
LOG.debug("found %d providers under %d trees after applying "
"traits filter - required: %s, forbidden: %s",
len(provs_with_inv.rps), len(provs_with_inv.trees),
list(rg_ctx.required_trait_map),
list(rg_ctx.forbidden_trait_map))
list(rg_ctx.required_trait_names),
list(rg_ctx.forbidden_traits))
return provs_with_inv
@ -1151,13 +1161,13 @@ def get_provider_ids_for_traits_and_aggs(rg_ctx):
specified forbidden_traits.
"""
filtered_rps = set()
if rg_ctx.required_trait_map:
if rg_ctx.required_traits:
trait_rps = provider_ids_matching_required_traits(
rg_ctx.context, rg_ctx.required_trait_map.values())
rg_ctx.context, rg_ctx.required_traits)
filtered_rps = trait_rps
LOG.debug("found %d providers after applying required traits filter "
"(%s)",
len(filtered_rps), list(rg_ctx.required_trait_map))
len(filtered_rps), list(rg_ctx.required_trait_names))
if not filtered_rps:
return None, []
@ -1185,15 +1195,15 @@ def get_provider_ids_for_traits_and_aggs(rg_ctx):
if not filtered_rps:
return None, []
if rg_ctx.forbidden_trait_map:
if rg_ctx.forbidden_traits:
rps_bad_traits = get_provider_ids_having_any_trait(
rg_ctx.context, rg_ctx.forbidden_trait_map.values())
rg_ctx.context, rg_ctx.forbidden_traits.values())
forbidden_rp_ids |= rps_bad_traits
if filtered_rps:
filtered_rps -= rps_bad_traits
LOG.debug("found %d providers after applying forbidden traits "
"filter (%s)", len(filtered_rps),
list(rg_ctx.forbidden_trait_map))
list(rg_ctx.forbidden_traits))
if not filtered_rps:
return None, []

@ -36,8 +36,8 @@ def _req_group_search_context(context, **kwargs):
request = placement_lib.RequestGroup(
use_same_provider=False,
resources=kwargs.get('resources', resources),
required_traits=kwargs.get('required_traits', {}),
forbidden_traits=kwargs.get('forbidden_traits', {}),
required_traits=kwargs.get('required_traits', []),
forbidden_traits=kwargs.get('forbidden_traits', set()),
member_of=kwargs.get('member_of', []),
forbidden_aggs=kwargs.get('forbidden_aggs', []),
in_tree=kwargs.get('in_tree', None),
@ -186,9 +186,7 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
# associated any traits with the providers
avx2_t = trait_obj.Trait.get_by_name(
self.ctx, os_traits.HW_CPU_X86_AVX2)
# get_provider_ids_matching()'s required_traits and forbidden_traits
# arguments maps, keyed by trait name, of the trait internal ID
req_traits = {os_traits.HW_CPU_X86_AVX2: avx2_t.id}
req_traits = [{os_traits.HW_CPU_X86_AVX2}]
rg_ctx = _req_group_search_context(
self.ctx,
resources=resources,
@ -212,6 +210,41 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
rp_ids = [r[0] for r in res]
self.assertEqual([incl_biginv_noalloc.id], rp_ids)
# ask for a complex required trait query: (AVX2 and (SSE or SSE2))
# first it should match no RPs as neither has SSE nor SSE2
req_traits = [
{os_traits.HW_CPU_X86_AVX2},
{os_traits.HW_CPU_X86_SSE, os_traits.HW_CPU_X86_SSE2}
]
rg_ctx = _req_group_search_context(
self.ctx,
resources=resources,
required_traits=req_traits,
)
res = res_ctx.get_provider_ids_matching(rg_ctx)
self.assertEqual([], res)
# now add SSE to an RP that has no AVX2 so we still not have a match
sse_t = trait_obj.Trait.get_by_name(
self.ctx, os_traits.HW_CPU_X86_SSE)
incl_extra_full.set_traits([sse_t])
res = res_ctx.get_provider_ids_matching(rg_ctx)
self.assertEqual([], res)
# now add SSE2 to an RP which also has AVX2. We expect that RP is a
# match
sse2_t = trait_obj.Trait.get_by_name(
self.ctx, os_traits.HW_CPU_X86_SSE2)
incl_biginv_noalloc.set_traits([avx2_t, sse2_t])
res = res_ctx.get_provider_ids_matching(rg_ctx)
rp_ids = [r[0] for r in res]
self.assertEqual([incl_biginv_noalloc.id], rp_ids)
# Let's see if the in_tree filter works
rg_ctx = _req_group_search_context(
self.ctx,
@ -242,8 +275,7 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
tb.add_inventory(rp3, orc.VCPU, 64)
resources = {orc.VCPU: 4}
forbidden_traits = {trait_two.name: trait_two.id,
trait_three.name: trait_three.id}
forbidden_traits = {trait_two.name, trait_three.name}
member_of = [[uuids.agg1]]
rg_ctx = _req_group_search_context(
@ -633,10 +665,7 @@ class ProviderTreeDBHelperTestCase(tb.PlacementDbBaseTestCase):
geneve_t = trait_obj.Trait.get_by_name(
self.ctx, os_traits.HW_NIC_OFFLOAD_GENEVE)
# required_traits parameter is a dict of trait name to internal ID
req_traits = {
geneve_t.name: geneve_t.id,
}
req_traits = [{geneve_t.name}]
expected_trees = ['cn3']
# NOTE(tetsuro): Actually we also get providers without traits here.
# This is reported as bug#1771707 and from users' view the bug is now
@ -652,18 +681,12 @@ class ProviderTreeDBHelperTestCase(tb.PlacementDbBaseTestCase):
# verify that there are no returned allocation candidates
avx2_t = trait_obj.Trait.get_by_name(
self.ctx, os_traits.HW_CPU_X86_AVX2)
# required_traits parameter is a dict of trait name to internal ID
req_traits = {
geneve_t.name: geneve_t.id,
avx2_t.name: avx2_t.id,
}
req_traits = [{geneve_t.name}, {avx2_t.name}]
_run_test([], [], required_traits=req_traits)
# If we add the AVX2 trait as forbidden, not required, then we
# should get back the original cn3
req_traits = {
geneve_t.name: geneve_t.id,
}
req_traits = [{geneve_t.name}]
forbidden_traits = {
avx2_t.name: avx2_t.id,
}
@ -1333,7 +1356,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
self.assertEqual(expected, observed)
def test_unknown_traits(self):
missing = set(['UNKNOWN_TRAIT'])
missing = [{'UNKNOWN_TRAIT'}]
requests = {'': placement_lib.RequestGroup(
use_same_provider=False, resources=self.requested_resources,
required_traits=missing)}
@ -1440,7 +1463,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2])
required_traits=[{os_traits.HW_CPU_X86_AVX2}]
)},
)
self._validate_allocation_requests([], alloc_cands)
@ -1453,7 +1476,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2])
required_traits=[{os_traits.HW_CPU_X86_AVX2}]
)},
)
# Only cn2 should be in our allocation requests now since that's the
@ -1496,6 +1519,107 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
]
self._validate_allocation_requests(expected, alloc_cands)
# Now create a more complex trait query: (AVX2 and (SSE or SSE2))
# First no result is expected as none of the RPs has SSE or SSE2 traits
required_traits = [
{os_traits.HW_CPU_X86_AVX2},
{os_traits.HW_CPU_X86_SSE, os_traits.HW_CPU_X86_SSE2}
]
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=required_traits,
)},
)
self._validate_allocation_requests([], alloc_cands)
# Next we add SSE to one of the RPs that has no AVX2, so we still
# expect empty result
tb.set_traits(cn1, 'HW_CPU_X86_SSE')
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=required_traits,
)},
)
self._validate_allocation_requests([], alloc_cands)
# Next we add SSE2 to the cn2 where there are AVX2 too, and we expect
# that cn2 is a match now
tb.set_traits(cn2, 'HW_CPU_X86_AVX2', 'HW_CPU_X86_SSE2')
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=required_traits,
)},
)
expected = [
[('cn2', orc.VCPU, 1),
('cn2', orc.MEMORY_MB, 64),
('cn2', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
p_sums = alloc_cands.provider_summaries
self.assertEqual(1, len(p_sums))
expected = {
'cn2': set([
(orc.VCPU, 24 * 16.0, 0),
(orc.MEMORY_MB, 32768 * 1.5, 0),
(orc.DISK_GB, 2000 - 100, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
# Next forbid SSE2 in the request so the trait query becomes
# (AVX2 and (SSE or SSE2) and !SSE2) this should lead to no candidate
# as cn2 has SSE2
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=required_traits,
forbidden_traits={'HW_CPU_X86_SSE2'},
)},
)
self._validate_allocation_requests([], alloc_cands)
# But if we forbid SSE instead of SSE2 then we get back cn2
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=required_traits,
forbidden_traits={'HW_CPU_X86_SSE'}
)},
)
expected = [
[('cn2', orc.VCPU, 1),
('cn2', orc.MEMORY_MB, 64),
('cn2', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
p_sums = alloc_cands.provider_summaries
self.assertEqual(1, len(p_sums))
expected = {
'cn2': set([
(orc.VCPU, 24 * 16.0, 0),
(orc.MEMORY_MB, 32768 * 1.5, 0),
(orc.DISK_GB, 2000 - 100, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
def test_all_local_limit(self):
"""Create some resource providers that can satisfy the request for
resources with local (non-shared) resources, limit them, and verify
@ -1658,7 +1782,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
required_traits=[{os_traits.HW_CPU_X86_AVX2}],
)}
)
@ -1679,7 +1803,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
required_traits=[{os_traits.HW_CPU_X86_AVX2}],
)}
)
@ -1724,12 +1848,12 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# Require the AVX2 trait but forbid CUSTOM_EXTRA_FASTER, which is
# added to cn2
tb.set_traits(cn2, 'CUSTOM_EXTRA_FASTER')
tb.set_traits(cn2, 'HW_CPU_X86_AVX2', 'CUSTOM_EXTRA_FASTER')
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
required_traits=[{os_traits.HW_CPU_X86_AVX2}],
forbidden_traits=set(['CUSTOM_EXTRA_FASTER']),
)}
)
@ -1747,7 +1871,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
required_traits=[{os_traits.HW_CPU_X86_AVX2}],
forbidden_traits=set(['MISC_SHARES_VIA_AGGREGATE']),
)}
)
@ -1758,6 +1882,97 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
]
self._validate_allocation_requests(expected, alloc_cands)
# Now create a more complex trait query. (AVX2 and (SSE or SSE2))
# Right now none of the RPs has SSE nor SSE2 so we expect no candidates
required_traits = [
{os_traits.HW_CPU_X86_AVX2},
{os_traits.HW_CPU_X86_SSE, os_traits.HW_CPU_X86_SSE2}
]
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=required_traits,
)}
)
# We have not yet associated the SSE or SSE2 traits to any provider,
# so we should get zero allocation candidates
p_sums = alloc_cands.provider_summaries
self.assertEqual([], alloc_cands.allocation_requests)
self.assertEqual(0, len(p_sums))
# Next associate SSE to the sharing provider that is enough to get
# matches. cn1 with shared storage is a match as ss provides SSE but
# cn1 with local disk is not a match as then ss is not used and
# therefore no SSE is provided. cn2 is a match with ss.
tb.set_traits(ss, "MISC_SHARES_VIA_AGGREGATE", "HW_CPU_X86_SSE")
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=required_traits,
)}
)
expected = [
[('cn1', orc.VCPU, 1),
('cn1', orc.MEMORY_MB, 64),
('shared storage', orc.DISK_GB, 1500)],
[('cn2', orc.VCPU, 1),
('cn2', orc.MEMORY_MB, 64),
('shared storage', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
# Now add SSE2 to cn1 so cn1 + local disk will also be a match
tb.set_traits(cn1, "HW_CPU_X86_AVX2", "HW_CPU_X86_SSE2")
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=required_traits,
)}
)
expected = [
[('cn1', orc.VCPU, 1),
('cn1', orc.MEMORY_MB, 64),
('shared storage', orc.DISK_GB, 1500)],
[('cn1', orc.VCPU, 1),
('cn1', orc.MEMORY_MB, 64),
('cn1', orc.DISK_GB, 1500)],
[('cn2', orc.VCPU, 1),
('cn2', orc.MEMORY_MB, 64),
('shared storage', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
# Now change the trait query to
# (AVX2 and (SSE or SSE2) and not CUSTOM_EXTRA_FASTER)
# cn2 has the CUSTOM_EXTRA_FASTER trait so that is expected to be
# filtered out
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=required_traits,
forbidden_traits={'CUSTOM_EXTRA_FASTER'},
)}
)
expected = [
[('cn1', orc.VCPU, 1),
('cn1', orc.MEMORY_MB, 64),
('shared storage', orc.DISK_GB, 1500)],
[('cn1', orc.VCPU, 1),
('cn1', orc.MEMORY_MB, 64),
('cn1', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
def test_local_with_shared_custom_resource(self):
"""Create some resource providers that can satisfy the request for
resources with local VCPU and MEMORY_MB but rely on a shared resource
@ -1907,7 +2122,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
required_traits=[{os_traits.HW_CPU_X86_AVX2}],
)}
)
@ -1928,7 +2143,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
groups={'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
required_traits=[{os_traits.HW_CPU_X86_AVX2}],
)}
)
@ -1976,9 +2191,9 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([
os_traits.HW_CPU_X86_AVX2, os_traits.STORAGE_DISK_SSD
]),
required_traits=[
{os_traits.HW_CPU_X86_AVX2}, {os_traits.STORAGE_DISK_SSD}
],
)
})
@ -2004,6 +2219,76 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
}
self._validate_provider_summary_traits(expected, alloc_cands)
# Let's have an even more complex trait query
# (AVX2 and (SSD or SSE) and not SSE2). As no SSE or SSE2 is in the
# current trees we still get back cn3 that has AVX and SSD
required_traits = [
{os_traits.HW_CPU_X86_AVX2},
{os_traits.STORAGE_DISK_SSD, os_traits.HW_CPU_X86_SSE}
]
alloc_cands = self._get_allocation_candidates({
'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=required_traits,
forbidden_traits={os_traits.HW_CPU_X86_SSE2}
)
})
# There should be only cn3 in the returned allocation candidates
expected = [
[('cn3', orc.VCPU, 1),
('cn3', orc.MEMORY_MB, 64),
('cn3', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
expected = {
'cn3': set([
(orc.VCPU, 24 * 16.0, 0),
(orc.MEMORY_MB, 1024 * 1.5, 0),
(orc.DISK_GB, 2000 - 100, 0),
]),
}
self._validate_provider_summary_resources(expected, alloc_cands)
expected = {
'cn3': set(['HW_CPU_X86_AVX2', 'STORAGE_DISK_SSD'])
}
self._validate_provider_summary_traits(expected, alloc_cands)
# Next we add SSE to cn1 and both SSE and SSE2 to cn2. This will make
# cn1 a match while cn2 still be ignored due to SSE2. cn3 is good as
# before
tb.set_traits(
cn1, os_traits.HW_CPU_X86_AVX2, os_traits.HW_CPU_X86_SSE)
tb.set_traits(
cn2,
os_traits.HW_CPU_X86_AVX2,
os_traits.HW_CPU_X86_SSE,
os_traits.HW_CPU_X86_SSE2
)
alloc_cands = self._get_allocation_candidates({
'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=required_traits,
forbidden_traits={os_traits.HW_CPU_X86_SSE2}
)
})
expected = [
[('cn1', orc.VCPU, 1),
('cn1', orc.MEMORY_MB, 64),
('shared storage', orc.DISK_GB, 1500)],
[('cn3', orc.VCPU, 1),
('cn3', orc.MEMORY_MB, 64),
('cn3', orc.DISK_GB, 1500)],
]
self._validate_allocation_requests(expected, alloc_cands)
def test_common_rc(self):
"""Candidates when cn and shared have inventory in the same class."""
cn = self._create_provider('cn', uuids.agg1)
@ -2097,8 +2382,8 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set(['HW_CPU_X86_SSE', 'STORAGE_DISK_SSD',
'CUSTOM_RAID'])
required_traits=[
{'HW_CPU_X86_SSE'}, {'STORAGE_DISK_SSD'}, {'CUSTOM_RAID'}]
)}
)
@ -2921,7 +3206,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
orc.MEMORY_MB: 256,
orc.SRIOV_NET_VF: 1,
},
required_traits=[os_traits.HW_NIC_OFFLOAD_GENEVE],
required_traits=[{os_traits.HW_NIC_OFFLOAD_GENEVE}],
)}
)
@ -3056,7 +3341,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
orc.MEMORY_MB: 256,
orc.SRIOV_NET_VF: 1,
},
required_traits=[os_traits.HW_NIC_OFFLOAD_GENEVE],
required_traits=[{os_traits.HW_NIC_OFFLOAD_GENEVE}],
)
})
@ -3271,7 +3556,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
orc.SRIOV_NET_VF: 1,
orc.DISK_GB: 1500,
},
required_traits=[os_traits.HW_NIC_OFFLOAD_GENEVE])
required_traits=[{os_traits.HW_NIC_OFFLOAD_GENEVE}])
})
# cn1_numa0_pf0 is not in the allocation candidates because it

@ -686,10 +686,9 @@ class TestParseQsRequestGroups(testtools.TestCase):
'VCPU': 2,
'MEMORY_MB': 2048,
},
required_traits={
'HW_CPU_X86_VMX',
'CUSTOM_GOLD',
},
required_traits=[
{'HW_CPU_X86_VMX'}, {'CUSTOM_GOLD'}
],
),
]
self.assertRequestGroupsEqual(expected, self.do_parse(qs))
@ -778,18 +777,17 @@ class TestParseQsRequestGroups(testtools.TestCase):
'VCPU': 2,
'MEMORY_MB': 2048,
},
required_traits={
'HW_CPU_X86_VMX',
'CUSTOM_GOLD',
},
required_traits=[
{'HW_CPU_X86_VMX'}, {'CUSTOM_GOLD'}
],
),
pl.RequestGroup(
resources={
'CUSTOM_MAGIC': 123,
},
required_traits={
'CUSTOM_GOLD',
},
required_traits=[
{'CUSTOM_GOLD'}
],
),
pl.RequestGroup(
resources={
@ -816,19 +814,19 @@ class TestParseQsRequestGroups(testtools.TestCase):
'MEMORY_MB': 4096,
'DISK_GB': 10,
},
required_traits={
'HW_CPU_X86_VMX',
'CUSTOM_MEM_FLASH',
'STORAGE_DISK_SSD',
},
required_traits=[
{'HW_CPU_X86_VMX'},
{'CUSTOM_MEM_FLASH'},
{'STORAGE_DISK_SSD'}
],
),
pl.RequestGroup(
resources={
'SRIOV_NET_VF': 2,
},
required_traits={
'CUSTOM_PHYSNET_PRIVATE',
},
required_traits=[
{'CUSTOM_PHYSNET_PRIVATE'},
],
),
pl.RequestGroup(
resources={
@ -836,10 +834,10 @@ class TestParseQsRequestGroups(testtools.TestCase):
'NET_INGRESS_BYTES_SEC': 20000,
'NET_EGRESS_BYTES_SEC': 10000,
},
required_traits={
'CUSTOM_SWITCH_BIG',
'CUSTOM_PHYSNET_PROD',
},
required_traits=[
{'CUSTOM_SWITCH_BIG'},
{'CUSTOM_PHYSNET_PROD'},
],
),
pl.RequestGroup(
resources={
@ -1033,9 +1031,9 @@ class TestParseQsRequestGroups(testtools.TestCase):
'VCPU': 2,
'MEMORY_MB': 2048,
},
required_traits={
'CUSTOM_PHYSNET1',
},
required_traits=[
{'CUSTOM_PHYSNET1'},
],
forbidden_traits={
'CUSTOM_SWITCH_BIG',
}
@ -1077,9 +1075,9 @@ class TestParseQsRequestGroups(testtools.TestCase):
resources={
'CUSTOM_MAGIC': 1,
},
required_traits={
'CUSTOM_PHYSNET1',
},
required_traits=[
{'CUSTOM_PHYSNET1'},
],
forbidden_traits={
'CUSTOM_PHYSNET2',
}
@ -1098,9 +1096,9 @@ class TestParseQsRequestGroups(testtools.TestCase):
resources={
'CUSTOM_MAGIC': 1,
},
required_traits={
'CUSTOM_PHYSNET1',
}
required_traits=[
{'CUSTOM_PHYSNET1'},
],
),
pl.RequestGroup(
use_same_provider=True,
@ -1150,9 +1148,9 @@ class TestParseQsRequestGroups(testtools.TestCase):
resources={
'CUSTOM_MAGIC': 1,
},
required_traits={
'CUSTOM_PHYSNET1',
}
required_traits=[
{'CUSTOM_PHYSNET1'},
],
),
]
self.assertRequestGroupsEqual(
@ -1160,6 +1158,147 @@ class TestParseQsRequestGroups(testtools.TestCase):
self.assertRaises(
webob.exc.HTTPBadRequest, self.do_parse, qs, version=(1, 22))
def test_any_traits_1_38(self):
    """The any-traits ``in:`` syntax must be rejected before 1.39."""
    query = 'resources1=RABBIT:1&required1=in:WHITE,BLACK'
    error = self.assertRaises(
        webob.exc.HTTPBadRequest, self.do_parse, query, version=(1, 38))
    # The error text always uses the canonical example traits, regardless
    # of the traits actually present in the rejected request.
    expected_fragment = (
        "The format 'in:HW_CPU_X86_VMX,CUSTOM_MAGIC' only supported since "
        "microversion 1.39.")
    self.assertIn(expected_fragment, str(error))
# TODO(gibi): remove the mock when microversion 1.39 is fully added
@mock.patch(
    'placement.microversion.max_version_string',
    new=mock.Mock(return_value='1.39'))
def test_any_traits_1_39(self):
    """A single ``in:`` group parses into one any-traits set."""
    query = 'resources1=RABBIT:1&required1=in:WHITE,BLACK'
    expected_groups = [
        pl.RequestGroup(
            use_same_provider=True,
            resources={'RABBIT': 1},
            # in:WHITE,BLACK means (WHITE or BLACK)
            required_traits=[{'WHITE', 'BLACK'}],
        ),
    ]
    self.assertRequestGroupsEqual(
        expected_groups, self.do_parse(query, version=(1, 39)))
# TODO(gibi): remove the mock when microversion 1.39 is fully added
@mock.patch(
    'placement.microversion.max_version_string',
    new=mock.Mock(return_value='1.39'))
def test_any_traits_repeated(self):
    """Repeated required params accumulate any-traits, plain and forbidden.

    ``in:T1,T2`` plus ``T3,!T4`` on the same suffix must yield
    ((T1 or T2) and T3 and not T4).
    """
    query = 'resources1=CUSTOM_MAGIC:1&required1=in:T1,T2&required1=T3,!T4'
    expected_groups = [
        pl.RequestGroup(
            use_same_provider=True,
            resources={'CUSTOM_MAGIC': 1},
            required_traits=[{'T1', 'T2'}, {'T3'}],
            forbidden_traits={'T4'},
        ),
    ]
    self.assertRequestGroupsEqual(
        expected_groups, self.do_parse(query, version=(1, 39)))
# TODO(gibi): remove the mock when microversion 1.39 is fully added
@mock.patch(
    'placement.microversion.max_version_string',
    new=mock.Mock(return_value='1.39'))
def test_any_traits_multiple_groups(self):
    """Any-traits syntax is handled independently per request group."""
    query = ('resources=RABBIT:1&required=in:WHITE,BLACK&'
             'resources2=CAT:2&required2=in:SILVER,RED&required2=!SPOTTED')
    expected_groups = [
        # the unnamed (unsuffixed) group
        pl.RequestGroup(
            use_same_provider=False,
            resources={'RABBIT': 1},
            required_traits=[{'WHITE', 'BLACK'}],
        ),
        # the numbered group with both an any-traits set and a forbidden
        # trait
        pl.RequestGroup(
            use_same_provider=True,
            resources={'CAT': 2},
            required_traits=[{'SILVER', 'RED'}],
            forbidden_traits={'SPOTTED'},
        ),
    ]
    self.assertRequestGroupsEqual(
        expected_groups, self.do_parse(query, version=(1, 39)))
# TODO(gibi): remove the mock when microversion 1.39 is fully added
@mock.patch(
    'placement.microversion.max_version_string',
    new=mock.Mock(return_value='1.39'))
def test_any_traits_forbidden_conflict(self):
    """Forbidding part of an OR group is fine; forbidding all of it fails."""
    # going against one part of an OR expression is not a conflict as the
    # other parts still can match and fulfill the query
    query = ('resources=VCPU:2'
             '&required=in:CUSTOM_PHYSNET1,CUSTOM_PHYSNET2'
             '&required=!CUSTOM_PHYSNET1')
    groups = self.do_parse(query, version=(1, 39))
    self.assertEqual(1, len(groups))

    # but going against all parts of an OR expression is a conflict
    query = ('resources=VCPU:2'
             '&required=in:CUSTOM_PHYSNET1,CUSTOM_PHYSNET2'
             '&required=!CUSTOM_PHYSNET1,!CUSTOM_PHYSNET2')
    error = self.assertRaises(
        webob.exc.HTTPBadRequest, self.do_parse, query, version=(1, 39))
    expected_message = (
        'Conflicting required and forbidden traits found '
        'in the following traits keys: required: '
        '(CUSTOM_PHYSNET1, CUSTOM_PHYSNET2)')
    self.assertEqual(expected_message, str(error))
# TODO(gibi): remove the mock when microversion 1.39 is fully added
@mock.patch(
    'placement.microversion.max_version_string',
    new=mock.Mock(return_value='1.39'))
def test_stringification(self):
    """str(RequestGroup) renders traits as one boolean expression."""
    agg1 = uuidsentinel.agg1
    agg2 = uuidsentinel.agg2
    query = (f'resources1=CAT:2&required1=in:SILVER,RED&'
             f'required1=TABBY,!SPOTTED&member_of1=in:{agg1},{agg2}')

    groups = self.do_parse(query, version=(1, 39))

    self.assertEqual(1, len(groups))
    # aggregate UUIDs are rendered sorted inside each member_of list
    sorted_aggs = ", ".join(sorted([agg1, agg2]))
    self.assertEqual(
        'RequestGroup('
        'use_same_provider=True, '
        'resources={CAT:2}, '
        'traits=((RED or SILVER) and TABBY and !SPOTTED), '
        f'aggregates=[[{sorted_aggs}]])',
        str(groups[0])
    )
class TestPickLastModified(base.ContextTestCase):

@ -336,35 +336,6 @@ def normalize_traits_qs_param_to_legacy_value(val, allow_forbidden=False):
return legacy_traits
# TODO(gibi): remove this once the allocation candidate code path also
# supports nested required_traits structure.
def normalize_traits_qs_params_legacy(req, suffix=''):
    """Given a webob.Request object, validate and collect required querystring
    parameters.

    We begin supporting forbidden traits in microversion 1.22.

    :param req: a webob.Request object to read the params from
    :param suffix: the string suffix of the request group to read from the
        request. If empty then the unnamed request group is processed.
    :returns: a set of trait names, including forbidden traits with an '!'
        prefix.
    :raises webob.exc.HTTPBadRequest: if the format of the query param is not
        valid
    """
    microversion = req.environ[placement.microversion.MICROVERSION_ENVIRON]
    # The forbidden ("!TRAIT") syntax is only accepted from 1.22 onwards.
    forbidden_allowed = microversion.matches((1, 22))
    # NOTE(gibi): each repetition of the query param overwrites the previous
    # result, so only the last occurrence wins; earlier occurrences are still
    # parsed and therefore still validated (and may raise).
    result = set()
    for raw_param in req.GET.getall('required' + suffix):
        result = normalize_traits_qs_param_to_legacy_value(
            raw_param, forbidden_allowed)
    return result
def normalize_traits_qs_param(
val, allow_forbidden=False, allow_any_traits=False
):