placement: Granular GET /allocation_candidates

In a new microversion, the GET /allocation_candidates API now accepts
granular resource request syntax:
?resourcesN=...&requiredN=...&member_ofN=...&group_policy={isolate|none}

Change-Id: I4e99974443aa513fd9f837a6057f67d744caf1b4
blueprint: granular-resource-requests
Eric Fried 2018-04-25 16:55:58 -05:00
parent 7f996eb2c4
commit 9af073384c
15 changed files with 1315 additions and 226 deletions

View File

@@ -212,7 +212,9 @@ def list_allocation_candidates(req):
context = req.environ['placement.context']
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
get_schema = schema.GET_SCHEMA_1_10
if want_version.matches((1, 21)):
if want_version.matches((1, 25)):
get_schema = schema.GET_SCHEMA_1_25
elif want_version.matches((1, 21)):
get_schema = schema.GET_SCHEMA_1_21
elif want_version.matches((1, 17)):
get_schema = schema.GET_SCHEMA_1_17
@@ -227,9 +229,21 @@ def list_allocation_candidates(req):
if limit:
limit = int(limit[0])
group_policy = req.GET.getall('group_policy') or None
# Schema ensures we get either "none" or "isolate"
if group_policy:
group_policy = group_policy[0]
else:
# group_policy is required if more than one numbered request group was
# specified.
if len([rg for rg in requests.values() if rg.use_same_provider]) > 1:
raise webob.exc.HTTPBadRequest(
_('The "group_policy" parameter is required when specifying '
'more than one "resources{N}" parameter.'))
try:
cands = rp_obj.AllocationCandidates.get_by_requests(context, requests,
limit)
cands = rp_obj.AllocationCandidates.get_by_requests(
context, requests, limit=limit, group_policy=group_policy)
except exception.ResourceClassNotFound as exc:
raise webob.exc.HTTPBadRequest(
_('Invalid resource class in resources parameter: %(error)s') %

View File

@@ -66,6 +66,8 @@ VERSIONS = [
'1.23', # Add support for error codes in error response JSON
'1.24', # Support multiple ?member_of=<agg UUIDs> queryparams on
# GET /resource_providers
'1.25', # Adds support for granular resource requests via numbered
# querystring groups in GET /allocation_candidates
]

View File

@@ -3507,7 +3507,8 @@ def _alloc_candidates_single_provider(ctx, requested_resources, rps):
alloc_requests.append(req_obj)
# If this is a sharing provider, we have to include an extra
# AllocationRequest for every possible anchor.
if os_traits.MISC_SHARES_VIA_AGGREGATE in rp_summary.traits:
traits = [trait.name for trait in rp_summary.traits]
if os_traits.MISC_SHARES_VIA_AGGREGATE in traits:
for anchor in _anchors_for_sharing_provider(
ctx, rp_summary.resource_provider.id):
# We already added self
@@ -3778,6 +3779,265 @@ def _trait_ids_from_names(ctx, names):
return {r[0]: r[1] for r in ctx.session.execute(sel)}
def _rp_rc_key(rp, rc):
"""Creates hashable key unique to a provider + resource class."""
return rp.uuid, rc
def _consolidate_allocation_requests(areqs):
"""Consolidates a list of AllocationRequest into one.
:param areqs: A list containing one AllocationRequest for each input
RequestGroup. This may mean that multiple resource_requests
contain resource amounts of the same class from the same provider.
:return: A single consolidated AllocationRequest, containing no
resource_requests with duplicated (resource_provider,
resource_class).
"""
# Construct a dict, keyed by resource provider UUID + resource class, of
# AllocationRequestResource, consolidating as we go.
arrs_by_rp_rc = {}
# areqs must have at least one element. Save the anchor to populate the
# returned AllocationRequest.
anchor_rp_uuid = areqs[0].anchor_root_provider_uuid
for areq in areqs:
# Sanity check: the anchor should be the same for every areq
if anchor_rp_uuid != areq.anchor_root_provider_uuid:
# This should never happen. If it does, it's a dev bug.
raise ValueError(
_("Expected every AllocationRequest in `deflate` to have the "
"same anchor!"))
for arr in areq.resource_requests:
key = _rp_rc_key(arr.resource_provider, arr.resource_class)
if key not in arrs_by_rp_rc:
arrs_by_rp_rc[key] = copy.deepcopy(arr)
else:
arrs_by_rp_rc[key].amount += arr.amount
return AllocationRequest(
resource_requests=list(arrs_by_rp_rc.values()),
anchor_root_provider_uuid=anchor_rp_uuid)
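As a standalone sketch of the consolidation step, using plain tuples in place
of AllocationRequestResource objects (provider names and amounts here are
hypothetical):

def consolidate(areqs):
    # Each areq is a list of (rp_uuid, resource_class, amount) triples.
    merged = {}
    for areq in areqs:
        for rp_uuid, rc, amount in areq:
            # Same provider + resource class across groups: fold the
            # amounts together, as _consolidate_allocation_requests does.
            merged[(rp_uuid, rc)] = merged.get((rp_uuid, rc), 0) + amount
    return [(rp, rc, amt) for (rp, rc), amt in merged.items()]

# Two groups each ask provider cn1 for one VCPU; merged, cn1 is asked for two.
print(consolidate([[('cn1', 'VCPU', 1)], [('cn1', 'VCPU', 1)]]))
# [('cn1', 'VCPU', 2)]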
def _satisfies_group_policy(areqs, group_policy, num_granular_groups):
"""Applies group_policy to a list of AllocationRequest.
Returns True or False, indicating whether this list of
AllocationRequest satisfies group_policy, as follows:
* "isolate": Each AllocationRequest with use_same_provider=True
is satisfied by a single resource provider. If the "isolate"
policy is in effect, each such AllocationRequest must be
satisfied by a *unique* resource provider.
* "none" or None: Always returns True.
:param areqs: A list containing one AllocationRequest for each input
RequestGroup.
:param group_policy: String indicating how RequestGroups should interact
with each other. If the value is "isolate", we will return False
if AllocationRequests that came from RequestGroups keyed by
nonempty suffixes are satisfied by the same provider.
:param num_granular_groups: The number of granular (use_same_provider=True)
RequestGroups in the request.
:return: True if areqs satisfies group_policy; False otherwise.
"""
if group_policy != 'isolate':
# group_policy="none" means no filtering
return True
# The number of unique resource providers referenced in the request groups
# having use_same_provider=True must be equal to the number of granular
# groups.
return num_granular_groups == len(set(
# We can reliably use the first resource_request's provider: all the
# resource_requests are satisfied by the same provider by definition
# because use_same_provider is True.
areq.resource_requests[0].resource_provider.uuid
for areq in areqs
if areq.use_same_provider))
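A minimal sketch of the "isolate" check, reducing each granular group to just
the UUID of the provider that satisfied it (provider names hypothetical):

def satisfies_isolate(granular_providers):
    # Under "isolate", N granular groups must land on N distinct
    # providers; a duplicate means two groups share a provider.
    return len(set(granular_providers)) == len(granular_providers)

print(satisfies_isolate(['cn1', 'cn2']))  # True: distinct providers
print(satisfies_isolate(['cn1', 'cn1']))  # False: shared provider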
def _exceeds_capacity(areq, psum_res_by_rp_rc):
"""Checks a (consolidated) AllocationRequest against the provider summaries
to ensure that it does not exceed capacity.
Exceeding capacity can mean the total amount (already used plus this
allocation) exceeds the total inventory amount; or this allocation exceeds
the max_unit in the inventory record.
:param areq: An AllocationRequest produced by the
`_consolidate_allocation_requests` method.
:param psum_res_by_rp_rc: A dict, keyed by provider + resource class via
_rp_rc_key, of ProviderSummaryResource.
:return: True if areq exceeds capacity; False otherwise.
"""
for arr in areq.resource_requests:
key = _rp_rc_key(arr.resource_provider, arr.resource_class)
psum_res = psum_res_by_rp_rc[key]
if psum_res.used + arr.amount > psum_res.capacity:
return True
if arr.amount > psum_res.max_unit:
return True
return False
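The arithmetic behind this filter is simple; a sketch with made-up numbers:

def exceeds(amount, used, capacity, max_unit):
    # Over capacity if total consumption would overflow the inventory,
    # or if this single allocation is larger than max_unit.
    return used + amount > capacity or amount > max_unit

print(exceeds(4, used=6, capacity=8, max_unit=4))  # True: 6 + 4 > 8
print(exceeds(2, used=0, capacity=8, max_unit=1))  # True: 2 > max_unit
print(exceeds(2, used=6, capacity=8, max_unit=4))  # False: fits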
def _merge_candidates(candidates, group_policy=None):
"""Given a dict, keyed by RequestGroup suffix, of tuples of
(allocation_requests, provider_summaries), produce a single tuple of
(allocation_requests, provider_summaries) that appropriately incorporates
the elements from each.
Each (alloc_reqs, prov_sums) in `candidates` satisfies one RequestGroup.
This method creates a list of alloc_reqs, *each* of which satisfies *all*
of the RequestGroups.
For that merged list of alloc_reqs, a corresponding provider_summaries is
produced.
:param candidates: A dict, keyed by integer suffix or '', of tuples of
(allocation_requests, provider_summaries) to be merged.
:param group_policy: String indicating how RequestGroups should interact
with each other. If the value is "isolate", we will filter out
candidates where AllocationRequests that came from RequestGroups
keyed by nonempty suffixes are satisfied by the same provider.
:return: A tuple of (allocation_requests, provider_summaries).
"""
# Build a dict, keyed by anchor root provider UUID, of dicts, keyed by
# suffix, of nonempty lists of AllocationRequest. Each inner dict must
# possess all of the suffix keys to be viable (i.e. contains at least
# one AllocationRequest per RequestGroup).
#
# areq_lists_by_anchor =
# { anchor_root_provider_uuid: {
# '': [AllocationRequest, ...], \ This dict must contain
# '1': [AllocationRequest, ...], \ exactly one nonempty list per
# ... / suffix to be viable. That
# '42': [AllocationRequest, ...], / filtering is done later.
# },
# ...
# }
areq_lists_by_anchor = collections.defaultdict(
lambda: collections.defaultdict(list))
# Save off all the provider summaries lists - we'll use 'em later.
all_psums = []
# Construct a dict, keyed by resource provider + resource class, of
# ProviderSummaryResource. This will be used to do a final capacity
# check/filter on each merged AllocationRequest.
psum_res_by_rp_rc = {}
for suffix, (areqs, psums) in candidates.items():
for areq in areqs:
anchor = areq.anchor_root_provider_uuid
areq_lists_by_anchor[anchor][suffix].append(areq)
for psum in psums:
all_psums.append(psum)
for psum_res in psum.resources:
key = _rp_rc_key(
psum.resource_provider, psum_res.resource_class)
psum_res_by_rp_rc[key] = psum_res
# Create all combinations picking one AllocationRequest from each list
# for each anchor.
areqs = []
all_suffixes = set(candidates)
num_granular_groups = len(all_suffixes - set(['']))
for areq_lists_by_suffix in areq_lists_by_anchor.values():
# Filter out any entries that don't have allocation requests for
# *all* suffixes (i.e. all RequestGroups)
if set(areq_lists_by_suffix) != all_suffixes:
continue
# We're using itertools.product to go from this:
# areq_lists_by_suffix = {
# '': [areq__A, areq__B, ...],
# '1': [areq_1_A, areq_1_B, ...],
# ...
# '42': [areq_42_A, areq_42_B, ...],
# }
# to this:
# [ [areq__A, areq_1_A, ..., areq_42_A], Each of these lists is one
# [areq__A, areq_1_A, ..., areq_42_B], areq_list in the loop below.
# [areq__A, areq_1_B, ..., areq_42_A], Each areq_list contains one
# [areq__A, areq_1_B, ..., areq_42_B], AllocationRequest from each
# [areq__B, areq_1_A, ..., areq_42_A], RequestGroup. So taken as a
# [areq__B, areq_1_A, ..., areq_42_B], whole, each list is a viable
# [areq__B, areq_1_B, ..., areq_42_A], (preliminary) candidate to
# [areq__B, areq_1_B, ..., areq_42_B], return.
# ...,
# ]
for areq_list in itertools.product(
*list(areq_lists_by_suffix.values())):
# At this point, each AllocationRequest in areq_list is still
# marked as use_same_provider. This is necessary to filter by group
# policy, which enforces how these interact with each other.
if not _satisfies_group_policy(
areq_list, group_policy, num_granular_groups):
continue
# Now we go from this (where 'arr' is AllocationRequestResource):
# [ areq__B(arrX, arrY, arrZ),
# areq_1_A(arrM, arrN),
# ...,
# areq_42_B(arrQ)
# ]
# to this:
# areq_combined(arrX, arrY, arrZ, arrM, arrN, arrQ)
# Note that this discards the information telling us which
# RequestGroup led to which piece of the final AllocationRequest.
# We needed that to be present for the previous filter; we need it
# to be *absent* for the next one (and for the final output).
areq = _consolidate_allocation_requests(areq_list)
# Since we sourced this AllocationRequest from multiple
# *independent* queries, it's possible that the combined result
# now exceeds capacity where amounts of the same RP+RC were
# folded together. So do a final capacity check/filter.
if _exceeds_capacity(areq, psum_res_by_rp_rc):
continue
areqs.append(areq)
# It's possible we've filtered out everything. If so, short out.
if not areqs:
return [], []
# Now we have to produce provider summaries. The provider summaries in
# the candidates input contain all the information; we just need to
# filter it down to only the providers and resource classes* in our
# merged list of allocation requests.
# *With blueprint placement-return-all-resources, all resource classes
# should be included, so that condition will need to be removed either
# here or there, depending which lands first.
# To make this easier, first index all our allocation requests as a
# dict, keyed by resource provider UUID, of sets of resource class
# names.
rcs_by_rp = collections.defaultdict(set)
for areq in areqs:
for arr in areq.resource_requests:
rcs_by_rp[arr.resource_provider.uuid].add(arr.resource_class)
# Now walk the input candidates' provider summaries, building a dict,
# keyed by resource provider UUID, of ProviderSummary representing
# that provider, and including any of its resource classes found in the
# index we built from our allocation requests above*.
# *See above.
psums_by_rp = {}
for psum in all_psums:
rp_uuid = psum.resource_provider.uuid
# If everything from this provider was filtered out, don't add an
# (empty) entry for it.
if rp_uuid not in rcs_by_rp:
continue
if rp_uuid not in psums_by_rp:
psums_by_rp[rp_uuid] = ProviderSummary(
resource_provider=psum.resource_provider, resources=[],
# Should always be the same; no need to check/update below.
traits=psum.traits)
# NOTE(efried): To subsume blueprint placement-return-all-resources
# replace this loop with:
# psums_by_rp[rp_uuid].resources = psum.resources
resources = set(psums_by_rp[rp_uuid].resources)
for psumres in psum.resources:
if psumres.resource_class in rcs_by_rp[rp_uuid]:
resources.add(psumres)
psums_by_rp[rp_uuid].resources = list(resources)
return areqs, list(psums_by_rp.values())
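The combinatorial expansion at the heart of the merge can be seen in
isolation: itertools.product over the per-suffix candidate lists yields every
way of picking one candidate per group for a given anchor (a sketch with two
groups of two hypothetical candidates each):

import itertools

areq_lists_by_suffix = {
    '': ['areq__A', 'areq__B'],
    '1': ['areq_1_A', 'areq_1_B'],
}
# Every way of choosing one candidate from each group:
for areq_list in itertools.product(*areq_lists_by_suffix.values()):
    print(areq_list)
# ('areq__A', 'areq_1_A'), ('areq__A', 'areq_1_B'),
# ('areq__B', 'areq_1_A'), ('areq__B', 'areq_1_B')

Each such tuple is then group-policy-checked, consolidated, and
capacity-checked as described above.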
@base.VersionedObjectRegistry.register_if(False)
class AllocationCandidates(base.VersionedObject):
"""The AllocationCandidates object is a collection of possible allocations
@@ -3797,7 +4057,7 @@ class AllocationCandidates(base.VersionedObject):
}
@classmethod
def get_by_requests(cls, context, requests, limit=None):
def get_by_requests(cls, context, requests, limit=None, group_policy=None):
"""Returns an AllocationCandidates object containing all resource
providers matching a set of supplied resource constraints, with a set
of allocation requests constructed from that list of resource
@@ -3805,7 +4065,9 @@ class AllocationCandidates(base.VersionedObject):
(default is False) then the order of the allocation requests will
be randomized.
:param requests: List of nova.api.openstack.placement.util.RequestGroup
:param context: Nova RequestContext.
:param requests: Dict, keyed by suffix, of
nova.api.openstack.placement.util.RequestGroup
:param limit: An integer, N, representing the maximum number of
allocation candidates to return. If
CONF.placement.randomize_allocation_candidates is True
@@ -3814,12 +4076,19 @@ class AllocationCandidates(base.VersionedObject):
order the database picked them, will be returned. In
either case if there are fewer than N total results,
all the results will be returned.
:param group_policy: String indicating how RequestGroups with
use_same_provider=True should interact with each
other. If the value is "isolate", we will filter
out allocation requests where any such
RequestGroups are satisfied by the same RP.
:return: An instance of AllocationCandidates with allocation_requests
and provider_summaries satisfying `requests`, limited
according to `limit`.
"""
_ensure_rc_cache(context)
_ensure_trait_sync(context)
alloc_reqs, provider_summaries = cls._get_by_requests(context,
requests,
limit)
alloc_reqs, provider_summaries = cls._get_by_requests(
context, requests, limit=limit, group_policy=group_policy)
return cls(
context,
allocation_requests=alloc_reqs,
@@ -3827,36 +4096,29 @@ class AllocationCandidates(base.VersionedObject):
)
@staticmethod
# TODO(efried): This is only a writer context because it accesses the
# resource_providers table via ResourceProvider.get_by_uuid, which does
# data migration to populate the root_provider_uuid. Change this back to a
# reader when that migration is no longer happening.
@db_api.api_context_manager.writer
def _get_by_requests(context, requests, limit=None):
# We first get the list of "root providers" that either have the
# requested resources or are associated with the providers that
# share one or more of the requested resource(s)
# TODO(efried): Handle non-sharing groups.
# For now, this extracts just the sharing group's resources & traits.
sharing_groups = [request_group for request_group in requests
if not request_group.use_same_provider]
if len(sharing_groups) != 1 or not sharing_groups[0].resources:
raise ValueError(_("The requests parameter must contain one "
"RequestGroup with use_same_provider=False and "
"nonempty resources."))
def _get_by_one_request(context, request):
"""Get allocation candidates for one RequestGroup.
Must be called from within an api_context_manager.reader (or writer)
context.
:param context: Nova RequestContext.
:param request: One nova.api.openstack.placement.util.RequestGroup
:return: A tuple of (allocation_requests, provider_summaries)
satisfying `request`.
"""
# Transform resource string names to internal integer IDs
resources = {
_RC_CACHE.id_from_string(key): value
for key, value in sharing_groups[0].resources.items()
for key, value in request.resources.items()
}
# maps the trait name to the trait internal ID
required_trait_map = {}
forbidden_trait_map = {}
for trait_map, traits in (
(required_trait_map, sharing_groups[0].required_traits),
(forbidden_trait_map, sharing_groups[0].forbidden_traits)):
(required_trait_map, request.required_traits),
(forbidden_trait_map, request.forbidden_traits)):
if traits:
trait_map.update(_trait_ids_from_names(context, traits))
# Double-check that we found a trait ID for each requested name
@@ -3866,66 +4128,100 @@ class AllocationCandidates(base.VersionedObject):
# Microversions prior to 1.21 will not have 'member_of' in the groups.
# This allows earlier microversions to continue to work.
member_of = ""
if hasattr(sharing_groups[0], "member_of"):
member_of = sharing_groups[0].member_of
member_of = getattr(request, "member_of", "")
# Contains a set of resource provider IDs that share some inventory for
# each resource class requested. We do this here as an optimization. If
# we have no sharing providers, the SQL to find matching providers for
# the requested resources is much simpler.
# TODO(jaypipes): Consider caching this for some amount of time since
# sharing providers generally don't change often and here we aren't
# concerned with how *much* inventory/capacity the sharing provider
# has, only that it is sharing *some* inventory of a particular
# resource class.
sharing_providers = {
rc_id: _get_providers_with_shared_capacity(context, rc_id, amount)
for rc_id, amount in resources.items()
}
have_sharing = any(sharing_providers.values())
if not have_sharing:
# We know there's no sharing providers, so we can more efficiently
# get a list of resource provider IDs that have ALL the requested
# resources and more efficiently construct the allocation requests
# NOTE(jaypipes): When we start handling nested providers, we may
# add new code paths or modify this code path to return root
# provider IDs of provider trees instead of the resource provider
# IDs.
rp_ids = _get_provider_ids_matching(context, resources,
required_trait_map,
forbidden_trait_map,
member_of)
alloc_request_objs, summary_objs = (
_alloc_candidates_single_provider(context, resources, rp_ids))
else:
if required_trait_map:
# TODO(cdent): Now that there is also a forbidden_trait_map
# it should be possible to further optimize this attempt at
# a quick return, but we leave that to future patches for now.
trait_rps = _get_provider_ids_having_any_trait(
context, required_trait_map)
if not trait_rps:
# If there aren't any providers that have any of the
# required traits, just exit early...
return [], []
if not request.use_same_provider:
# TODO(jaypipes): The check/callout to handle trees goes here.
# Build a dict, keyed by resource class internal ID, of lists of
# internal IDs of resource providers that share some inventory for
# each resource class requested.
# TODO(jaypipes): Consider caching this for some amount of time
# since sharing providers generally don't change often and here we
# aren't concerned with how *much* inventory/capacity the sharing
# provider has, only that it is sharing *some* inventory of a
# particular resource class.
sharing_providers = {
rc_id: _get_providers_with_shared_capacity(context, rc_id,
amount)
for rc_id, amount in resources.items()
}
# We check this here as an optimization: if we have no sharing
# providers, we fall through to the (simpler, more efficient)
# algorithm below.
if any(sharing_providers.values()):
# Okay, we have to do it the hard way: the request may be
# satisfied by one or more sharing providers as well as (maybe)
# the non-sharing provider.
if required_trait_map:
# TODO(cdent): Now that there is also a forbidden_trait_map
# it should be possible to further optimize this attempt at
# a quick return, but we leave that to future patches for
# now.
trait_rps = _get_provider_ids_having_any_trait(
context, required_trait_map)
if not trait_rps:
# If there aren't any providers that have any of the
# required traits, just exit early...
return [], []
# rp_ids contains a list of resource provider IDs that EITHER have
# all the requested resources themselves OR have some resources
# and are related to a provider that is sharing some resources
# with it. In other words, this is the list of resource provider
# IDs that are NOT sharing resources.
rps = _get_all_with_shared(context, resources, member_of)
rp_ids = set([r[0] for r in rps])
alloc_request_objs, summary_objs = _alloc_candidates_with_shared(
context, resources, required_trait_map, forbidden_trait_map,
rp_ids, sharing_providers)
# rp_ids contains a list of resource provider IDs that EITHER
# have all the requested resources themselves OR have some
# resources and are related to a provider that is sharing some
# resources with it. In other words, this is the list of
# resource provider IDs that are NOT sharing resources.
rps = _get_all_with_shared(context, resources, member_of)
rp_ids = set([r[0] for r in rps])
return _alloc_candidates_with_shared(
context, resources, required_trait_map,
forbidden_trait_map, rp_ids, sharing_providers)
# Either we are processing a single-RP request group, or there are no
# sharing providers that (help) satisfy the request. Get a list of
# resource provider IDs that have ALL the requested resources and more
# efficiently construct the allocation requests.
# NOTE(jaypipes): When we start handling nested providers, we may
# add new code paths or modify this code path to return root
# provider IDs of provider trees instead of the resource provider
# IDs.
rp_ids = _get_provider_ids_matching(context, resources,
required_trait_map,
forbidden_trait_map, member_of)
return _alloc_candidates_single_provider(context, resources, rp_ids)
@classmethod
# TODO(efried): This is only a writer context because it accesses the
# resource_providers table via ResourceProvider.get_by_uuid, which does
# data migration to populate the root_provider_uuid. Change this back to a
# reader when that migration is no longer happening.
@db_api.api_context_manager.writer
def _get_by_requests(cls, context, requests, limit=None,
group_policy=None):
candidates = {}
for suffix, request in requests.items():
alloc_reqs, summaries = cls._get_by_one_request(context, request)
if not alloc_reqs:
# Shortcut: If any one request resulted in no candidates, the
# whole operation is shot.
return [], []
# Mark each allocation request according to whether its
# corresponding RequestGroup required it to be restricted to a
# single provider. We'll need this later to evaluate group_policy.
for areq in alloc_reqs:
areq.use_same_provider = request.use_same_provider
candidates[suffix] = alloc_reqs, summaries
# At this point, each (alloc_requests, summary_obj) in `candidates` is
# independent of the others. We need to fold them together such that
# each allocation request satisfies *all* the incoming `requests`. The
# `candidates` dict is guaranteed to contain entries for all suffixes,
# or we would have short-circuited above.
alloc_request_objs, summary_objs = _merge_candidates(
candidates, group_policy=group_policy)
# Limit the number of allocation request objects. We do this after
# creating all of them so that we can do a random slice without
# needing to mess with the complex sql above or add additional
# columns to the DB.
if limit and limit <= len(alloc_request_objs):
if CONF.placement.randomize_allocation_candidates:
alloc_request_objs = random.sample(alloc_request_objs, limit)

View File

@@ -292,3 +292,34 @@ the resource providers that are associated with BOTH agg1 and agg2. Issuing a
request for ``GET /resource_providers?member_of=in:agg1,agg2&member_of=agg3``
means get the resource providers that are associated with agg3 and are also
associated with *any of* (agg1, agg2).
1.25 Granular resource requests to ``GET /allocation_candidates``
-----------------------------------------------------------------
``GET /allocation_candidates`` is enhanced to accept numbered groupings of
resource, required/forbidden trait, and aggregate association requests. A
``resources`` query parameter key with a positive integer suffix (e.g.
``resources42``) will be logically associated with ``required`` and/or
``member_of`` query parameter keys with the same suffix (e.g. ``required42``,
``member_of42``). The resources, required/forbidden traits, and aggregate
associations in that group will be satisfied by the same resource provider in
the response. When more than one numbered grouping is supplied, the
``group_policy`` query parameter is required to indicate how the groups should
interact. With ``group_policy=none``, separate groupings - numbered or
unnumbered - may or may not be satisfied by the same provider. With
``group_policy=isolate``, numbered groups are guaranteed to be satisfied by
*different* providers - though there may still be overlap with the unnumbered
group. In all cases, each ``allocation_request`` will be satisfied by providers
in a single non-sharing provider tree and/or sharing providers associated via
aggregate with any of the providers in that tree.
The ``required`` and ``member_of`` query parameters for a given group are
optional. That is, you may specify ``resources42=XXX`` without a corresponding
``required42=YYY`` or ``member_of42=ZZZ``. However, the reverse (specifying
``required42=YYY`` or ``member_of42=ZZZ`` without ``resources42=XXX``) will
result in an error.
The semantics of the (unnumbered) ``resources``, ``required``, and ``member_of``
query parameters are unchanged: the resources, traits, and aggregate
associations specified thereby may be satisfied by any provider in the same
non-sharing tree or associated via the specified aggregate(s).
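For example, against a hypothetical deployment where a compute node is
associated with two SRIOV-capable physical function providers, the request::

  GET /allocation_candidates?resources1=SRIOV_NET_VF:1
                            &resources2=SRIOV_NET_VF:1&group_policy=isolate

guarantees that the two VFs come from two *different* providers, whereas the
same request with ``group_policy=none`` may also return candidates in which a
single physical function supplies both VFs.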

View File

@@ -52,3 +52,27 @@ GET_SCHEMA_1_21 = copy.deepcopy(GET_SCHEMA_1_17)
GET_SCHEMA_1_21['properties']['member_of'] = {
"type": ["string"]
}
GET_SCHEMA_1_25 = copy.deepcopy(GET_SCHEMA_1_21)
# We're going to *replace* 'resources', 'required', and 'member_of'.
del GET_SCHEMA_1_25["properties"]["resources"]
del GET_SCHEMA_1_25["required"]
del GET_SCHEMA_1_25["properties"]["required"]
del GET_SCHEMA_1_25["properties"]["member_of"]
# Pattern property key format for a numbered or un-numbered grouping
_GROUP_PAT_FMT = "^%s([1-9][0-9]*)?$"
GET_SCHEMA_1_25["patternProperties"] = {
_GROUP_PAT_FMT % "resources": {
"type": "string",
},
_GROUP_PAT_FMT % "required": {
"type": "string",
},
_GROUP_PAT_FMT % "member_of": {
"type": "string",
},
}
GET_SCHEMA_1_25["properties"]["group_policy"] = {
"type": "string",
"enum": ["none", "isolate"],
}
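A quick standalone sketch of which keys the pattern admits (checked with
Python's re module; not part of the schema code itself):

import re

# Same shape as _GROUP_PAT_FMT % "resources": an optional positive
# integer suffix with no leading zero.
pattern = re.compile("^resources([1-9][0-9]*)?$")
for key in ("resources", "resources1", "resources42",
            "resources0", "resources01", "resource"):
    print(key, bool(pattern.match(key)))
# resources, resources1, and resources42 match; the others do not.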

View File

@@ -426,7 +426,8 @@ def parse_qs_request_groups(req):
are only processed if ``allow_forbidden`` is True. This allows the
caller to control processing based on microversion handling.
The return is a list of these RequestGroup instances.
The return is a dict, keyed by the numeric suffix of these RequestGroup
instances (or the empty string for the unnumbered group).
As an example, if qsdict represents the query string:
@@ -440,42 +441,43 @@ def parse_qs_request_groups(req):
...the return value will be:
[ RequestGroup(
use_same_provider=False,
resources={
"VCPU": 2,
"MEMORY_MB": 1024,
"DISK_GB" 50,
},
required_traits=[
"HW_CPU_X86_VMX",
"CUSTOM_STORAGE_RAID",
],
member_of=[
9323b2b1-82c9-4e91-bdff-e95e808ef954,
8592a199-7d73-4465-8df6-ab00a6243c82,
],
),
RequestGroup(
use_same_provider=True,
resources={
"SRIOV_NET_VF": 2,
},
required_traits=[
"CUSTOM_PHYSNET_PUBLIC",
"CUSTOM_SWITCH_A",
],
),
RequestGroup(
use_same_provider=True,
resources={
"SRIOV_NET_VF": 1,
},
forbidden_traits=[
"CUSTOM_PHYSNET_PUBLIC",
],
),
]
{ '': RequestGroup(
use_same_provider=False,
resources={
"VCPU": 2,
"MEMORY_MB": 1024,
"DISK_GB" 50,
},
required_traits=[
"HW_CPU_X86_VMX",
"CUSTOM_STORAGE_RAID",
],
member_of=[
[9323b2b1-82c9-4e91-bdff-e95e808ef954],
[8592a199-7d73-4465-8df6-ab00a6243c82,
ddbd9226-d6a6-475e-a85f-0609914dd058],
],
),
'1': RequestGroup(
use_same_provider=True,
resources={
"SRIOV_NET_VF": 2,
},
required_traits=[
"CUSTOM_PHYSNET_PUBLIC",
"CUSTOM_SWITCH_A",
],
),
'2': RequestGroup(
use_same_provider=True,
resources={
"SRIOV_NET_VF": 1,
},
forbidden_traits=[
"CUSTOM_PHYSNET_PUBLIC",
],
),
}
:param req: webob.Request object
:return: A dict, keyed by suffix, of RequestGroup instances.
@@ -533,8 +535,18 @@ def parse_qs_request_groups(req):
if orphans:
msg = _('All member_of parameters must be associated with '
'resources. Found the following orphaned member_of '
' values: %s')
'keys: %s')
raise webob.exc.HTTPBadRequest(msg % ', '.join(orphans))
# All request groups must have resources (which is almost, but not quite,
# verified by the orphan checks above).
if not all(grp.resources for grp in by_suffix.values()):
msg = _("All request groups must specify resources.")
raise webob.exc.HTTPBadRequest(msg)
# The above would still pass if there were no request groups
if not by_suffix:
msg = _("At least one request group (`resources` or `resources{N}`) "
"is required.")
raise webob.exc.HTTPBadRequest(msg)
# Make adjustments for forbidden traits by stripping forbidden out
# of required.
@@ -555,6 +567,4 @@ def parse_qs_request_groups(req):
'following traits keys: %s')
raise webob.exc.HTTPBadRequest(msg % ', '.join(conflicting_traits))
# NOTE(efried): The sorting is not necessary for the API, but it makes
# testing easier.
return [by_suffix[suff] for suff in sorted(by_suffix)]
return by_suffix

View File

@@ -264,9 +264,9 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
def _get_allocation_candidates(self, requests=None, limit=None):
if requests is None:
requests = [placement_lib.RequestGroup(
requests = {'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources)]
resources=self.requested_resources)}
return rp_obj.AllocationCandidates.get_by_requests(self.ctx, requests,
limit)
@@ -363,18 +363,11 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
self.assertEqual(expected, observed)
def test_no_resources_in_first_request_group(self):
requests = [placement_lib.RequestGroup(use_same_provider=False,
resources={})]
self.assertRaises(ValueError,
rp_obj.AllocationCandidates.get_by_requests,
self.ctx, requests)
def test_unknown_traits(self):
missing = set(['UNKNOWN_TRAIT'])
requests = [placement_lib.RequestGroup(
requests = {'': placement_lib.RequestGroup(
use_same_provider=False, resources=self.requested_resources,
required_traits=missing)]
required_traits=missing)}
self.assertRaises(exception.TraitNotFound,
rp_obj.AllocationCandidates.get_by_requests,
self.ctx, requests)
@@ -389,13 +382,13 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048)
tb.add_inventory(cn1, fields.ResourceClass.DISK_GB, 2000)
alloc_cands = self._get_allocation_candidates([
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
fields.ResourceClass.VCPU: 1
}
)]
)}
)
expected = [
@@ -476,12 +469,12 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# Now let's add traits into the mix. Currently, none of the compute
# nodes has the AVX2 trait associated with it, so we should get 0
# results if we required AVX2
alloc_cands = self._get_allocation_candidates([
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2])
)],
)},
)
self._validate_allocation_requests([], alloc_cands)
@@ -489,12 +482,12 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# get back just that compute node in the provider summaries
tb.set_traits(cn2, 'HW_CPU_X86_AVX2')
alloc_cands = self._get_allocation_candidates([
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2])
)],
)},
)
# Only cn2 should be in our allocation requests now since that's the
# only one with the required trait
@@ -522,12 +515,12 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
self._validate_provider_summary_traits(expected, alloc_cands)
# Confirm that forbidden traits changes the results to get cn1.
alloc_cands = self._get_allocation_candidates([
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
forbidden_traits=set([os_traits.HW_CPU_X86_AVX2])
)],
)},
)
expected = [
[('cn1', fields.ResourceClass.VCPU, 1),
@@ -663,12 +656,12 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# #1705071, this resulted in a KeyError
alloc_cands = self._get_allocation_candidates(
requests=[placement_lib.RequestGroup(
requests={'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'DISK_GB': 10,
}
)]
)}
)
# We should only have provider summary information for the sharing
@@ -693,12 +686,12 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# Now we're going to add a set of required traits into the request mix.
# To start off, let's request a required trait that we know has not
# been associated yet with any provider, and ensure we get no results
alloc_cands = self._get_allocation_candidates([
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
)]
)}
)
# We have not yet associated the AVX2 trait to any provider, so we
@@ -713,12 +706,12 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
cn1.set_traits([avx2_t])
cn2.set_traits([avx2_t])
alloc_cands = self._get_allocation_candidates([
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
)]
)}
)
# There should be 2 compute node providers and 1 shared storage
@@ -749,12 +742,12 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
self._validate_provider_summary_traits(expected, alloc_cands)
# Forbid the AVX2 trait
alloc_cands = self._get_allocation_candidates([
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
forbidden_traits=set([os_traits.HW_CPU_X86_AVX2]),
)]
)}
)
# Should be no results as both cn1 and cn2 have the trait.
expected = []
@@ -763,13 +756,13 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# Require the AVX2 trait but forbid CUSTOM_EXTRA_FASTER, which is
# added to cn2
tb.set_traits(cn2, 'CUSTOM_EXTRA_FASTER')
alloc_cands = self._get_allocation_candidates([
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
forbidden_traits=set(['CUSTOM_EXTRA_FASTER']),
)]
)}
)
expected = [
[('cn1', fields.ResourceClass.VCPU, 1),
@@ -782,13 +775,13 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# This should result in getting only cn1.
tb.add_inventory(cn1, fields.ResourceClass.DISK_GB, 2048,
allocation_ratio=1.5)
alloc_cands = self._get_allocation_candidates([
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
forbidden_traits=set(['MISC_SHARES_VIA_AGGREGATE']),
)]
)}
)
expected = [
[('cn1', fields.ResourceClass.VCPU, 1),
@@ -843,8 +836,8 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
}
alloc_cands = self._get_allocation_candidates(
requests=[placement_lib.RequestGroup(
use_same_provider=False, resources=requested_resources)])
requests={'': placement_lib.RequestGroup(
use_same_provider=False, resources=requested_resources)})
# Verify the allocation requests that are returned. There should be 2
# allocation requests, one for each compute node, containing 3
@@ -946,12 +939,12 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# Now we're going to add a set of required traits into the request mix.
# To start off, let's request a required trait that we know has not
# been associated yet with any provider, and ensure we get no results
alloc_cands = self._get_allocation_candidates([
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
)]
)}
)
# We have not yet associated the AVX2 trait to any provider, so we
@@ -967,12 +960,12 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
for cn in (cn1, cn2, cn3):
tb.set_traits(cn, os_traits.HW_CPU_X86_AVX2)
alloc_cands = self._get_allocation_candidates(requests=[
alloc_cands = self._get_allocation_candidates(requests={'':
placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([os_traits.HW_CPU_X86_AVX2]),
)]
)}
)
# There should be 3 compute node providers and 1 shared storage
@@ -1015,14 +1008,15 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
tb.set_traits(cn3, os_traits.HW_CPU_X86_AVX2,
os_traits.STORAGE_DISK_SSD)
alloc_cands = self._get_allocation_candidates([
alloc_cands = self._get_allocation_candidates(
{'':
placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set([
os_traits.HW_CPU_X86_AVX2, os_traits.STORAGE_DISK_SSD
]),
)]
)}
)
# There should be only cn3 in the returned allocation candidates
@@ -1103,13 +1097,13 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# The shared storage's disk is RAID
tb.set_traits(ss, 'MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_RAID')
alloc_cands = self._get_allocation_candidates([
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources=self.requested_resources,
required_traits=set(['HW_CPU_X86_SSE', 'STORAGE_DISK_SSD',
'CUSTOM_RAID'])
)]
)}
)
# TODO(efried): Bug #1724633: we'd *like* to get no candidates, because
@@ -1128,7 +1122,10 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
'cn': set([
(fields.ResourceClass.VCPU, 24, 0),
(fields.ResourceClass.MEMORY_MB, 2048, 0),
(fields.ResourceClass.DISK_GB, 1600, 0),
# NOTE(efried): We don't (yet) populate provider summaries with
# provider resources that aren't part of the result. With
# blueprint placement-return-all-resources, uncomment this line:
# (fields.ResourceClass.DISK_GB, 1600, 0),
]),
'ss': set([
(fields.ResourceClass.DISK_GB, 1600, 0),
@@ -1143,15 +1140,15 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
tb.add_inventory(ss1, fields.ResourceClass.SRIOV_NET_VF, 16)
tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600)
alloc_cands = self._get_allocation_candidates([
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'IPV4_ADDRESS': 2,
'SRIOV_NET_VF': 1,
'DISK_GB': 1500,
}
)]
)}
)
expected = [
@@ -1179,14 +1176,14 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss2, fields.ResourceClass.DISK_GB, 1600)
alloc_cands = self._get_allocation_candidates([
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'IPV4_ADDRESS': 2,
'DISK_GB': 1500,
}
)]
)}
)
expected = [
@@ -1215,15 +1212,15 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss2, fields.ResourceClass.DISK_GB, 1600)
alloc_cands = self._get_allocation_candidates([
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'IPV4_ADDRESS': 2,
'SRIOV_NET_VF': 1,
'DISK_GB': 1500,
}
)]
)}
)
expected = [
@@ -1255,15 +1252,15 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss2, fields.ResourceClass.DISK_GB, 1600)
alloc_cands = self._get_allocation_candidates(requests=[
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(requests={
'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'IPV4_ADDRESS': 2,
'SRIOV_NET_VF': 1,
'DISK_GB': 1500,
}
)]
)}
)
# We expect two candidates: one that gets all the resources from ss1;
@@ -1311,14 +1308,14 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600)
alloc_cands = self._get_allocation_candidates([
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'DISK_GB': 1500,
}
)]
)}
)
expected = [
[('cn1', fields.ResourceClass.VCPU, 2),
@@ -1370,15 +1367,15 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
tb.set_traits(ss3, "MISC_SHARES_VIA_AGGREGATE")
tb.add_inventory(ss3, fields.ResourceClass.IPV4_ADDRESS, 24)
alloc_cands = self._get_allocation_candidates([
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'DISK_GB': 1500,
'IPV4_ADDRESS': 2,
}
)]
)}
)
expected = [
@@ -1613,15 +1610,15 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
tb.add_inventory(pf1, fields.ResourceClass.SRIOV_NET_VF, 8)
tb.set_traits(pf1, os_traits.HW_NIC_OFFLOAD_GENEVE)
alloc_cands = self._get_allocation_candidates([
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
fields.ResourceClass.VCPU: 2,
fields.ResourceClass.MEMORY_MB: 256,
fields.ResourceClass.SRIOV_NET_VF: 1,
}
)]
)}
)
# TODO(jaypipes): This should be the following once nested providers
@@ -1671,8 +1668,8 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# Now add required traits to the mix and verify we still get the same
# result (since we haven't yet consumed the second physical function's
# inventory of SRIOV_NET_VF.
alloc_cands = self._get_allocation_candidates([
placement_lib.RequestGroup(
alloc_cands = self._get_allocation_candidates(
{'': placement_lib.RequestGroup(
use_same_provider=False,
resources={
fields.ResourceClass.VCPU: 2,
@@ -1680,7 +1677,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
fields.ResourceClass.SRIOV_NET_VF: 1,
},
required_traits=[os_traits.HW_NIC_OFFLOAD_GENEVE],
)]
)}
)
# TODO(jaypipes): This should be the following once nested providers
@@ -1730,7 +1727,8 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# function with the required trait no longer has any inventory.
tb.allocate_from_provider(pf1, fields.ResourceClass.SRIOV_NET_VF, 8)
alloc_cands = self._get_allocation_candidates([
alloc_cands = self._get_allocation_candidates(
{'':
placement_lib.RequestGroup(
use_same_provider=False,
resources={
@@ -1739,7 +1737,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
fields.ResourceClass.SRIOV_NET_VF: 1,
},
required_traits=[os_traits.HW_NIC_OFFLOAD_GENEVE],
)]
)}
)
self._validate_allocation_requests([], alloc_cands)

View File

@@ -17,11 +17,13 @@ from oslo_middleware import cors
from oslo_utils import uuidutils
from nova.api.openstack.placement import deploy
from nova.api.openstack.placement import exception
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova import conf
from nova import config
from nova import context
from nova.tests import fixtures
from nova.tests import uuidsentinel as uuids
CONF = conf.CONF
@@ -390,3 +392,125 @@ class CORSFixture(APIFixture):
# wants to load the CORS middleware, it will not.
self.conf.set_override('allowed_origin', 'http://valid.example.com',
group='cors')
# TODO(efried): Common with test_allocation_candidates
def _add_inventory(rp, rc, total, **kwargs):
kwargs.setdefault('max_unit', total)
inv = rp_obj.Inventory(rp._context, resource_provider=rp,
resource_class=rc, total=total, **kwargs)
inv.obj_set_defaults()
rp.add_inventory(inv)
# TODO(efried): Common with test_allocation_candidates
def _set_traits(rp, *traits):
tlist = []
for tname in traits:
try:
trait = rp_obj.Trait.get_by_name(rp._context, tname)
except exception.TraitNotFound:
trait = rp_obj.Trait(rp._context, name=tname)
trait.create()
tlist.append(trait)
rp.set_traits(rp_obj.TraitList(objects=tlist))
class GranularFixture(APIFixture):
"""An APIFixture that sets up the following provider environment for
testing granular resource requests.
+========================++========================++========================+
|cn_left ||cn_middle ||cn_right |
|VCPU: 8 ||VCPU: 8 ||VCPU: 8 |
|MEMORY_MB: 4096 ||MEMORY_MB: 4096 ||MEMORY_MB: 4096 |
|DISK_GB: 500 ||SRIOV_NET_VF: 8 ||DISK_GB: 500 |
|VGPU: 8 ||CUSTOM_NET_MBPS: 4000 ||VGPU: 8 |
|SRIOV_NET_VF: 8 ||traits: HW_CPU_X86_AVX, || - max_unit: 2 |
|CUSTOM_NET_MBPS: 4000 || HW_CPU_X86_AVX2,||traits: HW_CPU_X86_MMX, |
|traits: HW_CPU_X86_AVX, || HW_CPU_X86_SSE, || HW_GPU_API_DXVA,|
| HW_CPU_X86_AVX2,|| HW_NIC_ACCEL_TLS|| CUSTOM_DISK_SSD,|
| HW_GPU_API_DXVA,|+=+=====+================++==+========+============+
| HW_NIC_DCB_PFC, | : : : : a
| CUSTOM_FOO +..+ +--------------------+ : g
+========================+ : a : : g
: g : : C
+========================+ : g : +===============+======+
|shr_disk_1 | : A : |shr_net |
|DISK_GB: 1000 +..+ : |SRIOV_NET_VF: 16 |
|traits: CUSTOM_DISK_SSD,| : : a |CUSTOM_NET_MBPS: 40000|
| MISC_SHARES_VIA_AGG...| : : g |traits: MISC_SHARES...|
+========================+ : : g +======================+
+=======================+ : : B
|shr_disk_2 +...+ :
|DISK_GB: 1000 | :
|traits: MISC_SHARES... +.........+
+=======================+
"""
def _create_provider(self, name, *aggs, **kwargs):
# TODO(efried): Common with test_allocation_candidates.ProviderDBBase
parent = kwargs.get('parent')
rp = rp_obj.ResourceProvider(self.ctx, name=name,
uuid=getattr(uuids, name))
if parent:
rp.parent_provider_uuid = parent
rp.create()
if aggs:
rp.set_aggregates(aggs)
return rp
def start_fixture(self):
super(GranularFixture, self).start_fixture()
self.ctx = context.get_admin_context()
rp_obj.ResourceClass(context=self.ctx, name='CUSTOM_NET_MBPS').create()
os.environ['AGGA'] = uuids.aggA
os.environ['AGGB'] = uuids.aggB
os.environ['AGGC'] = uuids.aggC
cn_left = self._create_provider('cn_left', uuids.aggA)
os.environ['CN_LEFT'] = cn_left.uuid
_add_inventory(cn_left, 'VCPU', 8)
_add_inventory(cn_left, 'MEMORY_MB', 4096)
_add_inventory(cn_left, 'DISK_GB', 500)
_add_inventory(cn_left, 'VGPU', 8)
_add_inventory(cn_left, 'SRIOV_NET_VF', 8)
_add_inventory(cn_left, 'CUSTOM_NET_MBPS', 4000)
_set_traits(cn_left, 'HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2',
'HW_GPU_API_DXVA', 'HW_NIC_DCB_PFC', 'CUSTOM_FOO')
cn_middle = self._create_provider('cn_middle', uuids.aggA, uuids.aggB)
os.environ['CN_MIDDLE'] = cn_middle.uuid
_add_inventory(cn_middle, 'VCPU', 8)
_add_inventory(cn_middle, 'MEMORY_MB', 4096)
_add_inventory(cn_middle, 'SRIOV_NET_VF', 8)
_add_inventory(cn_middle, 'CUSTOM_NET_MBPS', 4000)
_set_traits(cn_middle, 'HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2',
'HW_CPU_X86_SSE', 'HW_NIC_ACCEL_TLS')
cn_right = self._create_provider('cn_right', uuids.aggB, uuids.aggC)
os.environ['CN_RIGHT'] = cn_right.uuid
_add_inventory(cn_right, 'VCPU', 8)
_add_inventory(cn_right, 'MEMORY_MB', 4096)
_add_inventory(cn_right, 'DISK_GB', 500)
_add_inventory(cn_right, 'VGPU', 8, max_unit=2)
_set_traits(cn_right, 'HW_CPU_X86_MMX', 'HW_GPU_API_DXVA',
'CUSTOM_DISK_SSD')
shr_disk_1 = self._create_provider('shr_disk_1', uuids.aggA)
os.