From 8395e3f099022d8506ed5e6624582ec03e46c3b9 Mon Sep 17 00:00:00 2001
From: Tetsuro Nakamura
Date: Sun, 2 Jun 2019 15:29:03 +0000
Subject: [PATCH] Support `same_subtree` queryparam

A new same_subtree query parameter will be accepted. The value is a
comma-separated list of request group suffix strings $S. Each must
exactly match a suffix on a granular group somewhere else in the
request. Importantly, the identified request groups need not have a
resources[$S]. If this is provided, at least one of the resource
providers satisfying the specified request group must be an ancestor
of the rest. The same_subtree query parameter can be repeated and
each repeat group is treated independently.

Co-Authored-By: Chris Dent
Change-Id: I7fdeac24606359d37f1a7405d22c5797840e1a9e
Story: 2005575
Task: 30784
---
 api-ref/source/allocation_candidates.inc      |   1 +
 api-ref/source/parameters.yaml                |  13 +
 placement/errors.py                           |   1 +
 placement/handlers/allocation_candidate.py    |   6 +-
 placement/lib.py                              |  82 ++++-
 placement/microversion.py                     |   2 +
 placement/objects/allocation_candidate.py     |  82 ++++-
 placement/objects/research_context.py         |  35 ++
 placement/rest_api_version_history.rst        |  14 +
 placement/schemas/allocation_candidate.py     |   6 +
 .../tests/functional/fixtures/gabbits.py      |   2 +-
 .../gabbits/granular-same-subtree.yaml        | 313 ++++++++++++++++++
 .../functional/gabbits/microversion.yaml      |   4 +-
 .../unit/objects/test_allocation_candidate.py |  42 +++
 placement/tests/unit/test_util.py             |   3 +-
 ...ndidate-same_subtree-aeed7b2570293dfb.yaml |  11 +
 16 files changed, 594 insertions(+), 23 deletions(-)
 create mode 100644 placement/tests/functional/gabbits/granular-same-subtree.yaml
 create mode 100644 releasenotes/notes/allocation-candidate-same_subtree-aeed7b2570293dfb.yaml

diff --git a/api-ref/source/allocation_candidates.inc b/api-ref/source/allocation_candidates.inc
index 1505f448f..648174455 100644
--- a/api-ref/source/allocation_candidates.inc
+++ b/api-ref/source/allocation_candidates.inc
@@ -43,6 +43,7 @@ Request
   - group_policy: allocation_candidates_group_policy
   - limit: allocation_candidates_limit
   - root_required: allocation_candidates_root_required
+  - same_subtree: allocation_candidates_same_subtree

 Response (microversions 1.12 - )
 --------------------------------

diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml
index c7ec126c9..cfef117f8 100644
--- a/api-ref/source/parameters.yaml
+++ b/api-ref/source/parameters.yaml
@@ -188,6 +188,19 @@ allocation_candidates_root_required:
   (non-sharing) tree's root provider satisfies the specified trait
   requirements. Traits which are forbidden (must **not** be present on the
   root provider) are expressed by prefixing the trait with a ``!``.
+allocation_candidates_same_subtree:
+  type: string
+  in: query
+  required: false
+  min_version: 1.36
+  description: |
+    A comma-separated list of request group suffix strings ($S). Each must
+    exactly match a suffix on a granular group somewhere else in the request.
+    Importantly, the identified request groups need not have a resources[$S].
+    If this is provided, at least one of the resource providers satisfying the
+    specified request group must be an ancestor of the rest.
+    The ``same_subtree`` query parameter can be repeated and each repeat group
+    is treated independently.
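+    For example, a request for one VCPU plus two VF groups that must be
+    satisfied under a common NIC subtree (the suffix names here are
+    arbitrary) could be::
+
+      GET /allocation_candidates?resources=VCPU:1
+          &required_NIC=CUSTOM_HW_NIC_ROOT
+          &resources_PORT1=CUSTOM_VF:2&required_PORT1=CUSTOM_PHYSNET1
+          &resources_PORT2=CUSTOM_VF:2&required_PORT2=CUSTOM_PHYSNET2
+          &same_subtree=_NIC,_PORT1,_PORT2&group_policy=none
+
+    Here the ``_NIC`` group is resourceless (trait-only); that is allowed
+    only because ``_NIC`` is named in ``same_subtree``.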
 project_id: &project_id
   type: string
   in: query

diff --git a/placement/errors.py b/placement/errors.py
index 8925a1c0f..14d55c80a 100644
--- a/placement/errors.py
+++ b/placement/errors.py
@@ -49,3 +49,4 @@ RESOURCE_PROVIDER_NOT_FOUND = 'placement.resource_provider.not_found'
 ILLEGAL_DUPLICATE_QUERYPARAM = 'placement.query.duplicate_key'
 # Failure of a post-schema value check
 QUERYPARAM_BAD_VALUE = 'placement.query.bad_value'
+QUERYPARAM_MISSING_VALUE = 'placement.query.missing_value'

diff --git a/placement/handlers/allocation_candidate.py b/placement/handlers/allocation_candidate.py
index cb0d1eb6f..0ee813a64 100644
--- a/placement/handlers/allocation_candidate.py
+++ b/placement/handlers/allocation_candidate.py
@@ -251,7 +251,9 @@ def list_allocation_candidates(req):
     context.can(policies.LIST)
     want_version = req.environ[microversion.MICROVERSION_ENVIRON]
     get_schema = schema.GET_SCHEMA_1_10
-    if want_version.matches((1, 35)):
+    if want_version.matches((1, 36)):
+        get_schema = schema.GET_SCHEMA_1_36
+    elif want_version.matches((1, 35)):
         get_schema = schema.GET_SCHEMA_1_35
     elif want_version.matches((1, 33)):
         get_schema = schema.GET_SCHEMA_1_33
@@ -267,8 +269,8 @@ def list_allocation_candidates(req):
         get_schema = schema.GET_SCHEMA_1_16
     util.validate_query_params(req, get_schema)

-    groups = lib.RequestGroup.dict_from_request(req)
     rqparams = lib.RequestWideParams.from_request(req)
+    groups = lib.RequestGroup.dict_from_request(req, rqparams)

     if not rqparams.group_policy:
         # group_policy is required if more than one numbered request group was

diff --git a/placement/lib.py b/placement/lib.py
index 8e119b0de..6405b9284 100644
--- a/placement/lib.py
+++ b/placement/lib.py
@@ -38,6 +38,11 @@ _QS_KEY_PATTERN_1_33 = re.compile(
     (_QS_RESOURCES, _QS_REQUIRED, _QS_MEMBER_OF, _QS_IN_TREE)),
     common.GROUP_PAT_1_33))

+# In newer microversions we no longer check for orphaned member_of
+# and required because "providers providing no inventory to this
+# request" are now legitimate when accompanied by the `same_subtree`
+# queryparam.
+SAME_SUBTREE_VERSION = (1, 36)
+

 def _fix_one_forbidden(traits):
     forbidden = [trait for trait in traits if trait.startswith('!')]
@@ -126,6 +131,37 @@ class RequestGroup(object):
                     val)
         return ret

+    @staticmethod
+    def _check_for_one_resources(by_suffix, resourceless_suffixes):
+        if len(resourceless_suffixes) == len(by_suffix):
+            msg = ('There must be at least one resources or resources[$S] '
+                   'parameter.')
+            raise webob.exc.HTTPBadRequest(
+                msg, comment=errors.QUERYPARAM_MISSING_VALUE)
+
+    @staticmethod
+    def _check_resourceless_suffix(subtree_suffixes, resourceless_suffixes):
+        bad_suffixes = [suffix for suffix in resourceless_suffixes
+                        if suffix not in subtree_suffixes]
+        if bad_suffixes:
+            msg = ("Resourceless suffixed group request should be specified "
+                   "in `same_subtree` query param: bad group(s) - "
+                   "%(suffixes)s.") % {'suffixes': bad_suffixes}
+            raise webob.exc.HTTPBadRequest(
+                msg, comment=errors.QUERYPARAM_BAD_VALUE)
+
+    @staticmethod
+    def _check_actual_suffix(subtree_suffixes, by_suffix):
+        bad_suffixes = [suffix for suffix in subtree_suffixes
+                        if suffix not in by_suffix]
+        if bad_suffixes:
+            msg = ("Real suffixes should be specified in `same_subtree`: "
+                   "%(bad_suffixes)s not found in %(suffixes)s.") % {
+                'bad_suffixes': bad_suffixes,
+                'suffixes': list(by_suffix.keys())}
+            raise webob.exc.HTTPBadRequest(
+                msg, comment=errors.QUERYPARAM_BAD_VALUE)
+
     @staticmethod
     def _check_for_orphans(by_suffix):
         # Ensure any group with 'required' or 'member_of' also has 'resources'.
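For orientation, the three checks above compose as in the following
standalone sketch (illustrative only, not part of the change), for a
hypothetical request
`?resources1=VCPU:1&required2=COMPUTE_VOLUME_MULTI_ATTACH&same_subtree=1,2`:

    # Stand-ins for the parsed request: suffix -> does the group carry a
    # resources[$S]?
    has_resources = {'1': True, '2': False}
    resourceless_suffixes = {s for s, r in has_resources.items() if not r}
    # Union of the suffixes named by every same_subtree param: {'1', '2'}.
    subtree_suffixes = {'1', '2'}

    # _check_for_one_resources: reject if *every* group is resourceless.
    assert len(resourceless_suffixes) < len(has_resources)
    # _check_resourceless_suffix: each resourceless group must be named in
    # some same_subtree.
    assert resourceless_suffixes <= subtree_suffixes
    # _check_actual_suffix: same_subtree may only name existing suffixes.
    assert subtree_suffixes <= has_resources.keys()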
@@ -174,7 +210,7 @@ class RequestGroup(object):
                 msg % ', '.join(conflicting_traits))

     @classmethod
-    def dict_from_request(cls, req):
+    def dict_from_request(cls, req, rqparams):
         """Parse suffixed resources, traits, and member_of groupings out of a
         querystring dict found in a webob Request.

@@ -257,9 +293,11 @@
            }

         :param req: webob.Request object
+        :param rqparams: RequestWideParams object
         :return: A dict, keyed by suffix, of RequestGroup instances.
-        :raises `webob.exc.HTTPBadRequest` if any value is malformed, or if a
-            trait list is given without corresponding resources.
+        :raises `webob.exc.HTTPBadRequest` if any value is malformed, or if
+            the suffix of a resourceless request is not listed in
+            `rqparams.same_subtrees`.
         """
         want_version = req.environ[microversion.MICROVERSION_ENVIRON]
         # Control whether we handle forbidden traits.
@@ -270,7 +308,17 @@
         by_suffix = cls._parse_request_items(
             req, allow_forbidden, verbose_suffix)

-        cls._check_for_orphans(by_suffix)
+        if want_version.matches(SAME_SUBTREE_VERSION):
+            resourceless_suffixes = set(
+                suffix for suffix, grp in by_suffix.items()
+                if not grp.resources)
+            subtree_suffixes = set().union(*rqparams.same_subtrees)
+            cls._check_for_one_resources(by_suffix, resourceless_suffixes)
+            cls._check_resourceless_suffix(
+                subtree_suffixes, resourceless_suffixes)
+            cls._check_actual_suffix(subtree_suffixes, by_suffix)
+        else:
+            cls._check_for_orphans(by_suffix)

         # Make adjustments for forbidden traits by stripping forbidden out
         # of required.
@@ -287,7 +335,8 @@ class RequestWideParams(object):
     above).
     """
     def __init__(self, limit=None, group_policy=None,
-                 anchor_required_traits=None, anchor_forbidden_traits=None):
+                 anchor_required_traits=None, anchor_forbidden_traits=None,
+                 same_subtrees=None):
         """Create a RequestWideParams.

         :param limit: An integer, N, representing the maximum number of
@@ -307,11 +356,18 @@
         :param anchor_forbidden_traits: Set of trait names which the anchor of
                 each returned allocation candidate must NOT possess, regardless
                 of any RequestGroup filters.
+        :param same_subtrees: A list of sets of request group suffix strings
+                where each set of strings represents the suffixes from one
+                same_subtree query param. If provided, all of the resource
+                providers satisfying the specified request groups must be
+                rooted at one of the resource providers satisfying those
+                groups.
         """
         self.limit = limit
         self.group_policy = group_policy
         self.anchor_required_traits = anchor_required_traits
         self.anchor_forbidden_traits = anchor_forbidden_traits
+        self.same_subtrees = same_subtrees or []

     @classmethod
     def from_request(cls, req):
@@ -347,8 +403,22 @@
                 'root_required: %s' % ', '.join(conflicts),
                 comment=errors.QUERYPARAM_BAD_VALUE)

+        same_subtree = req.GET.getall('same_subtree')
+        # Construct a list of sets of request group suffix strings.
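+        # For example, a querystring carrying
+        #     same_subtree=_NIC,_PORT1,_PORT2&same_subtree=_COMPUTE1,_COMPUTE2
+        # parses to [{'_NIC', '_PORT1', '_PORT2'}, {'_COMPUTE1', '_COMPUTE2'}],
+        # and each set is enforced independently.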
+ same_subtrees = [] + if same_subtree: + for val in same_subtree: + suffixes = set(substr.strip() for substr in val.split(',')) + if '' in suffixes: + raise webob.exc.HTTPBadRequest( + 'Empty string (unsuffixed group) can not be specified ' + 'in `same_subtree` ', + comment=errors.QUERYPARAM_BAD_VALUE) + same_subtrees.append(suffixes) + return cls( limit=limit, group_policy=group_policy, anchor_required_traits=anchor_required_traits, - anchor_forbidden_traits=anchor_forbidden_traits) + anchor_forbidden_traits=anchor_forbidden_traits, + same_subtrees=same_subtrees) diff --git a/placement/microversion.py b/placement/microversion.py index eb05560cb..ff16cd428 100644 --- a/placement/microversion.py +++ b/placement/microversion.py @@ -86,6 +86,8 @@ VERSIONS = [ '1.34', # Include a mappings key in allocation requests that shows which # resource providers satisfied which request group suffix. '1.35', # Add a `root_required` queryparam on `GET /allocation_candidates` + '1.36', # Add a `same_subtree` parameter on GET /allocation_candidates + # and allow resourceless requests for groups in `same_subtree`. ] diff --git a/placement/objects/allocation_candidate.py b/placement/objects/allocation_candidate.py index d177150ca..d13457690 100644 --- a/placement/objects/allocation_candidate.py +++ b/placement/objects/allocation_candidate.py @@ -391,7 +391,7 @@ def _alloc_candidates_single_provider(rg_ctx, rw_ctx, rp_tuples): for rp_id, root_id in rp_tuples: rp_summary = summaries[rp_id] req_obj = _allocation_request_for_provider( - rg_ctx.context, rg_ctx.resources, rp_summary.resource_provider, + rg_ctx.resources, rp_summary.resource_provider, suffix=rg_ctx.suffix) # Exclude this if its anchor (which is its root) isn't in our # prefiltered list of anchors @@ -416,12 +416,10 @@ def _alloc_candidates_single_provider(rg_ctx, rw_ctx, rp_tuples): return alloc_requests, list(summaries.values()) -def _allocation_request_for_provider(ctx, requested_resources, provider, - suffix): +def _allocation_request_for_provider(requested_resources, provider, suffix): """Returns an AllocationRequest object containing AllocationRequestResource objects for each resource class in the supplied requested resources dict. - :param ctx: placement.context.RequestContext object :param requested_resources: dict, keyed by resource class ID, of amounts being requested for that resource class :param provider: ResourceProvider object representing the provider of the @@ -440,6 +438,8 @@ def _allocation_request_for_provider(ctx, requested_resources, provider, # anchor in its own tree. If the provider is a sharing provider, the # caller needs to identify the other anchors with which it might be # associated. + # NOTE(tetsuro): The AllocationRequest has empty resource_requests for a + # resourceless request. Still, it has the rp uuid in the mappings field. mappings = {suffix: set([provider.uuid])} return AllocationRequest( resource_requests=resource_requests, @@ -762,12 +762,16 @@ def _merge_candidates(candidates, rw_ctx): # ProviderSummaryResource. This will be used to do a final capacity # check/filter on each merged AllocationRequest. 
    psum_res_by_rp_rc = {}
+    # A dict of parent uuids keyed by rp uuids
+    parent_uuid_by_rp_uuid = {}
     for suffix, (areqs, psums) in candidates.items():
         for areq in areqs:
             anchor = areq.anchor_root_provider_uuid
             areq_lists_by_anchor[anchor][suffix].append(areq)
         for psum in psums:
             all_psums.append(psum)
+            parent_uuid_by_rp_uuid[psum.resource_provider.uuid] = (
+                psum.resource_provider.parent_provider_uuid)
             for psum_res in psum.resources:
                 key = _rp_rc_key(
                     psum.resource_provider, psum_res.resource_class)
@@ -810,6 +814,9 @@
             if not _satisfies_group_policy(
                     areq_list, rw_ctx.group_policy, num_granular_groups):
                 continue
+            if not _satisfies_same_subtree(
+                    areq_list, rw_ctx.same_subtrees, parent_uuid_by_rp_uuid):
+                continue
             # Now we go from this (where 'arr' is AllocationRequestResource):
             # [ areq__B(arrX, arrY, arrZ),
             #   areq_1_A(arrM, arrN),
@@ -890,13 +897,12 @@
     # The number of unique resource providers referenced in the request groups
     # having use_same_provider=True must be equal to the number of granular
     # groups.
-    num_granular_groups_in_areqs = len(set(
-        # We can reliably use the first resource_request's provider: all the
-        # resource_requests are satisfied by the same provider by definition
-        # because use_same_provider is True.
-        areq.resource_requests[0].resource_provider.uuid
-        for areq in areqs
-        if areq.use_same_provider))
+    num_granular_groups_in_areqs = len(set().union(*(
+        # We can reliably use the first value of provider uuids in mappings:
+        # all the resource_requests are satisfied by the same provider
+        # by definition because use_same_provider is True.
+        list(areq.mappings.values())[0] for areq in areqs
+        if areq.use_same_provider)))
     if num_granular_groups == num_granular_groups_in_areqs:
         return True
     LOG.debug('Excluding the following set of AllocationRequest because '
@@ -905,3 +911,57 @@
               'request (%d): %s',
               num_granular_groups_in_areqs, num_granular_groups, str(areqs))
     return False
+
+
+def _satisfies_same_subtree(
+        areqs, same_subtrees, parent_uuid_by_rp_uuid):
+    """Applies same_subtree policy to a list of AllocationRequest.
+
+    :param areqs: A list containing one AllocationRequest for each input
+            RequestGroup.
+    :param same_subtrees: A list of sets of request group suffix strings.
+            If provided, all of the resource providers satisfying the
+            specified request groups must be rooted at one of the resource
+            providers satisfying those groups.
+    :param parent_uuid_by_rp_uuid: A dict of parent uuids keyed by rp uuids.
+    :return: True if areqs satisfies same_subtree policy; False otherwise.
+    """
+    for same_subtree in same_subtrees:
+        # Collect RP uuids that must satisfy a single same_subtree constraint.
+        rp_uuids = set().union(*(areq.mappings.get(suffix) for areq in areqs
+                                 for suffix in same_subtree
+                                 if areq.mappings.get(suffix)))
+        if not _check_same_subtree(rp_uuids, parent_uuid_by_rp_uuid):
+            return False
+    return True
+
+
+def _check_same_subtree(rp_uuids, parent_uuid_by_rp_uuid):
+    """Returns True if the given rp uuids are all in the same subtree.
+
+    Note: "the RPs are in the same subtree" means that all the providers
+    are rooted at one of those providers.
+    """
+    if len(rp_uuids) == 1:
+        return True
+    # A set of uuids of common ancestors of each rp in question
+    common_ancestors = set.intersection(*(
+        _get_ancestors_by_one_uuid(rp_uuid, parent_uuid_by_rp_uuid)
+        for rp_uuid in rp_uuids))
+    # If any of the rp_uuids is in the common_ancestors set, then we know
+    # that that rp_uuid is the root of the other rp_uuids in this
+    # same_subtree constraint.
+    return len(common_ancestors.intersection(rp_uuids)) != 0
+
+
+def _get_ancestors_by_one_uuid(
+        rp_uuid, parent_uuid_by_rp_uuid, ancestors=None):
+    """Returns a set of uuids of ancestors for a given rp uuid"""
+    if ancestors is None:
+        ancestors = set([rp_uuid])
+    parent_uuid = parent_uuid_by_rp_uuid[rp_uuid]
+    if parent_uuid is None:
+        return ancestors
+    ancestors.add(parent_uuid)
+    return _get_ancestors_by_one_uuid(
+        parent_uuid, parent_uuid_by_rp_uuid, ancestors=ancestors)

diff --git a/placement/objects/research_context.py b/placement/objects/research_context.py
index ee69c7943..c56400c05 100644
--- a/placement/objects/research_context.py
+++ b/placement/objects/research_context.py
@@ -186,6 +186,7 @@ class RequestWideSearchContext(object):
         # IDs of root providers that conform to the requested filters.
         self.anchor_root_ids = None
         self._process_anchor_traits(rqparams)
+        self.same_subtrees = rqparams.same_subtrees

     def _process_anchor_traits(self, rqparams):
         """Set or filter self.anchor_root_ids according to anchor
@@ -463,6 +464,33 @@ def get_providers_with_resource(ctx, rc_id, amount, tree_root_id=None):
     return res


+@db_api.placement_context_manager.reader
+def get_providers_with_root(ctx, allowed, forbidden):
+    """Returns a set of tuples of (provider ID, root provider ID) of the
+    given resource providers.
+
+    :param ctx: Session context to use
+    :param allowed: resource provider ids to include
+    :param forbidden: resource provider ids to exclude
+    """
+    # SELECT rp.id, rp.root_provider_id
+    # FROM resource_providers AS rp
+    # WHERE rp.id IN ($allowed)
+    # AND rp.id NOT IN ($forbidden)
+    sel = sa.select([_RP_TBL.c.id, _RP_TBL.c.root_provider_id])
+    sel = sel.select_from(_RP_TBL)
+    cond = []
+    if allowed:
+        cond.append(_RP_TBL.c.id.in_(allowed))
+    if forbidden:
+        cond.append(~_RP_TBL.c.id.in_(forbidden))
+    if cond:
+        sel = sel.where(sa.and_(*cond))
+    res = ctx.session.execute(sel).fetchall()
+    res = set((r[0], r[1]) for r in res)
+    return res
+
+
 @db_api.placement_context_manager.reader
 def get_provider_ids_matching(rg_ctx):
     """Returns a list of tuples of (internal provider ID, root provider ID)
@@ -537,6 +565,13 @@ def get_provider_ids_matching(rg_ctx):
     if not filtered_rps:
         return []

+    if not rg_ctx.resources:
+        # NOTE(tetsuro): This does an extra sql query that could be avoided if
+        # all the smaller queries in get_provider_ids_for_traits_and_aggs()
+        # would return the internal ID and the root ID as well for each RP.
+        provs_with_resource = get_providers_with_root(
+            rg_ctx.context, filtered_rps, forbidden_rp_ids)
+
     # provs_with_resource will contain a superset of providers with IDs still
     # in our filtered_rps set. We return the list of tuples of
     # (internal provider ID, root internal provider ID)

diff --git a/placement/rest_api_version_history.rst b/placement/rest_api_version_history.rst
index 818eb75cc..0e35fca10 100644
--- a/placement/rest_api_version_history.rst
+++ b/placement/rest_api_version_history.rst
@@ -651,3 +651,17 @@ format as the ``required`` query parameter.
 This restricts allocation requests in the response to only those whose
 (non-sharing) tree's root resource provider satisfies the specified trait
 requirements. See :ref:`filtering by root provider traits` for details.
+
+1.36 - Support 'same_subtree' queryparam on GET /allocation_candidates
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: Train
+
+Add support for the ``same_subtree`` query parameter to the ``GET
+/allocation_candidates`` API. It accepts a comma-separated list of request
+group suffix strings $S. Each must exactly match a suffix on a granular group
+somewhere else in the request. Importantly, the identified request groups need
+not have a ``resources[$S]``. If this is provided, at least one of the
+resource providers satisfying the specified request group must be an ancestor
+of the rest. The ``same_subtree`` query parameter can be repeated and each
+repeat group is treated independently.

diff --git a/placement/schemas/allocation_candidate.py b/placement/schemas/allocation_candidate.py
index 5a6bcf7e4..4fd77f31f 100644
--- a/placement/schemas/allocation_candidate.py
+++ b/placement/schemas/allocation_candidate.py
@@ -96,3 +96,9 @@ GET_SCHEMA_1_35 = copy.deepcopy(GET_SCHEMA_1_33)
 GET_SCHEMA_1_35["properties"]['root_required'] = {
     "type": ["string"]
 }
+
+# Microversion 1.36 supports same_subtree.
+GET_SCHEMA_1_36 = copy.deepcopy(GET_SCHEMA_1_35)
+GET_SCHEMA_1_36["properties"]['same_subtree'] = {
+    "type": ["string"]
+}

diff --git a/placement/tests/functional/fixtures/gabbits.py b/placement/tests/functional/fixtures/gabbits.py
index 560b0e62f..b473dde27 100644
--- a/placement/tests/functional/fixtures/gabbits.py
+++ b/placement/tests/functional/fixtures/gabbits.py
@@ -526,7 +526,7 @@ class NUMANetworkFixture(APIFixture):
             # TODO(efried): Use standard HW_NIC_ROOT trait
             tb.set_traits(nic, 'CUSTOM_HW_NIC_ROOT')
             nics.append(nic)
-            os.environ['NIC%d_UUID'] = nic.uuid
+            os.environ['NIC%s_UUID' % i] = nic.uuid
         # PFs for NIC1
         for i in (1, 2):
             suf = '1_%d' % i

diff --git a/placement/tests/functional/gabbits/granular-same-subtree.yaml b/placement/tests/functional/gabbits/granular-same-subtree.yaml
new file mode 100644
index 000000000..afb178677
--- /dev/null
+++ b/placement/tests/functional/gabbits/granular-same-subtree.yaml
@@ -0,0 +1,313 @@
+# Tests of /allocation_candidates API with same_subtree.
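+# The provider trees, traits, and inventories these expectations rely on
+# come from NUMANetworkFixture in placement/tests/functional/fixtures/
+# gabbits.py; see that fixture for what each $ENVIRON UUID refers to.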
+
+fixtures:
+    - NUMANetworkFixture
+
+defaults:
+    request_headers:
+        x-auth-token: admin
+        content-type: application/json
+        accept: application/json
+        # version of request in which `same_subtree` is supported
+        openstack-api-version: placement 1.36
+
+tests:
+
+- name: resourceless traits without same_subtree
+  GET: /allocation_candidates
+  query_parameters:
+    resources1: VCPU:1
+    required2: COMPUTE_VOLUME_MULTI_ATTACH
+    group_policy: none
+  status: 400
+  response_strings:
+    - "Resourceless suffixed group request should be specified in `same_subtree` query param"
+  response_json_paths:
+    $.errors[0].title: Bad Request
+    $.errors[0].code: placement.query.bad_value
+
+- name: resourceless aggs without same_subtree
+  GET: /allocation_candidates
+  query_parameters:
+    resources1: VCPU:1
+    member_of2: $ENVIRON['AGGA_UUID']
+    group_policy: none
+  status: 400
+  response_strings:
+    - "Resourceless suffixed group request should be specified in `same_subtree` query param"
+  response_json_paths:
+    $.errors[0].title: Bad Request
+    $.errors[0].code: placement.query.bad_value
+
+- name: resourceless without any resource
+  GET: /allocation_candidates
+  query_parameters:
+    member_of1: $ENVIRON['AGGA_UUID']
+    group_policy: none
+  status: 400
+  response_strings:
+    - 'There must be at least one resources or resources[$S] parameter.'
+  response_json_paths:
+    $.errors[0].title: Bad Request
+    $.errors[0].code: placement.query.missing_value
+
+- name: invalid same subtree missing underscores
+  GET: /allocation_candidates
+  query_parameters:
+    resources_COMPUTE: VCPU:1
+    resources_ACCEL: CUSTOM_FPGA:1
+    same_subtree: COMPUTE,_ACCEL
+    group_policy: none
+  status: 400
+  response_strings:
+    - "Real suffixes should be specified in `same_subtree`:"
+  response_json_paths:
+    $.errors[0].title: Bad Request
+    $.errors[0].code: placement.query.bad_value
+
+- name: invalid same subtree with empty suffix
+  GET: /allocation_candidates
+  query_parameters:
+    resources_COMPUTE: VCPU:1
+    resources_ACCEL: CUSTOM_FPGA:1
+    same_subtree: _COMPUTE,,_ACCEL
+    group_policy: none
+  status: 400
+  response_strings:
+    - 'Empty string (unsuffixed group) can not be specified in `same_subtree`'
+  response_json_paths:
+    $.errors[0].title: Bad Request
+    $.errors[0].code: placement.query.bad_value
+
+- name: no resourceless without same subtree
+  GET: /allocation_candidates
+  query_parameters:
+    resources_COMPUTE: VCPU:1
+    resources_ACCEL: CUSTOM_FPGA:1
+    group_policy: none
+  response_json_paths:
+    $.allocation_requests.`len`: 6
+    $.allocation_requests..allocations['$ENVIRON["NUMA0_UUID"]'].resources.VCPU: [1, 1, 1]
+    $.allocation_requests..allocations['$ENVIRON["NUMA1_UUID"]'].resources.VCPU: [1, 1, 1]
+    $.allocation_requests..allocations['$ENVIRON["FPGA0_UUID"]'].resources.CUSTOM_FPGA: [1, 1]
+    $.allocation_requests..allocations['$ENVIRON["FPGA1_0_UUID"]'].resources.CUSTOM_FPGA: [1, 1]
+    $.allocation_requests..allocations['$ENVIRON["FPGA1_1_UUID"]'].resources.CUSTOM_FPGA: [1, 1]
+
+- name: no resourceless with single same subtree
+  GET: /allocation_candidates
+  query_parameters:
+    resources_COMPUTE: VCPU:1
+    resources_ACCEL: CUSTOM_FPGA:1
+    same_subtree: _COMPUTE
+    group_policy: none
+  response_json_paths:
+    $.allocation_requests.`len`: 6
+    $.allocation_requests..allocations['$ENVIRON["NUMA0_UUID"]'].resources.VCPU: [1, 1, 1]
+    $.allocation_requests..allocations['$ENVIRON["NUMA1_UUID"]'].resources.VCPU: [1, 1, 1]
+
$.allocation_requests..allocations['$ENVIRON["FPGA0_UUID"]'].resources.CUSTOM_FPGA: [1, 1] + $.allocation_requests..allocations['$ENVIRON["FPGA1_0_UUID"]'].resources.CUSTOM_FPGA: [1, 1] + $.allocation_requests..allocations['$ENVIRON["FPGA1_1_UUID"]'].resources.CUSTOM_FPGA: [1, 1] + +- name: no resourceless with same subtree + GET: /allocation_candidates + query_parameters: + resources_COMPUTE: VCPU:1 + resources_ACCEL: CUSTOM_FPGA:1 + same_subtree: _COMPUTE,_ACCEL + group_policy: none + response_json_paths: + $.allocation_requests.`len`: 3 + $.allocation_requests..allocations['$ENVIRON["NUMA0_UUID"]'].resources.VCPU: 1 + $.allocation_requests..allocations['$ENVIRON["NUMA1_UUID"]'].resources.VCPU: [1, 1] + $.allocation_requests..allocations['$ENVIRON["FPGA0_UUID"]'].resources.CUSTOM_FPGA: 1 + $.allocation_requests..allocations['$ENVIRON["FPGA1_0_UUID"]'].resources.CUSTOM_FPGA: 1 + $.allocation_requests..allocations['$ENVIRON["FPGA1_1_UUID"]'].resources.CUSTOM_FPGA: 1 + +- name: no resourceless with same subtree same provider + # Ensure that "myself" is in the same subtree + GET: /allocation_candidates + query_parameters: + resources_COMPUTE1: VCPU:1 + resources_COMPUTE2: MEMORY_MB:1024 + same_subtree: _COMPUTE1,_COMPUTE2 + group_policy: none + response_json_paths: + $.allocation_requests.`len`: 3 + $.allocation_requests..allocations['$ENVIRON["NUMA0_UUID"]'].resources.VCPU: 1 + $.allocation_requests..allocations['$ENVIRON["NUMA0_UUID"]'].resources.MEMORY_MB: 1024 + $.allocation_requests..allocations['$ENVIRON["NUMA1_UUID"]'].resources.VCPU: 1 + $.allocation_requests..allocations['$ENVIRON["NUMA1_UUID"]'].resources.MEMORY_MB: 1024 + $.allocation_requests..allocations['$ENVIRON["CN2_UUID"]'].resources.VCPU: 1 + $.allocation_requests..allocations['$ENVIRON["CN2_UUID"]'].resources.MEMORY_MB: 1024 + +- name: no resourceless with same subtree same provider isolate + GET: /allocation_candidates + query_parameters: + resources_COMPUTE1: VCPU:1 + resources_COMPUTE2: MEMORY_MB:1024 + same_subtree: _COMPUTE1,_COMPUTE2 + group_policy: isolate + response_json_paths: + $.allocation_requests.`len`: 0 + +- name: resourceful without same subtree + GET: /allocation_candidates + query_parameters: + resources: VCPU:1 + resources_PORT1: CUSTOM_VF:4 + required_PORT1: CUSTOM_PHYSNET1 + resources_PORT2: CUSTOM_VF:4 + required_PORT2: CUSTOM_PHYSNET2 + group_policy: none + response_json_paths: + $.allocation_requests.`len`: 2 + $.allocation_requests..allocations['$ENVIRON["CN2_UUID"]'].resources.VCPU: [1, 1] + $.allocation_requests..allocations['$ENVIRON["PF1_1_UUID"]'].resources.CUSTOM_VF: 4 + $.allocation_requests..allocations['$ENVIRON["PF1_2_UUID"]'].resources.CUSTOM_VF: [4, 4] + $.allocation_requests..allocations['$ENVIRON["PF3_1_UUID"]'].resources.CUSTOM_VF: 4 + +- name: resourceless with same subtree 4VFs + GET: /allocation_candidates + query_parameters: + resources: VCPU:1 + required_NIC: CUSTOM_HW_NIC_ROOT + resources_PORT1: CUSTOM_VF:4 + required_PORT1: CUSTOM_PHYSNET1 + resources_PORT2: CUSTOM_VF:4 + required_PORT2: CUSTOM_PHYSNET2 + same_subtree: _NIC,_PORT1,_PORT2 + group_policy: none + response_json_paths: + $.allocation_requests.`len`: 1 + $.allocation_requests..allocations.`len`: 3 + $.allocation_requests..allocations['$ENVIRON["CN2_UUID"]'].resources.VCPU: 1 + $.allocation_requests..allocations['$ENVIRON["PF1_1_UUID"]'].resources.CUSTOM_VF: 4 + $.allocation_requests..allocations['$ENVIRON["PF1_2_UUID"]'].resources.CUSTOM_VF: 4 + $.allocation_requests..mappings.`len`: 4 + 
$.allocation_requests..mappings[''][0]: $ENVIRON["CN2_UUID"] + $.allocation_requests..mappings['_NIC'][0]: $ENVIRON["NIC1_UUID"] + $.allocation_requests..mappings['_PORT1'][0]: $ENVIRON["PF1_1_UUID"] + $.allocation_requests..mappings['_PORT2'][0]: $ENVIRON["PF1_2_UUID"] + +- name: resourceless with same subtree 2VFs + GET: /allocation_candidates + query_parameters: + resources: VCPU:1 + required_NIC: CUSTOM_HW_NIC_ROOT + resources_PORT1: CUSTOM_VF:2 + required_PORT1: CUSTOM_PHYSNET1 + resources_PORT2: CUSTOM_VF:2 + required_PORT2: CUSTOM_PHYSNET2 + same_subtree: _NIC,_PORT1,_PORT2 + group_policy: none + response_json_paths: + $.allocation_requests.`len`: 5 + $.allocation_requests..allocations['$ENVIRON["CN2_UUID"]'].resources.VCPU: [1, 1, 1, 1, 1] + $.allocation_requests..allocations['$ENVIRON["PF1_1_UUID"]'].resources.CUSTOM_VF: 2 + $.allocation_requests..allocations['$ENVIRON["PF1_2_UUID"]'].resources.CUSTOM_VF: 2 + $.allocation_requests..allocations['$ENVIRON["PF2_1_UUID"]'].resources.CUSTOM_VF: [2, 2] + $.allocation_requests..allocations['$ENVIRON["PF2_2_UUID"]'].resources.CUSTOM_VF: [2, 2] + $.allocation_requests..allocations['$ENVIRON["PF2_3_UUID"]'].resources.CUSTOM_VF: [2, 2] + $.allocation_requests..allocations['$ENVIRON["PF2_4_UUID"]'].resources.CUSTOM_VF: [2, 2] + +- name: resourceless with same subtree 2VFs isolate + GET: /allocation_candidates + query_parameters: + resources: VCPU:1 + required_NIC: CUSTOM_HW_NIC_ROOT + resources_PORT1: CUSTOM_VF:2 + required_PORT1: CUSTOM_PHYSNET1 + resources_PORT2: CUSTOM_VF:2 + required_PORT2: CUSTOM_PHYSNET2 + same_subtree: _NIC,_PORT1,_PORT2 + group_policy: isolate + response_json_paths: + $.allocation_requests.`len`: 5 + $.allocation_requests..allocations['$ENVIRON["CN2_UUID"]'].resources.VCPU: [1, 1, 1, 1, 1] + $.allocation_requests..allocations['$ENVIRON["PF1_1_UUID"]'].resources.CUSTOM_VF: 2 + $.allocation_requests..allocations['$ENVIRON["PF1_2_UUID"]'].resources.CUSTOM_VF: 2 + $.allocation_requests..allocations['$ENVIRON["PF2_1_UUID"]'].resources.CUSTOM_VF: [2, 2] + $.allocation_requests..allocations['$ENVIRON["PF2_2_UUID"]'].resources.CUSTOM_VF: [2, 2] + $.allocation_requests..allocations['$ENVIRON["PF2_3_UUID"]'].resources.CUSTOM_VF: [2, 2] + $.allocation_requests..allocations['$ENVIRON["PF2_4_UUID"]'].resources.CUSTOM_VF: [2, 2] + +- name: resourceless with same subtree same provider + GET: /allocation_candidates + query_parameters: + resources_PORT1: CUSTOM_VF:8 + required_PORT2: CUSTOM_PHYSNET1 + same_subtree: _PORT1,_PORT2 + group_policy: none + response_json_paths: + $.allocation_requests.`len`: 1 + $.allocation_requests..allocations.`len`: 1 + $.allocation_requests..allocations['$ENVIRON["PF3_1_UUID"]'].resources.CUSTOM_VF: 8 + $.allocation_requests..mappings.`len`: 2 + $.allocation_requests..mappings['_PORT1'][0]: $ENVIRON["PF3_1_UUID"] + $.allocation_requests..mappings['_PORT2'][0]: $ENVIRON["PF3_1_UUID"] + +- name: resourceless with same subtree same provider isolate + GET: /allocation_candidates + query_parameters: + resources_PORT1: CUSTOM_VF:8 + required_PORT2: CUSTOM_PHYSNET1 + same_subtree: _PORT1,_PORT2 + group_policy: isolate + response_json_paths: + $.allocation_requests.`len`: 0 + +- name: multiple resourceless with same subtree same provider + GET: /allocation_candidates + query_parameters: + resources_COMPUTE1: VCPU:1 + required_COMPUTE2: CUSTOM_FOO + required_COMPUTE3: HW_NUMA_ROOT + same_subtree: _COMPUTE1,_COMPUTE2,_COMPUTE3 + group_policy: none + response_json_paths: + $.allocation_requests.`len`: 1 + 
$.allocation_requests..allocations.`len`: 1
+    $.allocation_requests..allocations['$ENVIRON["NUMA1_UUID"]'].resources.VCPU: 1
+    $.allocation_requests..mappings.`len`: 3
+    $.allocation_requests..mappings['_COMPUTE1'][0]: $ENVIRON["NUMA1_UUID"]
+    $.allocation_requests..mappings['_COMPUTE2'][0]: $ENVIRON["NUMA1_UUID"]
+    $.allocation_requests..mappings['_COMPUTE3'][0]: $ENVIRON["NUMA1_UUID"]
+
+- name: multiple resourceless with same subtree same provider isolate
+  GET: /allocation_candidates
+  query_parameters:
+    resources_COMPUTE1: VCPU:1
+    required_COMPUTE2: CUSTOM_FOO
+    required_COMPUTE3: HW_NUMA_ROOT
+    same_subtree: _COMPUTE1,_COMPUTE2,_COMPUTE3
+    group_policy: isolate
+  response_json_paths:
+    $.allocation_requests.`len`: 0
+
+- name: resourceless with same subtree 2FPGAs
+  GET: /allocation_candidates
+  query_parameters:
+    required_NUMA: HW_NUMA_ROOT
+    resources_ACCEL1: CUSTOM_FPGA:1
+    resources_ACCEL2: CUSTOM_FPGA:1
+    same_subtree: _NUMA,_ACCEL1,_ACCEL2
+    group_policy: isolate
+  response_json_paths:
+    $.allocation_requests.`len`: 2
+    $.allocation_requests..allocations['$ENVIRON["FPGA1_0_UUID"]'].resources.CUSTOM_FPGA: [1, 1]
+    $.allocation_requests..allocations['$ENVIRON["FPGA1_1_UUID"]'].resources.CUSTOM_FPGA: [1, 1]
+    $.allocation_requests..mappings.`len`: [3, 3]
+    $.allocation_requests..mappings['_NUMA'][0]: /(?:$ENVIRON['NUMA1_UUID']|$ENVIRON['NUMA1_UUID'])/
+    $.allocation_requests..mappings['_ACCEL1'][0]: /(?:$ENVIRON['FPGA1_0_UUID']|$ENVIRON['FPGA1_1_UUID'])/
+    $.allocation_requests..mappings['_ACCEL2'][0]: /(?:$ENVIRON['FPGA1_0_UUID']|$ENVIRON['FPGA1_1_UUID'])/
+
+- name: resourceless with same subtree 2FPGAs forbidden
+  GET: /allocation_candidates
+  query_parameters:
+    required_NUMA: HW_NUMA_ROOT,!CUSTOM_FOO
+    resources_ACCEL1: CUSTOM_FPGA:1
+    resources_ACCEL2: CUSTOM_FPGA:1
+    same_subtree: _NUMA,_ACCEL1,_ACCEL2
+    group_policy: isolate
+  response_json_paths:
+    $.allocation_requests.`len`: 0

diff --git a/placement/tests/functional/gabbits/microversion.yaml b/placement/tests/functional/gabbits/microversion.yaml
index 2d476c474..22bf589cb 100644
--- a/placement/tests/functional/gabbits/microversion.yaml
+++ b/placement/tests/functional/gabbits/microversion.yaml
@@ -41,13 +41,13 @@ tests:
       response_json_paths:
           $.errors[0].title: Not Acceptable

-- name: latest microversion is 1.35
+- name: latest microversion is 1.36
   GET: /
   request_headers:
       openstack-api-version: placement latest
   response_headers:
       vary: /openstack-api-version/
-      openstack-api-version: placement 1.35
+      openstack-api-version: placement 1.36

 - name: other accept header bad version
   GET: /

diff --git a/placement/tests/unit/objects/test_allocation_candidate.py b/placement/tests/unit/objects/test_allocation_candidate.py
index af03f5525..9c5ee80cd 100644
--- a/placement/tests/unit/objects/test_allocation_candidate.py
+++ b/placement/tests/unit/objects/test_allocation_candidate.py
@@ -13,6 +13,7 @@
 import mock

 from placement import lib as placement_lib
+from placement.objects import allocation_candidate as ac_obj
 from placement.objects import research_context as res_ctx
 from placement.tests.unit.objects import base

@@ -55,3 +56,44 @@ class TestAllocationCandidatesNoDB(base.TestCase):
         aro, sum = rw_ctx.limit_results(aro_in, sum_in)
         self.assertEqual(aro_in[:2], aro)
         self.assertEqual(set([sum1, sum0, sum4, sum8, sum5]), set(sum))
+
+    def test_check_same_subtree(self):
+        # Construct a tree that looks like this:
+        #
+        # 0 -+- 00 --- 000          1 -+- 10 --- 100
+        #    |                        |
+        #    +- 01 -+- 010            +- 11 -+- 110
+        #    |      +- 011            |      +- 111
+        #    +- 02 -+- 020            +- 12 -+- 120
+        #           +- 021                   +- 121
+        #
+        parent_by_rp = {"0": None, "00": "0", "000": "00",
+                        "01": "0", "010": "01", "011": "01",
+                        "02": "0", "020": "02", "021": "02",
+                        "1": None, "10": "1", "100": "10",
+                        "11": "1", "110": "11", "111": "11",
+                        "12": "1", "120": "12", "121": "12"}
+        same_subtree = [
+            set(["0", "00", "01"]),
+            set(["01", "010"]),
+            set(["02", "020", "021"]),
+            set(["02", "020", "021"]),
+            set(["0", "02", "010"]),
+            set(["000"])
+        ]
+
+        different_subtree = [
+            set(["10", "11"]),
+            set(["110", "111"]),
+            set(["10", "11", "110"]),
+            set(["12", "120", "100"]),
+            set(["0", "1"]),
+        ]
+
+        for group in same_subtree:
+            self.assertTrue(
+                ac_obj._check_same_subtree(group, parent_by_rp))
+
+        for group in different_subtree:
+            self.assertFalse(
+                ac_obj._check_same_subtree(group, parent_by_rp))

diff --git a/placement/tests/unit/test_util.py b/placement/tests/unit/test_util.py
index d4becf20d..92fdb92b0 100644
--- a/placement/tests/unit/test_util.py
+++ b/placement/tests/unit/test_util.py
@@ -446,7 +446,8 @@ class TestParseQsRequestGroups(testtools.TestCase):
         mv_parsed.min_version = microversion_parse.parse_version_string(
             microversion.min_version_string())
         req.environ['placement.microversion'] = mv_parsed
-        d = pl.RequestGroup.dict_from_request(req)
+        rqparam = pl.RequestWideParams.from_request(req)
+        d = pl.RequestGroup.dict_from_request(req, rqparam)
         # Sort for easier testing
         return [d[suff] for suff in sorted(d)]

diff --git a/releasenotes/notes/allocation-candidate-same_subtree-aeed7b2570293dfb.yaml b/releasenotes/notes/allocation-candidate-same_subtree-aeed7b2570293dfb.yaml
new file mode 100644
index 000000000..c1f0f0ae8
--- /dev/null
+++ b/releasenotes/notes/allocation-candidate-same_subtree-aeed7b2570293dfb.yaml
@@ -0,0 +1,11 @@
+---
+features:
+  - |
+    From microversion ``1.36``, a new ``same_subtree`` queryparam on
+    ``GET /allocation_candidates`` is supported. It accepts a comma-separated
+    list of request group suffix strings ($S). Each must exactly match a suffix
+    on a granular group somewhere else in the request. Importantly, the
+    identified request groups need not have a ``resources[$S]``. If this is
+    provided, at least one of the resource providers satisfying the specified
+    request group must be an ancestor of the rest. The ``same_subtree`` query
+    parameter can be repeated and each repeat group is treated independently.
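As a worked example of the ancestor-intersection logic behind
`_check_same_subtree` (a standalone sketch for clarity, reusing a slice of
the parent map from the unit test above; this is not code from the change
itself):

    # Every provider counts as its own ancestor, mirroring
    # _get_ancestors_by_one_uuid().
    parent = {"0": None, "01": "0", "010": "01", "011": "01",
              "1": None, "10": "1", "11": "1"}

    def ancestors(rp):
        out = {rp}
        while parent[rp] is not None:
            rp = parent[rp]
            out.add(rp)
        return out

    def same_subtree(rp_uuids):
        # True iff one of the group's own providers is an ancestor of all
        # the others, i.e. it anchors the subtree.
        common = set.intersection(*(ancestors(rp) for rp in rp_uuids))
        return bool(common & set(rp_uuids))

    assert same_subtree({"01", "010", "011"})  # "01" anchors its children
    assert not same_subtree({"10", "11"})      # the only common ancestor
                                               # is "1", outside the group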