From a69e05d29a518e00f9a5b6d7e31fa7e4a0829023 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Mon, 12 Mar 2018 17:24:14 +0000 Subject: [PATCH] Add 'member_of' param to GET /allocation_candidates The call to GET /allocation_candidates now accepts a 'member_of' parameter, representing one or more aggregate UUIDs. If this parameter is supplied, the allocation_candidates returned will be limited to those with resource_providers that belong to at least one of the supplied aggregates. Blueprint: alloc-candidates-member-of Change-Id: I5857e927a830914c96e040936804e322baccc24c --- .../handlers/allocation_candidate.py | 4 +- nova/api/openstack/placement/lib.py | 4 +- nova/api/openstack/placement/microversion.py | 2 + .../placement/objects/resource_provider.py | 51 +++++++++-- .../placement/rest_api_version_history.rst | 9 ++ .../placement/schemas/allocation_candidate.py | 6 ++ nova/api/openstack/placement/util.py | 66 +++++++++++--- .../api/openstack/placement/fixtures.py | 71 +++++++++++++++ .../allocation-candidates-member-of.yaml | 88 +++++++++++++++++++ .../placement/gabbits/microversion.yaml | 4 +- .../source/allocation_candidates.inc | 1 + ...es_support_member_of-92f7e1440ed63fe7.yaml | 13 +++ 12 files changed, 300 insertions(+), 19 deletions(-) create mode 100644 nova/tests/functional/api/openstack/placement/gabbits/allocation-candidates-member-of.yaml create mode 100644 releasenotes/notes/allocation_candidates_support_member_of-92f7e1440ed63fe7.yaml diff --git a/nova/api/openstack/placement/handlers/allocation_candidate.py b/nova/api/openstack/placement/handlers/allocation_candidate.py index d5511827712e..bc8e73151088 100644 --- a/nova/api/openstack/placement/handlers/allocation_candidate.py +++ b/nova/api/openstack/placement/handlers/allocation_candidate.py @@ -212,7 +212,9 @@ def list_allocation_candidates(req): context = req.environ['placement.context'] want_version = req.environ[microversion.MICROVERSION_ENVIRON] get_schema = schema.GET_SCHEMA_1_10 - if want_version.matches((1, 17)): + if want_version.matches((1, 21)): + get_schema = schema.GET_SCHEMA_1_21 + elif want_version.matches((1, 17)): get_schema = schema.GET_SCHEMA_1_17 elif want_version.matches((1, 16)): get_schema = schema.GET_SCHEMA_1_16 diff --git a/nova/api/openstack/placement/lib.py b/nova/api/openstack/placement/lib.py index d0e8198f4e23..c3108983a7b9 100644 --- a/nova/api/openstack/placement/lib.py +++ b/nova/api/openstack/placement/lib.py @@ -18,7 +18,7 @@ common library that both placement and its consumers can require.""" class RequestGroup(object): def __init__(self, use_same_provider=True, resources=None, - required_traits=None): + required_traits=None, member_of=None): """Create a grouping of resource and trait requests. :param use_same_provider: @@ -28,7 +28,9 @@ class RequestGroup(object): in any resource provider in the same tree, or a sharing provider. :param resources: A dict of { resource_class: amount, ... } :param required_traits: A set of { trait_name, ... } + :param member_of: A list of [ aggregate_UUID, ... 
] """ self.use_same_provider = use_same_provider self.resources = resources or {} self.required_traits = required_traits or set() + self.member_of = member_of or [] diff --git a/nova/api/openstack/placement/microversion.py b/nova/api/openstack/placement/microversion.py index cdbce19ad8f0..a04b7140558d 100644 --- a/nova/api/openstack/placement/microversion.py +++ b/nova/api/openstack/placement/microversion.py @@ -64,6 +64,8 @@ VERSIONS = [ '1.19', # Include generation and conflict detection in provider aggregates # APIs '1.20', # Return 200 with provider payload from POST /resource_providers + '1.21', # Support ?member_of= queryparam on + # GET /allocation_candidates ] diff --git a/nova/api/openstack/placement/objects/resource_provider.py b/nova/api/openstack/placement/objects/resource_provider.py index b9106b653134..39eebbd8326e 100644 --- a/nova/api/openstack/placement/objects/resource_provider.py +++ b/nova/api/openstack/placement/objects/resource_provider.py @@ -1064,7 +1064,7 @@ def _get_providers_with_shared_capacity(ctx, rc_id, amount): @db_api.api_context_manager.reader -def _get_all_with_shared(ctx, resources): +def _get_all_with_shared(ctx, resources, member_of=None): """Uses some more advanced SQL to find providers that either have the requested resources "locally" or are associated with a provider that shares those requested resources. @@ -1142,6 +1142,10 @@ def _get_all_with_shared(ctx, resources): # LEFT JOIN resource_provider_aggregates AS sharing_{RC_NAME} # ON shared_{RC_NAME}.aggregate_id = sharing_{RC_NAME}.aggregate_id # + # If the request specified limiting resource providers to one or more + # specific aggregates, we then join the above to another copy of the + # aggregate table and filter on the provided aggregates. + # # We calculate the WHERE conditions based on whether the resource class has # any shared providers. # @@ -1182,7 +1186,8 @@ def _get_all_with_shared(ctx, resources): # To show an example, here is the exact SQL that will be generated in an # environment that has a shared storage pool and compute nodes that have # vCPU and RAM associated with the same aggregate as the provider - # representing the shared storage pool: + # representing the shared storage pool, and where the request specified + # aggregates that the compute nodes had to be associated with: # # SELECT rp.* # FROM resource_providers AS rp @@ -1226,6 +1231,9 @@ def _get_all_with_shared(ctx, resources): # LEFT JOIN resource_provider_aggregates AS sharing_disk_gb # ON shared_disk_gb.aggregate_id = sharing_disk_gb.aggregate_id # AND sharing_disk_gb.resource_provider_id IN ($RPS_SHARING_DISK) + # INNER JOIN resource_provider_aggregates AS member_aggs + # ON rp.id = member_aggs.resource_provider_id + # AND member_aggs.aggregate_id IN ($MEMBER_OF) # WHERE ( # ( # COALESCE(usage_vcpu.used, 0) + $AMOUNT_VCPU <= @@ -1380,6 +1388,17 @@ def _get_all_with_shared(ctx, resources): )) join_chain = sharing_join + # If 'member_of' has values join with the PlacementAggregates to + # get those resource providers that are associated with any of the + # list of aggregate uuids provided with 'member_of'. 
+ if member_of: + member_join = sa.join(join_chain, _RP_AGG_TBL, + _RP_AGG_TBL.c.resource_provider_id == rpt.c.id) + agg_join = sa.join(member_join, _AGG_TBL, sa.and_( + _AGG_TBL.c.id == _RP_AGG_TBL.c.aggregate_id, + _AGG_TBL.c.uuid.in_(member_of))) + join_chain = agg_join + sel = sel.select_from(join_chain) sel = sel.where(sa.and_(*where_conds)) sel = sel.group_by(rpt.c.id) @@ -2782,7 +2801,8 @@ def _has_provider_trees(ctx): @db_api.api_context_manager.reader -def _get_provider_ids_matching_all(ctx, resources, required_traits): +def _get_provider_ids_matching_all(ctx, resources, required_traits, + member_of=None): """Returns a list of resource provider internal IDs that have available inventory to satisfy all the supplied requests for resources. @@ -2796,6 +2816,10 @@ def _get_provider_ids_matching_all(ctx, resources, required_traits): :param required_traits: A map, keyed by trait string name, of required trait internal IDs that each provider must have associated with it + :param member_of: An optional list of aggregate UUIDs. If provided, the + allocation_candidates returned will only be for resource + providers that are members of one or more of the supplied + aggregates. """ trait_rps = None if required_traits: @@ -2881,6 +2905,17 @@ def _get_provider_ids_matching_all(ctx, resources, required_traits): ) where_conds.append(usage_cond) + # If 'member_of' has values join with the PlacementAggregates to + # get those resource providers that are associated with any of the + # list of aggregate uuids provided with 'member_of'. + if member_of: + member_join = sa.join(join_chain, _RP_AGG_TBL, + _RP_AGG_TBL.c.resource_provider_id == rpt.c.id) + agg_join = sa.join(member_join, _AGG_TBL, sa.and_( + _AGG_TBL.c.id == _RP_AGG_TBL.c.aggregate_id, + _AGG_TBL.c.uuid.in_(member_of))) + join_chain = agg_join + sel = sel.select_from(join_chain) sel = sel.where(sa.and_(*where_conds)) @@ -3544,6 +3579,12 @@ class AllocationCandidates(base.VersionedObject): missing = traits - set(trait_map) raise exception.TraitNotFound(names=', '.join(missing)) + # Microversions prior to 1.21 will not have 'member_of' in the groups. + # This allows earlier microversions to continue to work. + member_of = "" + if hasattr(sharing_groups[0], "member_of"): + member_of = sharing_groups[0].member_of + # Contains a set of resource provider IDs that share some inventory for # each resource class requested. We do this here as an optimization. If # we have no sharing providers, the SQL to find matching providers for @@ -3567,7 +3608,7 @@ class AllocationCandidates(base.VersionedObject): # provider IDs of provider trees instead of the resource provider # IDs. rp_ids = _get_provider_ids_matching_all(context, resources, - trait_map) + trait_map, member_of) alloc_request_objs, summary_objs = _alloc_candidates_no_shared( context, resources, rp_ids) else: @@ -3584,7 +3625,7 @@ class AllocationCandidates(base.VersionedObject): # and are related to a provider that is sharing some resources # with it. In other words, this is the list of resource provider # IDs that are NOT sharing resources. 
- rps = _get_all_with_shared(context, resources) + rps = _get_all_with_shared(context, resources, member_of) rp_ids = set([r[0] for r in rps]) alloc_request_objs, summary_objs = _alloc_candidates_with_shared( context, resources, trait_map, rp_ids, sharing_providers) diff --git a/nova/api/openstack/placement/rest_api_version_history.rst b/nova/api/openstack/placement/rest_api_version_history.rst index 5b5face75e81..22e000332063 100644 --- a/nova/api/openstack/placement/rest_api_version_history.rst +++ b/nova/api/openstack/placement/rest_api_version_history.rst @@ -252,3 +252,12 @@ representing the newly-created resource provider, in the same format as the corresponding `GET /resource_providers/{uuid}` call. This is to allow the caller to glean automatically-set fields, such as UUID and generation, without a subsequent GET. + +1.21 Support ?member_of= queryparam on GET /allocation_candidates +----------------------------------------------------------------------------- + +Add support for the `member_of` query parameter to the `GET +/allocation_candidates` API. It accepts a comma-separated list of UUIDs for +aggregates. If this parameter is provided, the only resource providers returned +will be those in one of the specified aggregates that meet the other parts of +the request. diff --git a/nova/api/openstack/placement/schemas/allocation_candidate.py b/nova/api/openstack/placement/schemas/allocation_candidate.py index ff7216074562..48bb638ab5fa 100644 --- a/nova/api/openstack/placement/schemas/allocation_candidate.py +++ b/nova/api/openstack/placement/schemas/allocation_candidate.py @@ -46,3 +46,9 @@ GET_SCHEMA_1_17 = copy.deepcopy(GET_SCHEMA_1_16) GET_SCHEMA_1_17['properties']['required'] = { "type": ["string"] } + +# Add member_of parameter. +GET_SCHEMA_1_21 = copy.deepcopy(GET_SCHEMA_1_17) +GET_SCHEMA_1_21['properties']['member_of'] = { + "type": ["string"] +} diff --git a/nova/api/openstack/placement/util.py b/nova/api/openstack/placement/util.py index 6cc8e9257d60..f9db5e034907 100644 --- a/nova/api/openstack/placement/util.py +++ b/nova/api/openstack/placement/util.py @@ -31,8 +31,10 @@ from nova.i18n import _ # Querystring-related constants _QS_RESOURCES = 'resources' _QS_REQUIRED = 'required' +_QS_MEMBER_OF = 'member_of' _QS_KEY_PATTERN = re.compile( - r"^(%s)([1-9][0-9]*)?$" % '|'.join((_QS_RESOURCES, _QS_REQUIRED))) + r"^(%s)([1-9][0-9]*)?$" % '|'.join( + (_QS_RESOURCES, _QS_REQUIRED, _QS_MEMBER_OF))) # NOTE(cdent): This registers a FormatChecker on the jsonschema @@ -315,23 +317,53 @@ def normalize_traits_qs_param(val): return ret +def normalize_member_of_qs_param(val): + """Parse a member_of query string parameter value. + + Valid values are either a single UUID, or the prefix 'in:' followed by two + or more comma-separated UUIDs. + + :param val: A member_of query parameter of either a single UUID, or a + comma-separated string of two or more UUIDs. + :return: A list of UUIDs + :raises `webob.exc.HTTPBadRequest` if the val parameter is not in the + expected format. + """ + # Ensure that multiple values are prefixed with "in:" + if "," in val and not val.startswith("in:"): + msg = _("Multiple values for 'member_of' must be prefixed with the " + "'in:' keyword. Got: %s") % val + raise webob.exc.HTTPBadRequest(msg) + if val.startswith("in:"): + ret = val[3:].split(",") + else: + ret = [val] + # Ensure the UUIDs are valid + if not all([uuidutils.is_uuid_like(agg) for agg in ret]): + msg = _("Invalid query string parameters: Expected 'member_of' " + "parameter to contain valid UUID(s). 
Got: %s") % val + raise webob.exc.HTTPBadRequest(msg) + return ret + + def parse_qs_request_groups(qsdict): - """Parse numbered resources and traits groupings out of a querystring dict. + """Parse numbered resources, traits, and member_of groupings out of a + querystring dict. The input qsdict represents a query string of the form: ?resources=$RESOURCE_CLASS_NAME:$AMOUNT,$RESOURCE_CLASS_NAME:$AMOUNT - &required=$TRAIT_NAME,$TRAIT_NAME + &required=$TRAIT_NAME,$TRAIT_NAME&member_of=$AGG_UUID &resources1=$RESOURCE_CLASS_NAME:$AMOUNT,RESOURCE_CLASS_NAME:$AMOUNT - &required1=$TRAIT_NAME,$TRAIT_NAME + &required1=$TRAIT_NAME,$TRAIT_NAME&member_of1=$AGG_UUID &resources2=$RESOURCE_CLASS_NAME:$AMOUNT,RESOURCE_CLASS_NAME:$AMOUNT - &required2=$TRAIT_NAME,$TRAIT_NAME + &required2=$TRAIT_NAME,$TRAIT_NAME&member_of2=$AGG_UUID These are parsed in groups according to the numeric suffix of the key. For each group, a RequestGroup instance is created containing that group's - resources and required traits. For the (single) group with no suffix, the - RequestGroup.use_same_provider attribute is False; for the numbered groups - it is True. + resources, required traits, and member_of. For the (single) group with no + suffix, the RequestGroup.use_same_provider attribute is False; for the + numbered groups it is True. The return is a list of these RequestGroup instances. @@ -339,6 +371,7 @@ def parse_qs_request_groups(qsdict): ?resources=VCPU:2,MEMORY_MB:1024,DISK_GB=50 &required=HW_CPU_X86_VMX,CUSTOM_STORAGE_RAID + &member_of=in:9323b2b1-82c9-4e91-bdff-e95e808ef954,8592a199-7d73-4465-8df6-ab00a6243c82 # noqa &resources1=SRIOV_NET_VF:2 &required1=CUSTOM_PHYSNET_PUBLIC,CUSTOM_SWITCH_A &resources2=SRIOV_NET_VF:1 @@ -357,6 +390,10 @@ def parse_qs_request_groups(qsdict): "HW_CPU_X86_VMX", "CUSTOM_STORAGE_RAID", ], + member_of=[ + 9323b2b1-82c9-4e91-bdff-e95e808ef954, + 8592a199-7d73-4465-8df6-ab00a6243c82, + ], ), RequestGroup( use_same_provider=True, @@ -397,7 +434,7 @@ def parse_qs_request_groups(qsdict): match = _QS_KEY_PATTERN.match(key) if not match: continue - # `prefix` is 'resources' or 'required' + # `prefix` is 'resources', 'required', or 'member_of' # `suffix` is an integer string, or None prefix, suffix = match.groups() request_group = get_request_group(suffix or '') @@ -405,14 +442,23 @@ def parse_qs_request_groups(qsdict): request_group.resources = normalize_resources_qs_param(val) elif prefix == _QS_REQUIRED: request_group.required_traits = normalize_traits_qs_param(val) + elif prefix == _QS_MEMBER_OF: + request_group.member_of = normalize_member_of_qs_param(val) - # Ensure any group with 'required' also has 'resources'. + # Ensure any group with 'required' or 'member_of' also has 'resources'. orphans = [('required%s' % suff) for suff, group in by_suffix.items() if group.required_traits and not group.resources] if orphans: msg = _('All traits parameters must be associated with resources. ' 'Found the following orphaned traits keys: %s') raise webob.exc.HTTPBadRequest(msg % ', '.join(orphans)) + orphans = [('member_of%s' % suff) for suff, group in by_suffix.items() + if group.member_of and not group.resources] + if orphans: + msg = _('All member_of parameters must be associated with ' + 'resources. Found the following orphaned member_of ' + ' values: %s') + raise webob.exc.HTTPBadRequest(msg % ', '.join(orphans)) # NOTE(efried): The sorting is not necessary for the API, but it makes # testing easier. 
diff --git a/nova/tests/functional/api/openstack/placement/fixtures.py b/nova/tests/functional/api/openstack/placement/fixtures.py index 741b39b47a30..e28f4a9a7d09 100644 --- a/nova/tests/functional/api/openstack/placement/fixtures.py +++ b/nova/tests/functional/api/openstack/placement/fixtures.py @@ -304,6 +304,77 @@ class SharedStorageFixture(APIFixture): ss.set_aggregates([agg_uuid]) +class NonSharedStorageFixture(APIFixture): + """An APIFixture that has two compute nodes with local storage that do not + use shared storage. + """ + def start_fixture(self): + super(NonSharedStorageFixture, self).start_fixture() + self.context = context.get_admin_context() + + cn1_uuid = uuidutils.generate_uuid() + cn2_uuid = uuidutils.generate_uuid() + aggA_uuid = uuidutils.generate_uuid() + aggB_uuid = uuidutils.generate_uuid() + aggC_uuid = uuidutils.generate_uuid() + os.environ['CN1_UUID'] = cn1_uuid + os.environ['CN2_UUID'] = cn2_uuid + os.environ['AGGA_UUID'] = aggA_uuid + os.environ['AGGB_UUID'] = aggB_uuid + os.environ['AGGC_UUID'] = aggC_uuid + + cn1 = rp_obj.ResourceProvider( + self.context, + name='cn1', + uuid=cn1_uuid) + cn1.create() + + cn2 = rp_obj.ResourceProvider( + self.context, + name='cn2', + uuid=cn2_uuid) + cn2.create() + + # Populate compute node inventory for VCPU and RAM + for cn in (cn1, cn2): + vcpu_inv = rp_obj.Inventory( + self.context, + resource_provider=cn, + resource_class='VCPU', + total=24, + reserved=0, + max_unit=24, + min_unit=1, + step_size=1, + allocation_ratio=16.0) + vcpu_inv.obj_set_defaults() + ram_inv = rp_obj.Inventory( + self.context, + resource_provider=cn, + resource_class='MEMORY_MB', + total=128 * 1024, + reserved=0, + max_unit=128 * 1024, + min_unit=256, + step_size=256, + allocation_ratio=1.5) + ram_inv.obj_set_defaults() + disk_inv = rp_obj.Inventory( + self.context, + resource_provider=cn, + resource_class='DISK_GB', + total=2000, + reserved=100, + max_unit=2000, + min_unit=10, + step_size=10, + allocation_ratio=1.0) + disk_inv.obj_set_defaults() + inv_list = rp_obj.InventoryList(objects=[vcpu_inv, ram_inv, + disk_inv]) + cn.set_inventory(inv_list) + + class CORSFixture(APIFixture): """An APIFixture that turns on CORS.""" diff --git a/nova/tests/functional/api/openstack/placement/gabbits/allocation-candidates-member-of.yaml b/nova/tests/functional/api/openstack/placement/gabbits/allocation-candidates-member-of.yaml new file mode 100644 index 000000000000..a359170426ed --- /dev/null +++ b/nova/tests/functional/api/openstack/placement/gabbits/allocation-candidates-member-of.yaml @@ -0,0 +1,88 @@ +# Tests of allocation candidates API + +fixtures: + - NonSharedStorageFixture + +defaults: + request_headers: + x-auth-token: admin + content-type: application/json + accept: application/json + openstack-api-version: placement 1.21 + +tests: + +- name: get bad member_of microversion + GET: /allocation_candidates?resources=VCPU:1&member_of=in:$ENVIRON['AGGA_UUID'],$ENVIRON['AGGB_UUID'] + request_headers: + openstack-api-version: placement 1.18 + status: 400 + response_strings: + - Invalid query string parameters + - "'member_of' was unexpected" + +- name: get allocation candidates invalid member_of value + GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=INVALID_UUID + status: 400 + response_strings: + - Invalid query string parameters + - Expected 'member_of' parameter to contain valid UUID(s) + +- name: get allocation candidates no 'in:' for multiple member_of + GET: 
/allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=$ENVIRON['AGGA_UUID'],$ENVIRON['AGGB_UUID'] + status: 400 + response_strings: + - Multiple values for 'member_of' must be prefixed with the 'in:' keyword + +- name: get allocation candidates multiple member_of with 'in:' but invalid values + GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=in:$ENVIRON['AGGA_UUID'],INVALID_UUID + status: 400 + response_strings: + - Invalid query string parameters + - Expected 'member_of' parameter to contain valid UUID(s) + +- name: get allocation candidates with no match for member_of + GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=$ENVIRON['AGGA_UUID'] + status: 200 + response_json_paths: + $.allocation_requests.`len`: 0 + +- name: get compute node 1 state + GET: /resource_providers/$ENVIRON['CN1_UUID'] + +- name: associate the first compute node with aggA + PUT: /resource_providers/$ENVIRON['CN1_UUID']/aggregates + data: + aggregates: + - $ENVIRON['AGGA_UUID'] + resource_provider_generation: $HISTORY['get compute node 1 state'].$RESPONSE['$.generation'] + status: 200 + +- name: verify that the member_of call now returns 1 allocation_candidate + GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=in:$ENVIRON['AGGA_UUID'],$ENVIRON['AGGB_UUID'] + status: 200 + response_json_paths: + $.allocation_requests.`len`: 1 + +- name: get compute node 2 state + GET: /resource_providers/$ENVIRON['CN2_UUID'] + +- name: associate the second compute node with aggB + PUT: /resource_providers/$ENVIRON['CN2_UUID']/aggregates + data: + aggregates: + - $ENVIRON['AGGB_UUID'] + resource_provider_generation: $HISTORY['get compute node 2 state'].$RESPONSE['$.generation'] + status: 200 + +- name: verify that the member_of call now returns both RPs + GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=in:$ENVIRON['AGGA_UUID'],$ENVIRON['AGGB_UUID'] + status: 200 + response_json_paths: + $.allocation_requests.`len`: 2 + +- name: verify that aggC still returns no RPs + GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=$ENVIRON['AGGC_UUID'] + status: 200 + response_json_paths: + $.allocation_requests.`len`: 0 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/microversion.yaml b/nova/tests/functional/api/openstack/placement/gabbits/microversion.yaml index 9d833ec7a628..b46e66918b83 100644 --- a/nova/tests/functional/api/openstack/placement/gabbits/microversion.yaml +++ b/nova/tests/functional/api/openstack/placement/gabbits/microversion.yaml @@ -39,13 +39,13 @@ tests: response_json_paths: $.errors[0].title: Not Acceptable -- name: latest microversion is 1.20 +- name: latest microversion is 1.21 GET: / request_headers: openstack-api-version: placement latest response_headers: vary: /OpenStack-API-Version/ - openstack-api-version: placement 1.20 + openstack-api-version: placement 1.21 - name: other accept header bad version GET: / diff --git a/placement-api-ref/source/allocation_candidates.inc b/placement-api-ref/source/allocation_candidates.inc index 5b09d044ab31..38fa56805ea7 100644 --- a/placement-api-ref/source/allocation_candidates.inc +++ b/placement-api-ref/source/allocation_candidates.inc @@ -32,6 +32,7 @@ Request - resources: resources_query_required - limit: allocation_candidates_limit - required: allocation_candidates_required + - member_of: member_of Response (microversions 1.12 - ) -------------------------------- diff --git 
a/releasenotes/notes/allocation_candidates_support_member_of-92f7e1440ed63fe7.yaml b/releasenotes/notes/allocation_candidates_support_member_of-92f7e1440ed63fe7.yaml new file mode 100644 index 000000000000..6ea43e891da6 --- /dev/null +++ b/releasenotes/notes/allocation_candidates_support_member_of-92f7e1440ed63fe7.yaml @@ -0,0 +1,13 @@ +--- +features: + - | + Add support, in a new placement microversion 1.21, for the ``member_of`` + query parameter, representing one or more aggregate UUIDs. When supplied, + it will filter the returned allocation candidates to only those + resource_providers that are associated with ("members of") the specified + aggregate(s). This parameter can have a value of either a single aggregate + UUID, or a comma-separated list of aggregate UUIDs. When specifying more + than one aggregate, a resource provider needs to be associated with at + least one of the aggregates in order to be included; it does not have to be + associated with all of them. Because of this, the list of UUIDs must be + prefixed with ``in:`` to represent the logical ``OR`` of the selection.
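
For completeness, a hedged sketch of what a client request against the new microversion might look like. It is not part of this change; the placement endpoint and aggregate UUIDs are placeholders, and only the query-string construction and the microversion opt-in header are meant to be taken literally.

    from urllib.parse import urlencode

    # Placeholder values -- substitute a real placement endpoint and real
    # aggregate UUIDs from your deployment.
    PLACEMENT_ENDPOINT = "http://placement.example.com"
    AGG_A = "9323b2b1-82c9-4e91-bdff-e95e808ef954"
    AGG_B = "8592a199-7d73-4465-8df6-ab00a6243c82"

    query = urlencode({
        "resources": "VCPU:1,MEMORY_MB:1024,DISK_GB:100",
        # Two or more aggregate UUIDs are ORed together and need the 'in:'
        # prefix; a single UUID may be passed bare.
        "member_of": "in:%s,%s" % (AGG_A, AGG_B),
    })
    url = "%s/allocation_candidates?%s" % (PLACEMENT_ENDPOINT, query)

    # The caller must opt in to microversion 1.21 or later for 'member_of'
    # to be accepted.
    headers = {"OpenStack-API-Version": "placement 1.21"}
    print(url)
    print(headers)
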