diff --git a/doc/source/cli/nova-status.rst b/doc/source/cli/nova-status.rst
index dbd42991148b..8c0064baac10 100644
--- a/doc/source/cli/nova-status.rst
+++ b/doc/source/cli/nova-status.rst
@@ -140,6 +140,9 @@ Upgrade
     service was removed in Train.
   * The ``Request Spec Migration`` check was removed.
 
+  **21.0.0 (Ussuri)**
+
+  * Checks for the Placement API are modified to require version 1.34.
 
 See Also
 ========
diff --git a/nova/cmd/status.py b/nova/cmd/status.py
index 899e623748cf..cac107c66ee2 100644
--- a/nova/cmd/status.py
+++ b/nova/cmd/status.py
@@ -46,12 +46,11 @@ from nova.volume import cinder
 
 CONF = nova.conf.CONF
 
-# NOTE(vrushali): 1.32 is required by nova-scheduler to use member_of
-# queryparam to prepare a list of forbidden aggregates that should be
-# ignored by placement service in the allocation candidates API.
+# NOTE(efried): 1.34 is required by nova-scheduler to return mappings from
+# request group suffixes to the resource providers that satisfy them.
 # NOTE: If you bump this version, remember to update the history
 # section in the nova-status man page (doc/source/cli/nova-status).
-MIN_PLACEMENT_MICROVERSION = "1.32"
+MIN_PLACEMENT_MICROVERSION = "1.34"
 
 # NOTE(mriedem): 3.44 is needed to work with volume attachment records which
 # are required for supporting multi-attach capable volumes.
diff --git a/nova/scheduler/client/report.py b/nova/scheduler/client/report.py
index 9f60c118b9da..a25ab69e927d 100644
--- a/nova/scheduler/client/report.py
+++ b/nova/scheduler/client/report.py
@@ -41,7 +41,7 @@ from nova import utils
 CONF = nova.conf.CONF
 LOG = logging.getLogger(__name__)
 WARN_EVERY = 10
-NEGATIVE_MEMBER_OF_VERSION = '1.32'
+MAPPINGS_VERSION = '1.34'
 RESHAPER_VERSION = '1.30'
 CONSUMER_GENERATION_VERSION = '1.28'
 ALLOW_RESERVED_EQUAL_TOTAL_INVENTORY_VERSION = '1.26'
@@ -291,7 +291,7 @@ class SchedulerReportClient(object):
         """
         # Note that claim_resources() will use this version as well to
         # make allocations by `PUT /allocations/{consumer_uuid}`
-        version = NEGATIVE_MEMBER_OF_VERSION
+        version = MAPPINGS_VERSION
         qparams = resources.to_querystring()
         url = "/allocation_candidates?%s" % qparams
         resp = self.get(url, version=version,
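
For context on what the bump to Placement 1.34 buys the report client: each
allocation candidate now carries a ``mappings`` dict relating request group
suffixes to the resource provider(s) that satisfied them. The snippet below
is illustrative only and is not part of the change; the candidate dict is
hand-written to the assumed 1.34 shape (suffix mapped to a list of provider
UUIDs, the unsuffixed group keyed by the empty string), not captured
Placement output, and the helper is a made-up convenience for the example.

    # Illustrative only: one allocation request shaped like a Placement 1.34
    # response (hand-written example data, not captured output), plus a tiny
    # helper that looks up which provider(s) satisfied a suffixed group.
    candidate = {
        'allocations': {
            'rp-uuid-compute': {'resources': {'VCPU': 1, 'MEMORY_MB': 1024}},
            'rp-uuid-storage': {'resources': {'DISK_GB': 10}},
        },
        # New at microversion 1.34: group suffix -> list of provider UUIDs.
        'mappings': {
            '_CPU': ['rp-uuid-compute'],
            '_MEM': ['rp-uuid-compute'],
            '_DISK': ['rp-uuid-storage'],
        },
    }

    def providers_for_group(alloc_req, suffix):
        """Return the provider UUIDs that satisfied the given group suffix."""
        return alloc_req['mappings'].get(suffix, [])

    assert providers_for_group(candidate, '_DISK') == ['rp-uuid-storage']
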
diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py
index 00f8ec20b6cd..62d4cf5adc01 100644
--- a/nova/scheduler/utils.py
+++ b/nova/scheduler/utils.py
@@ -53,8 +53,8 @@ class ResourceRequest(object):
     # extra_specs-specific consts
     XS_RES_PREFIX = 'resources'
     XS_TRAIT_PREFIX = 'trait'
-    # Regex patterns for numbered or un-numbered resources/trait keys
-    XS_KEYPAT = re.compile(r"^(%s)([1-9][0-9]*)?:(.*)$" %
+    # Regex patterns for suffixed or unsuffixed resources/trait keys
+    XS_KEYPAT = re.compile(r"^(%s)([a-zA-Z0-9_-]{1,64})?:(.*)$" %
                            '|'.join((XS_RES_PREFIX, XS_TRAIT_PREFIX)))
 
     def __init__(self, request_spec, enable_pinning_translate=True):
@@ -66,13 +66,17 @@ class ResourceRequest(object):
         For extra specs, items of the following form are examined:
 
         - ``resources:$RESOURCE_CLASS``: $AMOUNT
-        - ``resources$N:$RESOURCE_CLASS``: $AMOUNT
+        - ``resources$S:$RESOURCE_CLASS``: $AMOUNT
         - ``trait:$TRAIT_NAME``: "required"
-        - ``trait$N:$TRAIT_NAME``: "required"
+        - ``trait$S:$TRAIT_NAME``: "required"
+
+        ...where ``$S`` is a string suffix as supported via Placement
+        microversion 1.33
+        https://docs.openstack.org/placement/train/specs/train/implemented/2005575-nested-magic-1.html#arbitrary-group-suffixes  # noqa
 
         .. note::
 
-            This does *not* yet handle ``member_of[$N]``.
+            This does *not* yet handle ``member_of[$S]``.
 
         For image metadata, traits are extracted from the ``traits_required``
         property, if present.
@@ -97,7 +101,7 @@ class ResourceRequest(object):
         # set to None to indicate "no limit".
         self._limit = CONF.scheduler.max_placement_results
 
-        # TODO(efried): Handle member_of[$N], which will need to be reconciled
+        # TODO(efried): Handle member_of[$S], which will need to be reconciled
         # with destination.aggregates handling in resources_from_request_spec
 
         # request_spec.image is nullable
@@ -109,7 +113,7 @@ class ResourceRequest(object):
 
         # Parse the flavor extra specs
         self._process_extra_specs(request_spec.flavor)
-        self.numbered_groups_from_flavor = self.get_num_of_numbered_groups()
+        self.suffixed_groups_from_flavor = self.get_num_of_suffixed_groups()
 
         # Now parse the (optional) image metadata
         self._process_image_meta(image)
@@ -128,7 +132,7 @@ class ResourceRequest(object):
 
         # Parse the flavor itself, though we'll only use these fields if they
         # don't conflict with something already provided by the flavor extra
-        # specs. These are all added to the unnumbered request group.
+        # specs. These are all added to the unsuffixed request group.
         merged_resources = self.merged_resources()
 
         if (orc.VCPU not in merged_resources and
@@ -175,15 +179,15 @@ class ResourceRequest(object):
                 continue
 
             # 'prefix' is 'resources' or 'trait'
-            # 'suffix' is $N or None
+            # 'suffix' is $S or None
             # 'name' is either the resource class name or the trait name.
             prefix, suffix, name = match.groups()
 
-            # Process "resources[$N]"
+            # Process "resources[$S]"
             if prefix == self.XS_RES_PREFIX:
                 self._add_resource(suffix, name, val)
 
-            # Process "trait[$N]"
+            # Process "trait[$S]"
             elif prefix == self.XS_TRAIT_PREFIX:
                 self._add_trait(suffix, name, val)
 
@@ -193,7 +197,7 @@ class ResourceRequest(object):
 
         for trait in image.properties.get('traits_required', []):
             # required traits from the image are always added to the
-            # unnumbered request group, granular request groups are not
+            # unsuffixed request group, granular request groups are not
             # supported in image traits
             self._add_trait(None, trait, "required")
 
@@ -295,18 +299,29 @@ class ResourceRequest(object):
         return self._rg_by_id[ident]
 
     def _add_request_group(self, request_group):
-        """Inserts the existing group with a unique integer id
+        """Inserts the existing group with a unique suffix.
 
-        The groups coming from the flavor can have arbitrary ids but every id
-        is an integer. So this function can ensure unique ids by using bigger
-        ids than the maximum of existing ids.
+        The groups coming from the flavor can have arbitrary suffixes; those
+        are guaranteed to be unique within the flavor.
+
+        A group coming from "outside" (ports, device profiles) must be given a
+        suffix that is unique in combination with suffixes from the flavor.
+
+        .. todo:: Tie suffixes to RequestGroup.requester_id
 
         :param request_group: the RequestGroup to be added
         """
-        # NOTE(gibi) [0] just here to always have a defined maximum
-        group_idents = [0] + [int(ident) for ident in self._rg_by_id if ident]
-        ident = max(group_idents) + 1
-        self._rg_by_id[ident] = request_group
+        # Generate a unique suffix by peeling out all the suffixes that are
+        # integers (i.e. that int() accepts) and adding 1 to the highest one.
+        max_ident = 0
+        for ident in self._rg_by_id:
+            try:
+                max_ident = max(int(ident), max_ident)
+            except (TypeError, ValueError):
+                # Non-numeric or None (unsuffixed)
+                continue
+
+        self._rg_by_id[max_ident + 1] = request_group
 
     def _add_resource(self, groupid, rclass, amount):
         self.get_request_group(groupid).add_resource(rclass, amount)
@@ -327,7 +342,7 @@ class ResourceRequest(object):
         for rg in self._rg_by_id.values():
             yield rg.resources
 
-    def get_num_of_numbered_groups(self):
+    def get_num_of_suffixed_groups(self):
         return len([ident for ident in self._rg_by_id.keys()
                     if ident is not None])
 
@@ -346,7 +361,7 @@ class ResourceRequest(object):
         return dict(ret)
 
     def _clean_empties(self):
-        """Get rid of any empty ResourceGroup instances."""
+        """Get rid of any empty RequestGroup instances."""
         for ident, rg in list(self._rg_by_id.items()):
             if not any((rg.resources, rg.required_traits,
                         rg.forbidden_traits)):
@@ -390,8 +405,8 @@ class ResourceRequest(object):
                 qs_params.append(('required%s' % suffix, required_val))
             if aggregates:
                 aggs = []
-                # member_ofN is a list of lists. We need a tuple of
-                # ('member_ofN', 'in:uuid,uuid,...') for each inner list.
+                # member_of$S is a list of lists. We need a tuple of
+                # ('member_of$S', 'in:uuid,uuid,...') for each inner list.
                 for agglist in aggregates:
                     aggs.append(('member_of%s' % suffix,
                                  'in:' + ','.join(sorted(agglist))))
@@ -399,8 +414,8 @@ class ResourceRequest(object):
             if in_tree:
                 qs_params.append(('in_tree%s' % suffix, in_tree))
             if forbidden_aggregates:
-                # member_ofN is a list of aggregate uuids. We need a
-                # tuple of ('member_ofN, '!in:uuid,uuid,...').
+                # member_of$S is a list of aggregate uuids. We need a
+                # tuple of ('member_of$S', '!in:uuid,uuid,...').
                 forbidden_aggs = '!in:' + ','.join(
                     sorted(forbidden_aggregates))
                 qs_params.append(('member_of%s' % suffix, forbidden_aggs))
@@ -414,10 +429,10 @@ class ResourceRequest(object):
             qparams.append(('group_policy', self._group_policy))
 
         for ident, rg in self._rg_by_id.items():
-            # [('resourcesN', 'rclass:amount,rclass:amount,...'),
-            #  ('requiredN', 'trait_name,!trait_name,...'),
-            #  ('member_ofN', 'in:uuid,uuid,...'),
-            #  ('member_ofN', 'in:uuid,uuid,...')]
+            # [('resources[$S]', 'rclass:amount,rclass:amount,...'),
+            #  ('required[$S]', 'trait_name,!trait_name,...'),
+            #  ('member_of[$S]', 'in:uuid,uuid,...'),
+            #  ('member_of[$S]', 'in:uuid,uuid,...')]
             qparams.extend(to_queryparams(rg, ident or ''))
 
         return parse.urlencode(sorted(qparams))
@@ -435,7 +450,7 @@ class ResourceRequest(object):
 
 
 def build_request_spec(image, instances, instance_type=None):
-    """Build a request_spec for the scheduler.
+    """Build a request_spec (ahem, not a RequestSpec) for the scheduler.
 
     The request_spec assumes that all instances to be scheduled are the
     same type.
@@ -600,7 +615,7 @@ def resources_from_request_spec(ctxt, spec_obj, host_manager,
             for key in spec_obj.scheduler_hints)):
         res_req._limit = None
 
-    if res_req.get_num_of_numbered_groups() >= 2 and not res_req.group_policy:
+    if res_req.get_num_of_suffixed_groups() >= 2 and not res_req.group_policy:
         LOG.warning(
             "There is more than one numbered request group in the "
             "allocation candidate query but the flavor did not specify "
@@ -613,7 +628,7 @@ def resources_from_request_spec(ctxt, spec_obj, host_manager,
             "group to be satisfied from a separate resource provider then "
             "use 'group_policy': 'isolate'.")
 
-        if res_req.numbered_groups_from_flavor <= 1:
+        if res_req.suffixed_groups_from_flavor <= 1:
             LOG.info(
                 "At least one numbered request group is defined outside of "
                 "the flavor (e.g. in a port that has a QoS minimum bandwidth "
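
To see how suffixed groups end up on the wire, here is a minimal,
stand-alone rendering of the ``resources$S`` query parameters. It is a
simplified illustration only, not the implementation in this change (that is
``ResourceRequest.to_querystring()`` above); the ``render`` helper and the
sample group contents are made up for the example, with the unsuffixed group
keyed by the empty string just as ``ident or ''`` does above.

    # Illustrative only: a stripped-down sketch of how suffixed groups are
    # rendered into the GET /allocation_candidates querystring.
    from urllib import parse

    def render(groups):
        """groups: dict of suffix ('' for the unsuffixed group) -> resources."""
        qparams = []
        for suffix, resources in groups.items():
            value = ','.join('%s:%d' % (rc, amount)
                             for rc, amount in sorted(resources.items()))
            qparams.append(('resources%s' % suffix, value))
        return parse.urlencode(sorted(qparams))

    print(render({'': {'VCPU': 1, 'MEMORY_MB': 1024},
                  '_DISK': {'DISK_GB': 30}}))
    # resources=MEMORY_MB%3A1024%2CVCPU%3A1&resources_DISK=DISK_GB%3A30
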
diff --git a/nova/tests/functional/test_report_client.py b/nova/tests/functional/test_report_client.py
index 6caed8bcce4c..ea52fcfccbd6 100644
--- a/nova/tests/functional/test_report_client.py
+++ b/nova/tests/functional/test_report_client.py
@@ -1272,6 +1272,33 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
             del alloc['generation']
         self.assertEqual(expected, actual)
 
+    def test_allocation_candidates_mappings(self):
+        """Do a complex GET /allocation_candidates query and make sure the
+        response contains the ``mappings`` keys we expect at Placement 1.34.
+        """
+        flavor = objects.Flavor(
+            vcpus=0, memory_mb=0, root_gb=0, ephemeral_gb=0, swap=0,
+            extra_specs={
+                'group_policy': 'none',
+                'resources_CPU:VCPU': 1,
+                'resources_MEM:MEMORY_MB': 1024,
+                'resources_DISK:DISK_GB': 10
+            })
+        req_spec = objects.RequestSpec(flavor=flavor, is_bfv=False)
+        with self._interceptor():
+            self._set_up_provider_tree()
+            acs = self.client.get_allocation_candidates(
+                self.context, utils.ResourceRequest(req_spec))[0]
+            # We're not going to validate all the allocations - Placement has
+            # tests for that - just make sure they're there.
+            self.assertEqual(3, len(acs))
+            # We're not going to validate all the mappings - Placement has
+            # tests for that - just make sure they're there.
+            for ac in acs:
+                self.assertIn('allocations', ac)
+                self.assertEqual({'_CPU', '_MEM', '_DISK'},
+                                 set(ac['mappings']))
+
     def test_get_allocations_for_provider_tree(self):
         with self._interceptor():
             # When the provider tree cache is empty (or we otherwise supply a
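
The unit test changes below hinge on the widened ``XS_KEYPAT`` suffix
pattern from ``nova/scheduler/utils.py``. The stand-alone check below is a
sketch, not part of the change: the regex literal mirrors the pattern added
above, and the sample keys echo the test data that follows (any other names
are arbitrary).

    # Illustrative only: which extra spec keys the widened pattern accepts.
    import re

    XS_KEYPAT = re.compile(r"^(resources|trait)([a-zA-Z0-9_-]{1,64})?:(.*)$")

    for key in ('resources_DISK:DISK_GB',       # suffixed group '_DISK'
                'resources2:VGPU',              # numeric suffixes still work
                'resources:VCPU',               # unsuffixed group
                'resources*2:CUSTOM_WIDGET',    # '*' not allowed -> ignored
                'traitFoo$:HW_NIC_ACCEL_SSL'):  # '$' not allowed -> ignored
        match = XS_KEYPAT.match(key)
        print(key, '->', match.groups() if match else 'ignored')
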
diff --git a/nova/tests/unit/scheduler/client/test_report.py b/nova/tests/unit/scheduler/client/test_report.py
index 095055201fb4..1521911ee6dd 100644
--- a/nova/tests/unit/scheduler/client/test_report.py
+++ b/nova/tests/unit/scheduler/client/test_report.py
@@ -2074,16 +2074,16 @@ class TestProviderOperations(SchedulerReportClientTestCase):
             'trait:CUSTOM_TRAIT2': 'preferred',
             'trait:CUSTOM_TRAIT3': 'forbidden',
             'trait:CUSTOM_TRAIT4': 'forbidden',
-            'resources1:DISK_GB': '30',
-            'trait1:STORAGE_DISK_SSD': 'required',
+            'resources_DISK:DISK_GB': '30',
+            'trait_DISK:STORAGE_DISK_SSD': 'required',
             'resources2:VGPU': '2',
             'trait2:HW_GPU_RESOLUTION_W2560H1600': 'required',
             'trait2:HW_GPU_API_VULKAN': 'required',
-            'resources3:SRIOV_NET_VF': '1',
-            'resources3:CUSTOM_NET_EGRESS_BYTES_SEC': '125000',
+            'resources_NET:SRIOV_NET_VF': '1',
+            'resources_NET:CUSTOM_NET_EGRESS_BYTES_SEC': '125000',
             'group_policy': 'isolate',
             # These are ignored because misspelled, bad value, etc.
-            'resources02:CUSTOM_WIDGET': '123',
+            'resources*2:CUSTOM_WIDGET': '123',
             'trait:HW_NIC_OFFLOAD_LRO': 'preferred',
             'group_policy3': 'none',
         })
@@ -2102,12 +2102,13 @@ class TestProviderOperations(SchedulerReportClientTestCase):
             ('member_of', 'in:agg1,agg2,agg3'),
             ('required', 'CUSTOM_TRAIT1,HW_CPU_X86_AVX,!CUSTOM_TRAIT3,'
                          '!CUSTOM_TRAIT4'),
-            ('required1', 'STORAGE_DISK_SSD'),
             ('required2', 'HW_GPU_API_VULKAN,HW_GPU_RESOLUTION_W2560H1600'),
+            ('required_DISK', 'STORAGE_DISK_SSD'),
             ('resources', 'MEMORY_MB:1024,VCPU:1'),
-            ('resources1', 'DISK_GB:30'),
             ('resources2', 'VGPU:2'),
-            ('resources3', 'CUSTOM_NET_EGRESS_BYTES_SEC:125000,SRIOV_NET_VF:1')
+            ('resources_DISK', 'DISK_GB:30'),
+            ('resources_NET',
+             'CUSTOM_NET_EGRESS_BYTES_SEC:125000,SRIOV_NET_VF:1')
         ]
 
         resp_mock.json.return_value = json_data
@@ -2124,7 +2125,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         expected_url = '/allocation_candidates?%s' % parse.urlencode(
             expected_query)
         self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, microversion='1.32',
+            expected_url, microversion='1.34',
             global_request_id=self.context.global_id)
         self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs)
         self.assertEqual(mock.sentinel.p_sums, p_sums)
@@ -2168,7 +2169,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
             expected_query)
         self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs)
         self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, microversion='1.32',
+            expected_url, microversion='1.34',
             global_request_id=self.context.global_id)
         self.assertEqual(mock.sentinel.p_sums, p_sums)
 
@@ -2194,7 +2195,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         res = self.client.get_allocation_candidates(self.context, resources)
 
         self.ks_adap_mock.get.assert_called_once_with(
-            mock.ANY, microversion='1.32',
+            mock.ANY, microversion='1.34',
             global_request_id=self.context.global_id)
         url = self.ks_adap_mock.get.call_args[0][0]
         split_url = parse.urlsplit(url)
diff --git a/nova/tests/unit/scheduler/test_utils.py b/nova/tests/unit/scheduler/test_utils.py
index 798426052045..e4a785e73099 100644
--- a/nova/tests/unit/scheduler/test_utils.py
+++ b/nova/tests/unit/scheduler/test_utils.py
@@ -852,8 +852,8 @@ class TestUtils(TestUtilsBase):
             'trait2:CUSTOM_PHYSNET_NET2': 'required',
             'trait2:HW_NIC_ACCEL_SSL': 'required',
             # Groupings that don't quite match the patterns are ignored
-            'resources_5:SRIOV_NET_VF': '7',
-            'traitFoo:HW_NIC_ACCEL_SSL': 'required',
+            'resources_*5:SRIOV_NET_VF': '7',
+            'traitFoo$:HW_NIC_ACCEL_SSL': 'required',
             # Solo resource, no corresponding traits
             'resources3:DISK_GB': '5',
             'group_policy': 'isolate',