diff --git a/doc/source/cli/nova-status.rst b/doc/source/cli/nova-status.rst
index 8c0064baac10..f30318452a9a 100644
--- a/doc/source/cli/nova-status.rst
+++ b/doc/source/cli/nova-status.rst
@@ -141,7 +141,7 @@ Upgrade
 * The ``Request Spec Migration`` check was removed.
 
 **21.0.0 (Ussuri)**
 
-* Checks for the Placement API are modified to require version 1.34.
+* Checks for the Placement API are modified to require version 1.35.
 
 See Also
 ========
diff --git a/nova/cmd/status.py b/nova/cmd/status.py
index cac107c66ee2..18d84ce5ecb0 100644
--- a/nova/cmd/status.py
+++ b/nova/cmd/status.py
@@ -46,11 +46,12 @@ from nova.volume import cinder
 
 CONF = nova.conf.CONF
 
-# NOTE(efried): 1.34 is required by nova-scheduler to return mappings from
-# request group suffixes to the resource providers that satisfy them.
+# NOTE(efried): 1.35 is required by nova-scheduler to support the root_required
+# queryparam to make GET /allocation_candidates require that a trait be present
+# on the root provider, irrespective of how the request groups are specified.
 # NOTE: If you bump this version, remember to update the history
 # section in the nova-status man page (doc/source/cli/nova-status).
-MIN_PLACEMENT_MICROVERSION = "1.34"
+MIN_PLACEMENT_MICROVERSION = "1.35"
 
 # NOTE(mriedem): 3.44 is needed to work with volume attachment records which
 # are required for supporting multi-attach capable volumes.
diff --git a/nova/objects/request_spec.py b/nova/objects/request_spec.py
index d3988da6ffde..bf0a7a5c4aeb 100644
--- a/nova/objects/request_spec.py
+++ b/nova/objects/request_spec.py
@@ -34,7 +34,8 @@ LOG = logging.getLogger(__name__)
 REQUEST_SPEC_OPTIONAL_ATTRS = ['requested_destination',
                                'security_groups',
                                'network_metadata',
-                               'requested_resources']
+                               'requested_resources',
+                               'request_level_params']
 
 
 @base.NovaObjectRegistry.register
@@ -52,7 +53,8 @@ class RequestSpec(base.NovaObject):
     # Version 1.10: Added network_metadata
     # Version 1.11: Added is_bfv
     # Version 1.12: Added requested_resources
-    VERSION = '1.12'
+    # Version 1.13: Added request_level_params
+    VERSION = '1.13'
 
     fields = {
         'id': fields.IntegerField(),
@@ -103,12 +105,16 @@ class RequestSpec(base.NovaObject):
         # NOTE(alex_xu): This field won't be persisted.
         'requested_resources': fields.ListOfObjectsField('RequestGroup',
                                                          nullable=True,
-                                                         default=None)
+                                                         default=None),
+        # NOTE(efried): This field won't be persisted.
+        'request_level_params': fields.ObjectField('RequestLevelParams'),
     }
 
     def obj_make_compatible(self, primitive, target_version):
         super(RequestSpec, self).obj_make_compatible(primitive, target_version)
         target_version = versionutils.convert_version_to_tuple(target_version)
+        if target_version < (1, 13) and 'request_level_params' in primitive:
+            del primitive['request_level_params']
         if target_version < (1, 12):
             if 'requested_resources' in primitive:
                 del primitive['requested_resources']
@@ -142,6 +148,10 @@ class RequestSpec(base.NovaObject):
                 physnets=set(), tunneled=False)
             return
+        if attrname == 'request_level_params':
+            self.request_level_params = RequestLevelParams()
+            return
+
         # NOTE(sbauza): In case the primitive was not providing that field
         # because of a previous RequestSpec version, we want to default
         # that field in order to have the same behaviour.
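A quick illustration of the compatibility behaviour added in the ``obj_make_compatible()`` hunk above: when a 1.13 RequestSpec primitive is backported for a consumer that only understands an older version, the non-persisted ``request_level_params`` key is simply dropped. This is a simplified standalone sketch using plain dicts rather than the real oslo.versionedobjects machinery; the helper name is invented for the example.

    def make_compatible(primitive, target_version):
        # Mirror of the new branch: strip request_level_params for < 1.13.
        if target_version < (1, 13):
            primitive.pop('request_level_params', None)
        return primitive

    spec = {'flavor': {},
            'request_level_params': {'root_required': ['CUSTOM_FOO']}}
    print(make_compatible(dict(spec), (1, 12)))  # {'flavor': {}}
    print(make_compatible(dict(spec), (1, 13)))  # request_level_params kept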
@@ -167,6 +177,18 @@ class RequestSpec(base.NovaObject):
     def swap(self):
         return self.flavor.swap
 
+    @property
+    def root_required(self):
+        # self.request_level_params and .root_required lazy-default via their
+        # respective obj_load_attr methods.
+        return self.request_level_params.root_required
+
+    @property
+    def root_forbidden(self):
+        # self.request_level_params and .root_forbidden lazy-default via their
+        # respective obj_load_attr methods.
+        return self.request_level_params.root_forbidden
+
     def _image_meta_from_image(self, image):
         if isinstance(image, objects.ImageMeta):
             self.image = image
@@ -497,6 +519,10 @@ class RequestSpec(base.NovaObject):
         if port_resource_requests:
             spec_obj.requested_resources.extend(port_resource_requests)
 
+        # NOTE(efried): We don't need to handle request_level_params here yet
+        # because they're set dynamically by the scheduler. That could change
+        # in the future.
+
         # NOTE(sbauza): Default the other fields that are not part of the
         # original contract
         spec_obj.obj_set_defaults()
@@ -537,11 +563,10 @@ class RequestSpec(base.NovaObject):
             if key in ['id', 'instance_uuid']:
                 setattr(spec, key, db_spec[key])
             elif key in ('requested_destination', 'requested_resources',
-                         'network_metadata'):
+                         'network_metadata', 'request_level_params'):
                 # Do not override what we already have in the object as this
                 # field is not persisted. If save() is called after
-                # requested_resources, requested_destination or
-                # network_metadata is populated, it will reset the field to
+                # one of these fields is populated, it will reset the field to
                 # None and we'll lose what is set (but not persisted) on the
                 # object.
                 continue
@@ -618,10 +643,10 @@ class RequestSpec(base.NovaObject):
         if 'instance_group' in spec and spec.instance_group:
             spec.instance_group.members = None
             spec.instance_group.hosts = None
-        # NOTE(mriedem): Don't persist retries, requested_destination,
-        # requested_resources or ignored hosts since those are per-request
+        # NOTE(mriedem): Don't persist these since they are per-request
         for excluded in ('retry', 'requested_destination',
-                         'requested_resources', 'ignore_hosts'):
+                         'requested_resources', 'ignore_hosts',
+                         'request_level_params'):
             if excluded in spec and getattr(spec, excluded):
                 setattr(spec, excluded, None)
         # NOTE(stephenfin): Don't persist network metadata since we have
@@ -1182,3 +1207,25 @@ class RequestGroup(base.NovaObject):
         for rclass in list(self.resources):
             if self.resources[rclass] == 0:
                 self.resources.pop(rclass)
+
+
+@base.NovaObjectRegistry.register
+class RequestLevelParams(base.NovaObject):
+    """Options destined for the "top level" of the placement allocation
+    candidates query (parallel to, but not including, the list of
+    RequestGroup).
+    """
+    # Version 1.0: Initial version
+    VERSION = '1.0'
+
+    fields = {
+        # Traits required on the root provider
+        'root_required': fields.SetOfStringsField(default=set()),
+        # Traits forbidden on the root provider
+        'root_forbidden': fields.SetOfStringsField(default=set()),
+        # NOTE(efried): group_policy would be appropriate to include here, once
+        # we have a use case for it.
+    }
+
+    def obj_load_attr(self, attrname):
+        self.obj_set_defaults(attrname)
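The ``root_required``/``root_forbidden`` properties, the new ``RequestLevelParams`` object and its ``obj_load_attr()`` defaulting combine so that callers can simply do ``request_spec.root_required.add(trait)`` without first checking whether anything is set. Below is a rough, self-contained sketch of that lazy-default pattern in plain Python; the class names are shortened for illustration and this is not the actual NovaObject implementation.

    # Standalone sketch of the lazy-default pattern, not the real o.vo code.
    class RequestLevelParamsSketch:
        def __init__(self):
            self.root_required = set()   # traits required on the root provider
            self.root_forbidden = set()  # traits forbidden on the root provider


    class RequestSpecSketch:
        _rlp = None

        @property
        def request_level_params(self):
            # Mirrors obj_load_attr(): first touch creates an empty holder.
            if self._rlp is None:
                self._rlp = RequestLevelParamsSketch()
            return self._rlp

        @property
        def root_required(self):
            return self.request_level_params.root_required

        @property
        def root_forbidden(self):
            return self.request_level_params.root_forbidden


    spec = RequestSpecSketch()
    spec.root_required.add('COMPUTE_IMAGE_TYPE_RAW')    # what a prefilter does
    spec.root_forbidden.add('COMPUTE_STATUS_DISABLED')
    print(spec.root_required, spec.root_forbidden)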
diff --git a/nova/scheduler/client/report.py b/nova/scheduler/client/report.py
index a25ab69e927d..350825af146e 100644
--- a/nova/scheduler/client/report.py
+++ b/nova/scheduler/client/report.py
@@ -41,7 +41,7 @@ from nova import utils
 CONF = nova.conf.CONF
 LOG = logging.getLogger(__name__)
 WARN_EVERY = 10
-MAPPINGS_VERSION = '1.34'
+ROOT_REQUIRED_VERSION = '1.35'
 RESHAPER_VERSION = '1.30'
 CONSUMER_GENERATION_VERSION = '1.28'
 ALLOW_RESERVED_EQUAL_TOTAL_INVENTORY_VERSION = '1.26'
@@ -291,7 +291,7 @@ class SchedulerReportClient(object):
         """
         # Note that claim_resources() will use this version as well to
         # make allocations by `PUT /allocations/{consumer_uuid}`
-        version = MAPPINGS_VERSION
+        version = ROOT_REQUIRED_VERSION
         qparams = resources.to_querystring()
         url = "/allocation_candidates?%s" % qparams
         resp = self.get(url, version=version,
diff --git a/nova/scheduler/request_filter.py b/nova/scheduler/request_filter.py
index 3ff1c1296aff..d5d6501293b8 100644
--- a/nova/scheduler/request_filter.py
+++ b/nova/scheduler/request_filter.py
@@ -185,11 +185,7 @@ def require_image_type_support(ctxt, request_spec):
                       'is os-traits up to date?'), trait_name)
             return False
 
-    # NOTE(danms): We are using the transient flavor in the request spec
-    # to add the trait that we need. We make sure that we reset the dirty-ness
-    # of this field to avoid persisting it.
-    request_spec.flavor.extra_specs['trait:%s' % trait_name] = 'required'
-    request_spec.obj_reset_changes(fields=['flavor'], recursive=True)
+    request_spec.root_required.add(trait_name)
 
     LOG.debug('require_image_type_support request filter added required '
               'trait %s', trait_name)
@@ -206,13 +202,8 @@ def compute_status_filter(ctxt, request_spec):
     service should have the COMPUTE_STATUS_DISABLED trait set and be excluded
     by this mandatory pre-filter.
     """
-    # We're called before scheduler utils resources_from_request_spec builds
-    # the RequestGroup stuff which gets used to form the
-    # GET /allocation_candidates call, so mutate the flavor for that call but
-    # don't persist the change.
     trait_name = os_traits.COMPUTE_STATUS_DISABLED
-    request_spec.flavor.extra_specs['trait:%s' % trait_name] = 'forbidden'
-    request_spec.obj_reset_changes(fields=['flavor'], recursive=True)
+    request_spec.root_forbidden.add(trait_name)
     LOG.debug('compute_status_filter request filter added forbidden '
               'trait %s', trait_name)
     return True
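With the two prefilters above rewritten, root-provider constraints no longer ride along as transient flavor extra specs. A hedged sketch of how a similar prefilter could be written under the new pattern; the filter itself is invented for illustration, and only the ``root_required``/``root_forbidden`` attributes come from this change.

    import os_traits


    def require_multiattach_support(ctxt, request_spec):
        """Illustrative prefilter, not part of this change.

        Requires COMPUTE_VOLUME_MULTI_ATTACH on the root compute node
        provider by adding it to the request-level root_required set,
        rather than by mutating request_spec.flavor.extra_specs as the
        old pattern did.
        """
        request_spec.root_required.add(os_traits.COMPUTE_VOLUME_MULTI_ATTACH)
        return True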
diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py
index 9d94bde6f339..77fd023f18ad 100644
--- a/nova/scheduler/utils.py
+++ b/nova/scheduler/utils.py
@@ -61,7 +61,8 @@ class ResourceRequest(object):
         """Create a new instance of ResourceRequest from a RequestSpec.
 
         Examines the flavor, flavor extra specs, (optional) image metadata,
-        and (optional) requested_resources of the provided ``request_spec``.
+        and (optional) requested_resources and request_level_params of the
+        provided ``request_spec``.
 
         For extra specs, items of the following form are examined:
 
@@ -95,6 +96,9 @@ class ResourceRequest(object):
         image (e.g. from ports) are incorporated as is, but ensuring that
         they get unique group suffixes.
 
+        request_level_params - settings associated with the request as a whole
+        rather than with a specific RequestGroup - are incorporated as is.
+
         :param request_spec: An instance of ``objects.RequestSpec``.
         :param enable_pinning_translate: True if the CPU policy extra specs
             should be translated to placement resources and traits.
@@ -102,6 +106,10 @@ class ResourceRequest(object):
         # { ident: RequestGroup }
         self._rg_by_id = {}
         self._group_policy = None
+        # root_required+=these
+        self._root_required = request_spec.root_required
+        # root_required+=!these
+        self._root_forbidden = request_spec.root_forbidden
         # Default to the configured limit but _limit can be
         # set to None to indicate "no limit".
         self._limit = CONF.scheduler.max_placement_results
@@ -446,6 +454,10 @@ class ResourceRequest(object):
         qparams = []
         if self._group_policy is not None:
             qparams.append(('group_policy', self._group_policy))
+        if self._root_required or self._root_forbidden:
+            vals = sorted(self._root_required) + ['!' + t for t in
+                                                  sorted(self._root_forbidden)]
+            qparams.append(('root_required', ','.join(vals)))
 
         for ident, rg in self._rg_by_id.items():
             # [('resources[$S]', 'rclass:amount,rclass:amount,...'),
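The ``to_querystring()`` hunk above folds both sets into a single ``root_required`` query parameter: required traits are listed as-is, forbidden traits get a ``!`` prefix, and everything is comma-joined. A small self-contained illustration of the same formatting rule; the helper name and the resource string are made up for the example.

    from urllib.parse import urlencode


    def root_required_qparam(root_required, root_forbidden):
        # Same rule as the hunk above: required traits as-is, forbidden
        # traits prefixed with '!', all joined into one comma-separated value.
        vals = sorted(root_required) + ['!' + t for t in sorted(root_forbidden)]
        return [('root_required', ','.join(vals))] if vals else []


    qparams = [('resources', 'VCPU:1,MEMORY_MB:1024,DISK_GB:15')]
    qparams += root_required_qparam({'COMPUTE_IMAGE_TYPE_RAW'},
                                    {'COMPUTE_STATUS_DISABLED'})
    print(urlencode(qparams))
    # ...&root_required=COMPUTE_IMAGE_TYPE_RAW%2C%21COMPUTE_STATUS_DISABLED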
diff --git a/nova/tests/functional/test_report_client.py b/nova/tests/functional/test_report_client.py
index ea52fcfccbd6..c151fca2c1af 100644
--- a/nova/tests/functional/test_report_client.py
+++ b/nova/tests/functional/test_report_client.py
@@ -12,9 +12,11 @@
 # under the License.
 
 import copy
+import ddt
 from keystoneauth1 import exceptions as kse
 import mock
 import os_resource_classes as orc
+import os_traits as ot
 from oslo_config import cfg
 from oslo_config import fixture as config_fixture
 from oslo_utils.fixture import uuidsentinel as uuids
@@ -141,6 +143,7 @@ class SchedulerReportClientTestBase(test.TestCase):
         pass
 
 
+@ddt.ddt
 @mock.patch('nova.compute.utils.is_volume_backed_instance',
             new=mock.Mock(return_value=False))
 @mock.patch('nova.objects.compute_node.ComputeNode.save', new=mock.Mock())
@@ -1299,6 +1302,64 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
             self.assertEqual({'_CPU', '_MEM', '_DISK'},
                              set(ac['mappings']))
 
+    # One data element is:
+    # root_required: set of traits for root_required
+    # root_forbidden: set of traits for root_forbidden
+    # expected_acs: integer expected number of allocation candidates
+    @ddt.data(
+        # With no root_required qparam, we get two candidates involving the
+        # primary compute and one involving `othercn`. (This is covered
+        # elsewhere too, but included here as a baseline).
+        {'root_required': {},
+         'root_forbidden': {},
+         'expected_acs': 3},
+        # Requiring traits that are on our primary compute culls out the result
+        # involving `othercn`.
+        {'root_required': {ot.COMPUTE_VOLUME_EXTEND, 'CUSTOM_FOO'},
+         'root_forbidden': {},
+         'expected_acs': 2},
+        # Forbidding a trait that's absent doesn't matter
+        {'root_required': {ot.COMPUTE_VOLUME_EXTEND, 'CUSTOM_FOO'},
+         'root_forbidden': {ot.COMPUTE_VOLUME_MULTI_ATTACH},
+         'expected_acs': 2},
+        # But forbidding COMPUTE_STATUS_DISABLED kills it
+        {'root_required': {ot.COMPUTE_VOLUME_EXTEND, 'CUSTOM_FOO'},
+         'root_forbidden': {ot.COMPUTE_VOLUME_MULTI_ATTACH,
+                            ot.COMPUTE_STATUS_DISABLED},
+         'expected_acs': 0},
+        # Removing the required traits brings back the candidate involving
+        # `othercn`. But the primary compute is still filtered out by the
+        # root_forbidden.
+        {'root_required': {},
+         'root_forbidden': {ot.COMPUTE_VOLUME_MULTI_ATTACH,
+                            ot.COMPUTE_STATUS_DISABLED},
+         'expected_acs': 1},
+    )
+    def test_allocation_candidates_root_traits(self, data):
+        """Smoke test to ensure root_{required|forbidden} percolates from the
+        RequestSpec through to the GET /allocation_candidates response.
+
+        We're not trying to prove that root_required works -- Placement tests
+        for that -- we're just throwing out enough permutations to prove that
+        we must be building the querystring correctly.
+        """
+        flavor = objects.Flavor(
+            vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0)
+        req_spec = objects.RequestSpec(flavor=flavor, is_bfv=False)
+        req_spec.root_required.update(data['root_required'])
+        req_spec.root_forbidden.update(data['root_forbidden'])
+        with self._interceptor():
+            self._set_up_provider_tree()
+            self.client.set_traits_for_provider(
+                self.context, self.compute_uuid,
+                (ot.COMPUTE_STATUS_DISABLED, ot.COMPUTE_VOLUME_EXTEND,
+                 'CUSTOM_FOO'))
+            acs, _, ver = self.client.get_allocation_candidates(
+                self.context, utils.ResourceRequest(req_spec))
+        self.assertEqual('1.35', ver)
+        # This prints which ddt permutation we're using if it fails.
+        self.assertEqual(data['expected_acs'], len(acs), data)
+
     def test_get_allocations_for_provider_tree(self):
         with self._interceptor():
             # When the provider tree cache is empty (or we otherwise supply a
diff --git a/nova/tests/unit/conductor/tasks/test_migrate.py b/nova/tests/unit/conductor/tasks/test_migrate.py
index 67ecc86c0d7a..eca24d26f382 100644
--- a/nova/tests/unit/conductor/tasks/test_migrate.py
+++ b/nova/tests/unit/conductor/tasks/test_migrate.py
@@ -734,7 +734,7 @@ class MigrationTaskTestCase(test.NoDBTestCase):
             allocation_request=jsonutils.dumps(
                 {"allocations": {uuids.host1: resources},
                  "mappings": {uuids.port1: [uuids.host1]}}),
-            allocation_request_version='1.34')
+            allocation_request_version='1.35')
         second = objects.Selection(
             service_host="host2",
             nodename="node2",
@@ -742,7 +742,7 @@ class MigrationTaskTestCase(test.NoDBTestCase):
             allocation_request=jsonutils.dumps(
                 {"allocations": {uuids.host2: resources},
                  "mappings": {uuids.port1: [uuids.host2]}}),
-            allocation_request_version='1.34')
+            allocation_request_version='1.35')
 
         first_service = objects.Service(service_host='host1')
         first_service.version = 38
@@ -765,7 +765,7 @@ class MigrationTaskTestCase(test.NoDBTestCase):
             self.context.elevated(), task.reportclient, task.request_spec,
             self.instance.uuid, {"allocations": {uuids.host2: resources},
-                                 "mappings": {uuids.port1: [uuids.host2]}}, '1.34')
+                                 "mappings": {uuids.port1: [uuids.host2]}}, '1.35')
         mock_debug.assert_has_calls([
             mock.call(
                 'Scheduler returned alternate host %(host)s as a possible '
diff --git a/nova/tests/unit/objects/test_objects.py b/nova/tests/unit/objects/test_objects.py
index cfcfc71f39b6..f22a74b4e124 100644
--- a/nova/tests/unit/objects/test_objects.py
+++ b/nova/tests/unit/objects/test_objects.py
@@ -1120,7 +1120,8 @@ object_data = {
     'Quotas': '1.3-40fcefe522111dddd3e5e6155702cf4e',
     'QuotasNoOp': '1.3-347a039fc7cfee7b225b68b5181e0733',
     'RequestGroup': '1.3-0458d350a8ec9d0673f9be5640a990ce',
-    'RequestSpec': '1.12-25010470f219af9b6163f2a457a513f5',
+    'RequestLevelParams': '1.0-1e5c8c18bd44cd233c8b32509c99d06f',
+    'RequestSpec': '1.13-e1aa38b2bf3f8547474ee9e4c0aa2745',
     'Resource': '1.0-d8a2abbb380da583b995fd118f6a8953',
     'ResourceList': '1.0-4a53826625cc280e15fae64a575e0879',
     'ResourceMetadata': '1.0-77509ea1ea0dd750d5864b9bd87d3f9d',
diff --git a/nova/tests/unit/objects/test_request_spec.py b/nova/tests/unit/objects/test_request_spec.py
index a09329c26256..0e6286f2d6d5 100644
--- a/nova/tests/unit/objects/test_request_spec.py
+++ b/nova/tests/unit/objects/test_request_spec.py
@@ -307,7 +307,8 @@ class _TestRequestSpecObject(object):
         spec = objects.RequestSpec.from_primitives(ctxt, spec_dict,
                                                    filt_props)
         mock_limits.assert_called_once_with({})
         # Make sure that all fields are set using that helper method
-        skip = ['id', 'security_groups', 'network_metadata', 'is_bfv']
+        skip = ['id', 'security_groups', 'network_metadata', 'is_bfv',
+                'request_level_params']
         for field in [f for f in spec.obj_fields if f not in skip]:
             self.assertTrue(spec.obj_attr_is_set(field),
                             'Field: %s is not set' % field)
@@ -338,7 +339,7 @@ class _TestRequestSpecObject(object):
             filter_properties, instance_group, instance.availability_zone,
             objects.SecurityGroupList())
         # Make sure that all fields are set using that helper method
-        skip = ['id', 'network_metadata', 'is_bfv']
+        skip = ['id', 'network_metadata', 'is_bfv', 'request_level_params']
         for field in [f for f in spec.obj_fields if f not in skip]:
             self.assertTrue(spec.obj_attr_is_set(field),
                             'Field: %s is not set' % field)
diff --git a/nova/tests/unit/scheduler/client/test_report.py b/nova/tests/unit/scheduler/client/test_report.py
index 1521911ee6dd..cf916fda4938 100644
--- a/nova/tests/unit/scheduler/client/test_report.py
+++ b/nova/tests/unit/scheduler/client/test_report.py
@@ -2125,7 +2125,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         expected_url = '/allocation_candidates?%s' % parse.urlencode(
             expected_query)
         self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, microversion='1.34',
+            expected_url, microversion='1.35',
             global_request_id=self.context.global_id)
         self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs)
         self.assertEqual(mock.sentinel.p_sums, p_sums)
@@ -2169,7 +2169,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
             expected_query)
         self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs)
         self.ks_adap_mock.get.assert_called_once_with(
-            expected_url, microversion='1.34',
+            expected_url, microversion='1.35',
             global_request_id=self.context.global_id)
         self.assertEqual(mock.sentinel.p_sums, p_sums)
 
@@ -2195,7 +2195,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
         res = self.client.get_allocation_candidates(self.context, resources)
 
         self.ks_adap_mock.get.assert_called_once_with(
-            mock.ANY, microversion='1.34',
+            mock.ANY, microversion='1.35',
             global_request_id=self.context.global_id)
         url = self.ks_adap_mock.get.call_args[0][0]
         split_url = parse.urlsplit(url)
diff --git a/nova/tests/unit/scheduler/test_request_filter.py b/nova/tests/unit/scheduler/test_request_filter.py
index 34a8e1d44ce7..556dcb219a30 100644
--- a/nova/tests/unit/scheduler/test_request_filter.py
+++ b/nova/tests/unit/scheduler/test_request_filter.py
@@ -11,6 +11,7 @@
 # under the License.
 
 import mock
+import os_traits as ot
 from oslo_utils.fixture import uuidsentinel as uuids
 from oslo_utils import timeutils
 
@@ -339,7 +340,8 @@ class TestRequestFilter(test.NoDBTestCase):
                                       is_bfv=False)
         # Assert that we completely skip the filter if disabled
         request_filter.require_image_type_support(self.context, reqspec)
-        self.assertEqual({}, reqspec.flavor.extra_specs)
+        self.assertEqual(0, len(reqspec.root_required))
+        self.assertEqual(0, len(reqspec.root_forbidden))
 
     def test_require_image_type_support_volume_backed(self):
         self.flags(query_placement_for_image_type_support=True,
@@ -348,7 +350,8 @@ class TestRequestFilter(test.NoDBTestCase):
                                       is_bfv=True)
         # Assert that we completely skip the filter if no image
         request_filter.require_image_type_support(self.context, reqspec)
-        self.assertEqual({}, reqspec.flavor.extra_specs)
+        self.assertEqual(0, len(reqspec.root_required))
+        self.assertEqual(0, len(reqspec.root_forbidden))
 
     def test_require_image_type_support_unknown(self):
         self.flags(query_placement_for_image_type_support=True,
@@ -359,7 +362,8 @@ class TestRequestFilter(test.NoDBTestCase):
                                       is_bfv=False)
         # Assert that we completely skip the filter if no matching trait
         request_filter.require_image_type_support(self.context, reqspec)
-        self.assertEqual({}, reqspec.flavor.extra_specs)
+        self.assertEqual(0, len(reqspec.root_required))
+        self.assertEqual(0, len(reqspec.root_forbidden))
 
     @mock.patch.object(request_filter, 'LOG')
     def test_require_image_type_support_adds_trait(self, mock_log):
@@ -369,11 +373,13 @@ class TestRequestFilter(test.NoDBTestCase):
             image=objects.ImageMeta(disk_format='raw'),
             flavor=objects.Flavor(extra_specs={}),
             is_bfv=False)
-        # Assert that we add the trait to the flavor as required
+        self.assertEqual(0, len(reqspec.root_required))
+        self.assertEqual(0, len(reqspec.root_forbidden))
+
+        # Request filter puts the trait into the request spec
         request_filter.require_image_type_support(self.context, reqspec)
-        self.assertEqual({'trait:COMPUTE_IMAGE_TYPE_RAW': 'required'},
-                         reqspec.flavor.extra_specs)
-        self.assertEqual(set(), reqspec.flavor.obj_what_changed())
+        self.assertEqual({ot.COMPUTE_IMAGE_TYPE_RAW}, reqspec.root_required)
+        self.assertEqual(0, len(reqspec.root_forbidden))
 
         log_lines = [c[0][0] for c in mock_log.debug.call_args_list]
         self.assertIn('added required trait', log_lines[0])
@@ -382,13 +388,14 @@ class TestRequestFilter(test.NoDBTestCase):
     @mock.patch.object(request_filter, 'LOG')
     def test_compute_status_filter(self, mock_log):
         reqspec = objects.RequestSpec(flavor=objects.Flavor(extra_specs={}))
+        self.assertEqual(0, len(reqspec.root_required))
+        self.assertEqual(0, len(reqspec.root_forbidden))
+
+        # Request filter puts the trait into the request spec
         request_filter.compute_status_filter(self.context, reqspec)
-        # The forbidden trait should be added to the RequestSpec.flavor.
-        self.assertEqual({'trait:COMPUTE_STATUS_DISABLED': 'forbidden'},
-                         reqspec.flavor.extra_specs)
-        # The RequestSpec.flavor changes should be reset so they are not
-        # persisted.
-        self.assertEqual(set(), reqspec.flavor.obj_what_changed())
+        self.assertEqual(0, len(reqspec.root_required))
+        self.assertEqual({ot.COMPUTE_STATUS_DISABLED}, reqspec.root_forbidden)
+
         # Assert both the in-method logging and trace decorator.
         log_lines = [c[0][0] for c in mock_log.debug.call_args_list]
         self.assertIn('added forbidden trait', log_lines[0])
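For context, this is roughly what the scheduler's allocation candidates call looks like once the change is in place: placement microversion 1.35 in the ``OpenStack-API-Version`` header and the ``root_required`` query parameter in the URL. The endpoint, token handling and use of python-requests below are assumptions for illustration only; Nova itself makes this call through ``SchedulerReportClient``.

    import requests  # assumption: python-requests available for the example

    resp = requests.get(
        'http://placement.example.com/allocation_candidates',
        params={
            'resources': 'VCPU:1,MEMORY_MB:1024,DISK_GB:15',
            'root_required': 'COMPUTE_IMAGE_TYPE_RAW,!COMPUTE_STATUS_DISABLED',
        },
        headers={
            'OpenStack-API-Version': 'placement 1.35',
            'X-Auth-Token': '<token>',
        })
    print(resp.status_code)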