diff --git a/doc/source/cli/nova-status.rst b/doc/source/cli/nova-status.rst index f958c24663b7..54c14ef701ba 100644 --- a/doc/source/cli/nova-status.rst +++ b/doc/source/cli/nova-status.rst @@ -130,6 +130,10 @@ Upgrade * The "API Service Version" upgrade check was removed since the corresponding code for that check was removed in Stein. + **20.0.0 (Train)** + + * Checks for the Placement API are modified to require version 1.31. + See Also ======== diff --git a/nova/cmd/status.py b/nova/cmd/status.py index b2a06a9bdb7d..4d93a650937a 100644 --- a/nova/cmd/status.py +++ b/nova/cmd/status.py @@ -44,11 +44,11 @@ from nova import version CONF = nova.conf.CONF -# NOTE(efried): 1.30 is required by nova-compute to support resource provider -# reshaping (inventory and allocation data migration). +# NOTE(tetsuro): 1.31 is required by nova-scheduler to use in_tree +# queryparam to get allocation candidates. # NOTE: If you bump this version, remember to update the history # section in the nova-status man page (doc/source/cli/nova-status). 
-MIN_PLACEMENT_MICROVERSION = "1.30" +MIN_PLACEMENT_MICROVERSION = "1.31" class UpgradeCommands(upgradecheck.UpgradeCommands): diff --git a/nova/scheduler/client/report.py b/nova/scheduler/client/report.py index e636caef61a0..bcd01caace5b 100644 --- a/nova/scheduler/client/report.py +++ b/nova/scheduler/client/report.py @@ -43,7 +43,7 @@ LOG = logging.getLogger(__name__) WARN_EVERY = 10 RESHAPER_VERSION = '1.30' CONSUMER_GENERATION_VERSION = '1.28' -NESTED_AC_VERSION = '1.29' +INTREE_AC_VERSION = '1.31' ALLOW_RESERVED_EQUAL_TOTAL_INVENTORY_VERSION = '1.26' POST_RPS_RETURNS_PAYLOAD_API_VERSION = '1.20' AGGREGATE_GENERATION_VERSION = '1.19' @@ -311,7 +311,7 @@ class SchedulerReportClient(object): """ # Note that claim_resources() will use this version as well to # make allocations by `PUT /allocations/{consumer_uuid}` - version = NESTED_AC_VERSION + version = INTREE_AC_VERSION qparams = resources.to_querystring() url = "/allocation_candidates?%s" % qparams resp = self.get(url, version=version, diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py index ae49f92351bf..caa14b51f4f2 100644 --- a/nova/scheduler/utils.py +++ b/nova/scheduler/utils.py @@ -268,6 +268,7 @@ class ResourceRequest(object): required_traits = request_group.required_traits forbidden_traits = request_group.forbidden_traits aggregates = request_group.aggregates + in_tree = request_group.in_tree resource_query = ",".join( sorted("%s:%s" % (rc, amount) @@ -289,6 +290,8 @@ class ResourceRequest(object): aggs.append(('member_of%s' % suffix, 'in:' + ','.join(sorted(agglist)))) qs_params.extend(sorted(aggs)) + if in_tree: + qs_params.append(('in_tree%s' % suffix, in_tree)) return qs_params if self._limit is not None: diff --git a/nova/tests/unit/scheduler/client/test_report.py b/nova/tests/unit/scheduler/client/test_report.py index 7d6f677f6abb..4a9dcd4869f6 100644 --- a/nova/tests/unit/scheduler/client/test_report.py +++ b/nova/tests/unit/scheduler/client/test_report.py @@ -2088,7 +2088,7 @@ 
class TestProviderOperations(SchedulerReportClientTestCase): expected_url = '/allocation_candidates?%s' % parse.urlencode( expected_query) self.ks_adap_mock.get.assert_called_once_with( - expected_url, microversion='1.29', + expected_url, microversion='1.31', headers={'X-Openstack-Request-Id': self.context.global_id}) self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs) self.assertEqual(mock.sentinel.p_sums, p_sums) @@ -2128,7 +2128,7 @@ class TestProviderOperations(SchedulerReportClientTestCase): expected_query) self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs) self.ks_adap_mock.get.assert_called_once_with( - expected_url, microversion='1.29', + expected_url, microversion='1.31', headers={'X-Openstack-Request-Id': self.context.global_id}) self.assertEqual(mock.sentinel.p_sums, p_sums) @@ -2150,7 +2150,7 @@ class TestProviderOperations(SchedulerReportClientTestCase): res = self.client.get_allocation_candidates(self.context, resources) self.ks_adap_mock.get.assert_called_once_with( - mock.ANY, microversion='1.29', + mock.ANY, microversion='1.31', headers={'X-Openstack-Request-Id': self.context.global_id}) url = self.ks_adap_mock.get.call_args[0][0] split_url = parse.urlsplit(url) diff --git a/nova/tests/unit/scheduler/test_utils.py b/nova/tests/unit/scheduler/test_utils.py index 5bc3238ea191..3b22602d9e35 100644 --- a/nova/tests/unit/scheduler/test_utils.py +++ b/nova/tests/unit/scheduler/test_utils.py @@ -445,6 +445,7 @@ class TestUtils(test.NoDBTestCase): self.context, fake_spec, self.mock_host_manager) self.assertResourceRequestsEqual(expected, resources) expected_querystring = ( + 'in_tree=12345678-1234-1234-1234-123456789012&' 'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1') self.assertEqual(expected_querystring, resources.to_querystring()) self.mock_host_manager.get_compute_nodes_by_host_or_node.\ @@ -477,6 +478,7 @@ class TestUtils(test.NoDBTestCase): self.context, fake_spec, self.mock_host_manager) 
self.assertResourceRequestsEqual(expected, resources) expected_querystring = ( + 'in_tree=12345678-1234-1234-1234-123456789012&' 'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1') self.assertEqual(expected_querystring, resources.to_querystring()) self.mock_host_manager.get_compute_nodes_by_host_or_node.\ @@ -550,6 +552,7 @@ class TestUtils(test.NoDBTestCase): self.context, fake_spec, self.mock_host_manager) self.assertResourceRequestsEqual(expected, resources) expected_querystring = ( + 'in_tree=12345678-1234-1234-1234-123456789012&' 'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1') self.assertEqual(expected_querystring, resources.to_querystring()) self.mock_host_manager.get_compute_nodes_by_host_or_node.\ diff --git a/releasenotes/notes/use-placement-in-tree-756cb20af66b08bd.yaml b/releasenotes/notes/use-placement-in-tree-756cb20af66b08bd.yaml new file mode 100644 index 000000000000..c610a35a4454 --- /dev/null +++ b/releasenotes/notes/use-placement-in-tree-756cb20af66b08bd.yaml @@ -0,0 +1,14 @@ +--- +fixes: + - | + There was a `bug 1777591`_ where placement could filter out the specified + target host when deploying an instance due to the random result limit. In + previous releases the bug was worked around by not limiting the results + from the Placement service if a target host was specified. From this + release, the Nova scheduler uses a more optimized path, retrieving only + the target host information from placement. Note that it still uses the + unlimited workaround if a target host is specified without a specific node + and multiple nodes are found for the target host. This can happen with + some virt drivers such as the Ironic driver. + + .. _bug 1777591: https://bugs.launchpad.net/nova/+bug/1777591