Merge "Query `in_tree` to placement"

Zuul 2019-05-02 21:55:38 +00:00 committed by Gerrit Code Review
commit dd6bd75355
7 changed files with 32 additions and 8 deletions

View File

@@ -130,6 +130,10 @@ Upgrade
* The "API Service Version" upgrade check was removed since the corresponding
code for that check was removed in Stein.
**20.0.0 (Train)**
* Checks for the Placement API are modified to require version 1.31.
See Also
========

View File

@@ -44,11 +44,11 @@ from nova import version
CONF = nova.conf.CONF
# NOTE(efried): 1.30 is required by nova-compute to support resource provider
# reshaping (inventory and allocation data migration).
# NOTE(tetsuro): 1.31 is required by nova-scheduler to use in_tree
# queryparam to get allocation candidates.
# NOTE: If you bump this version, remember to update the history
# section in the nova-status man page (doc/source/cli/nova-status).
MIN_PLACEMENT_MICROVERSION = "1.30"
MIN_PLACEMENT_MICROVERSION = "1.31"
class UpgradeCommands(upgradecheck.UpgradeCommands):
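A minimal sketch, not Nova's actual check implementation, of the comparison this constant drives: ``nova-status upgrade check`` compares the Placement service's maximum supported microversion against MIN_PLACEMENT_MICROVERSION, so the bump to "1.31" makes older Placement deployments fail the check. The max_version value below is a stand-in for whatever the service actually reports::

    MIN_PLACEMENT_MICROVERSION = "1.31"

    def _version_tuple(version):
        # "1.31" -> (1, 31): compare numerically, not lexicographically,
        # so that "1.31" >= "1.4" is handled correctly.
        return tuple(int(part) for part in version.split("."))

    def placement_version_ok(max_version):
        return _version_tuple(max_version) >= _version_tuple(MIN_PLACEMENT_MICROVERSION)

    # A Placement service capped at 1.30 now fails the check; 1.31+ passes.
    assert not placement_version_ok("1.30")
    assert placement_version_ok("1.31")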

View File

@@ -43,7 +43,7 @@ LOG = logging.getLogger(__name__)
WARN_EVERY = 10
RESHAPER_VERSION = '1.30'
CONSUMER_GENERATION_VERSION = '1.28'
NESTED_AC_VERSION = '1.29'
INTREE_AC_VERSION = '1.31'
ALLOW_RESERVED_EQUAL_TOTAL_INVENTORY_VERSION = '1.26'
POST_RPS_RETURNS_PAYLOAD_API_VERSION = '1.20'
AGGREGATE_GENERATION_VERSION = '1.19'
@@ -311,7 +311,7 @@ class SchedulerReportClient(object):
"""
# Note that claim_resources() will use this version as well to
# make allocations by `PUT /allocations/{consumer_uuid}`
version = NESTED_AC_VERSION
version = INTREE_AC_VERSION
qparams = resources.to_querystring()
url = "/allocation_candidates?%s" % qparams
resp = self.get(url, version=version,
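With the version bumped to INTREE_AC_VERSION, the report client can include the in_tree query parameter when asking Placement for allocation candidates. An illustrative reconstruction of the resulting request shape, using the same sample UUID and resource values as the tests below::

    from urllib.parse import urlencode

    INTREE_AC_VERSION = '1.31'

    # in_tree restricts candidates to providers in the target host's tree.
    qparams = [
        ('in_tree', '12345678-1234-1234-1234-123456789012'),
        ('limit', '1000'),
        ('resources', 'DISK_GB:15,MEMORY_MB:1024,VCPU:1'),
    ]
    url = '/allocation_candidates?%s' % urlencode(qparams)
    # The GET is issued with microversion 1.31 so Placement accepts in_tree:
    # /allocation_candidates?in_tree=...&limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1
    print(url)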

View File

@@ -268,6 +268,7 @@ class ResourceRequest(object):
required_traits = request_group.required_traits
forbidden_traits = request_group.forbidden_traits
aggregates = request_group.aggregates
in_tree = request_group.in_tree
resource_query = ",".join(
sorted("%s:%s" % (rc, amount)
@@ -289,6 +290,8 @@ class ResourceRequest(object):
aggs.append(('member_of%s' % suffix,
'in:' + ','.join(sorted(agglist))))
qs_params.extend(sorted(aggs))
if in_tree:
qs_params.append(('in_tree%s' % suffix, in_tree))
return qs_params
if self._limit is not None:
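The suffix handling above means the constraint is emitted per request group: the unnumbered group adds a bare in_tree parameter, while granular groups would add in_tree1, in_tree2, and so on. A small sketch of that behaviour, independent of the ResourceRequest class itself::

    from urllib.parse import urlencode

    def in_tree_params(groups):
        # groups: mapping of suffix ('' or '1', '2', ...) to that group's
        # in_tree root-provider UUID, or None when the group is unconstrained.
        qs_params = []
        for suffix, in_tree in sorted(groups.items()):
            if in_tree:
                qs_params.append(('in_tree%s' % suffix, in_tree))
        return qs_params

    params = in_tree_params({'': '12345678-1234-1234-1234-123456789012', '1': None})
    print(urlencode(params))  # in_tree=12345678-1234-1234-1234-123456789012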

View File

@@ -2088,7 +2088,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
expected_url = '/allocation_candidates?%s' % parse.urlencode(
expected_query)
self.ks_adap_mock.get.assert_called_once_with(
expected_url, microversion='1.29',
expected_url, microversion='1.31',
headers={'X-Openstack-Request-Id': self.context.global_id})
self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs)
self.assertEqual(mock.sentinel.p_sums, p_sums)
@@ -2128,7 +2128,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
expected_query)
self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs)
self.ks_adap_mock.get.assert_called_once_with(
expected_url, microversion='1.29',
expected_url, microversion='1.31',
headers={'X-Openstack-Request-Id': self.context.global_id})
self.assertEqual(mock.sentinel.p_sums, p_sums)
@@ -2150,7 +2150,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
res = self.client.get_allocation_candidates(self.context, resources)
self.ks_adap_mock.get.assert_called_once_with(
mock.ANY, microversion='1.29',
mock.ANY, microversion='1.31',
headers={'X-Openstack-Request-Id': self.context.global_id})
url = self.ks_adap_mock.get.call_args[0][0]
split_url = parse.urlsplit(url)

View File

@@ -445,6 +445,7 @@ class TestUtils(test.NoDBTestCase):
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
expected_querystring = (
'in_tree=12345678-1234-1234-1234-123456789012&'
'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
self.assertEqual(expected_querystring, resources.to_querystring())
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
@@ -477,6 +478,7 @@ class TestUtils(test.NoDBTestCase):
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
expected_querystring = (
'in_tree=12345678-1234-1234-1234-123456789012&'
'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
self.assertEqual(expected_querystring, resources.to_querystring())
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
@@ -550,6 +552,7 @@ class TestUtils(test.NoDBTestCase):
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
expected_querystring = (
'in_tree=12345678-1234-1234-1234-123456789012&'
'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
self.assertEqual(expected_querystring, resources.to_querystring())
self.mock_host_manager.get_compute_nodes_by_host_or_node.\

View File

@@ -0,0 +1,14 @@
---
fixes:
  - |
    Fixes `bug 1777591`_, in which Placement could filter out the requested
    target host when deploying an instance to a specific host, because of its
    random result limiting. In previous releases this was worked around by
    requesting unlimited results from the Placement service whenever a target
    host was specified. Starting with this release, the Nova scheduler instead
    uses a more efficient query that retrieves only the target host's
    information from Placement. Note that the unlimited-results workaround is
    still used when a target host is specified without a specific node and
    multiple compute nodes are found for that host, which can happen with some
    virt drivers such as the Ironic driver.

    .. _bug 1777591: https://bugs.launchpad.net/nova/+bug/1777591
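A hedged sketch of the decision the note describes, not Nova's exact scheduler code: constrain the allocation-candidate query with in_tree only when the requested host maps to exactly one compute node, and otherwise keep the old unlimited-results workaround::

    def placement_query_hints(target_host, node_uuids, default_limit=1000):
        # node_uuids: compute node (root provider) UUIDs found for target_host.
        if target_host and len(node_uuids) == 1:
            # Single node: ask Placement only for that provider tree.
            return {'in_tree': node_uuids[0], 'limit': default_limit}
        if target_host:
            # Several nodes for one host (e.g. the Ironic driver): fall back
            # to unlimited results so the requested host is not filtered out.
            return {}
        return {'limit': default_limit}

    print(placement_query_hints('host1', ['aaaa-bbbb']))
    print(placement_query_hints('ironic-host', ['aaaa-bbbb', 'cccc-dddd']))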