Report client: _reshape helper, placement min bump
Add a thin wrapper to invoke the POST /reshaper placement API with appropriate error checking. This bumps the placement minimum to the reshaper microversion, 1.30. Change-Id: Idf8997d5efdfdfca6967899a0882ffb9ecf96915 blueprint: reshape-provider-tree
This commit is contained in:
parent
25b852efd7
commit
2833785f59
|
@ -118,6 +118,10 @@ Upgrade
|
||||||
* Checks that existing instances have been migrated to have a matching
|
* Checks that existing instances have been migrated to have a matching
|
||||||
request spec in the API DB.
|
request spec in the API DB.
|
||||||
|
|
||||||
|
**19.0.0 (Stein)**
|
||||||
|
|
||||||
|
* Checks for the Placement API are modified to require version 1.30.
|
||||||
|
|
||||||
See Also
|
See Also
|
||||||
========
|
========
|
||||||
|
|
||||||
|
|
|
@ -57,11 +57,13 @@ changed or be partially complete at this time.
|
||||||
* `Request Traits During Scheduling`_
|
* `Request Traits During Scheduling`_
|
||||||
* `filter allocation candidates by aggregate membership`_
|
* `filter allocation candidates by aggregate membership`_
|
||||||
* `perform granular allocation candidate requests`_
|
* `perform granular allocation candidate requests`_
|
||||||
|
* `inventory and allocation data migration`_ (reshaping provider trees)
|
||||||
|
|
||||||
.. _Nested Resource Providers: http://specs.openstack.org/openstack/nova-specs/specs/queens/approved/nested-resource-providers.html
|
.. _Nested Resource Providers: http://specs.openstack.org/openstack/nova-specs/specs/queens/approved/nested-resource-providers.html
|
||||||
.. _Request Traits During Scheduling: https://specs.openstack.org/openstack/nova-specs/specs/queens/approved/request-traits-in-nova.html
|
.. _Request Traits During Scheduling: https://specs.openstack.org/openstack/nova-specs/specs/queens/approved/request-traits-in-nova.html
|
||||||
.. _filter allocation candidates by aggregate membership: https://specs.openstack.org/openstack/nova-specs/specs/rocky/approved/alloc-candidates-member-of.html
|
.. _filter allocation candidates by aggregate membership: https://specs.openstack.org/openstack/nova-specs/specs/rocky/approved/alloc-candidates-member-of.html
|
||||||
.. _perform granular allocation candidate requests: http://specs.openstack.org/openstack/nova-specs/specs/rocky/approved/granular-resource-requests.html
|
.. _perform granular allocation candidate requests: http://specs.openstack.org/openstack/nova-specs/specs/rocky/approved/granular-resource-requests.html
|
||||||
|
.. _inventory and allocation data migration: http://specs.openstack.org/openstack/nova-specs/specs/rocky/approved/reshape-provider-tree.html
|
||||||
|
|
||||||
Deployment
|
Deployment
|
||||||
==========
|
==========
|
||||||
|
|
|
@ -52,12 +52,11 @@ CONF = nova.conf.CONF
|
||||||
PLACEMENT_DOCS_LINK = 'https://docs.openstack.org/nova/latest' \
|
PLACEMENT_DOCS_LINK = 'https://docs.openstack.org/nova/latest' \
|
||||||
'/user/placement.html'
|
'/user/placement.html'
|
||||||
|
|
||||||
# NOTE(efried): 1.28 is required by "nova-manage placement heal_allocations"
|
# NOTE(efried): 1.30 is required by nova-compute to support resource provider
|
||||||
# to get the consumer generation when updating incomplete allocations with
|
# reshaping (inventory and allocation data migration).
|
||||||
# instance consumer project_id and user_id values.
|
|
||||||
# NOTE: If you bump this version, remember to update the history
|
# NOTE: If you bump this version, remember to update the history
|
||||||
# section in the nova-status man page (doc/source/cli/nova-status).
|
# section in the nova-status man page (doc/source/cli/nova-status).
|
||||||
MIN_PLACEMENT_MICROVERSION = "1.28"
|
MIN_PLACEMENT_MICROVERSION = "1.30"
|
||||||
|
|
||||||
|
|
||||||
class UpgradeCheckCode(enum.IntEnum):
|
class UpgradeCheckCode(enum.IntEnum):
|
||||||
|
|
|
@ -2378,3 +2378,8 @@ class ResourceProviderAllocationRetrievalFailed(NovaException):
|
||||||
class ConsumerAllocationRetrievalFailed(NovaException):
|
class ConsumerAllocationRetrievalFailed(NovaException):
|
||||||
msg_fmt = _("Failed to retrieve allocations for consumer "
|
msg_fmt = _("Failed to retrieve allocations for consumer "
|
||||||
"%(consumer_uuid)s: %(error)s")
|
"%(consumer_uuid)s: %(error)s")
|
||||||
|
|
||||||
|
|
||||||
|
class ReshapeFailed(NovaException):
    """Raised when a POST /reshaper call to the placement service fails.

    The ``error`` format kwarg carries the response body text from the
    failed placement API request.
    """
    msg_fmt = _("Resource provider inventory and allocation data migration "
                "failed: %(error)s")
|
||||||
|
|
|
@ -47,6 +47,7 @@ _RE_INV_IN_USE = re.compile("Inventory for (.+) on resource provider "
|
||||||
"(.+) in use")
|
"(.+) in use")
|
||||||
WARN_EVERY = 10
|
WARN_EVERY = 10
|
||||||
PLACEMENT_CLIENT_SEMAPHORE = 'placement_client'
|
PLACEMENT_CLIENT_SEMAPHORE = 'placement_client'
|
||||||
|
RESHAPER_VERSION = '1.30'
|
||||||
CONSUMER_GENERATION_VERSION = '1.28'
|
CONSUMER_GENERATION_VERSION = '1.28'
|
||||||
GRANULAR_AC_VERSION = '1.25'
|
GRANULAR_AC_VERSION = '1.25'
|
||||||
ALLOW_RESERVED_EQUAL_TOTAL_INVENTORY_VERSION = '1.26'
|
ALLOW_RESERVED_EQUAL_TOTAL_INVENTORY_VERSION = '1.26'
|
||||||
|
@ -1420,6 +1421,40 @@ class SchedulerReportClient(object):
|
||||||
# when we invoke the DELETE. See bug #1746374.
|
# when we invoke the DELETE. See bug #1746374.
|
||||||
self._update_inventory(context, compute_node.uuid, inv_data)
|
self._update_inventory(context, compute_node.uuid, inv_data)
|
||||||
|
|
||||||
|
def _reshape(self, context, inventories, allocations):
    """Atomically migrate resource provider inventory and allocation data.

    Thin wrapper around the POST /reshaper placement API (requires
    placement microversion 1.30).

    :param context: The security context
    :param inventories: A dict, keyed by resource provider UUID, of:
            { "inventories": { inventory dicts, keyed by resource class },
              "resource_provider_generation": $RP_GEN }
    :param allocations: A dict, keyed by consumer UUID, of:
            { "project_id": $PROJ_ID,
              "user_id": $USER_ID,
              "consumer_generation": $CONSUMER_GEN,
              "allocations": {
                  $RP_UUID: {
                      "resources": { $RC: $AMOUNT, ... }
                  },
                  ...
              }
            }
    :return: The Response object representing a successful API call.
    :raises: ReshapeFailed if the POST /reshaper request fails.
    :raises: keystoneauth1.exceptions.ClientException if placement API
             communication fails.
    """
    # Any resource classes named in the new inventories must exist in
    # placement before the reshape request can succeed.
    for rp_inv in inventories.values():
        self._ensure_resource_classes(context, list(rp_inv['inventories']))

    resp = self.post(
        '/reshaper',
        {"inventories": inventories, "allocations": allocations},
        version=RESHAPER_VERSION, global_request_id=context.global_id)
    # self.post returns a falsey Response on an HTTP error status; convert
    # that into a nova-level exception carrying the response text.
    if not resp:
        raise exception.ReshapeFailed(error=resp.text)
    return resp
|
||||||
|
|
||||||
def update_from_provider_tree(self, context, new_tree):
|
def update_from_provider_tree(self, context, new_tree):
|
||||||
"""Flush changes from a specified ProviderTree back to placement.
|
"""Flush changes from a specified ProviderTree back to placement.
|
||||||
|
|
||||||
|
|
|
@ -11,6 +11,7 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
|
from keystoneauth1 import exceptions as kse
|
||||||
import mock
|
import mock
|
||||||
import pkg_resources
|
import pkg_resources
|
||||||
|
|
||||||
|
@ -1033,14 +1034,11 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
|
||||||
self.client.get_allocation_candidates(
|
self.client.get_allocation_candidates(
|
||||||
self.context, utils.ResourceRequest())
|
self.context, utils.ResourceRequest())
|
||||||
|
|
||||||
def test_get_allocations_for_provider_tree(self):
|
def _set_up_provider_tree(self):
|
||||||
with self._interceptor():
|
"""Create two compute nodes in placement: "this" one, and another one.
|
||||||
# When the provider tree cache is empty (or we otherwise supply a
|
|
||||||
# bogus node name), we get ValueError.
|
|
||||||
self.assertRaises(ValueError,
|
|
||||||
self.client.get_allocations_for_provider_tree,
|
|
||||||
self.context, 'bogus')
|
|
||||||
|
|
||||||
|
Must be invoked from within an _interceptor() context.
|
||||||
|
"""
|
||||||
# get_provider_tree_and_ensure_root creates a resource provider
|
# get_provider_tree_and_ensure_root creates a resource provider
|
||||||
# record for us
|
# record for us
|
||||||
ptree = self.client.get_provider_tree_and_ensure_root(
|
ptree = self.client.get_provider_tree_and_ensure_root(
|
||||||
|
@ -1096,16 +1094,16 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
|
||||||
# Part of the shared storage aggregate
|
# Part of the shared storage aggregate
|
||||||
self.client.put(
|
self.client.put(
|
||||||
'/resource_providers/%s/aggregates' % uuids.othercn,
|
'/resource_providers/%s/aggregates' % uuids.othercn,
|
||||||
{'aggregates': [uuids.agg1],
|
{'aggregates': [uuids.ssp],
|
||||||
'resource_provider_generation':
|
'resource_provider_generation':
|
||||||
resp.json()['resource_provider_generation']},
|
resp.json()['resource_provider_generation']},
|
||||||
version='1.19')
|
version='1.19')
|
||||||
|
|
||||||
# At this point, there are no allocations
|
def _set_up_provider_tree_allocs(self):
|
||||||
self.assertEqual({}, self.client.get_allocations_for_provider_tree(
|
"""Create some allocations on our compute (with sharing).
|
||||||
self.context, self.compute_name))
|
|
||||||
|
|
||||||
# Create some allocations on our compute (with sharing)
|
Must be invoked from within an _interceptor() context.
|
||||||
|
"""
|
||||||
cn_inst1_allocs = {
|
cn_inst1_allocs = {
|
||||||
'allocations': {
|
'allocations': {
|
||||||
self.compute_uuid: {'resources': {'MEMORY_MB': 512}},
|
self.compute_uuid: {'resources': {'MEMORY_MB': 512}},
|
||||||
|
@ -1140,6 +1138,25 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
|
||||||
'user_id': uuids.user,
|
'user_id': uuids.user,
|
||||||
})
|
})
|
||||||
|
|
||||||
|
return cn_inst1_allocs, cn_inst2_allocs
|
||||||
|
|
||||||
|
def test_get_allocations_for_provider_tree(self):
|
||||||
|
with self._interceptor():
|
||||||
|
# When the provider tree cache is empty (or we otherwise supply a
|
||||||
|
# bogus node name), we get ValueError.
|
||||||
|
self.assertRaises(ValueError,
|
||||||
|
self.client.get_allocations_for_provider_tree,
|
||||||
|
self.context, 'bogus')
|
||||||
|
|
||||||
|
self._set_up_provider_tree()
|
||||||
|
|
||||||
|
# At this point, there are no allocations
|
||||||
|
self.assertEqual({}, self.client.get_allocations_for_provider_tree(
|
||||||
|
self.context, self.compute_name))
|
||||||
|
|
||||||
|
cn_inst1_allocs, cn_inst2_allocs = (
|
||||||
|
self._set_up_provider_tree_allocs())
|
||||||
|
|
||||||
# And now we should get all the right allocations. Note that we see
|
# And now we should get all the right allocations. Note that we see
|
||||||
# nothing from othercn_inst.
|
# nothing from othercn_inst.
|
||||||
expected = {
|
expected = {
|
||||||
|
@ -1157,3 +1174,47 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase):
|
||||||
if 'generation' in alloc:
|
if 'generation' in alloc:
|
||||||
del alloc['generation']
|
del alloc['generation']
|
||||||
self.assertEqual(expected, actual)
|
self.assertEqual(expected, actual)
|
||||||
|
|
||||||
|
def test_reshape(self):
    """Smoke test the report client shim for the reshaper API."""
    with self._interceptor():
        # Simulate placement API communication failure: keystoneauth
        # exceptions must bubble up unconverted.
        with mock.patch.object(
                self.client, 'post', side_effect=kse.MissingAuthPlugin):
            self.assertRaises(kse.ClientException,
                              self.client._reshape, self.context, {}, {})

        # Invalid payload (empty inventories) results in a 409, which the
        # report client converts to ReshapeFailed. Use assertRaises rather
        # than a bare try/except so the test FAILS if no exception is
        # raised at all (try/except would silently pass in that case).
        ex = self.assertRaises(
            exception.ReshapeFailed,
            self.client._reshape, self.context, {}, {})
        self.assertIn('JSON does not validate: {} does not have '
                      'enough properties', ex.kwargs['error'])

        # Okay, do some real stuffs. We're just smoke-testing that we can
        # hit a good path to the API here; real testing of the API happens
        # in gabbits and via update_from_provider_tree.
        self._set_up_provider_tree()
        self._set_up_provider_tree_allocs()

        ptree = self.client.get_provider_tree_and_ensure_root(
            self.context, self.compute_uuid)
        inventories = {}
        for rp_uuid in ptree.get_provider_uuids():
            data = ptree.data(rp_uuid)
            # Add a new resource class to the inventories
            inventories[rp_uuid] = {
                "inventories": dict(data.inventory,
                                    CUSTOM_FOO={'total': 10}),
                "resource_provider_generation": data.generation
            }

        allocs = self.client.get_allocations_for_provider_tree(
            self.context, self.compute_name)
        for alloc in allocs.values():
            for res in alloc['allocations'].values():
                res['resources']['CUSTOM_FOO'] = 1

        resp = self.client._reshape(self.context, inventories, allocs)
        self.assertEqual(204, resp.status_code)
|
||||||
|
|
Loading…
Reference in New Issue