Add microversion for nested allocation candidates

This patch adds a microversion with a release note for allocation
candidates with nested resource provider trees.

Allocation candidates with nested resource providers are now
supported, with the following features:

1) ``GET /allocation_candidates`` is aware of nested providers.
   Namely, when provider trees are present, ``allocation_requests``
   in the response of ``GET /allocation_candidates`` can include
   allocations on combinations of multiple resource providers
   in the same tree.
2) ``root_provider_uuid`` and ``parent_provider_uuid`` fields are
   added to ``provider_summaries`` in the response of
   ``GET /allocation_candidates``.

Change-Id: I6cecb25c6c16cecc23d4008474d150b1f15f7d8a
Blueprint: nested-resource-providers-allocation-candidates
Tetsuro Nakamura 2018-05-01 20:23:20 +09:00
parent aeb95b5739
commit 41b04e8819
8 changed files with 253 additions and 93 deletions
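
As a quick illustration (not part of this change), a client opts in to the
new behaviour by requesting microversion 1.29; the endpoint URL and token
below are placeholders:

    # Hypothetical client-side sketch; endpoint and token are placeholders.
    import requests

    resp = requests.get(
        'http://placement.example.com/allocation_candidates',
        params={'resources': 'VCPU:1,SRIOV_NET_VF:4'},
        headers={
            # Requesting 1.29 opts in to nested provider trees.
            'openstack-api-version': 'placement 1.29',
            'x-auth-token': 'ADMIN_TOKEN',
        })
    body = resp.json()
    # At 1.29 each provider summary carries its position in the tree.
    for rp_uuid, summary in body['provider_summaries'].items():
        print(rp_uuid, summary['parent_provider_uuid'],
              summary['root_provider_uuid'])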


@@ -117,7 +117,8 @@ def _transform_allocation_requests_list(alloc_reqs):

 def _transform_provider_summaries(p_sums, requests, include_traits=False,
-                                  include_all_resources=False):
+                                  include_all_resources=False,
+                                  enable_nested_providers=False):
     """Turn supplied list of ProviderSummary objects into a dict, keyed by
     resource provider UUID, of dicts of provider and inventory information.

     The traits only show up when `include_traits` is `True`.
@@ -141,6 +142,8 @@ def _transform_provider_summaries(p_sums, requests, include_traits=False,
                 'HW_CPU_X86_AVX512F',
                 'HW_CPU_X86_AVX512CD'
             ]
+            parent_provider_uuid: null,
+            root_provider_uuid: RP_UUID_1
         },
         RP_UUID_2: {
             'resources': {
@@ -156,7 +159,9 @@ def _transform_provider_summaries(p_sums, requests, include_traits=False,
             'traits': [
                 'HW_NIC_OFFLOAD_TSO',
                 'HW_NIC_OFFLOAD_GRO'
-            ]
+            ],
+            parent_provider_uuid: null,
+            root_provider_uuid: RP_UUID_2
         }
     }
     """
@@ -185,9 +190,49 @@ def _transform_provider_summaries(p_sums, requests, include_traits=False,
             ret[ps.resource_provider.uuid]['traits'] = [
                 t.name for t in ps.traits]
+        if enable_nested_providers:
+            ret[ps.resource_provider.uuid]['parent_provider_uuid'] = (
+                ps.resource_provider.parent_provider_uuid)
+            ret[ps.resource_provider.uuid]['root_provider_uuid'] = (
+                ps.resource_provider.root_provider_uuid)

     return ret


+def _exclude_nested_providers(alloc_cands):
+    """Exclude allocation requests and provider summaries for old microversions
+    if they involve more than one provider from the same tree.
+    """
+    # Build a temporary dict, keyed by root RP UUID, of sets of UUIDs of all
+    # RPs in that tree.
+    tree_rps_by_root = collections.defaultdict(set)
+    for ps in alloc_cands.provider_summaries:
+        rp_uuid = ps.resource_provider.uuid
+        root_uuid = ps.resource_provider.root_provider_uuid
+        tree_rps_by_root[root_uuid].add(rp_uuid)
+    # We use this to get a list of sets of providers in each tree
+    tree_sets = list(tree_rps_by_root.values())
+
+    for a_req in alloc_cands.allocation_requests[:]:
+        alloc_rp_uuids = set([
+            arr.resource_provider.uuid for arr in a_req.resource_requests])
+        # If more than one allocation is provided by the same tree, kill
+        # that allocation request.
+        if any(len(tree_set & alloc_rp_uuids) > 1 for tree_set in tree_sets):
+            alloc_cands.allocation_requests.remove(a_req)
+
+    # Exclude eliminated providers from the provider summaries.
+    all_rp_uuids = set()
+    for a_req in alloc_cands.allocation_requests:
+        all_rp_uuids |= set(
+            arr.resource_provider.uuid for arr in a_req.resource_requests)
+    for ps in alloc_cands.provider_summaries[:]:
+        if ps.resource_provider.uuid not in all_rp_uuids:
+            alloc_cands.provider_summaries.remove(ps)
+
+    return alloc_cands
+
+
 def _transform_allocation_candidates(alloc_cands, requests, want_version):
     """Turn supplied AllocationCandidates object into a dict containing
     allocation requests and provider summaries.
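
As a standalone sketch of the filter above, using plain data and hypothetical
provider names, a candidate is dropped for pre-1.29 requests exactly when it
touches more than one provider of a single tree:

    import collections

    # provider -> root of its tree (hypothetical names)
    roots = {'cn1': 'cn1', 'numa1_1': 'cn1', 'pf1_1': 'cn1', 'ss': 'ss'}
    tree_rps_by_root = collections.defaultdict(set)
    for rp, root in roots.items():
        tree_rps_by_root[root].add(rp)
    tree_sets = list(tree_rps_by_root.values())

    def spans_one_tree(alloc_rp_uuids):
        # True when more than one provider of the request is in one tree.
        return any(len(tree & alloc_rp_uuids) > 1 for tree in tree_sets)

    print(spans_one_tree({'cn1', 'pf1_1'}))  # True: dropped before 1.29
    print(spans_one_tree({'cn1', 'ss'}))     # False: sharing provider, kept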
@@ -197,6 +242,11 @@ def _transform_allocation_candidates(alloc_cands, requests, want_version):
         'provider_summaries': <PROVIDER_SUMMARIES>,
     }
     """
+    # exclude nested providers with old microversions
+    enable_nested_providers = want_version.matches((1, 29))
+    if not enable_nested_providers:
+        alloc_cands = _exclude_nested_providers(alloc_cands)
+
     if want_version.matches((1, 12)):
         a_reqs = _transform_allocation_requests_dict(
             alloc_cands.allocation_requests)
@@ -209,7 +259,9 @@ def _transform_allocation_candidates(alloc_cands, requests, want_version):
     p_sums = _transform_provider_summaries(
         alloc_cands.provider_summaries, requests,
         include_traits=include_traits,
-        include_all_resources=include_all_resources)
+        include_all_resources=include_all_resources,
+        enable_nested_providers=enable_nested_providers)

     return {
         'allocation_requests': a_reqs,
         'provider_summaries': p_sums,
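
The gating above hinges on `want_version.matches((1, 29))`. A toy stand-in
for the request's microversion object (the real one comes from the
microversion module) shows the intended semantics:

    class FakeVersion(object):
        """Toy stand-in for the parsed request microversion."""
        def __init__(self, major, minor):
            self.ver = (major, minor)

        def matches(self, min_version):
            # True when the requested version is at least min_version.
            return self.ver >= min_version

    assert FakeVersion(1, 29).matches((1, 29))      # nested handling enabled
    assert not FakeVersion(1, 28).matches((1, 29))  # old behaviour: filtered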


@@ -74,6 +74,7 @@ VERSIONS = [
              # field in response of `GET /allocation_candidates` API even if
              # the resource class is not in the requested resources.
     '1.28',  # Add support for consumer generation
+    '1.29',  # Support nested providers in GET /allocation_candidates API.
 ]
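
Appending '1.29' here is what bumps the service's advertised latest version
(checked by the gabbi test below). Schematically, assuming the maximum is
simply the last entry of the list:

    VERSIONS = ['1.27', '1.28', '1.29']  # abbreviated

    def max_version_string():
        # Assumption: the newest supported version is the final list entry.
        return VERSIONS[-1]

    assert max_version_string() == '1.29'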


@@ -487,3 +487,14 @@ prior to 1.28. The only way to safely modify allocations for a consumer and
 satisfy expectations you have regarding the prior existence (or lack of
 existence) of those allocations is to always use microversion 1.28+ when
 calling allocations API endpoints.
+
+1.29 Support allocation candidates with nested resource providers
+------------------------------------------------------------------
+
+Add support for nested resource providers with the following two features:
+
+1) ``GET /allocation_candidates`` is aware of nested providers. Namely, when
+   provider trees are present, ``allocation_requests`` in the response of
+   ``GET /allocation_candidates`` can include allocations on combinations of
+   multiple resource providers in the same tree.
+2) ``root_provider_uuid`` and ``parent_provider_uuid`` are added to
+   ``provider_summaries`` in the response of ``GET /allocation_candidates``.
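
To make feature 2) concrete, a hedged sketch of one provider summary entry
as returned at 1.29 (values are placeholders):

    provider_summary = {
        'resources': {
            'SRIOV_NET_VF': {'capacity': 8, 'used': 0},
        },
        'traits': ['HW_NIC_OFFLOAD_TSO'],
        # New in 1.29; parent is null for a root provider.
        'parent_provider_uuid': 'NUMA1_1_UUID',
        'root_provider_uuid': 'CN1_UUID',
    }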


@@ -26,6 +26,21 @@ from nova.tests import fixtures
 from nova.tests import uuidsentinel as uuids


+def create_provider(context, name, *aggs, **kwargs):
+    parent = kwargs.get('parent')
+    root = kwargs.get('root')
+    uuid = kwargs.get('uuid', getattr(uuids, name))
+    rp = rp_obj.ResourceProvider(context, name=name, uuid=uuid)
+    if parent:
+        rp.parent_provider_uuid = parent
+    if root:
+        rp.root_provider_uuid = root
+    rp.create()
+    if aggs:
+        rp.set_aggregates(aggs)
+    return rp
+
+
 def add_inventory(rp, rc, total, **kwargs):
     kwargs.setdefault('max_unit', total)
     inv = rp_obj.Inventory(rp._context, resource_provider=rp,
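
A hedged usage sketch of the new module-level helper, building a small tree
the way the fixtures below do (a database context `ctx` is assumed from the
test base):

    cn = create_provider(ctx, 'cn')                        # a root provider
    numa0 = create_provider(ctx, 'numa0', parent=cn.uuid)
    pf0 = create_provider(ctx, 'pf0', parent=numa0.uuid)
    # Every provider in the chain reports the same tree root.
    assert pf0.root_provider_uuid == cn.uuid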
@@ -69,17 +84,7 @@ class PlacementDbBaseTestCase(test.NoDBTestCase):
         self.rp_uuid_to_name = {}

     def _create_provider(self, name, *aggs, **kwargs):
-        parent = kwargs.get('parent')
-        root = kwargs.get('root')
-        uuid = kwargs.get('uuid', getattr(uuids, name))
-        rp = rp_obj.ResourceProvider(self.ctx, name=name, uuid=uuid)
-        if parent:
-            rp.parent_provider_uuid = parent
-        if root:
-            rp.root_provider_uuid = root
-        rp.create()
-        if aggs:
-            rp.set_aggregates(aggs)
+        rp = create_provider(self.ctx, name, *aggs, **kwargs)
         self.rp_uuid_to_name[rp.uuid] = name
         return rp


@@ -26,7 +26,9 @@ from nova.api.openstack.placement import policies
 from nova import conf
 from nova import config
 from nova import context
+from nova import rc_fields as fields
 from nova.tests import fixtures
+from nova.tests.functional.api.openstack.placement.db import test_base as tb
 from nova.tests.unit import policy_fixture
 from nova.tests import uuidsentinel as uuids
@@ -247,99 +249,88 @@ class AllocationFixture(APIFixture):

 class SharedStorageFixture(APIFixture):
     """An APIFixture that has some two compute nodes without local storage
-    associated by aggregate to a provider of shared storage.
+    associated by aggregate to a provider of shared storage. Each compute
+    node has two NUMA node resource providers, each of which has a PF
+    resource provider.
+
+                    +-------------------------------------+
+                    |  sharing storage (ss)               |
+                    |   DISK_GB:2000                      |
+                    |   traits: MISC_SHARES_VIA_AGGREGATE |
+                    +-----------------+-------------------+
+                                      | aggregate
+       +--------------------------+   |   +------------------------+
+       | compute node (cn1)       |---+---| compute node (cn2)     |
+       |  CPU: 24                 |       |  CPU: 24               |
+       |  MEMORY_MB: 128*1024     |       |  MEMORY_MB: 128*1024   |
+       |  traits: HW_CPU_X86_SSE, |       |                        |
+       |          HW_CPU_X86_SSE2 |       |                        |
+       +--------------------------+       +------------------------+
+            |              |                   |              |
+       +---------+    +---------+         +---------+    +---------+
+       | numa1_1 |    | numa1_2 |         | numa2_1 |    | numa2_2 |
+       +---------+    +---------+         +---------+    +---------+
+            |              |                   |              |
+ +---------------++---------------+  +---------------++----------------+
+ | pf1_1         || pf1_2         |  | pf2_1         || pf2_2          |
+ | SRIOV_NET_VF:8|| SRIOV_NET_VF:8|  | SRIOV_NET_VF:8|| SRIOV_NET_VF:8 |
+ +---------------++---------------+  +---------------++----------------+
     """

     def start_fixture(self):
         super(SharedStorageFixture, self).start_fixture()
         self.context = context.get_admin_context()

-        cn1_uuid = uuidutils.generate_uuid()
-        cn2_uuid = uuidutils.generate_uuid()
-        ss_uuid = uuidutils.generate_uuid()
         agg_uuid = uuidutils.generate_uuid()
-        os.environ['CN1_UUID'] = cn1_uuid
-        os.environ['CN2_UUID'] = cn2_uuid
-        os.environ['SS_UUID'] = ss_uuid
-        os.environ['AGG_UUID'] = agg_uuid

-        cn1 = rp_obj.ResourceProvider(
-            self.context,
-            name='cn1',
-            uuid=cn1_uuid)
-        cn1.create()
-
-        cn2 = rp_obj.ResourceProvider(
-            self.context,
-            name='cn2',
-            uuid=cn2_uuid)
-        cn2.create()
-
-        ss = rp_obj.ResourceProvider(
-            self.context,
-            name='ss',
-            uuid=ss_uuid)
-        ss.create()
+        cn1 = tb.create_provider(self.context, 'cn1', agg_uuid)
+        cn2 = tb.create_provider(self.context, 'cn2', agg_uuid)
+        ss = tb.create_provider(self.context, 'ss', agg_uuid)
+
+        numa1_1 = tb.create_provider(self.context, 'numa1_1', parent=cn1.uuid)
+        numa1_2 = tb.create_provider(self.context, 'numa1_2', parent=cn1.uuid)
+        numa2_1 = tb.create_provider(self.context, 'numa2_1', parent=cn2.uuid)
+        numa2_2 = tb.create_provider(self.context, 'numa2_2', parent=cn2.uuid)
+
+        pf1_1 = tb.create_provider(self.context, 'pf1_1', parent=numa1_1.uuid)
+        pf1_2 = tb.create_provider(self.context, 'pf1_2', parent=numa1_2.uuid)
+        pf2_1 = tb.create_provider(self.context, 'pf2_1', parent=numa2_1.uuid)
+        pf2_2 = tb.create_provider(self.context, 'pf2_2', parent=numa2_2.uuid)
+
+        os.environ['AGG_UUID'] = agg_uuid
+
+        os.environ['CN1_UUID'] = cn1.uuid
+        os.environ['CN2_UUID'] = cn2.uuid
+        os.environ['SS_UUID'] = ss.uuid
+
+        os.environ['NUMA1_1_UUID'] = numa1_1.uuid
+        os.environ['NUMA1_2_UUID'] = numa1_2.uuid
+        os.environ['NUMA2_1_UUID'] = numa2_1.uuid
+        os.environ['NUMA2_2_UUID'] = numa2_2.uuid
+
+        os.environ['PF1_1_UUID'] = pf1_1.uuid
+        os.environ['PF1_2_UUID'] = pf1_2.uuid
+        os.environ['PF2_1_UUID'] = pf2_1.uuid
+        os.environ['PF2_2_UUID'] = pf2_2.uuid

         # Populate compute node inventory for VCPU and RAM
         for cn in (cn1, cn2):
-            vcpu_inv = rp_obj.Inventory(
-                self.context,
-                resource_provider=cn,
-                resource_class='VCPU',
-                total=24,
-                reserved=0,
-                max_unit=24,
-                min_unit=1,
-                step_size=1,
-                allocation_ratio=16.0)
-            vcpu_inv.obj_set_defaults()
-            ram_inv = rp_obj.Inventory(
-                self.context,
-                resource_provider=cn,
-                resource_class='MEMORY_MB',
-                total=128 * 1024,
-                reserved=0,
-                max_unit=128 * 1024,
-                min_unit=256,
-                step_size=256,
-                allocation_ratio=1.5)
-            ram_inv.obj_set_defaults()
-            inv_list = rp_obj.InventoryList(objects=[vcpu_inv, ram_inv])
-            cn.set_inventory(inv_list)
-
-        t_avx_sse = rp_obj.Trait.get_by_name(self.context, "HW_CPU_X86_SSE")
-        t_avx_sse2 = rp_obj.Trait.get_by_name(self.context, "HW_CPU_X86_SSE2")
-        cn1.set_traits(rp_obj.TraitList(objects=[t_avx_sse, t_avx_sse2]))
+            tb.add_inventory(cn, fields.ResourceClass.VCPU, 24,
+                             allocation_ratio=16.0)
+            tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 128 * 1024,
+                             allocation_ratio=1.5)
+        tb.set_traits(cn1, 'HW_CPU_X86_SSE', 'HW_CPU_X86_SSE2')

-        # Populate shared storage provider with DISK_GB inventory
-        disk_inv = rp_obj.Inventory(
-            self.context,
-            resource_provider=ss,
-            resource_class='DISK_GB',
-            total=2000,
-            reserved=100,
-            max_unit=2000,
-            min_unit=10,
-            step_size=10,
-            allocation_ratio=1.0)
-        disk_inv.obj_set_defaults()
-        inv_list = rp_obj.InventoryList(objects=[disk_inv])
-        ss.set_inventory(inv_list)
-
-        # Mark the shared storage pool as having inventory shared among any
-        # provider associated via aggregate
-        t = rp_obj.Trait.get_by_name(
-            self.context,
-            "MISC_SHARES_VIA_AGGREGATE",
-        )
-        ss.set_traits(rp_obj.TraitList(objects=[t]))
-
-        # Now associate the shared storage pool and both compute nodes with
-        # the same aggregate
-        cn1.set_aggregates([agg_uuid])
-        cn2.set_aggregates([agg_uuid])
-        ss.set_aggregates([agg_uuid])
+        # Populate shared storage provider with DISK_GB inventory and
+        # mark it shared among any provider associated via aggregate
+        tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000,
+                         reserved=100, allocation_ratio=1.0)
+        tb.set_traits(ss, 'MISC_SHARES_VIA_AGGREGATE')
+
+        # Populate PF inventory for VF
+        for pf in (pf1_1, pf1_2, pf2_1, pf2_2):
+            tb.add_inventory(pf, fields.ResourceClass.SRIOV_NET_VF,
+                             8, allocation_ratio=1.0)


 class NonSharedStorageFixture(APIFixture):
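
The provider counts from this fixture are what the gabbi expectations below
rely on; a quick sketch of the arithmetic:

    tree = {
        'cn1': {'numa1_1': ['pf1_1'], 'numa1_2': ['pf1_2']},
        'cn2': {'numa2_1': ['pf2_1'], 'numa2_2': ['pf2_2']},
    }
    n_tree_providers = sum(
        1 + len(numas) + sum(len(pfs) for pfs in numas.values())
        for numas in tree.values())
    print(n_tree_providers)  # 10 providers across both trees; 'ss' is separate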


@@ -325,3 +325,92 @@ tests:
       MEMORY_MB:
         capacity: 196608 # 1.5 * 128G
         used: 0
+
+# Before microversion 1.29, no root/parent uuid is included
+- name: get allocation candidates no root or parent uuid
+  GET: /allocation_candidates?resources=VCPU:1
+  status: 200
+  request_headers:
+    openstack-api-version: placement 1.28
+  response_json_paths:
+    $.allocation_requests.`len`: 2
+    $.provider_summaries.`len`: 2
+    $.provider_summaries.["$ENVIRON['CN1_UUID']"].`len`: 2
+    $.provider_summaries.["$ENVIRON['CN2_UUID']"].`len`: 2
+
+- name: get allocation candidates with root and parent uuid
+  GET: /allocation_candidates?resources=VCPU:1
+  status: 200
+  request_headers:
+    openstack-api-version: placement 1.29
+  response_json_paths:
+    $.allocation_requests.`len`: 2
+    $.provider_summaries.`len`: 10
+    $.provider_summaries.["$ENVIRON['CN1_UUID']"].`len`: 4
+    $.provider_summaries.["$ENVIRON['CN2_UUID']"].`len`: 4
+    $.provider_summaries.["$ENVIRON['CN1_UUID']"].parent_provider_uuid: null
+    $.provider_summaries.["$ENVIRON['CN1_UUID']"].root_provider_uuid: "$ENVIRON['CN1_UUID']"
+    $.provider_summaries.["$ENVIRON['NUMA1_1_UUID']"].parent_provider_uuid: "$ENVIRON['CN1_UUID']"
+    $.provider_summaries.["$ENVIRON['NUMA1_1_UUID']"].root_provider_uuid: "$ENVIRON['CN1_UUID']"
+    $.provider_summaries.["$ENVIRON['NUMA1_2_UUID']"].parent_provider_uuid: "$ENVIRON['CN1_UUID']"
+    $.provider_summaries.["$ENVIRON['NUMA1_2_UUID']"].root_provider_uuid: "$ENVIRON['CN1_UUID']"
+    $.provider_summaries.["$ENVIRON['PF1_1_UUID']"].parent_provider_uuid: "$ENVIRON['NUMA1_1_UUID']"
+    $.provider_summaries.["$ENVIRON['PF1_1_UUID']"].root_provider_uuid: "$ENVIRON['CN1_UUID']"
+    $.provider_summaries.["$ENVIRON['PF1_2_UUID']"].parent_provider_uuid: "$ENVIRON['NUMA1_2_UUID']"
+    $.provider_summaries.["$ENVIRON['PF1_2_UUID']"].root_provider_uuid: "$ENVIRON['CN1_UUID']"
+
+# Before microversion 1.29, it isn't aware of nested providers.
+# Namely, it can return non-root providers for allocation candidates,
+- name: get allocation candidates only nested provider old microversion
+  GET: /allocation_candidates?resources=SRIOV_NET_VF:4
+  status: 200
+  request_headers:
+    openstack-api-version: placement 1.28
+  response_json_paths:
+    $.allocation_requests.`len`: 4
+    $.provider_summaries.`len`: 4
+
+- name: get allocation candidates only nested provider new microversion
+  GET: /allocation_candidates?resources=SRIOV_NET_VF:4
+  status: 200
+  request_headers:
+    openstack-api-version: placement 1.29
+  response_json_paths:
+    $.allocation_requests.`len`: 4
+    $.provider_summaries.`len`: 10
+
+# ...but it can't return combinations of providers in a tree.
+- name: get allocation candidates root and nested old microversion
+  GET: /allocation_candidates?resources=VCPU:1,SRIOV_NET_VF:4
+  status: 200
+  request_headers:
+    openstack-api-version: placement 1.28
+  response_json_paths:
+    $.allocation_requests.`len`: 0
+    $.provider_summaries.`len`: 0
+
+- name: get allocation candidates root and nested new microversion
+  GET: /allocation_candidates?resources=VCPU:1,SRIOV_NET_VF:4
+  status: 200
+  request_headers:
+    openstack-api-version: placement 1.29
+  response_json_paths:
+    $.allocation_requests.`len`: 4
+    $.provider_summaries.`len`: 10
+    $.allocation_requests..allocations["$ENVIRON['CN1_UUID']"].resources.VCPU: [1, 1]
+    $.allocation_requests..allocations["$ENVIRON['PF1_1_UUID']"].resources.SRIOV_NET_VF: 4
+    $.allocation_requests..allocations["$ENVIRON['PF1_2_UUID']"].resources.SRIOV_NET_VF: 4
+    $.allocation_requests..allocations["$ENVIRON['CN2_UUID']"].resources.VCPU: [1, 1]
+    $.allocation_requests..allocations["$ENVIRON['PF2_1_UUID']"].resources.SRIOV_NET_VF: 4
+    $.allocation_requests..allocations["$ENVIRON['PF2_2_UUID']"].resources.SRIOV_NET_VF: 4
+
+# Make sure that old microversions can return combinations where
+# sharing providers are involved
+- name: get allocation candidates shared and nested old microversion
+  GET: /allocation_candidates?resources=DISK_GB:10,SRIOV_NET_VF:4
+  status: 200
+  request_headers:
+    openstack-api-version: placement 1.28
+  response_json_paths:
+    $.allocation_requests.`len`: 4
+    $.provider_summaries.`len`: 5


@@ -41,13 +41,13 @@ tests:
   response_json_paths:
     $.errors[0].title: Not Acceptable

-- name: latest microversion is 1.28
+- name: latest microversion is 1.29
   GET: /
   request_headers:
     openstack-api-version: placement latest
   response_headers:
     vary: /openstack-api-version/
-    openstack-api-version: placement 1.28
+    openstack-api-version: placement 1.29

 - name: other accept header bad version
   GET: /


@@ -0,0 +1,11 @@
+---
+features:
+  - |
+    From microversion 1.29, we support allocation candidates with nested
+    resource providers. Namely, the following features are added.
+
+    1) ``GET /allocation_candidates`` is aware of nested providers. Namely,
+       when provider trees are present, ``allocation_requests`` in the
+       response of ``GET /allocation_candidates`` can include allocations on
+       combinations of multiple resource providers in the same tree.
+    2) ``root_provider_uuid`` and ``parent_provider_uuid`` are added to
+       ``provider_summaries`` in the response of ``GET /allocation_candidates``.