Add gabbits using a DeepNUMANetworkFixture

Modify the NUMANetworkFixture so we make a subclass
that pre-creates cn1 and cn2 and puts them under distinct parents and a
shared grandparent. This helps to model a deeply nested but sparse
topology where providers that contribute to the solution are low down
in the tree.

This provides a deeper topology to test the same_subtree and
_merge_candidates changes that come in the following patches.

The added gabbit intentionally uses YAML anchors to indicate that the
expected response on each test is the same.

Understanding the structure of the available resource providers and
the UUIDs in use can be super painful, but osc_placement_tree [1] can
help. In my own testing I added the following to the end of
make_entities to trigger it to dump:

        from osc_placement_tree import utils as placement_visual
        from placement import direct
        import time

        with direct.PlacementDirect(
              self.conf_fixture.conf, latest_microversion=True) as client:
            placement_visual.dump_placement_db_to_dot(
                    placement_visual.PlacementDirectAsClientWrapper(client),
                    '/tmp/dump.%s.dot' % time.time(),
                    hidden_fields=['inventories', 'generation', 'aggregates', 'resource_provider_generation'])

[1] https://pypi.org/project/osc-placement-tree/

Change-Id: I2aedac0ce3a4f5a40de796bb9f74824541a95a65
This commit is contained in:
Chris Dent 2019-08-13 15:09:10 +01:00
parent 9d256fa6c7
commit 978408b900
2 changed files with 125 additions and 14 deletions

View File

@ -436,9 +436,18 @@ class NUMANetworkFixture(APIFixture):
|VF:4 | |VF:4 | |VF:2 | |VF:2 | |VF:2 | |VF:2 | |VF:8 |
+-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
"""
# Having these here allows us to pre-create cn1 and cn2 providers in
# DeepNUMANetworkFixture, where they have additional parents.
cn1 = None
cn2 = None
def start_fixture(self):
super(NUMANetworkFixture, self).start_fixture()
self.make_entities()
def make_entities(self):
aggA_uuid = uuidutils.generate_uuid()
os.environ['AGGA_UUID'] = aggA_uuid
@ -448,14 +457,16 @@ class NUMANetworkFixture(APIFixture):
os.environ['SS1_UUID'] = ss1.uuid
# CN1
cn1 = tb.create_provider(self.context, 'cn1', aggA_uuid)
tb.set_traits(cn1, ot.COMPUTE_VOLUME_MULTI_ATTACH)
os.environ['CN1_UUID'] = cn1.uuid
if not self.cn1:
self.cn1 = tb.create_provider(self.context, 'cn1', aggA_uuid)
self.cn1.set_aggregates([aggA_uuid])
tb.set_traits(self.cn1, ot.COMPUTE_VOLUME_MULTI_ATTACH)
os.environ['CN1_UUID'] = self.cn1.uuid
numas = []
for i in (0, 1):
numa = tb.create_provider(
self.context, 'numa%d' % i, parent=cn1.uuid)
self.context, 'numa%d' % i, parent=self.cn1.uuid)
traits = [ot.HW_NUMA_ROOT]
if i == 1:
traits.append('CUSTOM_FOO')
@ -488,7 +499,7 @@ class NUMANetworkFixture(APIFixture):
os.environ['FPGA1_%d_UUID' % i] = fpga.uuid
agent = tb.create_provider(
self.context, 'sriov_agent', parent=cn1.uuid)
self.context, 'sriov_agent', parent=self.cn1.uuid)
tb.set_traits(agent, 'CUSTOM_VNIC_TYPE_DIRECT')
os.environ['SRIOV_AGENT_UUID'] = agent.uuid
@ -500,7 +511,7 @@ class NUMANetworkFixture(APIFixture):
os.environ['ESN%d_UUID' % i] = dev.uuid
agent = tb.create_provider(
self.context, 'ovs_agent', parent=cn1.uuid)
self.context, 'ovs_agent', parent=self.cn1.uuid)
tb.set_traits(agent, 'CUSTOM_VNIC_TYPE_NORMAL')
os.environ['OVS_AGENT_UUID'] = agent.uuid
@ -510,21 +521,24 @@ class NUMANetworkFixture(APIFixture):
os.environ['BR_INT_UUID'] = dev.uuid
# CN2
cn2 = tb.create_provider(self.context, 'cn2', aggA_uuid)
tb.add_inventory(cn2, orc.VCPU, 8)
if not self.cn2:
self.cn2 = tb.create_provider(self.context, 'cn2')
self.cn2.set_aggregates([aggA_uuid])
tb.add_inventory(self.cn2, orc.VCPU, 8)
# Get a new consumer
consumer = tb.ensure_consumer(self.context, user, proj)
tb.set_allocation(self.context, cn2, consumer, {orc.VCPU: 3})
tb.set_allocation(self.context, self.cn2, consumer, {orc.VCPU: 3})
tb.add_inventory(
cn2, orc.MEMORY_MB, 2048, min_unit=1024, step_size=128)
tb.add_inventory(cn2, orc.DISK_GB, 1000)
tb.set_traits(cn2, 'CUSTOM_FOO')
os.environ['CN2_UUID'] = cn2.uuid
self.cn2, orc.MEMORY_MB, 2048, min_unit=1024, step_size=128)
tb.add_inventory(self.cn2, orc.DISK_GB, 1000)
tb.set_traits(self.cn2, 'CUSTOM_FOO')
os.environ['CN2_UUID'] = self.cn2.uuid
nics = []
for i in (1, 2, 3):
nic = tb.create_provider(
self.context, 'nic%d' % i, parent=cn2.uuid)
self.context, 'nic%d' % i, parent=self.cn2.uuid)
# TODO(efried): Use standard HW_NIC_ROOT trait
tb.set_traits(nic, 'CUSTOM_HW_NIC_ROOT')
nics.append(nic)
@ -557,6 +571,25 @@ class NUMANetworkFixture(APIFixture):
os.environ['PF%s_UUID' % suf] = pf.uuid
class DeepNUMANetworkFixture(NUMANetworkFixture):
    """Extend the NUMANetworkFixture with two empty resource providers as
    parents and grandparents of the compute nodes. This is to exercise
    same_subtree in a more complete fashion.
    """

    def make_entities(self):
        """Create parents and grandparents for cn1 and cn2. They will be fully
        populated by the superclass, NUMANetworkFixture.
        """
        # One shared grandparent roots the deep, sparse tree.
        root = tb.create_provider(self.context, 'gp1')
        # Give each compute node a distinct parent beneath that root.
        left = tb.create_provider(self.context, 'p1', parent=root.uuid)
        right = tb.create_provider(self.context, 'p2', parent=root.uuid)
        # Pre-create the compute nodes so the superclass reuses them
        # instead of creating root-level providers of the same name.
        self.cn1 = tb.create_provider(self.context, 'cn1', parent=left.uuid)
        self.cn2 = tb.create_provider(self.context, 'cn2', parent=right.uuid)
        # The superclass attaches inventories, traits and child providers.
        super(DeepNUMANetworkFixture, self).make_entities()
class NonSharedStorageFixture(APIFixture):
"""An APIFixture that has three compute nodes with local storage that do
not use shared storage.

View File

@ -0,0 +1,78 @@
# Test same_subtree with a deep hierarchy where the top levels of the tree
# provide no resources. We create this by adding additional empty top
# providers to the NUMANetworkFixture used elsewhere for testing same_subtree.
fixtures:
- DeepNUMANetworkFixture
defaults:
request_headers:
x-auth-token: admin
content-type: application/json
accept: application/json
# version of request in which `same_subtree` is supported
openstack-api-version: placement 1.36
tests:
- name: deep subtree 2VFs, one compute
GET: /allocation_candidates
query_parameters:
resources_COMPUTE: VCPU:1
required_COMPUTE: CUSTOM_FOO
required_NIC: CUSTOM_HW_NIC_ROOT
resources_PORT1: CUSTOM_VF:2
required_PORT1: CUSTOM_PHYSNET1
# Make sure that there is a chain of subtrees, compute->nic->port, so
# that we only get results where _PORT1 is anchored under _NIC, which
# is anchored under _COMPUTE.
# _COMPUTE, _NIC, _PORT1 in one same_subtree would allow some _PORT1
# results to be independent of _NIC (while still sharing the _COMPUTE
# ancestor), leading to 12 allocation requests instead of 4.
same_subtree:
- _NIC,_COMPUTE
- _NIC,_PORT1
group_policy: none
# Create an anchor for this response verification, used below to signify
# that each of the three tests expects the same response.
response_json_paths: &json_response
$.provider_summaries.`len`: 26
$.allocation_requests.`len`: 4
$.allocation_requests..mappings._COMPUTE:
# 4 cn2_uuid each as a list, no other computes
-
- $ENVIRON['CN2_UUID']
-
- $ENVIRON['CN2_UUID']
-
- $ENVIRON['CN2_UUID']
-
- $ENVIRON['CN2_UUID']
$.allocation_requests..allocations['$ENVIRON["CN2_UUID"]'].resources.VCPU: [1, 1, 1, 1]
$.allocation_requests..allocations['$ENVIRON["PF1_1_UUID"]'].resources.CUSTOM_VF: 2
$.allocation_requests..allocations['$ENVIRON["PF3_1_UUID"]'].resources.CUSTOM_VF: 2
$.allocation_requests..allocations['$ENVIRON["PF2_1_UUID"]'].resources.CUSTOM_VF: 2
$.allocation_requests..allocations['$ENVIRON["PF2_3_UUID"]'].resources.CUSTOM_VF: 2
- name: deep subtree 2VFs, with foo
GET: /allocation_candidates
query_parameters:
resources_COMPUTE: VCPU:1
required_COMPUTE: CUSTOM_FOO
resources_PORT1: CUSTOM_VF:2
required_PORT1: CUSTOM_PHYSNET1
same_subtree: _COMPUTE,_PORT1
group_policy: none
response_json_paths:
<<: *json_response
- name: deep subtree 2VFs, no foo
GET: /allocation_candidates
query_parameters:
resources_COMPUTE: VCPU:1
resources_PORT1: CUSTOM_VF:2
required_PORT1: CUSTOM_PHYSNET1
same_subtree: _COMPUTE,_PORT1
group_policy: none
response_json_paths:
<<: *json_response