Merge "functional: Drop '_api' suffix from placement fixture"

Authored by Zuul on 2020-08-21 10:06:41 +00:00; committed by Gerrit Code Review
commit 835440e3f9
12 changed files with 131 additions and 134 deletions
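
In short: the functional test base classes now expose the placement fixture's HTTP client as self.placement rather than self.placement_api, and every call site is updated to match. A minimal sketch of the rename, using the inventory lookup from the first hunk below (rp_uuid stands in for any resource provider UUID and is illustrative):

    # Before this commit the client carried an '_api' suffix:
    #     inv = self.placement_api.get(
    #         '/resource_providers/%s/inventories' % rp_uuid).body
    # After this commit the same call reads:
    inv = self.placement.get(
        '/resource_providers/%s/inventories' % rp_uuid).body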


@ -419,7 +419,7 @@ class TestUpdateComputeNodeReservedAndAllocationRatio(
rt = compute_service.manager.rt
inv = self.placement_api.get(
inv = self.placement.get(
'/resource_providers/%s/inventories' % rp_uuid).body
ratios = {'VCPU': 16.1, 'MEMORY_MB': 1.6, 'DISK_GB': 1.1}


@ -444,46 +444,46 @@ class PlacementHelperMixin:
"""A helper mixin for interacting with placement."""
def _get_all_resource_classes(self):
resp = self.placement_api.get(
resp = self.placement.get(
'/resource_classes', version='1.2'
).body['resource_classes']
return [d['name'] for d in resp]
def _get_all_providers(self):
return self.placement_api.get(
return self.placement.get(
'/resource_providers', version='1.14'
).body['resource_providers']
def _get_all_rp_uuids_in_a_tree(self, in_tree_rp_uuid):
rps = self.placement_api.get(
rps = self.placement.get(
'/resource_providers?in_tree=%s' % in_tree_rp_uuid,
version='1.20',
).body['resource_providers']
return [rp['uuid'] for rp in rps]
def _post_resource_provider(self, rp_name):
return self.placement_api.post(
return self.placement.post(
'/resource_providers', version='1.20', body={'name': rp_name}
).body
def _get_resource_provider_by_uuid(self, rp_uuid):
return self.placement_api.get(
return self.placement.get(
'/resource_providers/%s' % rp_uuid, version='1.15',
).body
def _get_provider_uuid_by_name(self, name):
return self.placement_api.get(
return self.placement.get(
'/resource_providers?name=%s' % name,
).body['resource_providers'][0]['uuid']
def _get_provider_usages(self, provider_uuid):
return self.placement_api.get(
return self.placement.get(
'/resource_providers/%s/usages' % provider_uuid
).body['usages']
# TODO(stephenfin): Rename to '_get_provider_allocations'
def _get_allocations_by_provider_uuid(self, rp_uuid):
return self.placement_api.get(
return self.placement.get(
'/resource_providers/%s/allocations' % rp_uuid
).body['allocations']
@ -493,7 +493,7 @@ class PlacementHelperMixin:
:param rp_uuid: UUID of the resource provider to update
:returns: Dict object with the results.
"""
return self.placement_api.get(
return self.placement.get(
'/resource_providers/%s/traits' % rp_uuid, version='1.6'
).body['traits']
@ -506,10 +506,10 @@ class PlacementHelperMixin:
:param traits: List of trait strings to set on the provider.
:returns: APIResponse object with the results.
"""
provider = self.placement_api.get(
provider = self.placement.get(
'/resource_providers/%s' % rp_uuid
).body
return self.placement_api.put(
return self.placement.put(
'/resource_providers/%s/traits' % rp_uuid,
{
'resource_provider_generation': provider['generation'],
@ -519,7 +519,7 @@ class PlacementHelperMixin:
)
def _get_provider_inventory(self, rp_uuid):
return self.placement_api.get(
return self.placement.get(
'/resource_providers/%s/inventories' % rp_uuid
).body['inventories']
@ -531,7 +531,7 @@ class PlacementHelperMixin:
:param inv_body: inventory to set on the provider
:returns: APIResponse object with the results
"""
return self.placement_api.post(
return self.placement.post(
'/resource_providers/%s/inventories' % rp_uuid,
version='1.15', body=inv_body
).body
@ -544,21 +544,21 @@ class PlacementHelperMixin:
:param inv_body: inventory to set on the provider
:returns: APIResponse object with the results
"""
return self.placement_api.put(
return self.placement.put(
'/resource_providers/%s/inventories' % rp_uuid, body=inv_body,
).body
def _get_provider_aggregates(self, rp_uuid):
return self.placement_api.get(
return self.placement.get(
'/resource_providers/%s/aggregates' % rp_uuid, version='1.1'
).body['aggregates']
# TODO(stephenfin): Rename '_set_provider_aggregates'
def _set_aggregate(self, rp_uuid, agg_id):
provider = self.placement_api.get(
provider = self.placement.get(
'/resource_providers/%s' % rp_uuid
).body
return self.placement_api.put(
return self.placement.put(
'/resource_providers/%s/aggregates' % rp_uuid,
body={
'aggregates': [agg_id],
@ -568,13 +568,13 @@ class PlacementHelperMixin:
).body
def _get_all_traits(self):
return self.placement_api.get('/traits', version='1.6').body['traits']
return self.placement.get('/traits', version='1.6').body['traits']
def _create_trait(self, trait):
return self.placement_api.put('/traits/%s' % trait, {}, version='1.6')
return self.placement.put('/traits/%s' % trait, {}, version='1.6')
def _delete_trait(self, trait):
return self.placement_api.delete('/traits/%s' % trait, version='1.6')
return self.placement.delete('/traits/%s' % trait, version='1.6')
def assertRequestMatchesUsage(self, requested_resources, root_rp_uuid):
# It matches the usages of the whole tree against the request
@ -673,7 +673,7 @@ class PlacementInstanceHelperMixin(InstanceHelperMixin, PlacementHelperMixin):
# TODO(stephenfin): Rename to '_get_server_allocations'
def _get_allocations_by_server_uuid(self, server_uuid):
return self.placement_api.get(
return self.placement.get(
'/allocations/%s' % server_uuid
).body['allocations']
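
All of these helpers are thin wrappers around the renamed client, so tests built on the mixins are unaffected beyond the attribute rename. A hedged sketch of how a test might combine them (the test class, host name, and driver setting are illustrative and not part of this commit; the _get_provider_* helpers are the ones defined above):

    from nova.tests.functional import integrated_helpers


    class ExampleUsageCheckTest(integrated_helpers.ProviderUsageBaseTestCase):
        """Illustrative only; not part of this commit."""

        # Assumption: the fake virt driver shipped with nova's test tree.
        compute_driver = 'fake.SmallFakeDriver'

        def test_usage_never_exceeds_inventory(self):
            # _start_compute comes from the base class; 'host1' is made up.
            self._start_compute('host1')
            rp_uuid = self._get_provider_uuid_by_name('host1')
            usages = self._get_provider_usages(rp_uuid)
            inventory = self._get_provider_inventory(rp_uuid)
            for rc, used in usages.items():
                # Usage of a resource class can never exceed its total.
                self.assertLessEqual(used, inventory[rc]['total'])
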
@ -995,9 +995,7 @@ class _IntegratedTestBase(test.TestCase, PlacementInstanceHelperMixin):
self.useFixture(cast_as_call.CastAsCall(self))
placement = self.useFixture(func_fixtures.PlacementFixture())
self.placement_api = placement.api
self.placement = self.useFixture(func_fixtures.PlacementFixture()).api
self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))
self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
@ -1067,13 +1065,12 @@ class ProviderUsageBaseTestCase(test.TestCase, PlacementInstanceHelperMixin):
self.policy = self.useFixture(policy_fixture.RealPolicyFixture())
self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))
self.placement = self.useFixture(func_fixtures.PlacementFixture()).api
self.useFixture(nova_fixtures.AllServicesCurrent())
fake_notifier.stub_notifier(self)
self.addCleanup(fake_notifier.reset)
placement = self.useFixture(func_fixtures.PlacementFixture())
self.placement_api = placement.api
self.api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
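
With the rename, both _IntegratedTestBase and ProviderUsageBaseTestCase bind the client in a single statement. A hedged sketch of a standalone functional test doing the same setup itself (the class and test body are illustrative; PlacementFixture and its api attribute are used exactly as above):

    from nova import test
    from nova.tests.functional import fixtures as func_fixtures


    class ExamplePlacementTest(test.TestCase):
        """Illustrative only; not part of this commit."""

        def setUp(self):
            super().setUp()
            # The fixture object is not needed afterwards, so bind its HTTP
            # client directly under the new, shorter attribute name.
            self.placement = self.useFixture(
                func_fixtures.PlacementFixture()).api

        def test_list_resource_providers(self):
            resp = self.placement.get('/resource_providers', version='1.14')
            self.assertEqual(200, resp.status)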


@ -165,7 +165,7 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
# host_info do not change with every iteration of the for loop.
self.computes[host] = start_compute(host, host_info)
if save_rp_uuids:
self.compute_rp_uuids[host] = self.placement_api.get(
self.compute_rp_uuids[host] = self.placement.get(
'/resource_providers?name=%s' % host).body[
'resource_providers'][0]['uuid']


@ -68,7 +68,7 @@ class NUMAServersTest(NUMAServersTestBase):
# 'start_service' to make sure there isn't a mismatch
self.compute = self.start_service('compute', host='compute1')
compute_rp_uuid = self.placement_api.get(
compute_rp_uuid = self.placement.get(
'/resource_providers?name=compute1').body[
'resource_providers'][0]['uuid']
@ -94,7 +94,7 @@ class NUMAServersTest(NUMAServersTestBase):
self.assertFalse(self.mock_filter.called)
if expected_usage:
compute_usage = self.placement_api.get(
compute_usage = self.placement.get(
'/resource_providers/%s/usages' % compute_rp_uuid).body[
'usages']
self.assertEqual(expected_usage, compute_usage)
@ -524,7 +524,7 @@ class NUMAServersTest(NUMAServersTestBase):
expected_usage = {'VCPU': 0, 'PCPU': 0, 'DISK_GB': 0,
'MEMORY_MB': 0}
compute_usage = self.placement_api.get(
compute_usage = self.placement.get(
'/resource_providers/%s/usages' % compute_rp_uuid).body[
'usages']
self.assertEqual(expected_usage, compute_usage)
@ -565,7 +565,7 @@ class NUMAServersTest(NUMAServersTestBase):
expected_usage = {'VCPU': 0, 'PCPU': 2, 'DISK_GB': 20,
'MEMORY_MB': 2048}
compute_usage = self.placement_api.get(
compute_usage = self.placement.get(
'/resource_providers/%s/usages' % compute_rp_uuid).body[
'usages']
self.assertEqual(expected_usage, compute_usage)
@ -594,7 +594,7 @@ class NUMAServersTest(NUMAServersTestBase):
expected_usage = {'VCPU': 0, 'PCPU': 2, 'DISK_GB': 20,
'MEMORY_MB': 2048}
compute_usage = self.placement_api.get(
compute_usage = self.placement.get(
'/resource_providers/%s/usages' % compute_rp_uuid).body[
'usages']
self.assertEqual(expected_usage, compute_usage)
@ -729,7 +729,7 @@ class ReshapeForPCPUsTest(NUMAServersTestBase):
# ensure there is no PCPU inventory being reported
for host, compute_rp_uuid in self.compute_rp_uuids.items():
compute_inventory = self.placement_api.get(
compute_inventory = self.placement.get(
'/resource_providers/%s/inventories' % compute_rp_uuid).body[
'inventories']
self.assertEqual(8, compute_inventory['VCPU']['total'])
@ -755,20 +755,20 @@ class ReshapeForPCPUsTest(NUMAServersTestBase):
compute_rp_uuid = self.compute_rp_uuids['test_compute0']
compute_inventory = self.placement_api.get(
compute_inventory = self.placement.get(
'/resource_providers/%s/inventories' % compute_rp_uuid).body[
'inventories']
compute_usages = self.placement_api.get(
compute_usages = self.placement.get(
'/resource_providers/%s/usages' % compute_rp_uuid).body[
'usages']
self.assertEqual(4, compute_usages['VCPU'])
compute_rp_uuid = self.compute_rp_uuids['test_compute1']
compute_inventory = self.placement_api.get(
compute_inventory = self.placement.get(
'/resource_providers/%s/inventories' % compute_rp_uuid).body[
'inventories']
compute_usages = self.placement_api.get(
compute_usages = self.placement.get(
'/resource_providers/%s/usages' % compute_rp_uuid).body[
'usages']
self.assertEqual(0, compute_usages['VCPU'])
@ -792,18 +792,18 @@ class ReshapeForPCPUsTest(NUMAServersTestBase):
compute_rp_uuid = self.compute_rp_uuids['test_compute0']
compute_inventory = self.placement_api.get(
compute_inventory = self.placement.get(
'/resource_providers/%s/inventories' % compute_rp_uuid).body[
'inventories']
self.assertEqual(8, compute_inventory['VCPU']['total'])
self.assertNotIn('PCPU', compute_inventory)
compute_usages = self.placement_api.get(
compute_usages = self.placement.get(
'/resource_providers/%s/usages' % compute_rp_uuid).body[
'usages']
self.assertEqual(4, compute_usages['VCPU'])
self.assertNotIn('PCPU', compute_usages)
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server1['id']).body['allocations']
# the flavor has disk=10 and ephemeral=10
self.assertEqual(
@ -815,18 +815,18 @@ class ReshapeForPCPUsTest(NUMAServersTestBase):
compute_rp_uuid = self.compute_rp_uuids['test_compute1']
compute_inventory = self.placement_api.get(
compute_inventory = self.placement.get(
'/resource_providers/%s/inventories' % compute_rp_uuid).body[
'inventories']
self.assertEqual(8, compute_inventory['VCPU']['total'])
self.assertNotIn('PCPU', compute_inventory)
compute_usages = self.placement_api.get(
compute_usages = self.placement.get(
'/resource_providers/%s/usages' % compute_rp_uuid).body[
'usages']
self.assertEqual(2, compute_usages['VCPU'])
self.assertNotIn('PCPU', compute_usages)
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server2['id']).body['allocations']
# the flavor has disk=10 and ephemeral=10
self.assertEqual(
@ -854,18 +854,18 @@ class ReshapeForPCPUsTest(NUMAServersTestBase):
compute_rp_uuid = self.compute_rp_uuids['test_compute0']
compute_inventory = self.placement_api.get(
compute_inventory = self.placement.get(
'/resource_providers/%s/inventories' % compute_rp_uuid).body[
'inventories']
self.assertEqual(8, compute_inventory['PCPU']['total'])
self.assertNotIn('VCPU', compute_inventory)
compute_usages = self.placement_api.get(
compute_usages = self.placement.get(
'/resource_providers/%s/usages' % compute_rp_uuid).body[
'usages']
self.assertEqual(4, compute_usages['PCPU'])
self.assertNotIn('VCPU', compute_usages)
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server1['id']).body['allocations']
# the flavor has disk=10 and ephemeral=10
self.assertEqual(
@ -877,18 +877,18 @@ class ReshapeForPCPUsTest(NUMAServersTestBase):
compute_rp_uuid = self.compute_rp_uuids['test_compute1']
compute_inventory = self.placement_api.get(
compute_inventory = self.placement.get(
'/resource_providers/%s/inventories' % compute_rp_uuid).body[
'inventories']
self.assertEqual(8, compute_inventory['PCPU']['total'])
self.assertNotIn('VCPU', compute_inventory)
compute_usages = self.placement_api.get(
compute_usages = self.placement.get(
'/resource_providers/%s/usages' % compute_rp_uuid).body[
'usages']
self.assertEqual(2, compute_usages['PCPU'])
self.assertNotIn('VCPU', compute_usages)
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server2['id']).body['allocations']
# the flavor has disk=10 and ephemeral=10
self.assertEqual(
@ -903,12 +903,12 @@ class ReshapeForPCPUsTest(NUMAServersTestBase):
compute_rp_uuid = self.compute_rp_uuids['test_compute0']
compute_inventory = self.placement_api.get(
compute_inventory = self.placement.get(
'/resource_providers/%s/inventories' % compute_rp_uuid).body[
'inventories']
self.assertEqual(8, compute_inventory['PCPU']['total'])
self.assertNotIn('VCPU', compute_inventory)
compute_usages = self.placement_api.get(
compute_usages = self.placement.get(
'/resource_providers/%s/usages' % compute_rp_uuid).body[
'usages']
self.assertEqual(6, compute_usages['PCPU'])
@ -916,7 +916,7 @@ class ReshapeForPCPUsTest(NUMAServersTestBase):
# check the allocations for this server specifically
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server3['id']).body[
'allocations']
self.assertEqual(


@ -89,10 +89,10 @@ class VGPUReshapeTests(base.ServersTestBase):
self.compute = self.start_service('compute', host='compute1')
# create the VGPU resource in placement manually
compute_rp_uuid = self.placement_api.get(
compute_rp_uuid = self.placement.get(
'/resource_providers?name=compute1').body[
'resource_providers'][0]['uuid']
inventories = self.placement_api.get(
inventories = self.placement.get(
'/resource_providers/%s/inventories' % compute_rp_uuid).body
inventories['inventories']['VGPU'] = {
'allocation_ratio': 1.0,
@ -101,7 +101,7 @@ class VGPUReshapeTests(base.ServersTestBase):
'reserved': 0,
'step_size': 1,
'total': 3}
self.placement_api.put(
self.placement.put(
'/resource_providers/%s/inventories' % compute_rp_uuid,
inventories)
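
The body returned by GET /resource_providers/{uuid}/inventories carries the provider's resource_provider_generation, which is why the test can add a VGPU entry to the fetched dict and PUT the whole body straight back: placement checks the generation and rejects the update if the provider changed in the meantime. A minimal sketch of the same get-modify-put pattern (rp_uuid and the VGPU totals are illustrative):

    # Fetch current inventory; the response body includes the generation.
    inv = self.placement.get(
        '/resource_providers/%s/inventories' % rp_uuid).body
    # Add (or adjust) a resource class, then write the body back as-is.
    inv['inventories']['VGPU'] = {
        'allocation_ratio': 1.0, 'min_unit': 1, 'max_unit': 3,
        'reserved': 0, 'step_size': 1, 'total': 3}
    self.placement.put(
        '/resource_providers/%s/inventories' % rp_uuid, inv)
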
@ -150,17 +150,17 @@ class VGPUReshapeTests(base.ServersTestBase):
# verify that the inventory, usages and allocation are correct before
# the reshape
compute_inventory = self.placement_api.get(
compute_inventory = self.placement.get(
'/resource_providers/%s/inventories' % compute_rp_uuid).body[
'inventories']
self.assertEqual(3, compute_inventory['VGPU']['total'])
compute_usages = self.placement_api.get(
compute_usages = self.placement.get(
'/resource_providers/%s/usages' % compute_rp_uuid).body[
'usages']
self.assertEqual(2, compute_usages['VGPU'])
for server in (server1, server2):
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
# the flavor has disk=10 and ephemeral=10
self.assertEqual(
@ -172,7 +172,7 @@ class VGPUReshapeTests(base.ServersTestBase):
# verify that the inventory, usages and allocation are correct after
# the reshape
compute_inventory = self.placement_api.get(
compute_inventory = self.placement.get(
'/resource_providers/%s/inventories' % compute_rp_uuid).body[
'inventories']
self.assertNotIn('VGPU', compute_inventory)
@ -185,16 +185,16 @@ class VGPUReshapeTests(base.ServersTestBase):
for pci_device in [fakelibvirt.PGPU1_PCI_ADDR,
fakelibvirt.PGPU2_PCI_ADDR,
fakelibvirt.PGPU3_PCI_ADDR]:
gpu_rp_uuid = self.placement_api.get(
gpu_rp_uuid = self.placement.get(
'/resource_providers?name=compute1_%s' % pci_device).body[
'resource_providers'][0]['uuid']
pgpu_uuid_to_name[gpu_rp_uuid] = pci_device
gpu_inventory = self.placement_api.get(
gpu_inventory = self.placement.get(
'/resource_providers/%s/inventories' % gpu_rp_uuid).body[
'inventories']
self.assertEqual(1, gpu_inventory['VGPU']['total'])
gpu_usages = self.placement_api.get(
gpu_usages = self.placement.get(
'/resource_providers/%s/usages' % gpu_rp_uuid).body[
'usages']
usages[pci_device] = gpu_usages['VGPU']
@ -204,7 +204,7 @@ class VGPUReshapeTests(base.ServersTestBase):
self.assertEqual(2, len(used_devices))
# Make sure that both instances are using the correct pGPUs
for server in [server1, server2]:
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body[
'allocations']
self.assertEqual(
@ -229,15 +229,15 @@ class VGPUReshapeTests(base.ServersTestBase):
# find the pGPU that wasn't used before we created the third instance
# It should have taken the previously available pGPU
device = avail_devices[0]
gpu_rp_uuid = self.placement_api.get(
gpu_rp_uuid = self.placement.get(
'/resource_providers?name=compute1_%s' % device).body[
'resource_providers'][0]['uuid']
gpu_usages = self.placement_api.get(
gpu_usages = self.placement.get(
'/resource_providers/%s/usages' % gpu_rp_uuid).body[
'usages']
self.assertEqual(1, gpu_usages['VGPU'])
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server3['id']).body[
'allocations']
self.assertEqual(


@ -346,7 +346,7 @@ class VGPUMultipleTypesTests(VGPUTestBase):
# ... but we double-check by asking the RP by its expected name
expected_pgpu2_rp_name = (self.compute1.host + '_' +
fakelibvirt.PGPU2_PCI_ADDR)
pgpu2_rp = self.placement_api.get(
pgpu2_rp = self.placement.get(
'/resource_providers?name=' + expected_pgpu2_rp_name).body[
'resource_providers']
# See, Placement API returned no RP for this name as it doesn't exist.


@ -66,7 +66,7 @@ class BuildRescheduleClaimFailsTestCase(
'project_id': self.api.project_id,
'user_id': self.api.project_id
}
resp = self.placement_api.put(
resp = self.placement.put(
'/allocations/%s' % alt_consumer, allocs, version='1.12')
self.assertEqual(204, resp.status, resp.content)
raise exception.ComputeResourcesUnavailable(reason='overhead!')


@ -27,7 +27,7 @@ class UnshelveNeutronErrorTest(
# Start standard fixtures.
placement = func_fixtures.PlacementFixture()
self.useFixture(placement)
self.placement_api = placement.api
self.placement = placement.api
self.neutron = nova_fixtures.NeutronFixture(self)
self.useFixture(self.neutron)
fake_image.stub_out_image_service(self)
@ -56,7 +56,7 @@ class UnshelveNeutronErrorTest(
self._wait_for_server_parameter(
server, {'status': 'SHELVED_OFFLOADED',
'OS-EXT-SRV-ATTR:host': None})
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
self.assertEqual(0, len(allocations))
@ -84,6 +84,6 @@ class UnshelveNeutronErrorTest(
'OS-EXT-SRV-ATTR:host': None})
# As the instance went back to offloaded state we expect no allocation
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
self.assertEqual(0, len(allocations))


@ -26,7 +26,7 @@ class TestServices(integrated_helpers._IntegratedTestBase):
host='compute', binary='nova-compute')[0]['id']
def _get_traits_on_compute(self):
return self.placement_api.get(
return self.placement.get(
'/resource_providers/%s/traits' % self.compute_rp_uuid,
version='1.6'
).body['traits']
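
The trait helpers from PlacementHelperMixin make the same kind of check possible from any test built on the mixin. A hedged sketch using only helpers defined earlier in this commit (the CUSTOM_ trait name is illustrative):

    # Custom traits must be created before they can be assigned or listed.
    self._create_trait('CUSTOM_EXAMPLE_TRAIT')
    self.assertIn('CUSTOM_EXAMPLE_TRAIT', self._get_all_traits())
    # Clean up the trait so it does not leak into other tests.
    self._delete_trait('CUSTOM_EXAMPLE_TRAIT')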


@ -640,10 +640,10 @@ class TestNovaManagePlacementHealAllocations(
}
]
}
self.placement_api.put('/allocations/%s' % server['id'], alloc_body)
self.placement.put('/allocations/%s' % server['id'], alloc_body)
# Make sure we did that correctly. Use version 1.12 so we can assert
# the project_id and user_id are based on the sentinel values.
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id'], version='1.12').body
self.assertEqual(INCOMPLETE_CONSUMER_ID, allocations['project_id'])
self.assertEqual(INCOMPLETE_CONSUMER_ID, allocations['user_id'])
@ -666,7 +666,7 @@ class TestNovaManagePlacementHealAllocations(
'Successfully updated allocations for', output)
self.assertIn('Processed 1 instances.', output)
# Now assert that the consumer was actually updated.
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id'], version='1.12').body
self.assertEqual(server['tenant_id'], allocations['project_id'])
self.assertEqual(server['user_id'], allocations['user_id'])
@ -830,7 +830,7 @@ class TestNovaManagePlacementHealAllocations(
}
]
}
self.placement_api.put('/allocations/%s' % server['id'], alloc_body)
self.placement.put('/allocations/%s' % server['id'], alloc_body)
# Check allocation to see if memory has changed
allocs = self._get_allocations_by_server_uuid(
@ -951,7 +951,7 @@ class TestNovaManagePlacementHealPortAllocations(
return server, updated_ports
def _assert_placement_updated(self, server, ports):
rsp = self.placement_api.get(
rsp = self.placement.get(
'/allocations/%s' % server['id'],
version=1.28).body
@ -991,7 +991,7 @@ class TestNovaManagePlacementHealPortAllocations(
self._assert_port_updated(port['id'])
def _assert_placement_not_updated(self, server):
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
self.assertEqual(1, len(allocations))
self.assertIn(self.compute1_rp_uuid, allocations)
@ -1111,10 +1111,10 @@ class TestNovaManagePlacementHealPortAllocations(
# NOTE(gibi): putting empty allocation will delete the consumer in
# placement
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id'], version=1.28).body
allocations['allocations'] = {}
self.placement_api.put(
self.placement.put(
'/allocations/%s' % server['id'], allocations, version=1.28)
# let's trigger a heal
@ -1154,7 +1154,7 @@ class TestNovaManagePlacementHealPortAllocations(
}
]
}
self.placement_api.put('/allocations/%s' % server['id'], alloc_body)
self.placement.put('/allocations/%s' % server['id'], alloc_body)
# let's trigger a heal
result = self.cli.heal_allocations(verbose=True, max_count=2)
@ -1182,10 +1182,10 @@ class TestNovaManagePlacementHealPortAllocations(
# NOTE(gibi): putting empty allocation will delete the consumer in
# placement
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id'], version=1.28).body
allocations['allocations'] = {}
self.placement_api.put(
self.placement.put(
'/allocations/%s' % server['id'], allocations, version=1.28)
# let's trigger a heal


@ -1410,19 +1410,19 @@ class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase):
return resp['hypervisors'][0]['id']
def _get_provider_usages(provider_uuid):
return self.placement_api.get(
return self.placement.get(
'/resource_providers/%s/usages' % provider_uuid).body['usages']
def _get_allocations_by_server_uuid(server_uuid):
return self.placement_api.get(
return self.placement.get(
'/allocations/%s' % server_uuid).body['allocations']
def _set_provider_inventory(rp_uuid, resource_class, inventory):
# Get the resource provider generation for the inventory update.
rp = self.placement_api.get(
rp = self.placement.get(
'/resource_providers/%s' % rp_uuid).body
inventory['resource_provider_generation'] = rp['generation']
return self.placement_api.put(
return self.placement.put(
'/resource_providers/%s/inventories/%s' %
(rp_uuid, resource_class), inventory).body
@ -5639,7 +5639,7 @@ class PortResourceRequestBasedSchedulingTestBase(
return self.api.post_server({'server': server_req})
def _set_provider_inventories(self, rp_uuid, inventories):
rp = self.placement_api.get(
rp = self.placement.get(
'/resource_providers/%s' % rp_uuid).body
inventories['resource_provider_generation'] = rp['generation']
return self._update_inventory(rp_uuid, inventories)
@ -5653,7 +5653,7 @@ class PortResourceRequestBasedSchedulingTestBase(
"uuid": ovs_agent_rp_uuid,
"parent_provider_uuid": compute_rp_uuid
}
self.placement_api.post('/resource_providers',
self.placement.post('/resource_providers',
body=agent_rp_req,
version='1.20')
ovs_bridge_rp_uuid = getattr(uuids, ovs_agent_rp_uuid + 'ovs br')
@ -5662,7 +5662,7 @@ class PortResourceRequestBasedSchedulingTestBase(
"uuid": ovs_bridge_rp_uuid,
"parent_provider_uuid": ovs_agent_rp_uuid
}
self.placement_api.post('/resource_providers',
self.placement.post('/resource_providers',
body=ovs_bridge_req,
version='1.20')
self.ovs_bridge_rp_per_host[compute_rp_uuid] = ovs_bridge_rp_uuid
@ -5696,7 +5696,7 @@ class PortResourceRequestBasedSchedulingTestBase(
"uuid": device_rp_uuid,
"parent_provider_uuid": parent_rp_uuid
}
self.placement_api.post('/resource_providers',
self.placement.post('/resource_providers',
body=sriov_pf_req,
version='1.20')
@ -5730,7 +5730,7 @@ class PortResourceRequestBasedSchedulingTestBase(
"uuid": sriov_agent_rp_uuid,
"parent_provider_uuid": compute_rp_uuid
}
self.placement_api.post('/resource_providers',
self.placement.post('/resource_providers',
body=agent_rp_req,
version='1.20')
dev_rp_name_prefix = ("%s:NIC Switch agent:" % hostname)
@ -5802,7 +5802,7 @@ class PortResourceRequestBasedSchedulingTestBase(
updated_qos_sriov_port = self.neutron.show_port(
qos_sriov_port['id'])['port']
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
# if there is new_flavor then we either have an in progress resize or
@ -5839,7 +5839,7 @@ class PortResourceRequestBasedSchedulingTestBase(
self.assertEqual({}, updated_non_qos_port['binding:profile'])
if migration_uuid:
migration_allocations = self.placement_api.get(
migration_allocations = self.placement.get(
'/allocations/%s' % migration_uuid).body['allocations']
# We expect one set of allocations for the compute resources on the
@ -6173,7 +6173,7 @@ class PortResourceRequestBasedSchedulingTest(
non_qos_port['id'])['port']
updated_qos_port = self.neutron.show_port(qos_port['id'])['port']
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
# We expect one set of allocations for the compute resources on the
@ -6219,7 +6219,7 @@ class PortResourceRequestBasedSchedulingTest(
ovs_port = self.neutron.show_port(ovs_port['id'])['port']
sriov_port = self.neutron.show_port(sriov_port['id'])['port']
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
# We expect one set of allocations for the compute resources on the
@ -6259,7 +6259,7 @@ class PortResourceRequestBasedSchedulingTest(
networks=[{'port': port['id']}])
self._wait_for_state_change(server, 'ACTIVE')
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
# We expect one set of allocations for the compute resources on the
# compute rp and one set for the networking resources on the ovs bridge
@ -6290,7 +6290,7 @@ class PortResourceRequestBasedSchedulingTest(
updated_port = self.neutron.show_port(
self.neutron.port_with_resource_request['id'])['port']
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
# We expect that the port related resource allocations are removed
@ -6319,7 +6319,7 @@ class PortResourceRequestBasedSchedulingTest(
networks=[{'port': port['id']}])
server = self._wait_for_state_change(server, 'ACTIVE')
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
# We expect one set of allocations for the compute resources on the
# compute rp and one set for the networking resources on the ovs bridge
@ -6368,7 +6368,7 @@ class PortResourceRequestBasedSchedulingTest(
'rp_uuid': port_rp_uuid,
'server_uuid': server['id']})
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
# Nova leaks the port allocation so the server still has the same
@ -6421,7 +6421,7 @@ class PortResourceRequestBasedSchedulingTest(
sriov_port_with_res_req = self.neutron.show_port(
sriov_port_with_res_req['id'])['port']
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
# We expect one set of allocations for the compute resources on the
@ -6518,7 +6518,7 @@ class PortResourceRequestBasedSchedulingTest(
port = self.neutron.show_port(port['id'])['port']
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
# We expect one set of allocations for the compute resources on the
@ -6619,7 +6619,7 @@ class ServerMoveWithPortResourceRequestTest(
# but the migration allocation is gone
migration_uuid = self.get_migration_uuid_for_instance(server['id'])
migration_allocations = self.placement_api.get(
migration_allocations = self.placement.get(
'/allocations/%s' % migration_uuid).body['allocations']
self.assertEqual({}, migration_allocations)
@ -6682,7 +6682,7 @@ class ServerMoveWithPortResourceRequestTest(
server, compute3_rp_uuid, non_qos_normal_port,
qos_normal_port, qos_sriov_port, self.flavor_with_group_policy)
# but the migration allocation is gone
migration_allocations = self.placement_api.get(
migration_allocations = self.placement.get(
'/allocations/%s' % migration_uuid).body['allocations']
self.assertEqual({}, migration_allocations)
@ -6723,7 +6723,7 @@ class ServerMoveWithPortResourceRequestTest(
server, self.compute2_rp_uuid, non_qos_normal_port,
qos_normal_port, qos_sriov_port, self.flavor_with_group_policy,
new_flavor=new_flavor)
migration_allocations = self.placement_api.get(
migration_allocations = self.placement.get(
'/allocations/%s' % migration_uuid).body['allocations']
self.assertEqual({}, migration_allocations)
@ -6775,7 +6775,7 @@ class ServerMoveWithPortResourceRequestTest(
{'VCPU': 0, 'MEMORY_MB': 0, 'DISK_GB': 0,
'NET_BW_IGR_KILOBIT_PER_SEC': 0, 'NET_BW_EGR_KILOBIT_PER_SEC': 0},
self.compute2_rp_uuid)
migration_allocations = self.placement_api.get(
migration_allocations = self.placement.get(
'/allocations/%s' % migration_uuid).body['allocations']
self.assertEqual({}, migration_allocations)
@ -6850,7 +6850,7 @@ class ServerMoveWithPortResourceRequestTest(
self._check_allocation(
server, compute3_rp_uuid, non_qos_port, qos_port, qos_sriov_port,
self.flavor_with_group_policy, new_flavor=new_flavor)
migration_allocations = self.placement_api.get(
migration_allocations = self.placement.get(
'/allocations/%s' % migration_uuid).body['allocations']
self.assertEqual({}, migration_allocations)
@ -6894,7 +6894,7 @@ class ServerMoveWithPortResourceRequestTest(
# as the migration is failed we expect that the migration allocation
# is deleted
migration_allocations = self.placement_api.get(
migration_allocations = self.placement.get(
'/allocations/%s' % migration_uuid).body['allocations']
self.assertEqual({}, migration_allocations)
@ -6915,7 +6915,7 @@ class ServerMoveWithPortResourceRequestTest(
# unexpected. This will cause
# update_pci_request_spec_with_allocated_interface_name() to raise
# when the instance is migrated to the host2.
rsp = self.placement_api.put(
rsp = self.placement.put(
'/resource_providers/%s'
% self.sriov_dev_rp_per_host[self.compute2_rp_uuid][self.PF2],
{"name": "invalid-device-rp-name"})
@ -6961,7 +6961,7 @@ class ServerMoveWithPortResourceRequestTest(
# as the migration is failed we expect that the migration allocation
# is deleted
migration_allocations = self.placement_api.get(
migration_allocations = self.placement.get(
'/allocations/%s' % migration_uuid).body['allocations']
self.assertEqual({}, migration_allocations)
@ -7004,7 +7004,7 @@ class ServerMoveWithPortResourceRequestTest(
migration_uuid = self.get_migration_uuid_for_instance(server['id'])
# The migration allocation is deleted
migration_allocations = self.placement_api.get(
migration_allocations = self.placement.get(
'/allocations/%s' % migration_uuid).body['allocations']
self.assertEqual({}, migration_allocations)
@ -7013,7 +7013,7 @@ class ServerMoveWithPortResourceRequestTest(
non_qos_normal_port['id'])['port']
updated_qos_port = self.neutron.show_port(
qos_normal_port['id'])['port']
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
# We expect one set of allocations for the compute resources on the
# compute rp and one set for the networking resources on the ovs
@ -7045,7 +7045,7 @@ class ServerMoveWithPortResourceRequestTest(
updated_qos_sriov_port = self.neutron.show_port(
qos_sriov_port['id'])['port']
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
# We expect two sets of allocations. One set for the source compute
@ -7116,7 +7116,7 @@ class ServerMoveWithPortResourceRequestTest(
updated_qos_sriov_port = self.neutron.show_port(
qos_sriov_port['id'])['port']
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
self.assertEqual(3, len(allocations), allocations)
@ -7252,7 +7252,7 @@ class ServerMoveWithPortResourceRequestTest(
# unexpected. This will cause
# update_pci_request_spec_with_allocated_interface_name() to raise
# when the instance is evacuated to the host2.
rsp = self.placement_api.put(
rsp = self.placement.put(
'/resource_providers/%s'
% self.sriov_dev_rp_per_host[self.compute2_rp_uuid][self.PF2],
{"name": "invalid-device-rp-name"})
@ -7432,7 +7432,7 @@ class ServerMoveWithPortResourceRequestTest(
# unexpected. This will cause
# update_pci_request_spec_with_allocated_interface_name() to raise
# when the instance is live migrated to the host2.
rsp = self.placement_api.put(
rsp = self.placement.put(
'/resource_providers/%s'
% self.sriov_dev_rp_per_host[self.compute2_rp_uuid][self.PF2],
{"name": "invalid-device-rp-name"})
@ -7490,7 +7490,7 @@ class ServerMoveWithPortResourceRequestTest(
self.api.post_server_action(server['id'], req)
self._wait_for_server_parameter(
server, {'status': 'SHELVED_OFFLOADED'})
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
self.assertEqual(0, len(allocations))
@ -7513,7 +7513,7 @@ class ServerMoveWithPortResourceRequestTest(
self.api.post_server_action(server['id'], req)
self._wait_for_server_parameter(
server, {'status': 'SHELVED_OFFLOADED'})
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
self.assertEqual(0, len(allocations))
@ -7540,7 +7540,7 @@ class ServerMoveWithPortResourceRequestTest(
# unexpected. This will cause
# update_pci_request_spec_with_allocated_interface_name() to raise
# when the instance is unshelved to the host2.
rsp = self.placement_api.put(
rsp = self.placement.put(
'/resource_providers/%s'
% self.sriov_dev_rp_per_host[self.compute2_rp_uuid][self.PF2],
{"name": "invalid-device-rp-name"})
@ -7560,7 +7560,7 @@ class ServerMoveWithPortResourceRequestTest(
self.api.post_server_action(server['id'], req)
self._wait_for_server_parameter(
server, {'status': 'SHELVED_OFFLOADED'})
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
self.assertEqual(0, len(allocations))
@ -7585,7 +7585,7 @@ class ServerMoveWithPortResourceRequestTest(
{'OS-EXT-STS:task_state': None,
'status': 'SHELVED_OFFLOADED'})
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
self.assertEqual(0, len(allocations))
@ -7608,7 +7608,7 @@ class ServerMoveWithPortResourceRequestTest(
self.api.post_server_action(server['id'], req)
self._wait_for_server_parameter(
server, {'status': 'SHELVED_OFFLOADED'})
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
self.assertEqual(0, len(allocations))
@ -7629,7 +7629,7 @@ class ServerMoveWithPortResourceRequestTest(
'OS-EXT-STS:task_state': None})
# As the instance went back to offloaded state we expect no allocation
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
self.assertEqual(0, len(allocations))
@ -7733,7 +7733,7 @@ class PortResourceRequestReSchedulingTest(
if dest_compute_rp_uuid == self.compute2_rp_uuid
else self.compute2_rp_uuid)
allocations = self.placement_api.get(
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
# We expect one set of allocations for the compute resources on the
@ -7870,7 +7870,7 @@ class AcceleratorServerBase(integrated_helpers.ProviderUsageBaseTestCase):
def _post_nested_resource_provider(self, rp_name, parent_rp_uuid):
body = {'name': rp_name, 'parent_provider_uuid': parent_rp_uuid}
return self.placement_api.post(
return self.placement.post(
url='/resource_providers', version='1.20', body=body).body
def _create_acc_flavor(self):


@ -56,7 +56,7 @@ class TestServicesAPI(integrated_helpers.ProviderUsageBaseTestCase):
# Make sure there is a resource provider for that compute node based
# on the uuid.
resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
resp = self.placement.get('/resource_providers/%s' % rp_uuid)
self.assertEqual(200, resp.status)
# Make sure the resource provider has inventory.
@ -114,7 +114,7 @@ class TestServicesAPI(integrated_helpers.ProviderUsageBaseTestCase):
# And finally, the resource provider should also be gone. The API
# will perform a cascading delete of the resource provider inventory
# and allocation information.
resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
resp = self.placement.get('/resource_providers/%s' % rp_uuid)
self.assertEqual(404, resp.status)
def test_evacuate_then_delete_compute_service(self):
@ -136,7 +136,7 @@ class TestServicesAPI(integrated_helpers.ProviderUsageBaseTestCase):
rp_uuid = self._get_provider_uuid_by_host(service['host'])
# Make sure there is a resource provider for that compute node based
# on the uuid.
resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
resp = self.placement.get('/resource_providers/%s' % rp_uuid)
self.assertEqual(200, resp.status)
# Down the compute service for host1 so we can evacuate from it.
self.admin_api.put_service(service['id'], {'forced_down': True})
@ -159,7 +159,7 @@ class TestServicesAPI(integrated_helpers.ProviderUsageBaseTestCase):
# FIXME(mriedem): This is bug 1829479 where the compute service is
# deleted but the resource provider is not because there are still
# allocations against the provider from the evacuated server.
resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
resp = self.placement.get('/resource_providers/%s' % rp_uuid)
self.assertEqual(200, resp.status)
self.assertFlavorMatchesUsage(rp_uuid, flavor)
# Try to restart the host1 compute service to create a new resource
@ -205,7 +205,7 @@ class TestServicesAPI(integrated_helpers.ProviderUsageBaseTestCase):
self.assertIn('There are 1 in-progress migrations involving the host',
self.stdlog.logger.output)
# The provider is still around because we did not delete the service.
resp = self.placement_api.get('/resource_providers/%s' % host1_rp_uuid)
resp = self.placement.get('/resource_providers/%s' % host1_rp_uuid)
self.assertEqual(200, resp.status)
self.assertFlavorMatchesUsage(host1_rp_uuid, flavor)
# Now try to confirm the migration.
@ -214,7 +214,7 @@ class TestServicesAPI(integrated_helpers.ProviderUsageBaseTestCase):
# server is on host2.
self.admin_api.api_delete('/os-services/%s' % service['id'])
# The host1 resource provider should be gone.
resp = self.placement_api.get('/resource_providers/%s' % host1_rp_uuid)
resp = self.placement.get('/resource_providers/%s' % host1_rp_uuid)
self.assertEqual(404, resp.status)
def test_resize_revert_after_deleted_source_compute(self):
@ -250,7 +250,7 @@ class TestServicesAPI(integrated_helpers.ProviderUsageBaseTestCase):
self.assertIn('There are 1 in-progress migrations involving the host',
self.stdlog.logger.output)
# The provider is still around because we did not delete the service.
resp = self.placement_api.get('/resource_providers/%s' % host1_rp_uuid)
resp = self.placement.get('/resource_providers/%s' % host1_rp_uuid)
self.assertEqual(200, resp.status)
self.assertFlavorMatchesUsage(host1_rp_uuid, flavor1)
# Now revert the resize.
@ -264,7 +264,7 @@ class TestServicesAPI(integrated_helpers.ProviderUsageBaseTestCase):
binary='nova-compute', host='host2')[0]
self.admin_api.api_delete('/os-services/%s' % service2['id'])
# The host2 resource provider should be gone.
resp = self.placement_api.get('/resource_providers/%s' % host2_rp_uuid)
resp = self.placement.get('/resource_providers/%s' % host2_rp_uuid)
self.assertEqual(404, resp.status)