Clean up ugly stub in TestLocalDeleteAllocations

With a fix in wsgi-intercept 1.7.0 we can properly
use the PlacementFixture as a context manager to
simulate whether placement is configured for a
given operation. This allows us to remove the ugly
stub that one of the tests in here had to rely on.

While in here, the CastAsCall fixture is removed
since we shouldn't rely on that in these tests
where we're trying to simulate the user experience.

Change-Id: I2074b45126b839ea6307a8740364393e9dddd50b
Matt Riedemann 2018-04-17 11:53:59 -04:00
parent 9706d89d52
commit ee6a257f58
3 changed files with 53 additions and 77 deletions
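
For reference, the pattern the commit message relies on (a fixture used either for the whole test via useFixture(), or scoped to part of a test as a context manager) looks roughly like the sketch below. This is a minimal illustration, not nova code: FakePlacementFixture and its api attribute are made-up stand-ins for nova_fixtures.PlacementFixture.

    # Minimal sketch (not nova code): fixtures.Fixture supports both
    # useFixture() and the context-manager protocol, which is what lets a
    # test simulate placement being configured only for part of a scenario.
    import fixtures
    import testtools


    class FakePlacementFixture(fixtures.Fixture):
        """Hypothetical stand-in for nova_fixtures.PlacementFixture."""

        def _setUp(self):
            # Pretend this wires up an intercepted placement API; a plain
            # string keeps the example self-contained.
            self.api = 'fake-placement-api'
            self.addCleanup(setattr, self, 'api', None)


    class ExampleTest(testtools.TestCase):
        def test_scoped_fixture(self):
            # Active for the rest of the test, like the second test below.
            whole_test = self.useFixture(FakePlacementFixture())
            self.assertEqual('fake-placement-api', whole_test.api)

            # Active only inside the with block, like the first test below.
            with FakePlacementFixture() as placement:
                self.assertEqual('fake-placement-api', placement.api)
            self.assertIsNone(placement.api)


    if __name__ == '__main__':
        import unittest
        unittest.main()

In the diff below, test_local_delete_removes_allocations_after_compute_restart uses the with form: the compute service is started and the usage checks run inside PlacementFixture blocks, while the local delete of the server happens in between with no PlacementFixture active, which is what replaces the old delete_allocation_for_instance stub.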


@@ -169,4 +169,4 @@ warlock==1.3.0
WebOb==1.7.1
websockify==0.8.0
wrapt==1.10.11
-wsgi-intercept==1.4.1
+wsgi-intercept==1.7.0


@@ -10,11 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova.scheduler.client import report as reportclient
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
-from nova.tests.unit import cast_as_call
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
@@ -26,10 +24,6 @@ class TestLocalDeleteAllocations(test.TestCase,
self.useFixture(policy_fixture.RealPolicyFixture())
# The NeutronFixture is needed to show security groups for a server.
self.useFixture(nova_fixtures.NeutronFixture(self))
-# We need the computes reporting into placement for the filter
-# scheduler to pick a host.
-placement = self.useFixture(nova_fixtures.PlacementFixture())
-self.placement_api = placement.api
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api
@@ -44,57 +38,32 @@ class TestLocalDeleteAllocations(test.TestCase,
self.start_service('scheduler')
-self.compute = self.start_service('compute')
-self.useFixture(cast_as_call.CastAsCall(self))
self.image_id = self.api.get_images()[0]['id']
self.flavor_id = self.api.get_flavors()[0]['id']
-def _get_usages(self, rp_uuid):
+@staticmethod
+def _get_usages(placement_api, rp_uuid):
fmt = '/resource_providers/%(uuid)s/usages'
-resp = self.placement_api.get(fmt % {'uuid': rp_uuid})
+resp = placement_api.get(fmt % {'uuid': rp_uuid})
return resp.body['usages']
-# NOTE(mriedem): It would be preferable to use the PlacementFixture as
-# a context manager but that causes some issues when trying to delete the
-# server in test_local_delete_removes_allocations_after_compute_restart.
-def _stub_compute_api_to_not_configure_placement(self):
-"""Prior to the compute API deleting allocations in the "local delete"
-case, nova.conf for nova-api might not be configured for talking to
-the placement service, so we can mock that behavior by stubbing out
-the placement client in the compute API to no-op as if safe_connect
-failed and returned None to the caller.
-"""
-orig_delete_alloc = (
-reportclient.SchedulerReportClient.delete_allocation_for_instance)
-self.call_count = 0
-def fake_delete_allocation_for_instance(*args, **kwargs):
-# The first call will be from the API, so ignore that one and
-# return None like the @safe_connect decorator would if nova-api
-# wasn't configured to talk to placement.
-if self.call_count:
-orig_delete_alloc(*args, **kwargs)
-else:
-self.call_count += 1
-self.stub_out('nova.scheduler.client.report.SchedulerReportClient.'
-'delete_allocation_for_instance',
-fake_delete_allocation_for_instance)
def test_local_delete_removes_allocations_after_compute_restart(self):
"""Tests that allocations are removed after a local delete.
This tests the scenario where a server is local deleted (because the
compute host is down) and we want to make sure that its allocations
have been cleaned up once the nova-compute service restarts.
+In this scenario we conditionally use the PlacementFixture to simulate
+the case that nova-api isn't configured to talk to placement.
"""
-self._stub_compute_api_to_not_configure_placement()
# Get allocations, make sure they are 0.
-resp = self.placement_api.get('/resource_providers')
+with nova_fixtures.PlacementFixture() as placement:
+compute = self.start_service('compute')
+placement_api = placement.api
+resp = placement_api.get('/resource_providers')
rp_uuid = resp.body['resource_providers'][0]['uuid']
-usages_before = self._get_usages(rp_uuid)
+usages_before = self._get_usages(placement_api, rp_uuid)
for usage in usages_before.values():
self.assertEqual(0, usage)
@@ -105,31 +74,35 @@ class TestLocalDeleteAllocations(test.TestCase,
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
# Assert usages are non zero now.
-usages_during = self._get_usages(rp_uuid)
+usages_during = self._get_usages(placement_api, rp_uuid)
for usage in usages_during.values():
self.assertNotEqual(0, usage)
# Force-down compute to trigger local delete.
-self.compute.stop()
+compute.stop()
compute_service_id = self.admin_api.get_services(
-host=self.compute.host, binary='nova-compute')[0]['id']
-self.admin_api.put_service(compute_service_id, {'forced_down': True})
+host=compute.host, binary='nova-compute')[0]['id']
+self.admin_api.put_service(compute_service_id,
+{'forced_down': True})
# Delete the server (will be a local delete because compute is down).
self.api.delete_server(server['id'])
self._wait_until_deleted(server)
+with nova_fixtures.PlacementFixture() as placement:
+placement_api = placement.api
# Assert usages are still non-zero.
-usages_during = self._get_usages(rp_uuid)
+usages_during = self._get_usages(placement_api, rp_uuid)
for usage in usages_during.values():
self.assertNotEqual(0, usage)
-# Start the compute service again. Before it comes up, it will call the
-# update_available_resource code in the ResourceTracker which is what
-# "heals" the allocations for the deleted instance.
-self.compute.start()
+# Start the compute service again. Before it comes up, it will
+# call the update_available_resource code in the ResourceTracker
+# which is what "heals" the allocations for the deleted instance.
+compute.start()
# Get the allocations again to check against the original.
-usages_after = self._get_usages(rp_uuid)
+usages_after = self._get_usages(placement_api, rp_uuid)
# They should match.
self.assertEqual(usages_before, usages_after)
@@ -138,10 +111,12 @@ class TestLocalDeleteAllocations(test.TestCase,
"""Tests that the compute API deletes allocations when the compute
service on which the instance was running is down.
"""
+placement_api = self.useFixture(nova_fixtures.PlacementFixture()).api
+compute = self.start_service('compute')
# Get allocations, make sure they are 0.
-resp = self.placement_api.get('/resource_providers')
+resp = placement_api.get('/resource_providers')
rp_uuid = resp.body['resource_providers'][0]['uuid']
-usages_before = self._get_usages(rp_uuid)
+usages_before = self._get_usages(placement_api, rp_uuid)
for usage in usages_before.values():
self.assertEqual(0, usage)
@@ -152,21 +127,22 @@ class TestLocalDeleteAllocations(test.TestCase,
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
# Assert usages are non zero now.
-usages_during = self._get_usages(rp_uuid)
+usages_during = self._get_usages(placement_api, rp_uuid)
for usage in usages_during.values():
self.assertNotEqual(0, usage)
# Force-down compute to trigger local delete.
-self.compute.stop()
+compute.stop()
compute_service_id = self.admin_api.get_services(
-host=self.compute.host, binary='nova-compute')[0]['id']
+host=compute.host, binary='nova-compute')[0]['id']
self.admin_api.put_service(compute_service_id, {'forced_down': True})
# Delete the server (will be a local delete because compute is down).
self.api.delete_server(server['id'])
self._wait_until_deleted(server)
# Get the allocations again to make sure they were deleted.
-usages_after = self._get_usages(rp_uuid)
+usages_after = self._get_usages(placement_api, rp_uuid)
# They should match.
self.assertEqual(usages_before, usages_after)


@@ -26,4 +26,4 @@ gabbi>=1.35.0 # Apache-2.0
oslo.vmware>=2.17.0 # Apache-2.0
# placement functional tests
-wsgi-intercept>=1.4.1 # MIT License
+wsgi-intercept>=1.7.0 # MIT License