Remove duplicate cleanup in functional tests

It is not necessary to call the following statement multiple times in
some functional tests:

    self.addCleanup(fake.restore_nodes)

unittest runs every cleanup registered with addCleanup once at teardown,
so registering the same cleanup repeatedly only makes restore_nodes run
again for no benefit. Remove the duplicate calls from the tests.
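As a minimal, self-contained sketch of those semantics (plain unittest
only, no Nova fixtures assumed; doCleanups() is called directly here
just to make the effect observable inside the test itself):

    import unittest


    class CleanupOnceExample(unittest.TestCase):
        def test_duplicate_registration_is_redundant(self):
            calls = []
            # Each registration is invoked once at teardown, so a
            # duplicate registration just runs the same cleanup twice.
            self.addCleanup(calls.append, 'restore_nodes')
            self.addCleanup(calls.append, 'restore_nodes')  # duplicate
            self.doCleanups()  # run pending cleanups now, for the demo
            self.assertEqual(['restore_nodes', 'restore_nodes'], calls)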

TrivialFix
Change-Id: Iaae6fc4a66145576f4a4fc1cea452ef6acbadb15
Takashi NATSUME 2019-02-15 00:27:42 +09:00
parent 221026eafe
commit bda4ae3884
14 changed files with 8 additions and 20 deletions


@@ -77,7 +77,6 @@ class TestRetryBetweenComputeNodeBuilds(test.TestCase):
         self.addCleanup(fake.restore_nodes)
         self.start_service('compute', host='host1')
         fake.set_nodes(['host2'])
-        self.addCleanup(fake.restore_nodes)
         self.start_service('compute', host='host2')

         self.scheduler_service = self.start_service('scheduler')


@@ -110,10 +110,8 @@ class SchedulerOnlyChecksTargetTest(test.TestCase,
         self.addCleanup(fake.restore_nodes)
         self.start_service('compute', host='host1')
         fake.set_nodes(['host2'])
-        self.addCleanup(fake.restore_nodes)
         self.start_service('compute', host='host2')
         fake.set_nodes(['host3'])
-        self.addCleanup(fake.restore_nodes)
         self.start_service('compute', host='host3')

         self.useFixture(cast_as_call.CastAsCall(self))


@@ -71,7 +71,6 @@ class TestLiveMigrateOneOfConcurrentlyCreatedInstances(
         self.addCleanup(fake.restore_nodes)
         self.start_service('compute', host='host1')
         fake.set_nodes(['host2'])
-        self.addCleanup(fake.restore_nodes)
         self.start_service('compute', host='host2')

         fake_network.set_stub_network_methods(self)


@@ -86,9 +86,9 @@ class TestRequestSpecRetryReschedule(test.TestCase,
         self.start_service('scheduler')

         # Let's now start three compute nodes as we said above.
+        self.addCleanup(fake.restore_nodes)
         for host in ['host1', 'host2', 'host3']:
             fake.set_nodes([host])
-            self.addCleanup(fake.restore_nodes)
             self.start_service('compute', host=host)

     def _stub_resize_failure(self, failed_host):


@@ -69,7 +69,6 @@ class TestRescheduleWithServerGroup(test.TestCase,
         self.addCleanup(fake.restore_nodes)
         self.start_service('compute', host='host1')
         fake.set_nodes(['host2'])
-        self.addCleanup(fake.restore_nodes)
         self.start_service('compute', host='host2')

         self.image_id = self.api.get_images()[0]['id']


@@ -65,7 +65,6 @@ class TestParallelEvacuationWithServerGroup(
         self.addCleanup(fake.restore_nodes)
         self.compute1 = self.start_service('compute', host='host1')
         fake.set_nodes(['host2'])
-        self.addCleanup(fake.restore_nodes)
         self.compute2 = self.start_service('compute', host='host2')

         self.image_id = self.api.get_images()[0]['id']
@@ -126,7 +125,6 @@ class TestParallelEvacuationWithServerGroup(
         # start a third compute to have place for one of the instances
         fake.set_nodes(['host3'])
-        self.addCleanup(fake.restore_nodes)
         self.compute3 = self.start_service('compute', host='host3')

         # evacuate both instances


@@ -63,9 +63,9 @@ class TestResizeWithNoAllocationScheduler(
         self.start_service('conductor')

         # Create two compute nodes/services.
+        self.addCleanup(fake.restore_nodes)
         for host in ('host1', 'host2'):
             fake.set_nodes([host])
-            self.addCleanup(fake.restore_nodes)
             self.start_service('compute', host=host)

         scheduler_service = self.start_service('scheduler')


@@ -67,9 +67,9 @@ class TestBootFromVolumeIsolatedHostsFilter(
         # Create two compute nodes/services so we can restrict the image
         # we'll use to one of the hosts.
+        self.addCleanup(fake.restore_nodes)
         for host in ('host1', 'host2'):
             fake.set_nodes([host])
-            self.addCleanup(fake.restore_nodes)
             self.start_service('compute', host=host)

     def test_boot_from_volume_with_isolated_image(self):


@@ -68,7 +68,6 @@ class TestEvacuationWithSourceReturningDuringRebuild(
         self.computes['host1'] = self.start_service('compute', host='host1')
         fake.set_nodes(['host2'])
-        self.addCleanup(fake.restore_nodes)
         self.computes['host2'] = self.start_service('compute', host='host2')

         self.image_id = self.api.get_images()[0]['id']


@@ -81,7 +81,6 @@ class AntiAffinityMultiCreateRequest(test.TestCase,
         self.addCleanup(fake.restore_nodes)
         self.start_service('compute', host='host1')
         fake.set_nodes(['host2'])
-        self.addCleanup(fake.restore_nodes)
         self.start_service('compute', host='host2')

     def test_anti_affinity_multi_create(self):


@@ -60,7 +60,6 @@ class TestRescheduleWithVolumesAttached(
         self.host1 = self.start_service('compute', host='host1')
         fake.set_nodes(['host2'])
-        self.addCleanup(fake.restore_nodes)
         self.host2 = self.start_service('compute', host='host2')

         self.image_id = self.api.get_images()[0]['id']


@@ -60,9 +60,9 @@ class ColdMigrateTargetHostThenLiveMigrateTest(
         self.start_service('conductor')
         self.start_service('scheduler')

+        self.addCleanup(fake.restore_nodes)
         for host in ('host1', 'host2'):
             fake.set_nodes([host])
-            self.addCleanup(fake.restore_nodes)
             self.start_service('compute', host=host)

     def test_cold_migrate_target_host_then_live_migrate(self):


@@ -145,13 +145,13 @@ class ServerGroupTestV21(ServerGroupTestBase):
         self.stub_out('nova.virt.driver.load_compute_driver',
                       _fake_load_compute_driver)
         fake.set_nodes(['compute'])
         self.addCleanup(fake.restore_nodes)
         self.compute = self.start_service('compute', host='compute')

         # NOTE(gibi): start a second compute host to be able to test affinity
         # NOTE(sbauza): Make sure the FakeDriver returns a different nodename
         # for the second compute node.
         fake.set_nodes(['host2'])
-        self.addCleanup(fake.restore_nodes)
         self.compute2 = self.start_service('compute', host='host2')

     def test_get_no_groups(self):
@@ -911,7 +911,6 @@ class ServerGroupTestMultiCell(ServerGroupTestBase):
         self.compute1 = self.start_service('compute', host='host1',
                                            cell='cell1')
         fake.set_nodes(['host2'])
-        self.addCleanup(fake.restore_nodes)
         self.compute2 = self.start_service('compute', host='host2',
                                            cell='cell2')
         # This is needed to find a server that is still booting with multiple
@@ -995,9 +994,9 @@ class TestAntiAffinityLiveMigration(test.TestCase,
         # Start conductor, scheduler and two computes.
         self.start_service('conductor')
         self.start_service('scheduler')
+        self.addCleanup(fake.restore_nodes)
         for host in ('host1', 'host2'):
             fake.set_nodes([host])
-            self.addCleanup(fake.restore_nodes)
             self.start_service('compute', host=host)

     def test_serial_no_valid_host_then_pass_with_third_host(self):
@@ -1060,7 +1059,6 @@ class TestAntiAffinityLiveMigration(test.TestCase,
         # Now start up a 3rd compute service and retry the live migration which
         # should work this time.
         fake.set_nodes(['host3'])
-        self.addCleanup(fake.restore_nodes)
         self.start_service('compute', host='host3')
         self.admin_api.post_server_action(server['id'], body)
         server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')


@@ -4854,9 +4854,9 @@ class ServerTestV256Common(ServersTestBase):
     def _setup_compute_service(self):
         # Set up 3 compute services in the same cell
+        self.addCleanup(fake.restore_nodes)
         for host in ('host1', 'host2', 'host3'):
             fake.set_nodes([host])
-            self.addCleanup(fake.restore_nodes)
             self.start_service('compute', host=host)

     def _create_server(self, target_host=None):
def _create_server(self, target_host=None):
@@ -4888,9 +4888,9 @@ class ServerTestV256MultiCellTestCase(ServerTestV256Common):
         host_to_cell_mappings = {
             'host1': 'cell1',
             'host2': 'cell2'}
+        self.addCleanup(fake.restore_nodes)
         for host in sorted(host_to_cell_mappings):
             fake.set_nodes([host])
-            self.addCleanup(fake.restore_nodes)
             self.start_service('compute', host=host,
                                cell=host_to_cell_mappings[host])