Remove global state from the FakeDriver

The virt driver FakeDriver, used in both the functional and the unit
tests, relied on global state to configure the host and node names the
driver reports. This was hard to use when more than one compute service
is started. Moreover, global state is dangerous.

It turned out that only a handful of unit tests use multiple nodes per
compute; the rest of the tests can simply use a host=<hostname>,
nodes=[<hostname>] setup.

So this removes the global state.
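
For illustration, a minimal before/after sketch of a test's setUp()
(fragment only; host and node names are hypothetical and not taken from
the diff below):

    # Before: module-level state in nova.virt.fake had to be set and
    # restored around every additional compute service.
    from nova.virt import fake
    fake.set_nodes(['host2'])
    self.addCleanup(fake.restore_nodes)
    self.compute2 = self.start_service('compute', host='host2')

    # After: the fake driver derives its single nodename from the host
    # passed to start_service(), so no global setup or cleanup is needed.
    self.compute2 = self.start_service('compute', host='host2')

    # Tests that genuinely need multiple nodes per compute can configure
    # the driver instance directly via the new test-only helper:
    self.compute2.manager.driver._set_nodes(['host2', 'host2-subnode'])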

Conflicts:
    nova/tests/functional/compute/test_live_migration.py
    nova/tests/functional/integrated_helpers.py
    nova/tests/functional/regressions/test_bug_1671648.py
    nova/tests/functional/regressions/test_bug_1702454.py
    nova/tests/functional/regressions/test_bug_1718455.py
    nova/tests/functional/regressions/test_bug_1718512.py
    nova/tests/functional/regressions/test_bug_1719730.py
    nova/tests/functional/regressions/test_bug_1735407.py
    nova/tests/functional/regressions/test_bug_1741307.py
    nova/tests/functional/regressions/test_bug_1746483.py
    nova/tests/functional/regressions/test_bug_1764883.py
    nova/tests/functional/regressions/test_bug_1781710.py
    nova/tests/functional/regressions/test_bug_1784353.py
    nova/tests/functional/regressions/test_bug_1797580.py
    nova/tests/functional/test_aggregates.py
    nova/tests/functional/test_server_group.py
    nova/tests/functional/test_servers.py
    nova/tests/unit/compute/test_compute_mgr.py

Note(elod.illes): causes of the conflicts in the files:
* test_live_migration.py:
    Patch Icb0bdaf454935b3713c35339394d260b33520de5 had to be
    refactored during backport to Rocky as uuidsentinel moved to oslo
    in Stein.
* integrated_helpers.py:
    Patches I4c12502c86c7ac27369d119e0f97768cf41695b5 and
    I7f5f08691ca3f73073c66c29dddb996fb2c2b266 were only added in Stein.
* test_bug_1671648.py, test_bug_1702454.py, test_bug_1718455.py,
  test_bug_1718512.py, test_bug_1719730.py, test_bug_1735407.py,
  test_bug_1741307.py, test_bug_1746483.py, test_bug_1764883.py,
  test_bug_1781710.py, test_bug_1784353.py, test_bug_1797580.py,
  test_servers.py:
    Duplicate cleanup removal patch
    Iaae6fc4a66145576f4a4fc1cea452ef6acbadb15 is not backported to
    Rocky.
* test_aggregates.py:
    Patches Ic55b88e7ad21ab5b7ad063eac743ff9406aae559 and
    Ic55b88e7ad21ab5b7ad063eac743ff9406aae559 were not backported to
    Rocky.
* test_server_group.py:
    Iaae6fc4a66145576f4a4fc1cea452ef6acbadb15 (Duplicate cleanup
    removal) and Ie07b419732e0832a9b9d16565f6c9d00ba85d654 (Add
    functional test for live migrate with anti-affinity group) patches
    were only added in Stein and were not backported to Rocky.
* test_compute_mgr.py:
    Patch I17543ecb572934ecc7d0bbc7a4ad2f537fa499bc (Raise
    InstanceFaultRollback for UnableToMigrateToSelf from _prep_resize)
    isn't backported to Rocky.

Related-Bug: #1859766

Change-Id: I2cf2fcbaebc706f897ce5dfbff47d32117064f9c
(cherry picked from commit b5666fb492)
(cherry picked from commit 23d3b8fbc9)
Balazs Gibizer 2019-05-01 23:06:52 +02:00 committed by Elod Illes
parent 88409b52aa
commit 930bf0ae1b
31 changed files with 34 additions and 176 deletions

View File

@ -1,7 +1,7 @@
{
"hypervisors": [
{
"hypervisor_hostname": "fake-mini",
"hypervisor_hostname": "host2",
"id": "1bb62a04-c576-402c-8147-9e89757a09e3",
"state": "up",
"status": "enabled"

View File

@ -419,6 +419,12 @@ class TestCase(testtools.TestCase):
def start_service(self, name, host=None, **kwargs):
cell = None
# if the host is None then the CONF.host remains defaulted to
# 'fake-mini' (originally done in ConfFixture)
if host is not None:
# Make sure that CONF.host is relevant to the right hostname
self.useFixture(nova_fixtures.ConfPatcher(host=host))
if name == 'compute' and self.USES_DB:
# NOTE(danms): We need to create the HostMapping first, because
# otherwise we'll fail to update the scheduler while running
@ -431,9 +437,6 @@ class TestCase(testtools.TestCase):
cell_mapping=cell)
hm.create()
self.host_mappings[hm.host] = hm
if host is not None:
# Make sure that CONF.host is relevant to the right hostname
self.useFixture(nova_fixtures.ConfPatcher(host=host))
svc = self.useFixture(
nova_fixtures.ServiceFixture(name, host, cell=cell, **kwargs))

View File

@ -1,7 +1,7 @@
{
"hypervisors": [
{
"hypervisor_hostname": "fake-mini",
"hypervisor_hostname": "host2",
"id": "%(hypervisor_id)s",
"state": "up",
"status": "enabled"

View File

@ -18,7 +18,6 @@ import mock
from nova.cells import utils as cells_utils
from nova import objects
from nova.tests.functional.api_sample_tests import api_sample_base
from nova.virt import fake
class HypervisorsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
@ -157,8 +156,6 @@ class HypervisorsSampleJson233Tests(api_sample_base.ApiSampleTestBaseV21):
# Start a new compute service to fake a record with hypervisor id=2
# for pagination test.
host = 'host1'
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host=host)
def test_hypervisors_list(self):
@ -205,8 +202,6 @@ class HypervisorsSampleJson253Tests(HypervisorsSampleJson228Tests):
def test_hypervisors_detail(self):
# Start another compute service to get a 2nd compute for paging tests.
host = 'host2'
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
service_2 = self.start_service('compute', host=host).service_ref
compute_node_2 = service_2.compute_node
marker = self.compute_node_1.uuid

View File

@ -20,7 +20,6 @@ from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_notifier
from nova.tests import uuidsentinel as uuids
from nova.virt import fake
class FakeCinderError(object):
@ -53,8 +52,6 @@ class LiveMigrationCinderFailure(integrated_helpers._IntegratedTestBase,
# Start a second compute node (the first one was started for us by
# _IntegratedTestBase. set_nodes() is needed to avoid duplicate
# nodenames. See comments in test_bug_1702454.py.
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.compute2 = self.start_service('compute', host='host2')
# To get the old Cinder flow we need to hack the service version, otherwise

View File

@ -36,7 +36,6 @@ from nova.tests.unit import fake_notifier
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.tests import uuidsentinel as uuids
from nova.virt import fake
CONF = nova.conf.CONF
@ -397,8 +396,6 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
compute service (defaults to cell1)
:return: the nova compute service object
"""
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
compute = self.start_service('compute', host=host, cell=cell_name)
self.computes[host] = compute
return compute

View File

@ -22,7 +22,6 @@ from nova.tests.functional.api import client
from nova.tests.functional.notification_sample_tests \
import notification_sample_base
from nova.tests.unit import fake_notifier
from nova.virt import fake
COMPUTE_VERSION_OLD_ATTACH_FLOW = \
compute_api.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION - 1
@ -48,8 +47,6 @@ class TestInstanceNotificationSampleWithMultipleCompute(
self._wait_for_notification('instance.create.end')
self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
# server will boot on the 'compute' host
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.compute2 = self.start_service('compute', host='host2')
actions = [

View File

@ -13,7 +13,6 @@
from nova import context
from nova import objects
from nova.tests.functional import integrated_helpers
from nova.virt import fake
class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase,
@ -47,8 +46,6 @@ class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase,
self._wait_for_state_change(self.api, server, 'ACTIVE')
# Start up another compute service so we can resize.
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
host2 = self.start_service('compute', host='host2')
# Now resize the server to move it to host2.

View File

@ -22,7 +22,6 @@ from nova.tests.unit import cast_as_call
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.virt import fake
class TestRetryBetweenComputeNodeBuilds(test.TestCase):
@ -67,17 +66,7 @@ class TestRetryBetweenComputeNodeBuilds(test.TestCase):
# We start two compute services because we're going to fake one
# of them to fail the build so we can trigger the retry code.
# set_nodes() is needed to have each compute service return a
# different nodename, so we get two hosts in the list of candidates
# for scheduling. Otherwise both hosts will have the same default
# nodename "fake-mini". The host passed to start_service controls the
# "host" attribute and set_nodes() sets the "nodename" attribute.
# We set_nodes() to make host and nodename the same for each compute.
fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host='host1')
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host='host2')
self.useFixture(cast_as_call.CastAsCall(self))

View File

@ -17,7 +17,6 @@ from nova.tests.functional import integrated_helpers
from nova.tests.unit import cast_as_call
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit import policy_fixture
from nova.virt import fake
class HostNameWeigher(weights.BaseHostWeigher):
@ -99,20 +98,8 @@ class SchedulerOnlyChecksTargetTest(test.TestCase,
self.start_service('scheduler')
# Let's now start three compute nodes as we said above.
# set_nodes() is needed to have each compute service return a
# different nodename, so we get two hosts in the list of candidates
# for scheduling. Otherwise both hosts will have the same default
# nodename "fake-mini". The host passed to start_service controls the
# "host" attribute and set_nodes() sets the "nodename" attribute.
# We set_nodes() to make host and nodename the same for each compute.
fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host='host1')
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host='host2')
fake.set_nodes(['host3'])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host='host3')
self.useFixture(cast_as_call.CastAsCall(self))

View File

@ -18,7 +18,6 @@ from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.virt import fake
class TestLiveMigrateOneOfConcurrentlyCreatedInstances(
@ -60,17 +59,7 @@ class TestLiveMigrateOneOfConcurrentlyCreatedInstances(
self.start_service('conductor')
self.start_service('scheduler')
# set_nodes() is needed to have each compute service return a
# different nodename, so we get two hosts in the list of candidates
# for scheduling. Otherwise both hosts will have the same default
# nodename "fake-mini". The host passed to start_service controls the
# "host" attribute and set_nodes() sets the "nodename" attribute.
# We set_nodes() to make host and nodename the same for each compute.
fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host='host1')
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host='host2')
fake_network.set_stub_network_methods(self)

View File

@ -19,7 +19,6 @@ from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit import policy_fixture
from nova.virt import fake
class HostNameWeigher(weights.BaseHostWeigher):
@ -86,8 +85,6 @@ class TestRequestSpecRetryReschedule(test.TestCase,
# Let's now start three compute nodes as we said above.
for host in ['host1', 'host2', 'host3']:
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host=host)
def _stub_resize_failure(self, failed_host):

View File

@ -17,7 +17,6 @@ from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.virt import fake
class TestRescheduleWithServerGroup(test.TestCase,
@ -64,11 +63,7 @@ class TestRescheduleWithServerGroup(test.TestCase,
# We start two compute services because we're going to fake one raising
# RescheduledException to trigger a retry to the other compute host.
fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host='host1')
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host='host2')
self.image_id = self.api.get_images()[0]['id']

View File

@ -17,7 +17,6 @@ from nova.tests.unit import fake_network
from nova.tests.unit import fake_notifier
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.virt import fake
class TestParallelEvacuationWithServerGroup(
@ -57,11 +56,7 @@ class TestParallelEvacuationWithServerGroup(
# We start two compute services because we need two instances with
# anti-affinity server group policy to be booted
fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.compute1 = self.start_service('compute', host='host1')
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.compute2 = self.start_service('compute', host='host2')
self.image_id = self.api.get_images()[0]['id']
@ -123,8 +118,6 @@ class TestParallelEvacuationWithServerGroup(
self.api.force_down_service('host2', 'nova-compute', True)
# start a third compute to have place for one of the instances
fake.set_nodes(['host3'])
self.addCleanup(fake.restore_nodes)
self.compute3 = self.start_service('compute', host='host3')
# evacuate both instances

View File

@ -15,7 +15,6 @@ from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.virt import fake
class TestResizeWithCachingScheduler(test.TestCase,
@ -65,8 +64,6 @@ class TestResizeWithCachingScheduler(test.TestCase,
# Create two compute nodes/services.
for host in ('host1', 'host2'):
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host=host)
flavors = self.api.get_flavors()

View File

@ -17,7 +17,6 @@ from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as image_fakes
from nova.tests.unit import policy_fixture
from nova import utils
from nova.virt import fake
CONF = config.CONF
@ -67,8 +66,6 @@ class TestBootFromVolumeIsolatedHostsFilter(
# Create two compute nodes/services so we can restrict the image
# we'll use to one of the hosts.
for host in ('host1', 'host2'):
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host=host)
def test_boot_from_volume_with_isolated_image(self):

View File

@ -18,7 +18,6 @@ from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as fake_image
from nova.tests.unit import policy_fixture
from nova import utils
from nova.virt import fake as fake_virt
class InstanceListWithDeletedServicesTestCase(
@ -83,8 +82,6 @@ class InstanceListWithDeletedServicesTestCase(
5. migrate the instance back to the host1 service
6. list instances which will try to online migrate the old service uuid
"""
fake_virt.set_nodes(['host1'])
self.addCleanup(fake_virt.restore_nodes)
host1 = self.start_service('compute', host='host1')
# Create an instance which will be on host1 since it's the only host.
@ -96,8 +93,6 @@ class InstanceListWithDeletedServicesTestCase(
# Now we start a 2nd compute which is "upgraded" (has a uuid) and
# we'll migrate the instance to that host.
fake_virt.set_nodes(['host2'])
self.addCleanup(fake_virt.restore_nodes)
host2 = self.start_service('compute', host='host2')
self.assertIsNotNone(host2.service_ref.uuid)

View File

@ -18,7 +18,6 @@ from nova.tests.unit import fake_network
from nova.tests.unit import fake_notifier
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.virt import fake
class TestEvacuationWithSourceReturningDuringRebuild(
@ -62,12 +61,8 @@ class TestEvacuationWithSourceReturningDuringRebuild(
# Start two computes
self.computes = {}
fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.computes['host1'] = self.start_service('compute', host='host1')
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.computes['host2'] = self.start_service('compute', host='host2')
self.image_id = self.api.get_images()[0]['id']

View File

@ -17,7 +17,6 @@ from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit import policy_fixture
from nova.virt import fake
class HostNameWeigher(weights.BaseHostWeigher):
@ -76,11 +75,7 @@ class AntiAffinityMultiCreateRequest(test.TestCase,
group='workarounds')
self.start_service('scheduler')
fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host='host1')
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host='host2')
def test_anti_affinity_multi_create(self):

View File

@ -16,7 +16,6 @@ from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.virt import fake
class TestRescheduleWithVolumesAttached(
@ -54,12 +53,8 @@ class TestRescheduleWithVolumesAttached(
self.start_service('scheduler')
# Start two computes to allow the instance to be rescheduled
fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.host1 = self.start_service('compute', host='host1')
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.host2 = self.start_service('compute', host='host2')
self.image_id = self.api.get_images()[0]['id']

View File

@ -15,7 +15,6 @@ from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit import policy_fixture
from nova.virt import fake
class ColdMigrateTargetHostThenLiveMigrateTest(
@ -60,8 +59,6 @@ class ColdMigrateTargetHostThenLiveMigrateTest(
self.start_service('scheduler')
for host in ('host1', 'host2'):
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host=host)
def test_cold_migrate_target_host_then_live_migrate(self):

View File

@ -22,7 +22,6 @@ from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit import policy_fixture
from nova.virt import fake
class NonPersistentFieldNotResetTest(
@ -68,9 +67,7 @@ class NonPersistentFieldNotResetTest(
self.compute = {}
self.addCleanup(fake.restore_nodes)
for host in ('host1', 'host2', 'host3'):
fake.set_nodes([host])
compute_service = self.start_service('compute', host=host)
self.compute.update({host: compute_service})

View File

@ -19,7 +19,6 @@ from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as fake_image
from nova.virt import fake as fake_virt
class HostNameWeigher(weights.BaseHostWeigher):
@ -77,10 +76,8 @@ class MissingReqSpecInstanceGroupUUIDTestCase(
self.start_service('scheduler')
# Start two computes, one where the server will be created and another
# where we'll cold migrate it.
self.addCleanup(fake_virt.restore_nodes)
self.computes = {} # keep track of the compute services per host name
for host in ('host1', 'host2'):
fake_virt.set_nodes([host])
compute_service = self.start_service('compute', host=host)
self.computes[host] = compute_service

View File

@ -19,7 +19,6 @@ from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
from nova import utils
from nova.virt import fake as fake_virt
LOG = logging.getLogger(__name__)
@ -57,11 +56,15 @@ class PeriodicNodeRecreateTestCase(test.TestCase,
def test_update_available_resource_node_recreate(self):
# First we create a compute service to manage a couple of fake nodes.
# When start_service runs, it will create the node1 and node2
# ComputeNodes.
fake_virt.set_nodes(['node1', 'node2'])
self.addCleanup(fake_virt.restore_nodes)
compute = self.start_service('compute', 'node1')
# When start_service runs, it will create the node1 ComputeNode.
compute.manager.driver._set_nodes(['node1', 'node2'])
# Run the update_available_resource periodic to register node2.
ctxt = context.get_admin_context()
compute.manager.update_available_resource(ctxt)
# Make sure no compute nodes were orphaned or deleted.
self.assertNotIn('Deleting orphan compute node',
self.stdlog.logger.output)
# Now we should have two compute nodes, make sure the hypervisors API
# shows them.
hypervisors = self.api.api_get('/os-hypervisors').body['hypervisors']

View File

@ -23,7 +23,6 @@ from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.virt import fake
CONF = nova.conf.CONF
@ -113,8 +112,6 @@ class AggregateRequestFiltersTest(test.TestCase,
compute service.
:return: the nova compute service object
"""
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
compute = self.start_service('compute', host=host)
self.computes[host] = compute
return compute

View File

@ -17,7 +17,6 @@ from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as fake_image
from nova.tests.unit import policy_fixture
from nova.virt import fake
class TestAvailabilityZoneScheduling(
@ -52,8 +51,6 @@ class TestAvailabilityZoneScheduling(
def _start_host_in_zone(self, host, zone):
# Start the nova-compute service.
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host=host)
# Create a host aggregate with a zone in which to put this host.
aggregate_body = {

View File

@ -134,14 +134,9 @@ class ServerGroupTestV21(ServerGroupTestBase):
# tree.
self.stub_out('nova.virt.driver.load_compute_driver',
_fake_load_compute_driver)
fake.set_nodes(['compute'])
self.compute = self.start_service('compute', host='compute')
# NOTE(gibi): start a second compute host to be able to test affinity
# NOTE(sbauza): Make sure the FakeDriver returns a different nodename
# for the second compute node.
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.compute2 = self.start_service('compute', host='host2')
def test_get_no_groups(self):
@ -361,7 +356,6 @@ class ServerGroupTestV21(ServerGroupTestBase):
def test_migrate_with_anti_affinity(self):
# Start additional host to test migration with anti-affinity
fake.set_nodes(['host3'])
self.start_service('compute', host='host3')
created_group = self.api.post_server_groups(self.anti_affinity)
@ -420,7 +414,6 @@ class ServerGroupTestV21(ServerGroupTestBase):
self._set_forced_down(host, True)
# Start additional host to test evacuation
fake.set_nodes(['host3'])
self.start_service('compute', host='host3')
post = {'evacuate': {'onSharedStorage': False}}
@ -605,7 +598,6 @@ class ServerGroupTestV215(ServerGroupTestV21):
self._set_forced_down(host, True)
# Start additional host to test evacuation
fake.set_nodes(['host3'])
compute3 = self.start_service('compute', host='host3')
post = {'evacuate': {}}
@ -875,12 +867,8 @@ class ServerGroupTestMultiCell(ServerGroupTestBase):
def setUp(self):
super(ServerGroupTestMultiCell, self).setUp()
# Start two compute services, one per cell
fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.compute1 = self.start_service('compute', host='host1',
cell='cell1')
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.compute2 = self.start_service('compute', host='host2',
cell='cell2')
# This is needed to find a server that is still booting with multiple

View File

@ -183,9 +183,6 @@ class ServersTest(ServersTestBase):
def _test_create_server_with_error_with_retries(self):
# Create a server which will enter error state.
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.flags(host='host2')
self.compute2 = self.start_service('compute', host='host2')
self.computes['compute2'] = self.compute2
@ -1229,9 +1226,6 @@ class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase,
default so that should filter out the host based on the image meta.
"""
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.flags(host='host2')
self.compute2 = self.start_service('compute', host='host2')
# We hard-code from a fake image since we can't get images
@ -4271,8 +4265,6 @@ class ServerTestV256Common(ServersTestBase):
def _setup_compute_service(self):
# Set up 3 compute services in the same cell
for host in ('host1', 'host2', 'host3'):
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host=host)
def _create_server(self, target_host=None):
@ -4305,8 +4297,6 @@ class ServerTestV256MultiCellTestCase(ServerTestV256Common):
'host1': 'cell1',
'host2': 'cell2'}
for host in sorted(host_to_cell_mappings):
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host=host,
cell=host_to_cell_mappings[host])

View File

@ -154,12 +154,15 @@ class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(network_manager='nova.network.manager.FlatManager')
fake.set_nodes([NODENAME, NODENAME2])
fake_notifier.stub_notifier(self)
self.addCleanup(fake_notifier.reset)
self.compute = compute_manager.ComputeManager()
# NOTE(gibi): this is a hack to make the fake virt driver use the nodes
# needed for these tests.
self.compute.driver._set_nodes([NODENAME, NODENAME2])
# execute power syncing synchronously for testing:
self.compute._sync_power_pool = eventlet_utils.SyncPool()
@ -276,7 +279,6 @@ class BaseTestCase(test.TestCase):
instances = db.instance_get_all(ctxt)
for instance in instances:
db.instance_destroy(ctxt, instance['uuid'])
fake.restore_nodes()
super(BaseTestCase, self).tearDown()
def _fake_instance(self, updates):

View File

@ -823,8 +823,8 @@ class AbstractDriverTestCase(_VirtDriverTestCase, test.TestCase):
class FakeConnectionTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
self.driver_module = 'nova.virt.fake.FakeDriver'
fake.set_nodes(['myhostname'])
super(FakeConnectionTestCase, self).setUp()
self.connection.init_host('myhostname')
def _check_available_resource_fields(self, host_status):
super(FakeConnectionTestCase, self)._check_available_resource_fields(

View File

@ -25,7 +25,6 @@ semantics of real hypervisor connections.
import collections
import contextlib
import copy
import time
import uuid
@ -51,31 +50,6 @@ CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
_FAKE_NODES = None
def set_nodes(nodes):
"""Sets FakeDriver's node.list.
It has effect on the following methods:
get_available_nodes()
get_available_resource
To restore the change, call restore_nodes()
"""
global _FAKE_NODES
_FAKE_NODES = nodes
def restore_nodes():
"""Resets FakeDriver's node list modified by set_nodes().
Usually called from tearDown().
"""
global _FAKE_NODES
_FAKE_NODES = [CONF.host]
class FakeInstance(object):
def __init__(self, name, state, uuid):
@ -164,15 +138,21 @@ class FakeDriver(driver.ComputeDriver):
self._mounts = {}
self._interfaces = {}
self.active_migrations = {}
self._nodes = self._init_nodes()
def _init_nodes(self):
if not _FAKE_NODES:
set_nodes([CONF.host])
return copy.copy(_FAKE_NODES)
self._host = None
self._nodes = None
def init_host(self, host):
return
self._host = host
# NOTE(gibi): this is unnecessarily complex and fragile but this is
# how many current functional sample tests expect the node name.
self._nodes = (['fake-mini'] if self._host == 'compute'
else [self._host])
def _set_nodes(self, nodes):
# NOTE(gibi): this is not part of the driver interface but used
# by our tests to customize the discovered nodes by the fake
# driver.
self._nodes = nodes
def list_instances(self):
return [self.instances[uuid].name for uuid in self.instances.keys()]