Remove global state from the FakeDriver
The virt driver FakeDriver, used in both the functional and the unit tests, relied on global state to configure the host and node names the driver reports. This was hard to use when more than one compute service is started, and global state is dangerous in any case. It turned out that only a small set of unit tests use multiple nodes per compute; the rest of the tests can simply use a host=<hostname>, nodes=[<hostname>] setup. So this removes the global state.
Change-Id: I2cf2fcbaebc706f897ce5dfbff47d32117064f9c
parent fae51fcb1b
commit b5666fb492
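In practice, a functional test no longer has to mutate module-level state in nova.virt.fake before starting an extra compute service. The sketch below is illustrative only and is not part of the diff; it uses the start_service() helper and the FakeDriver behaviour changed by this commit, as shown in the hunks that follow.

# Illustrative sketch, assuming nova's TestCase.start_service() as modified
# by this commit. Old pattern: global state had to be set and restored:
#   fake.set_nodes(['host2'])
#   self.addCleanup(fake.restore_nodes)
#   self.start_service('compute', host='host2')
#
# New pattern: each compute simply gets a distinct host; start_service()
# patches CONF.host and FakeDriver.init_host() reports that host as the
# single node name, so no global state is involved.
self.start_service('compute', host='host1')
self.start_service('compute', host='host2')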
@@ -1,7 +1,7 @@
{
"hypervisors": [
{
"hypervisor_hostname": "fake-mini",
"hypervisor_hostname": "host2",
"id": "1bb62a04-c576-402c-8147-9e89757a09e3",
"state": "up",
"status": "enabled"
@@ -419,6 +419,12 @@ class TestCase(testtools.TestCase):

def start_service(self, name, host=None, **kwargs):
cell = None
# if the host is None then the CONF.host remains defaulted to
# 'fake-mini' (originally done in ConfFixture)
if host is not None:
# Make sure that CONF.host is relevant to the right hostname
self.useFixture(nova_fixtures.ConfPatcher(host=host))

if name == 'compute' and self.USES_DB:
# NOTE(danms): We need to create the HostMapping first, because
# otherwise we'll fail to update the scheduler while running
@@ -431,9 +437,6 @@ class TestCase(testtools.TestCase):
cell_mapping=cell)
hm.create()
self.host_mappings[hm.host] = hm
if host is not None:
# Make sure that CONF.host is relevant to the right hostname
self.useFixture(nova_fixtures.ConfPatcher(host=host))
svc = self.useFixture(
nova_fixtures.ServiceFixture(name, host, cell=cell, **kwargs))
@@ -1,7 +1,7 @@
{
"hypervisors": [
{
"hypervisor_hostname": "fake-mini",
"hypervisor_hostname": "host2",
"id": "%(hypervisor_id)s",
"state": "up",
"status": "enabled"
@@ -16,7 +16,6 @@
import mock

from nova.tests.functional.api_sample_tests import api_sample_base
from nova.virt import fake


class HypervisorsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
@@ -113,8 +112,6 @@ class HypervisorsSampleJson233Tests(api_sample_base.ApiSampleTestBaseV21):
# Start a new compute service to fake a record with hypervisor id=2
# for pagination test.
host = 'host1'
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host=host)

def test_hypervisors_list(self):
@@ -161,8 +158,6 @@ class HypervisorsSampleJson253Tests(HypervisorsSampleJson228Tests):
def test_hypervisors_detail(self):
# Start another compute service to get a 2nd compute for paging tests.
host = 'host2'
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
service_2 = self.start_service('compute', host=host).service_ref
compute_node_2 = service_2.compute_node
marker = self.compute_node_1.uuid
@@ -20,7 +20,6 @@ from nova import exception
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_notifier
from nova.virt import fake


class FakeCinderError(object):
@@ -54,8 +53,6 @@ class LiveMigrationCinderFailure(integrated_helpers._IntegratedTestBase,
# Start a second compte node (the first one was started for us by
# _IntegratedTestBase. set_nodes() is needed to avoid duplicate
# nodenames. See comments in test_bug_1702454.py.
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.compute2 = self.start_service('compute', host='host2')

# To get the old Cinder flow we need to hack the service version, otherwise
@@ -42,7 +42,6 @@ from nova.tests.unit import fake_notifier
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova import utils
from nova.virt import fake


CONF = nova.conf.CONF
@@ -450,8 +449,6 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
compute service (defaults to cell1)
:return: the nova compute service object
"""
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
compute = self.start_service('compute', host=host, cell=cell_name)
self.computes[host] = compute
return compute
@@ -22,7 +22,6 @@ from nova.tests.functional.api import client
from nova.tests.functional.notification_sample_tests \
import notification_sample_base
from nova.tests.unit import fake_notifier
from nova.virt import fake

COMPUTE_VERSION_OLD_ATTACH_FLOW = \
compute_api.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION - 1
@@ -51,8 +50,6 @@ class TestInstanceNotificationSampleWithMultipleCompute(
self._wait_for_notification('instance.create.end')
self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
# server will boot on the 'compute' host
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.compute2 = self.start_service('compute', host='host2')

actions = [
@@ -13,7 +13,6 @@
from nova import context
from nova import objects
from nova.tests.functional import integrated_helpers
from nova.virt import fake


class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase,
@@ -47,8 +46,6 @@ class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase,
self._wait_for_state_change(self.api, server, 'ACTIVE')

# Start up another compute service so we can resize.
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
host2 = self.start_service('compute', host='host2')

# Now resize the server to move it to host2.
@@ -23,7 +23,6 @@ from nova.tests.unit import cast_as_call
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.virt import fake


class TestRetryBetweenComputeNodeBuilds(test.TestCase):
@@ -66,16 +65,7 @@ class TestRetryBetweenComputeNodeBuilds(test.TestCase):

# We start two compute services because we're going to fake one
# of them to fail the build so we can trigger the retry code.
# set_nodes() is needed to have each compute service return a
# different nodename, so we get two hosts in the list of candidates
# for scheduling. Otherwise both hosts will have the same default
# nodename "fake-mini". The host passed to start_service controls the
# "host" attribute and set_nodes() sets the "nodename" attribute.
# We set_nodes() to make host and nodename the same for each compute.
fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host='host1')
fake.set_nodes(['host2'])
self.start_service('compute', host='host2')

self.scheduler_service = self.start_service('scheduler')
@@ -18,7 +18,6 @@ from nova.tests.functional import integrated_helpers
from nova.tests.unit import cast_as_call
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit import policy_fixture
from nova.virt import fake


class HostNameWeigher(weights.BaseHostWeigher):
@@ -96,18 +95,8 @@ class SchedulerOnlyChecksTargetTest(test.TestCase,
self.start_service('scheduler')

# Let's now start three compute nodes as we said above.
# set_nodes() is needed to have each compute service return a
# different nodename, so we get two hosts in the list of candidates
# for scheduling. Otherwise both hosts will have the same default
# nodename "fake-mini". The host passed to start_service controls the
# "host" attribute and set_nodes() sets the "nodename" attribute.
# We set_nodes() to make host and nodename the same for each compute.
fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host='host1')
fake.set_nodes(['host2'])
self.start_service('compute', host='host2')
fake.set_nodes(['host3'])
self.start_service('compute', host='host3')
self.useFixture(cast_as_call.CastAsCall(self))
@@ -19,7 +19,6 @@ from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.virt import fake


class TestLiveMigrateOneOfConcurrentlyCreatedInstances(
@@ -61,16 +60,7 @@ class TestLiveMigrateOneOfConcurrentlyCreatedInstances(
self.start_service('conductor')
self.start_service('scheduler')

# set_nodes() is needed to have each compute service return a
# different nodename, so we get two hosts in the list of candidates
# for scheduling. Otherwise both hosts will have the same default
# nodename "fake-mini". The host passed to start_service controls the
# "host" attribute and set_nodes() sets the "nodename" attribute.
# We set_nodes() to make host and nodename the same for each compute.
fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host='host1')
fake.set_nodes(['host2'])
self.start_service('compute', host='host2')

fake_network.set_stub_network_methods(self)
@@ -20,7 +20,6 @@ from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit import policy_fixture
from nova.virt import fake


class HostNameWeigher(weights.BaseHostWeigher):
@@ -82,9 +81,7 @@ class TestRequestSpecRetryReschedule(test.TestCase,
self.start_service('scheduler')

# Let's now start three compute nodes as we said above.
self.addCleanup(fake.restore_nodes)
for host in ['host1', 'host2', 'host3']:
fake.set_nodes([host])
self.start_service('compute', host=host)

def _stub_resize_failure(self, failed_host):
@@ -18,7 +18,6 @@ from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.virt import fake


class TestRescheduleWithServerGroup(test.TestCase,
@@ -65,10 +64,7 @@ class TestRescheduleWithServerGroup(test.TestCase,

# We start two compute services because we're going to fake one raising
# RescheduledException to trigger a retry to the other compute host.
fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host='host1')
fake.set_nodes(['host2'])
self.start_service('compute', host='host2')

self.image_id = self.api.get_images()[0]['id']
@@ -18,7 +18,6 @@ from nova.tests.unit import fake_network
from nova.tests.unit import fake_notifier
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.virt import fake


class TestParallelEvacuationWithServerGroup(
@@ -61,10 +60,7 @@ class TestParallelEvacuationWithServerGroup(

# We start two compute services because we need two instances with
# anti-affinity server group policy to be booted
fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.compute1 = self.start_service('compute', host='host1')
fake.set_nodes(['host2'])
self.compute2 = self.start_service('compute', host='host2')

self.image_id = self.api.get_images()[0]['id']
@@ -124,7 +120,6 @@ class TestParallelEvacuationWithServerGroup(
self.api.force_down_service('host2', 'nova-compute', True)

# start a third compute to have place for one of the instances
fake.set_nodes(['host3'])
self.compute3 = self.start_service('compute', host='host3')

# evacuate both instances
@@ -16,7 +16,6 @@ from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.virt import fake


class TestResizeWithNoAllocationScheduler(
@@ -63,9 +62,7 @@ class TestResizeWithNoAllocationScheduler(
self.start_service('conductor')

# Create two compute nodes/services.
self.addCleanup(fake.restore_nodes)
for host in ('host1', 'host2'):
fake.set_nodes([host])
self.start_service('compute', host=host)

scheduler_service = self.start_service('scheduler')
@@ -18,7 +18,6 @@ from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as image_fakes
from nova.tests.unit import policy_fixture
from nova import utils
from nova.virt import fake

CONF = config.CONF

@@ -67,9 +66,7 @@ class TestBootFromVolumeIsolatedHostsFilter(

# Create two compute nodes/services so we can restrict the image
# we'll use to one of the hosts.
self.addCleanup(fake.restore_nodes)
for host in ('host1', 'host2'):
fake.set_nodes([host])
self.start_service('compute', host=host)

def test_boot_from_volume_with_isolated_image(self):
@@ -19,7 +19,6 @@ from nova.tests.unit import fake_network
from nova.tests.unit import fake_notifier
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.virt import fake


class TestEvacuationWithSourceReturningDuringRebuild(
@@ -63,11 +62,8 @@ class TestEvacuationWithSourceReturningDuringRebuild(
# Start two computes
self.computes = {}

fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.computes['host1'] = self.start_service('compute', host='host1')

fake.set_nodes(['host2'])
self.computes['host2'] = self.start_service('compute', host='host2')

self.image_id = self.api.get_images()[0]['id']
@@ -18,7 +18,6 @@ from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit import policy_fixture
from nova.virt import fake


class HostNameWeigher(weights.BaseHostWeigher):
@@ -77,10 +76,7 @@ class AntiAffinityMultiCreateRequest(test.TestCase,
group='workarounds')
self.start_service('scheduler')

fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host='host1')
fake.set_nodes(['host2'])
self.start_service('compute', host='host2')

def test_anti_affinity_multi_create(self):
@@ -17,7 +17,6 @@ from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.virt import fake


class TestRescheduleWithVolumesAttached(
@@ -55,11 +54,8 @@ class TestRescheduleWithVolumesAttached(
self.start_service('scheduler')

# Start two computes to allow the instance to be rescheduled
fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.host1 = self.start_service('compute', host='host1')

fake.set_nodes(['host2'])
self.host2 = self.start_service('compute', host='host2')

self.image_id = self.api.get_images()[0]['id']
@@ -16,7 +16,6 @@ from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit import policy_fixture
from nova.virt import fake


class ColdMigrateTargetHostThenLiveMigrateTest(
@@ -60,9 +59,7 @@ class ColdMigrateTargetHostThenLiveMigrateTest(
self.start_service('conductor')
self.start_service('scheduler')

self.addCleanup(fake.restore_nodes)
for host in ('host1', 'host2'):
fake.set_nodes([host])
self.start_service('compute', host=host)

def test_cold_migrate_target_host_then_live_migrate(self):
@@ -23,7 +23,6 @@ from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit import policy_fixture
from nova.virt import fake


class NonPersistentFieldNotResetTest(
@@ -69,9 +68,7 @@ class NonPersistentFieldNotResetTest(

self.compute = {}

self.addCleanup(fake.restore_nodes)
for host in ('host1', 'host2', 'host3'):
fake.set_nodes([host])
compute_service = self.start_service('compute', host=host)
self.compute.update({host: compute_service})
@@ -21,7 +21,6 @@ from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as fake_image
from nova.virt import fake as fake_virt


class HostNameWeigher(weights.BaseHostWeigher):
@@ -79,10 +78,8 @@ class MissingReqSpecInstanceGroupUUIDTestCase(
self.start_service('scheduler')
# Start two computes, one where the server will be created and another
# where we'll cold migrate it.
self.addCleanup(fake_virt.restore_nodes)
self.computes = {} # keep track of the compute services per host name
for host in ('host1', 'host2'):
fake_virt.set_nodes([host])
compute_service = self.start_service('compute', host=host)
self.computes[host] = compute_service
@@ -27,7 +27,6 @@ from nova.tests.functional import integrated_helpers
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova import utils
from nova.virt import fake

CONF = nova.conf.CONF

@@ -117,8 +116,6 @@ class AggregateRequestFiltersTest(test.TestCase,
compute service.
:return: the nova compute service object
"""
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
compute = self.start_service('compute', host=host)
self.computes[host] = compute
return compute
@@ -472,8 +469,6 @@ class TestAggregateMultiTenancyIsolationFilter(
test.TestCase, integrated_helpers.InstanceHelperMixin):

def _start_compute(self, host):
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host=host)

def setUp(self):
@@ -18,7 +18,6 @@ from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit.image import fake as fake_image
from nova.tests.unit import policy_fixture
from nova.virt import fake


class TestAvailabilityZoneScheduling(
@@ -53,8 +52,6 @@ class TestAvailabilityZoneScheduling(

def _start_host_in_zone(self, host, zone):
# Start the nova-compute service.
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host=host)
# Create a host aggregate with a zone in which to put this host.
aggregate_body = {
@@ -144,14 +144,9 @@ class ServerGroupTestV21(ServerGroupTestBase):
# tree.
self.stub_out('nova.virt.driver.load_compute_driver',
_fake_load_compute_driver)
fake.set_nodes(['compute'])
self.addCleanup(fake.restore_nodes)
self.compute = self.start_service('compute', host='compute')

# NOTE(gibi): start a second compute host to be able to test affinity
# NOTE(sbauza): Make sure the FakeDriver returns a different nodename
# for the second compute node.
fake.set_nodes(['host2'])
self.compute2 = self.start_service('compute', host='host2')

def test_get_no_groups(self):
@@ -371,7 +366,6 @@ class ServerGroupTestV21(ServerGroupTestBase):

def test_migrate_with_anti_affinity(self):
# Start additional host to test migration with anti-affinity
fake.set_nodes(['host3'])
self.start_service('compute', host='host3')

created_group = self.api.post_server_groups(self.anti_affinity)
@@ -426,7 +420,6 @@ class ServerGroupTestV21(ServerGroupTestBase):
time.sleep(self._service_down_time)

# Start additional host to test evacuation
fake.set_nodes(['host3'])
self.start_service('compute', host='host3')

post = {'evacuate': {'onSharedStorage': False}}
@@ -623,7 +616,6 @@ class ServerGroupTestV215(ServerGroupTestV21):
time.sleep(self._service_down_time)

# Start additional host to test evacuation
fake.set_nodes(['host3'])
compute3 = self.start_service('compute', host='host3')

post = {'evacuate': {}}
@@ -906,11 +898,8 @@ class ServerGroupTestMultiCell(ServerGroupTestBase):
def setUp(self):
super(ServerGroupTestMultiCell, self).setUp()
# Start two compute services, one per cell
fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.compute1 = self.start_service('compute', host='host1',
cell='cell1')
fake.set_nodes(['host2'])
self.compute2 = self.start_service('compute', host='host2',
cell='cell2')
# This is needed to find a server that is still booting with multiple
@@ -994,9 +983,7 @@ class TestAntiAffinityLiveMigration(test.TestCase,
# Start conductor, scheduler and two computes.
self.start_service('conductor')
self.start_service('scheduler')
self.addCleanup(fake.restore_nodes)
for host in ('host1', 'host2'):
fake.set_nodes([host])
self.start_service('compute', host=host)

def test_serial_no_valid_host_then_pass_with_third_host(self):
@@ -1058,7 +1045,6 @@ class TestAntiAffinityLiveMigration(test.TestCase,

# Now start up a 3rd compute service and retry the live migration which
# should work this time.
fake.set_nodes(['host3'])
self.start_service('compute', host='host3')
self.admin_api.post_server_action(server['id'], body)
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
@@ -214,9 +214,6 @@ class ServersTest(ServersTestBase):
def _test_create_server_with_error_with_retries(self):
# Create a server which will enter error state.

fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.flags(host='host2')
self.compute2 = self.start_service('compute', host='host2')
self.computes['compute2'] = self.compute2

@@ -1509,9 +1506,6 @@ class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase,
default so that should filter out the host based on the image meta.
"""

fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.flags(host='host2')
self.compute2 = self.start_service('compute', host='host2')

# We hard-code from a fake image since we can't get images
@@ -4397,9 +4391,7 @@ class ServerTestV256Common(ServersTestBase):

def _setup_compute_service(self):
# Set up 3 compute services in the same cell
self.addCleanup(fake.restore_nodes)
for host in ('host1', 'host2', 'host3'):
fake.set_nodes([host])
self.start_service('compute', host=host)

def _create_server(self, target_host=None):
@@ -4431,9 +4423,7 @@ class ServerTestV256MultiCellTestCase(ServerTestV256Common):
host_to_cell_mappings = {
'host1': 'cell1',
'host2': 'cell2'}
self.addCleanup(fake.restore_nodes)
for host in sorted(host_to_cell_mappings):
fake.set_nodes([host])
self.start_service('compute', host=host,
cell=host_to_cell_mappings[host])
@@ -153,12 +153,15 @@ class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(network_manager='nova.network.manager.FlatManager')
fake.set_nodes([NODENAME, NODENAME2])

fake_notifier.stub_notifier(self)
self.addCleanup(fake_notifier.reset)

self.compute = compute_manager.ComputeManager()
# NOTE(gibi): this is a hack to make the fake virt driver use the nodes
# needed for these tests.
self.compute.driver._set_nodes([NODENAME, NODENAME2])

# execute power syncing synchronously for testing:
self.compute._sync_power_pool = eventlet_utils.SyncPool()

@@ -275,7 +278,6 @@ class BaseTestCase(test.TestCase):
instances = db.instance_get_all(ctxt)
for instance in instances:
db.instance_destroy(ctxt, instance['uuid'])
fake.restore_nodes()
super(BaseTestCase, self).tearDown()

def _fake_instance(self, updates):
@@ -8871,7 +8871,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
"""
instance = fake_instance.fake_instance_obj(
self.context, host=self.compute.host, vm_state=vm_states.STOPPED,
expected_attrs=['system_metadata', 'flavor'])
node='fake-node', expected_attrs=['system_metadata', 'flavor'])
migration = mock.MagicMock(spec='nova.objects.Migration')
request_spec = mock.MagicMock(spec='nova.objects.RequestSpec')
ex = exception.InstanceFaultRollback(
@@ -859,8 +859,8 @@ class AbstractDriverTestCase(_VirtDriverTestCase, test.TestCase):
class FakeConnectionTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
self.driver_module = 'nova.virt.fake.FakeDriver'
fake.set_nodes(['myhostname'])
super(FakeConnectionTestCase, self).setUp()
self.connection.init_host('myhostname')

def _check_available_resource_fields(self, host_status):
super(FakeConnectionTestCase, self)._check_available_resource_fields(
@@ -25,7 +25,6 @@ semantics of real hypervisor connections.

import collections
import contextlib
import copy
import time

import fixtures
@@ -52,31 +51,6 @@ CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)


_FAKE_NODES = None


def set_nodes(nodes):
"""Sets FakeDriver's node.list.

It has effect on the following methods:
get_available_nodes()
get_available_resource

To restore the change, call restore_nodes()
"""
global _FAKE_NODES
_FAKE_NODES = nodes


def restore_nodes():
"""Resets FakeDriver's node list modified by set_nodes().

Usually called from tearDown().
"""
global _FAKE_NODES
_FAKE_NODES = [CONF.host]


class FakeInstance(object):

def __init__(self, name, state, uuid):
@@ -171,15 +145,21 @@ class FakeDriver(driver.ComputeDriver):
self._mounts = {}
self._interfaces = {}
self.active_migrations = {}
self._nodes = self._init_nodes()

def _init_nodes(self):
if not _FAKE_NODES:
set_nodes([CONF.host])
return copy.copy(_FAKE_NODES)
self._host = None
self._nodes = None

def init_host(self, host):
return
self._host = host
# NOTE(gibi): this is unnecessary complex and fragile but this is
# how many current functional sample tests expect the node name.
self._nodes = (['fake-mini'] if self._host == 'compute'
else [self._host])

def _set_nodes(self, nodes):
# NOTE(gibi): this is not part of the driver interface but used
# by our tests to customize the discovered nodes by the fake
# driver.
self._nodes = nodes

def list_instances(self):
return [self.instances[uuid].name for uuid in self.instances.keys()]
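The few unit tests that still need several nodes on one compute service use the per-instance _set_nodes() hook shown in the hunk above instead of the removed module-level functions. A minimal sketch, mirroring the compute manager test change earlier in this diff (NODENAME and NODENAME2 are the constants those unit tests already define):

self.compute = compute_manager.ComputeManager()
# Tell this driver instance which nodes to expose; nothing leaks between
# tests through module state anymore.
self.compute.driver._set_nodes([NODENAME, NODENAME2])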