(Re)start caching scheduler after starting computes in tests

This moves the scheduler restart logic from the heal_allocations
tests into a more generic location (alongside restart_compute_service
in nova/test.py) so that we can reuse it in other functional tests
that rely on the caching scheduler. A couple of other tests do not
need to restart the scheduler at all if we simply start the scheduler
after the computes have been started.

Change-Id: I7720fe4a3a0e537b7b356947317766597d4b47cf
Related-Bug: #1781648
Tag: 19.0.0.0rc1
Author: Matt Riedemann
Commit: 5fe80b6339
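
For illustration, the setUp ordering this change establishes looks roughly
like the sketch below. The class name and hosts are hypothetical;
start_service and flags are the same test helpers used in the diffs that
follow:

from nova import test


# Illustrative sketch only; the class name and hosts are made up.
class ExampleCachingSchedulerTest(test.TestCase):

    def setUp(self):
        super(ExampleCachingSchedulerTest, self).setUp()
        self.start_service('conductor')
        # Start the computes first so their compute node records exist in
        # the database.
        self.start_service('compute', host='host1')
        self.start_service('compute', host='host2')
        # Only then configure and start the scheduler, so the
        # CachingScheduler's initial host state cache includes those nodes.
        self.flags(driver='caching_scheduler', group='scheduler')
        self.start_service('scheduler')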

nova/test.py (+19, -0)

@@ -435,6 +435,25 @@ class TestCase(testtools.TestCase):
         compute.manager._resource_tracker = None
         compute.start()
 
+    @staticmethod
+    def restart_scheduler_service(scheduler):
+        """Restart a scheduler service in a realistic way.
+
+        Deals with resetting the host state cache in the case of using the
+        CachingScheduler driver.
+
+        :param scheduler: The nova-scheduler service to be restarted.
+        """
+        scheduler.stop()
+        if hasattr(scheduler.manager.driver, 'all_host_states'):
+            # On startup, the CachingScheduler runs a periodic task to pull
+            # the initial set of compute nodes out of the database which it
+            # then puts into a cache (hence the name of the driver). This can
+            # race with actually starting the compute services so we need to
+            # restart the scheduler to refresh the cache.
+            scheduler.manager.driver.all_host_states = None
+        scheduler.start()
+
     def assertJsonEqual(self, expected, observed, message=''):
         """Asserts that 2 complex data structures are json equivalent.
 

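As a usage sketch of the new helper (the class name is hypothetical; the
real call sites are in the regression test diffs below), a functional test
that starts its computes and keeps a handle on its scheduler service could
refresh the cache like this:

from nova.tests.functional import integrated_helpers


# Illustrative sketch only; the class name is made up. Assumes the base
# test case saved the scheduler service it started as
# self.scheduler_service, as the regression tests below do.
class ExampleResizeTest(integrated_helpers.ProviderUsageBaseTestCase):

    def setUp(self):
        super(ExampleResizeTest, self).setUp()
        self.compute1 = self._start_compute(host='host1')
        self.compute2 = self._start_compute(host='host2')
        # The CachingScheduler may have built its host state cache before
        # these computes existed, so restart the scheduler to rebuild it.
        self.restart_scheduler_service(self.scheduler_service)
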
nova/tests/functional/regressions/test_bug_1671648.py (+4, -1)

@@ -63,7 +63,6 @@ class TestRetryBetweenComputeNodeBuilds(test.TestCase):
 
         self.start_service('conductor')
         self.start_service('consoleauth')
-        self.start_service('scheduler')
 
         # We start two compute services because we're going to fake one
         # of them to fail the build so we can trigger the retry code.
@@ -80,6 +79,10 @@ class TestRetryBetweenComputeNodeBuilds(test.TestCase):
         self.addCleanup(fake.restore_nodes)
         self.start_service('compute', host='host2')
 
+        # Start the scheduler after the compute nodes are created in the DB
+        # in the case of using the CachingScheduler.
+        self.start_service('scheduler')
+
         self.useFixture(cast_as_call.CastAsCall(self))
 
         self.image_id = self.admin_api.get_images()[0]['id']

nova/tests/functional/regressions/test_bug_1741125.py (+3, -0)

@@ -36,6 +36,9 @@ class TestServerResizeReschedule(integrated_helpers.ProviderUsageBaseTestCase):
         self.compute3 = self._start_compute(host='host3')
         self.compute4 = self._start_compute(host='host4')
 
+        # Restart the scheduler to reset the host state cache.
+        self.restart_scheduler_service(self.scheduler_service)
+
         flavors = self.api.get_flavors()
         self.flavor1 = flavors[0]
         self.flavor2 = flavors[1]

nova/tests/functional/regressions/test_bug_1741307.py (+5, -3)

@@ -59,9 +59,6 @@ class TestResizeWithCachingScheduler(test.TestCase,
         self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
 
         self.start_service('conductor')
-        # Configure the CachingScheduler.
-        self.flags(driver='caching_scheduler', group='scheduler')
-        self.start_service('scheduler')
 
         # Create two compute nodes/services.
         for host in ('host1', 'host2'):
@@ -69,6 +66,11 @@ class TestResizeWithCachingScheduler(test.TestCase,
             self.addCleanup(fake.restore_nodes)
             self.start_service('compute', host=host)
 
+        # Start the scheduler after the compute nodes are created in the DB
+        # in the case of using the CachingScheduler.
+        self.flags(driver='caching_scheduler', group='scheduler')
+        self.start_service('scheduler')
+
         flavors = self.api.get_flavors()
         self.old_flavor = flavors[0]
         self.new_flavor = flavors[1]

nova/tests/functional/test_nova_manage.py (+2, -8)

@@ -387,14 +387,8 @@ class TestNovaManagePlacementHealAllocations(
         self.flavor = self.api.get_flavors()[0]
         self.output = StringIO()
         self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
-        # On startup, the CachingScheduler runs a periodic task to pull the
-        # initial set of compute nodes out of the database which it then puts
-        # into a cache (hence the name of the driver). This can race with
-        # actually starting the compute services so we need to restart the
-        # scheduler to refresh the cache.
-        self.scheduler_service.stop()
-        self.scheduler_service.manager.driver.all_host_states = None
-        self.scheduler_service.start()
+        # Restart the scheduler to reset the host state cache.
+        self.restart_scheduler_service(self.scheduler_service)
 
     def _boot_and_assert_no_allocations(self, flavor, hostname):
         """Creates a server on the given host and asserts neither have usage
