From 635d41c5d1ae5ab3a68aa9028ed035fa1019cb66 Mon Sep 17 00:00:00 2001 From: Nikolay Markov Date: Wed, 19 Feb 2014 18:32:03 +0400 Subject: [PATCH] Nailgun tests are now much faster Change-Id: I59f84732472e415149a9712a9a53303f0da8c4b5 --- nailgun/nailgun/task/fake.py | 94 +++++++++++++++---- nailgun/nailgun/test/base.py | 4 +- .../test/integration/test_capacity_handler.py | 2 +- .../test/integration/test_changes_model.py | 4 +- .../test/integration/test_charset_issues.py | 2 +- .../test_cluster_changes_handler.py | 3 +- .../test/integration/test_cluster_handler.py | 2 +- .../test/integration/test_horizon_url.py | 2 +- .../test/integration/test_network_models.py | 2 +- .../integration/test_reset_environment.py | 2 +- .../test/integration/test_stop_deployment.py | 11 +-- .../test/integration/test_task_managers.py | 6 +- run_tests.sh | 8 +- 13 files changed, 97 insertions(+), 45 deletions(-) diff --git a/nailgun/nailgun/task/fake.py b/nailgun/nailgun/task/fake.py index 06a9e93be3..f26fad6687 100644 --- a/nailgun/nailgun/task/fake.py +++ b/nailgun/nailgun/task/fake.py @@ -68,11 +68,24 @@ class FSMNodeFlow(Fysom): 'error' ], 'dst': 'error' - } + }, + { + 'name': 'ready', + 'src': [ + 'discover', + 'provisioning', + 'provisioned', + 'deployment', + 'ready', + 'error' + ], + 'dst': 'ready' + }, ], 'callbacks': { 'onnext': self.on_next, - 'onerror': self.on_error + 'onerror': self.on_error, + 'onready': self.on_ready } }) self.data = data @@ -82,6 +95,10 @@ class FSMNodeFlow(Fysom): else: self.next() + def on_ready(self, e): + self.data['status'] = 'ready' + self.data['progress'] = 100 + def on_error(self, e): self.data['status'] = 'error' if e.src in ['discover', 'provisioning']: @@ -111,11 +128,11 @@ class FakeThread(threading.Thread): self.data = data self.params = params self.join_to = join_to - self.tick_count = int(settings.FAKE_TASKS_TICK_COUNT) or 20 + self.tick_count = int(settings.FAKE_TASKS_TICK_COUNT) self.low_tick_count = self.tick_count - 10 if self.low_tick_count < 0: self.low_tick_count = 0 - self.tick_interval = int(settings.FAKE_TASKS_TICK_INTERVAL) or 3 + self.tick_interval = int(settings.FAKE_TASKS_TICK_INTERVAL) self.task_uuid = data['args'].get( 'task_uuid' @@ -144,9 +161,14 @@ class FakeThread(threading.Thread): super(FakeThread, self).join(timeout) def sleep(self, timeout): + if timeout == 0: + return + + step = 0.001 + map( lambda i: not self.stoprequest.isSet() and time.sleep(i), - repeat(1, timeout) + repeat(step, int(float(timeout) / step)) ) @@ -187,7 +209,8 @@ class FakeAmpqThread(FakeThread): class FakeDeploymentThread(FakeAmpqThread): def run_until_status(self, smart_nodes, status, - role=None, random_error=False): + role=None, random_error=False, + instant=False): ready = False if random_error: @@ -202,12 +225,15 @@ class FakeDeploymentThread(FakeAmpqThread): if any(continue_cases): continue - sn.update_progress( - randrange( - self.low_tick_count, - self.tick_count + if instant: + sn.ready() + else: + sn.update_progress( + randrange( + self.low_tick_count, + self.tick_count + ) ) - ) if role: test_nodes = [ @@ -237,12 +263,23 @@ class FakeDeploymentThread(FakeAmpqThread): # True or False task_ready = self.params.get("task_ready") + # instant deployment + godmode = self.params.get("godmode", False) + kwargs = { 'task_uuid': self.task_uuid, 'nodes': self.data['args']['deployment_info'], 'status': 'running' } + if godmode: + for n in kwargs["nodes"]: + n["status"] = "ready" + n["progress"] = 100 + kwargs["status"] = "ready" + yield kwargs + raise StopIteration + 
smart_nodes = [FSMNodeFlow(n) for n in kwargs['nodes']] stages_errors = { @@ -272,7 +309,9 @@ class FakeDeploymentThread(FakeAmpqThread): ) } - for nodes_status in stages_errors[error]: + mode = stages_errors[error] + + for nodes_status in mode: kwargs['nodes'] = nodes_status yield kwargs self.sleep(self.tick_interval) @@ -293,7 +332,7 @@ class FakeProvisionThread(FakeThread): super(FakeProvisionThread, self).run() receiver = NailgunReceiver - self.sleep(self.tick_interval) + self.sleep(self.tick_interval * 2) # Since we just add systems to cobbler and reboot nodes # We think this task is always successful if it is launched. @@ -320,6 +359,11 @@ class FakeDeletionThread(FakeThread): resp_method = getattr(receiver, self.respond_to) resp_method(**kwargs) + recover_nodes = self.params.get("recover_nodes", True) + + if not recover_nodes: + return + for node_data in nodes_to_restore: node = Node(**node_data) @@ -349,6 +393,10 @@ class FakeStopDeploymentThread(FakeThread): def run(self): super(FakeStopDeploymentThread, self).run() receiver = NailgunReceiver + + recover_nodes = self.params.get("recover_nodes", True) + + self.sleep(self.tick_interval) kwargs = { 'task_uuid': self.task_uuid, 'stop_task_uuid': self.data['args']['stop_task_uuid'], @@ -359,7 +407,9 @@ class FakeStopDeploymentThread(FakeThread): resp_method = getattr(receiver, self.respond_to) resp_method(**kwargs) - self.sleep(3) + if not recover_nodes: + return + nodes_db = db().query(Node).filter( Node.id.in_([ n['uid'] for n in self.data['args']['nodes'] @@ -367,7 +417,7 @@ class FakeStopDeploymentThread(FakeThread): ).all() for n in nodes_db: - self.sleep(2) + self.sleep(self.tick_interval) n.online = True n.status = "discover" db().add(n) @@ -378,6 +428,10 @@ class FakeResetEnvironmentThread(FakeThread): def run(self): super(FakeResetEnvironmentThread, self).run() receiver = NailgunReceiver + + recover_nodes = self.params.get("recover_nodes", True) + + self.sleep(self.tick_interval) kwargs = { 'task_uuid': self.task_uuid, 'nodes': self.data['args']['nodes'], @@ -387,7 +441,9 @@ class FakeResetEnvironmentThread(FakeThread): resp_method = getattr(receiver, self.respond_to) resp_method(**kwargs) - self.sleep(5) + if not recover_nodes: + return + nodes_db = db().query(Node).filter( Node.id.in_([ n['uid'] for n in self.data['args']['nodes'] @@ -395,7 +451,7 @@ class FakeResetEnvironmentThread(FakeThread): ).all() for n in nodes_db: - self.sleep(2) + self.sleep(self.tick_interval) n.online = True n.status = "discover" db().add(n) @@ -411,8 +467,8 @@ class FakeVerificationThread(FakeThread): 'progress': 0 } - tick_count = int(settings.FAKE_TASKS_TICK_COUNT) or 10 - tick_interval = int(settings.FAKE_TASKS_TICK_INTERVAL) or 3 + tick_count = int(settings.FAKE_TASKS_TICK_COUNT) + tick_interval = int(settings.FAKE_TASKS_TICK_INTERVAL) low_tick_count = tick_count - 20 if low_tick_count < 0: low_tick_count = 0 diff --git a/nailgun/nailgun/test/base.py b/nailgun/nailgun/test/base.py index 9a97ef326c..9dadd10e37 100644 --- a/nailgun/nailgun/test/base.py +++ b/nailgun/nailgun/test/base.py @@ -817,8 +817,8 @@ class BaseUnitTest(BaseTestCase): def fake_tasks(fake_rpc=True, mock_rpc=True, - tick_count=99, - tick_interval=1, + tick_count=100, + tick_interval=0, **kwargs): def wrapper(func): func = mock.patch( diff --git a/nailgun/nailgun/test/integration/test_capacity_handler.py b/nailgun/nailgun/test/integration/test_capacity_handler.py index f6ddcf7818..67b07c1605 100644 --- a/nailgun/nailgun/test/integration/test_capacity_handler.py +++ 
b/nailgun/nailgun/test/integration/test_capacity_handler.py @@ -128,7 +128,7 @@ class TestHandlers(BaseIntegrationTest): self.assertEquals(test_env['cluster'], 'test_name') self.assertEquals(test_env['nodes'], 6) - @fake_tasks() + @fake_tasks(godmode=True) def test_capacity_csv_log_with_unicode(self): self.env.create( cluster_kwargs={ diff --git a/nailgun/nailgun/test/integration/test_changes_model.py b/nailgun/nailgun/test/integration/test_changes_model.py index ba3937e313..9552c0399b 100644 --- a/nailgun/nailgun/test/integration/test_changes_model.py +++ b/nailgun/nailgun/test/integration/test_changes_model.py @@ -156,7 +156,7 @@ class TestClusterChanges(BaseIntegrationTest): ).all() self.assertEquals(len(pending_changes), 1) - @fake_tasks() + @fake_tasks(godmode=True) def test_successful_deployment_drops_all_changes(self): self.env.create( nodes_kwargs=[ @@ -191,7 +191,7 @@ class TestClusterChanges(BaseIntegrationTest): all_changes = self.db.query(ClusterChanges).all() self.assertEquals(len(all_changes), 2) - @fake_tasks() + @fake_tasks(godmode=True) def test_role_unassignment_drops_changes(self): self.env.create( nodes_kwargs=[ diff --git a/nailgun/nailgun/test/integration/test_charset_issues.py b/nailgun/nailgun/test/integration/test_charset_issues.py index c1c235d038..9ea4d2dea4 100644 --- a/nailgun/nailgun/test/integration/test_charset_issues.py +++ b/nailgun/nailgun/test/integration/test_charset_issues.py @@ -29,7 +29,7 @@ class TestCharsetIssues(BaseIntegrationTest): self._wait_for_threads() super(TestCharsetIssues, self).tearDown() - @fake_tasks() + @fake_tasks(godmode=True) def test_deployment_cyrillic_names(self): self.env.create( cluster_kwargs={"name": u"Тестовый кластер"}, diff --git a/nailgun/nailgun/test/integration/test_cluster_changes_handler.py b/nailgun/nailgun/test/integration/test_cluster_changes_handler.py index 4d587c8d91..765879db81 100644 --- a/nailgun/nailgun/test/integration/test_cluster_changes_handler.py +++ b/nailgun/nailgun/test/integration/test_cluster_changes_handler.py @@ -976,7 +976,7 @@ class TestHandlers(BaseIntegrationTest): 'Number of OSD nodes (1) cannot be less than ' 'the Ceph object replication factor (3)') - @fake_tasks() + @fake_tasks(godmode=True) def test_enough_osds_for_ceph(self): cluster = self.env.create( cluster_kwargs={ @@ -996,7 +996,6 @@ class TestHandlers(BaseIntegrationTest): task = self.env.launch_deployment() self.assertIn(task.status, ('running', 'ready')) - self.env.wait_ready(task) @fake_tasks() def test_admin_untagged_intersection(self): diff --git a/nailgun/nailgun/test/integration/test_cluster_handler.py b/nailgun/nailgun/test/integration/test_cluster_handler.py index b816f0f642..49208c2209 100644 --- a/nailgun/nailgun/test/integration/test_cluster_handler.py +++ b/nailgun/nailgun/test/integration/test_cluster_handler.py @@ -183,7 +183,7 @@ class TestHandlers(BaseIntegrationTest): self.assertEquals(node.cluster_id, None) @fake_tasks() - def test_cluster_deleteion_with_offline_nodes(self): + def test_cluster_deletion_with_offline_nodes(self): self.env.create( cluster_kwargs={}, nodes_kwargs=[ diff --git a/nailgun/nailgun/test/integration/test_horizon_url.py b/nailgun/nailgun/test/integration/test_horizon_url.py index 2e3dc4e9fc..792586dabe 100644 --- a/nailgun/nailgun/test/integration/test_horizon_url.py +++ b/nailgun/nailgun/test/integration/test_horizon_url.py @@ -27,7 +27,7 @@ class TestHorizonURL(BaseIntegrationTest): self._wait_for_threads() super(TestHorizonURL, self).tearDown() - @fake_tasks() + 
@fake_tasks(godmode=True) def test_horizon_url_ha_mode(self): self.env.create( cluster_kwargs={"mode": "ha_compact"}, diff --git a/nailgun/nailgun/test/integration/test_network_models.py b/nailgun/nailgun/test/integration/test_network_models.py index 9c72183af2..46343d1a61 100644 --- a/nailgun/nailgun/test/integration/test_network_models.py +++ b/nailgun/nailgun/test/integration/test_network_models.py @@ -51,7 +51,7 @@ class TestNetworkModels(BaseIntegrationTest): self.assertEquals(nets_db[0].name, kw['name']) self.assertEquals(nets_db[0].cidr, kw['cidr']) - @fake_tasks() + @fake_tasks(godmode=True) def test_cluster_locking_after_deployment(self): self.env.create( cluster_kwargs={ diff --git a/nailgun/nailgun/test/integration/test_reset_environment.py b/nailgun/nailgun/test/integration/test_reset_environment.py index 89ba19684c..d3d76f93ca 100644 --- a/nailgun/nailgun/test/integration/test_reset_environment.py +++ b/nailgun/nailgun/test/integration/test_reset_environment.py @@ -24,7 +24,7 @@ class TestResetEnvironment(BaseIntegrationTest): self._wait_for_threads() super(TestResetEnvironment, self).tearDown() - @fake_tasks() + @fake_tasks(godmode=True, recover_nodes=False) def test_reset_environment(self): self.env.create( cluster_kwargs={}, diff --git a/nailgun/nailgun/test/integration/test_stop_deployment.py b/nailgun/nailgun/test/integration/test_stop_deployment.py index 7067668593..cfbeb18f17 100644 --- a/nailgun/nailgun/test/integration/test_stop_deployment.py +++ b/nailgun/nailgun/test/integration/test_stop_deployment.py @@ -44,7 +44,7 @@ class TestStopDeployment(BaseIntegrationTest): self._wait_for_threads() super(TestStopDeployment, self).tearDown() - @fake_tasks() + @fake_tasks(recover_nodes=False) def test_stop_deployment(self): supertask = self.env.launch_deployment() deploy_task_uuid = supertask.uuid @@ -63,15 +63,12 @@ class TestStopDeployment(BaseIntegrationTest): self.assertEquals(n.roles, []) self.assertNotEquals(n.pending_roles, []) - @fake_tasks(tick_interval=3) + @fake_tasks(recover_nodes=False, tick_interval=1) def test_stop_provisioning(self): - provisioning_task = self.env.launch_provisioning_selected( + self.env.launch_provisioning_selected( self.node_uids ) - stop_task_resp = self.env.stop_deployment( - expect_http=400 - ) - self.db.refresh(provisioning_task) + stop_task_resp = self.env.stop_deployment(expect_http=400) self.assertEquals( stop_task_resp, u"Provisioning interruption for environment " diff --git a/nailgun/nailgun/test/integration/test_task_managers.py b/nailgun/nailgun/test/integration/test_task_managers.py index a860fd3e06..232ed75b0d 100644 --- a/nailgun/nailgun/test/integration/test_task_managers.py +++ b/nailgun/nailgun/test/integration/test_task_managers.py @@ -42,7 +42,7 @@ class TestTaskManagers(BaseIntegrationTest): self._wait_for_threads() super(TestTaskManagers, self).tearDown() - @fake_tasks() + @fake_tasks(godmode=True) def test_deployment_task_managers(self): self.env.create( cluster_kwargs={}, @@ -158,7 +158,7 @@ class TestTaskManagers(BaseIntegrationTest): # validation failed self.assertEqual(self.env.clusters[0].status, 'new') - @fake_tasks() + @fake_tasks(godmode=True) def test_redeployment_works(self): self.env.create( cluster_kwargs={"mode": "ha_compact"}, @@ -301,7 +301,7 @@ class TestTaskManagers(BaseIntegrationTest): cluster_db = self.db.query(Cluster).get(cluster_id) self.assertIsNone(cluster_db) - @fake_tasks() + @fake_tasks(godmode=True) def test_deletion_cluster_ha_3x3(self): self.env.create( cluster_kwargs={ diff --git 
a/run_tests.sh b/run_tests.sh index 0ebc8293dd..f2ca5c4f03 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -63,7 +63,7 @@ unit_tests=0 xunit=0 clean=0 ui_test_files= -default_noseopts="--with-timer" +default_noseopts="--with-timer --timer-warning=10 --timer-ok=2 --timer-top-n=10" noseargs= noseopts="$default_noseopts" @@ -216,7 +216,7 @@ function run_nailgun_tests { ./manage.py dropdb > /dev/null ./manage.py syncdb > /dev/null [ -z "$noseargs" ] && test_args=. || test_args="$noseargs" - stderr=$(nosetests $noseopts $test_args --verbosity=2 3>&1 1>&2 2>&3 | tee /dev/stderr) + stderr=$(nosetests -vv $noseopts $test_args 3>&1 1>&2 2>&3 | tee /dev/stderr) ) # TODO: uncomment after cluster deletion issue fix # if [[ "$stderr" =~ "Exception" ]]; then @@ -262,7 +262,7 @@ function run_cli_tests { echo "Test server started" clean test_args="../fuelclient/tests" - nosetests $noseopts $test_args --verbosity=2 + nosetests -vv $noseopts $test_args result=$(($result + $?)) kill $server_pid wait $server_pid 2> /dev/null @@ -300,7 +300,7 @@ function run_unit_tests { ./manage.py dropdb > /dev/null ./manage.py syncdb > /dev/null ) - nosetests $noseopts $test_args --verbosity=2 nailgun/nailgun/test/unit #shotgun + nosetests $noseopts $test_args -vv nailgun/nailgun/test/unit #shotgun } if [ $unit_tests -eq 1 ]; then
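
Taken together, the new knobs let a fake deployment finish almost immediately: "godmode" makes FakeDeploymentThread emit a single message with every node already "ready" at 100% progress, "recover_nodes" lets the stop/reset/deletion threads skip the loop that puts nodes back into "discover", and the new tick_interval default of 0 turns FakeThread.sleep() into a no-op. The snippet below is an illustrative sketch only (it is not part of this patch); it assumes the usual BaseIntegrationTest helpers used by the tests touched above (env.create, env.launch_deployment, env.wait_ready) and a hypothetical test class name.

    from nailgun.test.base import BaseIntegrationTest, fake_tasks


    class TestFastFakeDeployment(BaseIntegrationTest):

        # godmode=True: FakeDeploymentThread reports all nodes as "ready"
        # with progress=100 in one message instead of ticking per-node
        # progress updates.  recover_nodes=False (used by the stop/reset
        # tests above) would additionally skip restoring nodes to
        # "discover".  tick_interval already defaults to 0 in fake_tasks,
        # so FakeThread.sleep() returns immediately.
        @fake_tasks(godmode=True)
        def test_deployment_finishes_instantly(self):
            self.env.create(
                cluster_kwargs={},
                nodes_kwargs=[
                    {"roles": ["controller"], "pending_addition": True},
                ]
            )
            supertask = self.env.launch_deployment()
            self.env.wait_ready(supertask)
            self.assertEquals(supertask.status, "ready")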