Merge "Nailgun tests are now much faster"

Jenkins 2014-02-21 16:40:16 +00:00 committed by Gerrit Code Review
commit f786786894
13 changed files with 97 additions and 45 deletions

View File

@@ -68,11 +68,24 @@ class FSMNodeFlow(Fysom):
'error'
],
'dst': 'error'
}
},
{
'name': 'ready',
'src': [
'discover',
'provisioning',
'provisioned',
'deployment',
'ready',
'error'
],
'dst': 'ready'
},
],
'callbacks': {
'onnext': self.on_next,
'onerror': self.on_error
'onerror': self.on_error,
'onready': self.on_ready
}
})
self.data = data
@@ -82,6 +95,10 @@ class FSMNodeFlow(Fysom):
else:
self.next()
def on_ready(self, e):
self.data['status'] = 'ready'
self.data['progress'] = 100
def on_error(self, e):
self.data['status'] = 'error'
if e.src in ['discover', 'provisioning']:
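A minimal standalone sketch of the new fast path (assuming only the fysom package; everything outside FSMNodeFlow here is hypothetical): the added 'ready' event lets a node jump to the final state from any stage, and the onready callback marks its data as finished, which is what on_ready does above.

from fysom import Fysom

node = {'status': 'discover', 'progress': 0}

fsm = Fysom({
    'initial': 'discover',
    'events': [
        {'name': 'ready',
         'src': ['discover', 'provisioning', 'provisioned',
                 'deployment', 'ready', 'error'],
         'dst': 'ready'},
    ],
    'callbacks': {
        # same effect as FSMNodeFlow.on_ready
        'onready': lambda e: node.update(status='ready', progress=100),
    },
})

fsm.ready()   # jump straight to 'ready' from any listed source state
assert node == {'status': 'ready', 'progress': 100}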
@@ -111,11 +128,11 @@ class FakeThread(threading.Thread):
self.data = data
self.params = params
self.join_to = join_to
self.tick_count = int(settings.FAKE_TASKS_TICK_COUNT) or 20
self.tick_count = int(settings.FAKE_TASKS_TICK_COUNT)
self.low_tick_count = self.tick_count - 10
if self.low_tick_count < 0:
self.low_tick_count = 0
self.tick_interval = int(settings.FAKE_TASKS_TICK_INTERVAL) or 3
self.tick_interval = int(settings.FAKE_TASKS_TICK_INTERVAL)
self.task_uuid = data['args'].get(
'task_uuid'
@@ -144,9 +161,14 @@ class FakeThread(threading.Thread):
super(FakeThread, self).join(timeout)
def sleep(self, timeout):
if timeout == 0:
return
step = 0.001
map(
lambda i: not self.stoprequest.isSet() and time.sleep(i),
repeat(1, timeout)
repeat(step, int(float(timeout) / step))
)
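The sleep change is the other half of the speed-up. A minimal sketch of the pattern (standalone, with a hypothetical function name; the real method lives on FakeThread and uses its own stoprequest event): a zero timeout now returns immediately, and longer waits are sliced into 1 ms steps so a stop request interrupts the wait almost at once instead of after whole-second ticks.

import time
import threading
from itertools import repeat

stoprequest = threading.Event()

def interruptible_sleep(timeout):
    if timeout == 0:              # tick_interval=0 now means no delay at all
        return
    step = 0.001                  # 1 ms slices instead of 1 s ticks
    for _ in repeat(step, int(float(timeout) / step)):
        if stoprequest.is_set():  # honour a pending stop request mid-wait
            return
        time.sleep(step)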
@@ -187,7 +209,8 @@ class FakeAmpqThread(FakeThread):
class FakeDeploymentThread(FakeAmpqThread):
def run_until_status(self, smart_nodes, status,
role=None, random_error=False):
role=None, random_error=False,
instant=False):
ready = False
if random_error:
@@ -202,12 +225,15 @@ class FakeDeploymentThread(FakeAmpqThread):
if any(continue_cases):
continue
sn.update_progress(
randrange(
self.low_tick_count,
self.tick_count
if instant:
sn.ready()
else:
sn.update_progress(
randrange(
self.low_tick_count,
self.tick_count
)
)
)
if role:
test_nodes = [
@@ -237,12 +263,23 @@ class FakeDeploymentThread(FakeAmpqThread):
# True or False
task_ready = self.params.get("task_ready")
# instant deployment
godmode = self.params.get("godmode", False)
kwargs = {
'task_uuid': self.task_uuid,
'nodes': self.data['args']['deployment_info'],
'status': 'running'
}
if godmode:
for n in kwargs["nodes"]:
n["status"] = "ready"
n["progress"] = 100
kwargs["status"] = "ready"
yield kwargs
raise StopIteration
smart_nodes = [FSMNodeFlow(n) for n in kwargs['nodes']]
stages_errors = {
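In sketch form (a hypothetical free function; the real logic sits in FakeDeploymentThread's message generator shown above), godmode collapses the whole fake deployment into a single RPC message in which every node is already finished, instead of a long stream of incremental progress updates:

def message_gen(task_uuid, nodes, godmode=False):
    kwargs = {'task_uuid': task_uuid, 'nodes': nodes, 'status': 'running'}
    if godmode:
        for n in kwargs['nodes']:
            n['status'] = 'ready'
            n['progress'] = 100
        kwargs['status'] = 'ready'
        yield kwargs   # one final message ...
        return         # ... and the generator stops (same intent as StopIteration)
    # otherwise fall through to the usual staged progress messages
    yield kwargs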
@@ -272,7 +309,9 @@ class FakeDeploymentThread(FakeAmpqThread):
)
}
for nodes_status in stages_errors[error]:
mode = stages_errors[error]
for nodes_status in mode:
kwargs['nodes'] = nodes_status
yield kwargs
self.sleep(self.tick_interval)
@@ -293,7 +332,7 @@ class FakeProvisionThread(FakeThread):
super(FakeProvisionThread, self).run()
receiver = NailgunReceiver
self.sleep(self.tick_interval)
self.sleep(self.tick_interval * 2)
# Since we just add systems to cobbler and reboot nodes
# We think this task is always successful if it is launched.
@@ -320,6 +359,11 @@ class FakeDeletionThread(FakeThread):
resp_method = getattr(receiver, self.respond_to)
resp_method(**kwargs)
recover_nodes = self.params.get("recover_nodes", True)
if not recover_nodes:
return
for node_data in nodes_to_restore:
node = Node(**node_data)
@@ -349,6 +393,10 @@ class FakeStopDeploymentThread(FakeThread):
def run(self):
super(FakeStopDeploymentThread, self).run()
receiver = NailgunReceiver
recover_nodes = self.params.get("recover_nodes", True)
self.sleep(self.tick_interval)
kwargs = {
'task_uuid': self.task_uuid,
'stop_task_uuid': self.data['args']['stop_task_uuid'],
@@ -359,7 +407,9 @@ class FakeStopDeploymentThread(FakeThread):
resp_method = getattr(receiver, self.respond_to)
resp_method(**kwargs)
self.sleep(3)
if not recover_nodes:
return
nodes_db = db().query(Node).filter(
Node.id.in_([
n['uid'] for n in self.data['args']['nodes']
@@ -367,7 +417,7 @@ class FakeStopDeploymentThread(FakeThread):
).all()
for n in nodes_db:
self.sleep(2)
self.sleep(self.tick_interval)
n.online = True
n.status = "discover"
db().add(n)
@@ -378,6 +428,10 @@ class FakeResetEnvironmentThread(FakeThread):
def run(self):
super(FakeResetEnvironmentThread, self).run()
receiver = NailgunReceiver
recover_nodes = self.params.get("recover_nodes", True)
self.sleep(self.tick_interval)
kwargs = {
'task_uuid': self.task_uuid,
'nodes': self.data['args']['nodes'],
@@ -387,7 +441,9 @@ class FakeResetEnvironmentThread(FakeThread):
resp_method = getattr(receiver, self.respond_to)
resp_method(**kwargs)
self.sleep(5)
if not recover_nodes:
return
nodes_db = db().query(Node).filter(
Node.id.in_([
n['uid'] for n in self.data['args']['nodes']
@@ -395,7 +451,7 @@ class FakeResetEnvironmentThread(FakeThread):
).all()
for n in nodes_db:
self.sleep(2)
self.sleep(self.tick_interval)
n.online = True
n.status = "discover"
db().add(n)
@@ -411,8 +467,8 @@ class FakeVerificationThread(FakeThread):
'progress': 0
}
tick_count = int(settings.FAKE_TASKS_TICK_COUNT) or 10
tick_interval = int(settings.FAKE_TASKS_TICK_INTERVAL) or 3
tick_count = int(settings.FAKE_TASKS_TICK_COUNT)
tick_interval = int(settings.FAKE_TASKS_TICK_INTERVAL)
low_tick_count = tick_count - 20
if low_tick_count < 0:
low_tick_count = 0
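The "or 20" / "or 3" / "or 10" fallbacks dropped throughout this file matter because int(value) or default treats a configured 0 as falsy, so tests could never ask for zero delay. A quick illustration with a hypothetical settings value:

FAKE_TASKS_TICK_INTERVAL = "0"             # e.g. what a fast test configures
old = int(FAKE_TASKS_TICK_INTERVAL) or 3   # -> 3, the configured 0 is discarded
new = int(FAKE_TASKS_TICK_INTERVAL)        # -> 0, honoured as "no delay"
assert (old, new) == (3, 0)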

View File

@@ -817,8 +817,8 @@ class BaseUnitTest(BaseTestCase):
def fake_tasks(fake_rpc=True,
mock_rpc=True,
tick_count=99,
tick_interval=1,
tick_count=100,
tick_interval=0,
**kwargs):
def wrapper(func):
func = mock.patch(
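With tick_interval now defaulting to 0, the fake threads no longer sleep between ticks, and the extra keyword arguments (godmode, recover_nodes) are presumably forwarded into the fake threads' params. Based on the tests below, usage looks roughly like this (a sketch; the import path and test class are assumed):

from nailgun.test.base import BaseIntegrationTest, fake_tasks   # assumed path

class ExampleTests(BaseIntegrationTest):

    @fake_tasks(godmode=True)            # fake deployment reports 'ready' in one message
    def test_successful_deployment_drops_all_changes(self):
        ...

    @fake_tasks(recover_nodes=False, tick_interval=1)   # skip node recovery, 1 s ticks
    def test_stop_provisioning(self):
        ...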

View File

@@ -128,7 +128,7 @@ class TestHandlers(BaseIntegrationTest):
self.assertEquals(test_env['cluster'], 'test_name')
self.assertEquals(test_env['nodes'], 6)
@fake_tasks()
@fake_tasks(godmode=True)
def test_capacity_csv_log_with_unicode(self):
self.env.create(
cluster_kwargs={

View File

@@ -156,7 +156,7 @@ class TestClusterChanges(BaseIntegrationTest):
).all()
self.assertEquals(len(pending_changes), 1)
@fake_tasks()
@fake_tasks(godmode=True)
def test_successful_deployment_drops_all_changes(self):
self.env.create(
nodes_kwargs=[
@@ -191,7 +191,7 @@ class TestClusterChanges(BaseIntegrationTest):
all_changes = self.db.query(ClusterChanges).all()
self.assertEquals(len(all_changes), 2)
@fake_tasks()
@fake_tasks(godmode=True)
def test_role_unassignment_drops_changes(self):
self.env.create(
nodes_kwargs=[

View File

@@ -29,7 +29,7 @@ class TestCharsetIssues(BaseIntegrationTest):
self._wait_for_threads()
super(TestCharsetIssues, self).tearDown()
@fake_tasks()
@fake_tasks(godmode=True)
def test_deployment_cyrillic_names(self):
self.env.create(
cluster_kwargs={"name": u"Тестовый кластер"},

View File

@@ -976,7 +976,7 @@ class TestHandlers(BaseIntegrationTest):
'Number of OSD nodes (1) cannot be less than '
'the Ceph object replication factor (3)')
@fake_tasks()
@fake_tasks(godmode=True)
def test_enough_osds_for_ceph(self):
cluster = self.env.create(
cluster_kwargs={
@@ -996,7 +996,6 @@ class TestHandlers(BaseIntegrationTest):
task = self.env.launch_deployment()
self.assertIn(task.status, ('running', 'ready'))
self.env.wait_ready(task)
@fake_tasks()
def test_admin_untagged_intersection(self):

View File

@@ -183,7 +183,7 @@ class TestHandlers(BaseIntegrationTest):
self.assertEquals(node.cluster_id, None)
@fake_tasks()
def test_cluster_deleteion_with_offline_nodes(self):
def test_cluster_deletion_with_offline_nodes(self):
self.env.create(
cluster_kwargs={},
nodes_kwargs=[

View File

@@ -27,7 +27,7 @@ class TestHorizonURL(BaseIntegrationTest):
self._wait_for_threads()
super(TestHorizonURL, self).tearDown()
@fake_tasks()
@fake_tasks(godmode=True)
def test_horizon_url_ha_mode(self):
self.env.create(
cluster_kwargs={"mode": "ha_compact"},

View File

@@ -51,7 +51,7 @@ class TestNetworkModels(BaseIntegrationTest):
self.assertEquals(nets_db[0].name, kw['name'])
self.assertEquals(nets_db[0].cidr, kw['cidr'])
@fake_tasks()
@fake_tasks(godmode=True)
def test_cluster_locking_after_deployment(self):
self.env.create(
cluster_kwargs={

View File

@@ -24,7 +24,7 @@ class TestResetEnvironment(BaseIntegrationTest):
self._wait_for_threads()
super(TestResetEnvironment, self).tearDown()
@fake_tasks()
@fake_tasks(godmode=True, recover_nodes=False)
def test_reset_environment(self):
self.env.create(
cluster_kwargs={},

View File

@@ -44,7 +44,7 @@ class TestStopDeployment(BaseIntegrationTest):
self._wait_for_threads()
super(TestStopDeployment, self).tearDown()
@fake_tasks()
@fake_tasks(recover_nodes=False)
def test_stop_deployment(self):
supertask = self.env.launch_deployment()
deploy_task_uuid = supertask.uuid
@@ -63,15 +63,12 @@ class TestStopDeployment(BaseIntegrationTest):
self.assertEquals(n.roles, [])
self.assertNotEquals(n.pending_roles, [])
@fake_tasks(tick_interval=3)
@fake_tasks(recover_nodes=False, tick_interval=1)
def test_stop_provisioning(self):
provisioning_task = self.env.launch_provisioning_selected(
self.env.launch_provisioning_selected(
self.node_uids
)
stop_task_resp = self.env.stop_deployment(
expect_http=400
)
self.db.refresh(provisioning_task)
stop_task_resp = self.env.stop_deployment(expect_http=400)
self.assertEquals(
stop_task_resp,
u"Provisioning interruption for environment "

View File

@@ -42,7 +42,7 @@ class TestTaskManagers(BaseIntegrationTest):
self._wait_for_threads()
super(TestTaskManagers, self).tearDown()
@fake_tasks()
@fake_tasks(godmode=True)
def test_deployment_task_managers(self):
self.env.create(
cluster_kwargs={},
@@ -158,7 +158,7 @@ class TestTaskManagers(BaseIntegrationTest):
# validation failed
self.assertEqual(self.env.clusters[0].status, 'new')
@fake_tasks()
@fake_tasks(godmode=True)
def test_redeployment_works(self):
self.env.create(
cluster_kwargs={"mode": "ha_compact"},
@@ -301,7 +301,7 @@ class TestTaskManagers(BaseIntegrationTest):
cluster_db = self.db.query(Cluster).get(cluster_id)
self.assertIsNone(cluster_db)
@fake_tasks()
@fake_tasks(godmode=True)
def test_deletion_cluster_ha_3x3(self):
self.env.create(
cluster_kwargs={

View File

@@ -63,7 +63,7 @@ unit_tests=0
xunit=0
clean=0
ui_test_files=
default_noseopts="--with-timer"
default_noseopts="--with-timer --timer-warning=10 --timer-ok=2 --timer-top-n=10"
noseargs=
noseopts="$default_noseopts"
@@ -216,7 +216,7 @@ function run_nailgun_tests {
./manage.py dropdb > /dev/null
./manage.py syncdb > /dev/null
[ -z "$noseargs" ] && test_args=. || test_args="$noseargs"
stderr=$(nosetests $noseopts $test_args --verbosity=2 3>&1 1>&2 2>&3 | tee /dev/stderr)
stderr=$(nosetests -vv $noseopts $test_args 3>&1 1>&2 2>&3 | tee /dev/stderr)
)
# TODO: uncomment after cluster deletion issue fix
# if [[ "$stderr" =~ "Exception" ]]; then
@@ -262,7 +262,7 @@ function run_cli_tests {
echo "Test server started"
clean
test_args="../fuelclient/tests"
nosetests $noseopts $test_args --verbosity=2
nosetests -vv $noseopts $test_args
result=$(($result + $?))
kill $server_pid
wait $server_pid 2> /dev/null
@@ -300,7 +300,7 @@ function run_unit_tests {
./manage.py dropdb > /dev/null
./manage.py syncdb > /dev/null
)
nosetests $noseopts $test_args --verbosity=2 nailgun/nailgun/test/unit #shotgun
nosetests $noseopts $test_args -vv nailgun/nailgun/test/unit #shotgun
}
if [ $unit_tests -eq 1 ]; then