diff --git a/functional/base.py b/functional/base.py
index dde098a..cec47c9 100644
--- a/functional/base.py
+++ b/functional/base.py
@@ -1,7 +1,7 @@
 __author__ = 'ekonstantinov'
-from unittest import TestCase
 from functional.client import TestingAdapterClient
 from functools import wraps
+from unittest import TestCase
 
 
 class EmptyResponseError(Exception):
@@ -10,22 +10,27 @@ class EmptyResponseError(Exception):
 
 class Response(object):
     """This is testing_adapter response object"""
+    test_name_mapping = {}
+
     def __init__(self, response):
-        self._parse_json(response.json)
-        self.request = '{} {} \n with {}'\
-            .format(response.request.method, response.request.url, response.request.body)
+        self.is_empty = False
+        if isinstance(response, list):
+            self._parse_json(response)
+            self.request = None
+        else:
+            self._parse_json(response.json())
+            self.request = '{} {} \n with {}'\
+                .format(response.request.method, response.request.url, response.request.body)
 
     def __getattr__(self, item):
         if item in self.test_sets or item in self._tests:
             return self.test_sets.get(item) or self._tests.get(item)
-
-    def __getattribute__(self, item):
-        if self.is_empty:
-            raise EmptyResponseError()
         else:
-            super(Response, self).__getattribute__(item)
+            raise AttributeError(item)
 
     def __str__(self):
+        if self.is_empty:
+            return "Empty"
         return self.test_sets.__str__()
 
     @classmethod
@@ -42,8 +47,8 @@ class Response(object):
         self.test_sets = {}
         self._tests = {}
         for testset in json:
-            self._tests = self.test_sets[testset.pop('testset')] = testset
-            self._tests['tests'] = {self._friendly_name(item.pop('id')): item for item in testset['tests']}
+            self.test_sets[testset.pop('testset')] = testset
+            self._tests = {self._friendly_name(item.pop('id')): item for item in testset['tests']}
 
     def _friendly_name(self, name):
         return self.test_name_mapping.get(name, name)
@@ -58,6 +63,10 @@ class AdapterClientProxy(object):
     def __getattr__(self, item):
         if item in TestingAdapterClient.__dict__:
             call = getattr(self.client, item)
             return self._decorate_call(call)
+
+    def _friendly_map(self, mapping):
+        Response.set_test_name_mapping(mapping)
+
     def _decorate_call(self, call):
         @wraps(call)
@@ -74,39 +83,38 @@ class SubsetException(Exception):
 
 class BaseAdapterTest(TestCase):
-    def _verify_json(self, assertions, json):
-        """For the given json response verify that assertions are present
-        """
-        for item in json:
-            for subitem in assertions:
-                if item['testset'] == subitem['testset']:
-                    for s in subitem['tests']:
-                        if s['id'] not in (i['id'] for i in item['tests']):
-                            raise AssertionError('{} not in:\n{}'.format(s['id'], [i['id'] for i in item['tests']]))
-
-    def is_subset(item, subset):
-        if type(item) != type(subset) and type(subset) not in (str, unicode):
-            return False
-        if type(subset) is list:
-
-            return all(is_subset(i, s) for i in item for s in subset if i.get('id') == s.get('id') or s.get('id') == None)
-        elif type(subset) is dict:
-            try:
-                return all(is_subset(item[s], subset[s]) for s in subset)
-            except AssertionError as e:
-                real, expected = e.message.split('|')
-                key = [x for x in subset if subset[x] == expected][0]
-                msg = '"{}" was found, when "{}" was excepted in key = "{}"'.format(real, expected, key)
-                raise SubsetException(msg)
-        else:
-            msg = '{item}|{subset}'.format(item=item, subset=subset)
-            assert item == subset, msg
-            return item == subset
-
-        msg = '{subset} IS NOT IN {item}'.format(subset=assertions, item=json)
-        try:
-            self.assertTrue(is_subset(json, assertions), msg)
-        except SubsetException as e:
-            msg = '{}\nwith response:\n{}\nand assertion:\n{}'.format(e.message, json, assertions)
+    def compare(self, response, comparable):
+        if response.is_empty:
+            msg = '{} is empty'.format(response.request)
             raise AssertionError(msg)
+        if not isinstance(comparable, Response):
+            comparable = Response(comparable)
+        test_set = comparable.test_sets.keys()[0]
+        test_set_data = comparable.test_sets[test_set]
+        tests = comparable._tests
+        diff = []
+
+        for item in test_set_data:
+            if item == 'tests':
+                continue
+            if response.test_sets[test_set][item] != test_set_data[item]:
+                msg = '"{}" != "{}" in {}.{}'.format(response.test_sets[test_set][item],
+                                                     test_set_data[item], test_set, item)
+                diff.append(msg)
+
+        for test_name, test in tests.iteritems():
+            for t in test:
+                if response._tests[test_name][t] != test[t]:
+                    msg = '"{}" != "{}" in {}.{}.{}'.format(response._tests[test_name][t],
+                                                            test[t], test_set, test_name, t)
+                    diff.append(msg)
+        if diff:
+            raise AssertionError('\n'.join(diff))
+
+    @staticmethod
+    def init_client(url, mapping):
+        ac = AdapterClientProxy(url)
+        ac._friendly_map(mapping)
+        return ac
+
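
Reviewer note (not part of the patch): compare() plus Response replaces the old
recursive is_subset() check. A minimal sketch of the intended assertion style,
assuming a reachable adapter and the friendly-name mapping used in
functional/tests.py:

    # Sketch only; the URL, mapping and expected statuses are assumptions
    # borrowed from functional/tests.py, not something this patch guarantees.
    from functional.base import BaseAdapterTest


    class ExampleTest(BaseAdapterTest):
        @classmethod
        def setUpClass(cls):
            mapping = {'functional.dummy_tests.general_test.Dummy_test'
                       '.test_fast_pass': 'fast_pass'}
            cls.client = cls.init_client('http://172.18.198.75:8989/v1', mapping)

        def test_example(self):
            self.client.start_testrun('plugin_general', 1)
            r = self.client.testruns_last(1)
            # compare() wraps the plain list in a Response and raises an
            # AssertionError listing field-by-field mismatches.
            self.compare(r, [{'testset': 'plugin_general',
                              'status': 'running',
                              'tests': [{'id': 'fast_pass',
                                         'status': 'success'}]}])
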
diff --git a/functional/client.py b/functional/client.py
index 1566410..ac2f96b 100644
--- a/functional/client.py
+++ b/functional/client.py
@@ -2,7 +2,6 @@ __author__ = 'ekonstantinov'
 import requests
 from json import dumps
 import time
-import logging
 
 
 class TestingAdapterClient(object):
@@ -13,15 +12,11 @@ class TestingAdapterClient(object):
         headers = {'content-type': 'application/json'}
         if data:
             print data
-            r = requests.request(method, url, data=data, headers=headers, timeout=30.0)
-        else:
-            r = requests.request(method, url, headers=headers, timeout=30.0)
-
-        #r = requests.request(method, url, data=data, headers=headers, timeout=30.0)
+        r = requests.request(method, url, data=data, headers=headers, timeout=30.0)
        if 2 != r.status_code/100:
             raise AssertionError('{method} "{url}" responded with "{code}" status code'
                                  .format(method=method.upper(), url=url, code=r.status_code))
-        return r.json()
+        return r
 
     def __getattr__(self, item):
         getters = ['testsets', 'tests', 'testruns']
@@ -51,7 +46,7 @@ class TestingAdapterClient(object):
         return self._request("PUT", url, data=dumps(data))
 
     def stop_testrun_last(self, testset, cluster_id):
-        latest = self.testruns_last(cluster_id)
+        latest = self.testruns_last(cluster_id).json()
         testrun_id = [item['id'] for item in latest if item['testset'] == testset][0]
         return self.stop_testrun(testrun_id)
@@ -63,24 +58,13 @@ class TestingAdapterClient(object):
         return self._request('PUT', url, data=dumps(body))
 
     def restart_tests_last(self, testset, tests, cluster_id):
-        latest = self.testruns_last(cluster_id)
+        latest = self.testruns_last(cluster_id).json()
         testrun_id = [item['id'] for item in latest if item['testset'] == testset][0]
         return self.restart_tests(tests, testrun_id)
 
-    def run_and_timeout_unless_finished(self, action, testset, tests, cluster_id, timeout):
-
-        if action == 'run':
-            action = lambda: self.start_testrun_tests(testset, tests, cluster_id)
-        elif action == 'restart':
-            action = lambda: self.restart_tests_last(testset, tests, cluster_id)
-        else:
-            raise KeyError('Not Appropriate action')
-        current_status = None
-        current_failed_tests_statuses = None
-
+    def _with_timeout(self, action, testset, cluster_id, timeout):
         start_time = time.time()
-
-        json = action()
+        json = action().json()
 
         if json == [{}]:
             self.stop_testrun_last(testset, cluster_id)
@@ -91,21 +75,23 @@ class TestingAdapterClient(object):
             time.sleep(5)
             current_response = self.testruns_last(cluster_id)
+            current_status = [item['status'] for item in current_response.json()
+                              if item['testset'] == testset][0]
 
-            current_testset = [item for item in current_response
-                               if item.get('testset') == testset][0]
-            current_status = current_testset['status']
-            current_failed_tests_statuses = {item['id']: [item['status'], item['message']]
-                                             for item in current_testset['tests'] if item['status'] != 'success'}
+            if current_status == 'finished':
+                break
+        else:
+            current_response = self.stop_testrun_last(testset, cluster_id)
 
-            if current_status == "finished":
-                return {'status': 'finished',
-                        'tests': current_failed_tests_statuses}
+        return current_response
 
-        self.stop_testrun_last(testset, cluster_id)
+    def run_with_timeout(self, testset, tests, cluster_id, timeout):
+        action = lambda: self.start_testrun_tests(testset, tests, cluster_id)
+        return self._with_timeout(action, testset, cluster_id, timeout)
 
-        return {'status': current_status,
-                'tests': current_failed_tests_statuses}
+    def restart_with_timeout(self, testset, tests, cluster_id, timeout):
+        action = lambda: self.restart_tests_last(testset, tests, cluster_id)
+        return self._with_timeout(action, testset, cluster_id, timeout)
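
Reviewer note (not part of the patch): _with_timeout polls testruns_last every
five seconds until the testset reports 'finished'; when the while loop's
timeout window closes first, the run is stopped via stop_testrun_last and the
last response is returned. A usage sketch against the raw client, with the
URL, testset and cluster id assumed from the functional tests:

    # Sketch only; the endpoint and ids are assumptions taken from the tests.
    from functional.client import TestingAdapterClient

    client = TestingAdapterClient('http://172.18.198.75:8989/v1')

    # Give fuel_sanity on cluster 3 up to 60 seconds to finish; on timeout
    # the run is stopped and the last known state is returned.
    result = client.run_with_timeout('fuel_sanity', [], 3, 60)

    # Tests cut short by the stop come back as 'stopped'; rerun only those.
    stopped = [t['id']
               for run in result.json() if run['testset'] == 'fuel_sanity'
               for t in run['tests'] if t['status'] == 'stopped']
    if stopped:
        client.restart_with_timeout('fuel_sanity', stopped, 3, 60)
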
diff --git a/functional/scenario.py b/functional/scenario.py
new file mode 100644
index 0000000..361b09a
--- /dev/null
+++ b/functional/scenario.py
@@ -0,0 +1,31 @@
+__author__ = 'ekonstantinov'
+from functional.base import BaseAdapterTest, Response
+
+import time
+
+
+class ScenarioTests(BaseAdapterTest):
+    @classmethod
+    def setUpClass(cls):
+        url = 'http://172.18.198.75:8989/v1'
+        mapping = {}
+
+        cls.client = cls.init_client(url, mapping)
+
+    def test_random_scenario(self):
+        testset = "fuel_sanity"
+        cluster_id = 3
+        tests = []
+        timeout = 60
+
+        from pprint import pprint
+
+        for i in range(1):
+            r = self.client.run_with_timeout(testset, tests, cluster_id, timeout)
+            pprint([item for item in r.test_sets[testset]['tests']])
+            if r.fuel_sanity['status'] == 'stopped':
+                running_tests = [test for test in r._tests
+                                 if r._tests[test]['status'] == 'stopped']
+                print "restarting: ", running_tests
+                result = self.client.restart_with_timeout(testset, running_tests, cluster_id, timeout)
+                print 'Restart', result
diff --git a/functional/test_general_flow.py b/functional/test_general_flow.py
index db274f7..3106b3f 100644
--- a/functional/test_general_flow.py
+++ b/functional/test_general_flow.py
@@ -14,6 +14,8 @@ class adapter_tests(unittest.TestCase):
     def _verify_json(self, assertions, json):
         """For the given json response verify that assertions are present
         """
+        if json == [{}]:
+            raise AssertionError('response is empty')
         for item in json:
             for subitem in assertions:
                 if item['testset'] == subitem['testset']:
@@ -113,9 +115,10 @@ class adapter_tests(unittest.TestCase):
                 'tests': [
                     {'id': self.tests['fast_pass'],
                      'name': 'fast pass test',
+                     'description': """ This is a simple always pass test
+                     """,
                      'status': 'success'},
                     {'id': self.tests['long_pass'],
-                     'description': ' ',
                      'status': 'running'},
                     {'id': self.tests['fail_step'],
                      'message': 'MEssaasasas',
@@ -128,7 +131,7 @@ class adapter_tests(unittest.TestCase):
                      'status': 'failure'}],
                 'testset': 'plugin_general'}]
         self._verify_json(assertions, json)
-        time.sleep(25)
+        time.sleep(5)
         json = self.adapter.testruns_last(cluster_id)
         assertions[0]['status'] = 'finished'
         assertions[0]['tests'][1]['status'] = 'success'
@@ -140,7 +143,7 @@ class adapter_tests(unittest.TestCase):
         testset = "plugin_stopped"
         cluster_id = 2
         json = self.adapter.start_testrun(testset, cluster_id)
-        current_id = json[0]['id']
+        #current_id = json[0]['id']
         time.sleep(15)
         json = self.adapter.testruns_last(cluster_id)
         assertions = [
@@ -153,12 +156,12 @@ class adapter_tests(unittest.TestCase):
                     {'id': self.tests['really_long'],
                      'status': 'running'}],
                 'testset': 'plugin_stopped'}]
-        self._verify_json(assertions, json)
-        self.adapter.stop_testrun(current_id)
+        self._verify_json(assertions, json.json())
+        self.adapter.stop_testrun_last(testset, cluster_id)
         json = self.adapter.testruns_last(cluster_id)
         assertions[0]['status'] = 'stopped'
         assertions[0]['tests'][2]['status'] = 'stopped'
-        self._verify_json(assertions, json)
+        self._verify_json(assertions, json.json())
 
     def test_testruns(self):
         """Verify that you can't start new testrun for the same cluster_id while previous run is running"""
@@ -180,15 +183,18 @@ class adapter_tests(unittest.TestCase):
     def test_load_runs(self):
-        """Verify that you can start 20 testruns in a row with different cluster_id"""
+        """Verify that you can start 5 testruns in a row with different cluster_id"""
         testset = "plugin_general"
-        json = self.adapter.testruns()
-        last_test_run = max(item['id'] for item in json)
-        self.assertTrue(last_test_run == len(json))
+        #json = self.adapter.testruns().json()
+        #last_test_run = max(item['id'] for item in json)
+        #self.assertTrue(last_test_run == len(json))
 
-        for cluster_id in xrange(100, 105):
+        for cluster_id in range(100, 105):
             json = self.adapter.start_testrun(testset, cluster_id)
-
-            msg = 'Response for start_testset("{testset}", "{cluster_id}") was empty = {json}'.\
-                format(testset=testset, cluster_id=cluster_id, json=json)
+            #r = json.request
+            #print ' : '.join([r.method, r.url, r.body])
+            #print json.text
+            #json = json.json()
+            msg = 'Response for {url} start_testset("{testset}", "{cluster_id}") was empty = {json}'.\
+                format(testset=testset, cluster_id=cluster_id, json=json, url=self.adapter.url)
             self.assertTrue(json != [{}], msg)
@@ -363,13 +369,14 @@ class adapter_tests(unittest.TestCase):
         from pprint import pprint
 
         for i in range(1):
-            result = self.adapter.run_and_timeout_unless_finished('run', testset, tests, cluster_id, timeout)
-            pprint(result)
+            result = self.adapter.run_with_timeout(testset, tests, cluster_id, timeout)
+            print result.test_sets
+            print result._tests
             if result['status'] == 'running':
                 running_tests = [test for test in result['tests']
                                  if result['tests'][test][0] in ['running', 'wait_running']]
                 print "restarting: ", running_tests
-                result = self.adapter.run_and_timeout_unless_finished('restart', testset, running_tests, cluster_id, timeout)
+                result = self.adapter.restart_with_timeout(testset, running_tests, cluster_id, timeout)
                 print 'Restart', result, '\n'
diff --git a/functional/tests.py b/functional/tests.py
new file mode 100644
index 0000000..2ec6761
--- /dev/null
+++ b/functional/tests.py
@@ -0,0 +1,239 @@
+__author__ = 'ekonstantinov'
+from functional.base import BaseAdapterTest, Response
+from functional.client import TestingAdapterClient as adapter
+
+import time
+
+
+class AdapterTests(BaseAdapterTest):
+
+    @classmethod
+    def setUpClass(cls):
+        url = 'http://172.18.198.75:8989/v1'
+        cls.mapping = {
+            'functional.dummy_tests.general_test.Dummy_test.test_fast_pass': 'fast_pass',
+            'functional.dummy_tests.general_test.Dummy_test.test_fast_error': 'fast_error',
+            'functional.dummy_tests.general_test.Dummy_test.test_fast_fail': 'fast_fail',
+            'functional.dummy_tests.general_test.Dummy_test.test_long_pass': 'long_pass',
+            'functional.dummy_tests.general_test.Dummy_test.test_fail_with_step': 'fail_step',
+            'functional.dummy_tests.stopped_test.dummy_tests_stopped.test_really_long': 'really_long',
+            'functional.dummy_tests.stopped_test.dummy_tests_stopped.test_not_long_at_all': 'not_long',
+            'functional.dummy_tests.stopped_test.dummy_tests_stopped.test_one_no_so_long': 'so_long'
+        }
+        cls.testsets = {
+            "fuel_smoke": None,
+            "fuel_sanity": None,
+            "plugin_general": ['fast_pass', 'fast_error', 'fast_fail', 'long_pass'],
+            "plugin_stopped": ['really_long', 'not_long', 'so_long']
+        }
+
+        cls.adapter = adapter(url)
+        cls.client = cls.init_client(url, cls.mapping)
+
+    def test_list_testsets(self):
+        """Verify that self.testsets are in json response
+        """
+        json = self.adapter.testsets().json()
+        response_testsets = [item['id'] for item in json]
+        for testset in self.testsets:
+            msg = '"{test}" not in "{response}"'.format(test=testset, response=response_testsets)
+            self.assertTrue(testset in response_testsets, msg)
+
+    def test_list_tests(self):
+        """Verify that self.tests are in json response
+        """
+        json = self.adapter.tests().json()
+        response_tests = [item['id'] for item in json]
+
+        for test in self.mapping:
+            msg = '"{test}" not in "{response}"'.format(test=test, response=response_tests)
+            self.assertTrue(test in response_tests, msg)
+
+    def test_run_testset(self):
+        """Verify that test status changes in time from running to success
+        """
+        testset = "plugin_general"
+        cluster_id = 1
+
+        self.client.start_testrun(testset, cluster_id)
+        time.sleep(2)
+
+        r = self.client.testruns_last(cluster_id)
+        assertions = Response([{'status': 'running',
+                                'testset': 'plugin_general',
+                                'tests': [
+                                    {'id': 'fast_pass', 'status': 'success', 'name': 'fast pass test',
+                                     'description': """ This is a simple always pass test
+                  """},
+                                    {'id': 'long_pass', 'status': 'running'},
+                                    {'id': 'fail_step', 'message': 'MEssaasasas', 'status': 'failure'},
+                                    {'id': 'fast_error', 'message': '', 'status': 'error'},
+                                    {'id': 'fast_fail', 'message': 'Something goes wroooong', 'status': 'failure'}]}])
+
+        self.compare(r, assertions)
+        time.sleep(5)
+
+        r = self.client.testruns_last(cluster_id)
+
+        assertions.plugin_general['status'] = 'finished'
+        assertions.long_pass['status'] = 'success'
+
+        self.compare(r, assertions)
+
+    def test_stop_testset(self):
+        """Verify that long running testrun can be stopped
+        """
+        testset = "plugin_stopped"
+        cluster_id = 2
+
+        self.client.start_testrun(testset, cluster_id)
+        time.sleep(15)
+        r = self.client.testruns_last(cluster_id)
+        assertions = Response([
+            {'status': 'running',
+             'testset': 'plugin_stopped',
+             'tests': [
+                 {'id': 'not_long', 'status': 'success'},
+                 {'id': 'so_long', 'status': 'success'},
+                 {'id': 'really_long', 'status': 'running'}]}])
+
+        self.compare(r, assertions)
+
+        self.client.stop_testrun_last(testset, cluster_id)
+        r = self.client.testruns_last(cluster_id)
+
+        assertions.plugin_stopped['status'] = 'stopped'
+        assertions.really_long['status'] = 'stopped'
+        self.compare(r, assertions)
+
{"plugin_stopped": None, + "plugin_general": None} + cluster_id = 3 + + for testset in testsets: + self.client.start_testrun(testset, cluster_id) + self.client.testruns_last(cluster_id) + + for testset in testsets: + r = self.client.start_testrun(testset, cluster_id) + + msg = "Response {} is not empty when you try to start testrun" \ + " with testset and cluster_id that are already running".format(r) + + self.assertTrue(r.is_empty, msg) + + def test_start_many_runs(self): + """Verify that you can start 20 testruns in a row with different cluster_id""" + testset = "plugin_general" + + for cluster_id in range(100, 105): + r = self.client.start_testrun(testset, cluster_id) + msg = '{} was empty'.format(r.request) + self.assertFalse(r.is_empty, msg) + + '''TODO: Rewrite assertions to verity that all 5 testruns ended with appropriate status''' + + def test_run_single_test(self): + """Verify that you can run individual tests from given testset""" + testset = "plugin_general" + tests = ['functional.dummy_tests.general_test.Dummy_test.test_fast_pass', + 'functional.dummy_tests.general_test.Dummy_test.test_fast_fail'] + cluster_id = 50 + + r = self.client.start_testrun_tests(testset, tests, cluster_id) + assertions = Response([ + {'status': 'started', + 'testset': 'plugin_general', + 'tests': [ + {'status': 'disabled', 'id': 'fast_error'}, + {'status': 'wait_running', 'id': 'fast_fail'}, + {'status': 'wait_running', 'id': 'fast_pass'}, + {'status': 'disabled', 'id': 'long_pass'}]}]) + + self.compare(r, assertions) + time.sleep(2) + + r = self.client.testruns_last(cluster_id) + assertions.plugin_general['status'] = 'finished' + assertions.fast_fail['status'] = 'failure' + assertions.fast_pass['status'] = 'success' + self.compare(r, assertions) + + def test_single_test_restart(self): + """Verify that you restart individual tests for given testrun""" + testset = "plugin_general" + tests = ['functional.dummy_tests.general_test.Dummy_test.test_fast_pass', + 'functional.dummy_tests.general_test.Dummy_test.test_fast_fail'] + cluster_id = 60 + + self.client.start_testrun(testset, cluster_id) + + time.sleep(10) + + r = self.client.restart_tests_last(testset, tests, cluster_id) + assertions = Response([ + {'status': 'restarted', + 'testset': 'plugin_general', + 'tests': [ + {'id': 'fast_pass', 'status': 'wait_running'}, + {'id': 'long_pass', 'status': 'success'}, + {'id': 'fast_error', 'status': 'error'}, + {'id': 'fast_fail', 'status': 'wait_running'}]}]) + + self.compare(r, assertions) + time.sleep(5) + + r = self.client.testruns_last(cluster_id) + assertions.plugin_general['status'] = 'finished' + assertions.fast_pass['status'] = 'success' + assertions.fast_fail['status'] = 'failure' + + self.compare(r, assertions) + + def test_restart_combinations(self): + """Verify that you can restart both tests that ran and did not run during single test start""" + testset = "plugin_general" + tests = ['functional.dummy_tests.general_test.Dummy_test.test_fast_pass', + 'functional.dummy_tests.general_test.Dummy_test.test_fast_fail'] + disabled_test = ['functional.dummy_tests.general_test.Dummy_test.test_fast_error', ] + cluster_id = 70 + + self.client.start_testrun_tests(testset, tests, cluster_id) + time.sleep(5) + + self.client.restart_tests_last(testset, tests, cluster_id) + time.sleep(5) + + r = self.client.restart_tests_last(testset, disabled_test, cluster_id) + assertions = Response([ + {'status': 'restarted', + 'testset': 'plugin_general', + 'tests': [ + {'status': 'wait_running', 'id': 'fast_error'}, + 
diff --git a/test_utils/commands b/test_utils/commands
index d6a800d..a5fdfb6 100755
--- a/test_utils/commands
+++ b/test_utils/commands
@@ -2,9 +2,11 @@
 function killapp {
     netstat -nplt | grep 8989 | grep -o [0-9]*/python | grep -o [0-9]* &> /dev/null || { echo "Not running" && return 1; }
-    declare app_pid=$(netstat -nplt | grep 8989 | grep -o [0-9]*/python | grep -o [0-9]*)
-    echo "Ostf-adapter pid is: $app_pid"
-    kill -9 $app_pid
+    while netstat -nplt | grep 8989 &> /dev/null; do
+        declare app_pid=$(netstat -nplt | grep 8989 | grep -o [0-9]*/python | grep -o [0-9]*)
+        echo "Ostf-adapter pid is: $app_pid"
+        kill -9 $app_pid
+    done
 }
 
 function stopapp {
@@ -62,5 +64,5 @@ function migrate_db {
 
 function run_functional_tests {
     [[ ! -z $WORKSPACE ]] || export WORKSPACE=$(pwd)
-    nosetests -q functional/test_general_flow.py:adapter_tests --with-xunit --xunit-file=$WORKSPACE/functional.xml
+    nosetests -q functional/tests.py:AdapterTests --with-xunit --xunit-file=$WORKSPACE/reports/functional.xml
 }
\ No newline at end of file
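
Reviewer note (not part of the patch): the body of
AdapterClientProxy._decorate_call falls outside the diff context, but from the
way the tests consume proxy results (r.is_empty, r.test_sets,
assertions.plugin_general) it presumably wraps each raw requests response in
functional.base.Response. A sketch under that assumption:

    # Assumed shape of AdapterClientProxy._decorate_call; the real body is
    # not visible in this diff.
    from functools import wraps

    def _decorate_call(self, call):
        @wraps(call)
        def wrapped(*args, **kwargs):
            result = call(*args, **kwargs)
            # Response accepts either a plain list or an HTTP response object
            return Response(result)
        return wrapped

One more thing worth checking: run_functional_tests now writes the xunit file
to $WORKSPACE/reports/functional.xml, so the reports directory has to exist
before nosetests runs.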