From 9c4d87e76bfd115c4b29980b3c85ffb897d3e023 Mon Sep 17 00:00:00 2001
From: Dmitry Tyzhnenko
Date: Wed, 18 Nov 2015 16:56:09 +0200
Subject: [PATCH] Improvements of TestRail reporter for system_tests

Improve the behavior of upload_cases_description.py and report.py
for the new template-based type of system tests.

Implements blueprint template-based-testcases

Change-Id: I965ce7159a1497840775219db4c601821f5a66b1
---
 fuelweb_test/testrail/builds.py            |  13 +-
 fuelweb_test/testrail/report.py            | 121 ++++++++++++++----
 fuelweb_test/testrail/testrail_client.py   |  32 ++++-
 .../testrail/upload_cases_description.py   |  30 ++++-
 system_test/tests/actions_base.py          |   1 +
 system_test/tests/base_actions_factory.py  |   8 +-
 6 files changed, 167 insertions(+), 38 deletions(-)

diff --git a/fuelweb_test/testrail/builds.py b/fuelweb_test/testrail/builds.py
index 647cd6216..b269b71f1 100644
--- a/fuelweb_test/testrail/builds.py
+++ b/fuelweb_test/testrail/builds.py
@@ -115,15 +115,20 @@ class Build(object):
         logger.debug("Request build data from {}".format(build_url))
         return json.load(urllib2.urlopen(build_url))
 
-    def get_test_data(self, url):
-        test_url = "/".join([url.rstrip("/"), 'testReport', 'api/json'])
+    def get_test_data(self, url, result_path=None):
+        if result_path:
+            test_url = "/".join(
+                [url.rstrip("/"), 'testReport'] + result_path + ['api/json'])
+        else:
+            test_url = "/".join([url.rstrip("/"), 'testReport', 'api/json'])
+
         logger.debug("Request test data from {}".format(test_url))
         response = urllib2.urlopen(test_url)
         return json.load(response)
 
-    def test_data(self):
+    def test_data(self, result_path=None):
         try:
-            data = self.get_test_data(self.url)
+            data = self.get_test_data(self.url, result_path)
         except Exception as e:
             logger.warning("No test data for {0}: {1}".format(
                 self.url,
diff --git a/fuelweb_test/testrail/report.py b/fuelweb_test/testrail/report.py
index 42fa23772..4b5b33448 100755
--- a/fuelweb_test/testrail/report.py
+++ b/fuelweb_test/testrail/report.py
@@ -39,8 +39,9 @@ from testrail_client import TestRailProject
 class TestResult(object):
     """TestResult."""  # TODO documentation
 
-    def __init__(self, name, group, status, duration, url=None, version=None,
-                 description=None, comments=None, launchpad_bug=None):
+    def __init__(self, name, group, status, duration, url=None,
+                 version=None, description=None, comments=None,
+                 launchpad_bug=None, steps=None):
         self.name = name
         self.group = group
         self._status = status
@@ -60,6 +61,7 @@ class TestResult(object):
             'blocked': ['blocked'],
             'custom_status2': ['in_progress']
         }
+        self._steps = steps
 
     @property
     def version(self):
@@ -84,6 +86,10 @@ class TestResult(object):
     def status(self, value):
         self._status = value
 
+    @property
+    def steps(self):
+        return self._steps
+
     def __str__(self):
         result_dict = {
             'name': self.name,
@@ -229,26 +235,97 @@ def check_untested(test):
 def get_tests_results(systest_build, os):
     tests_results = []
     test_build = Build(systest_build['name'], systest_build['number'])
-    for test in test_build.test_data()['suites'][0]['cases']:
-        if check_untested(test):
-            continue
-        check_blocked(test)
-        test_result = TestResult(
-            name=test['name'],
-            group=expand_test_group(test['className'],
-                                    systest_build['name'],
-                                    os),
-            status=test['status'].lower(),
-            duration='{0}s'.format(int(test['duration']) + 1),
-            url='{0}testReport/(root)/{1}/'.format(test_build.url,
-                                                   test['name']),
-            version='_'.join([test_build.build_data["id"]] +
-                             (test_build.build_data["description"]
-                              or test['name']).split()),
-            description=test_build.build_data["description"] or
-            test['name'],
-            comments=test['skippedMessage']
-        )
+    run_test_data = test_build.test_data()
+    test_classes = {}
+    for one in run_test_data['suites'][0]['cases']:
+        className = one['className']
+        if className not in test_classes:
+            test_classes[className] = {}
+            test_classes[className]['child'] = []
+            test_classes[className]['duration'] = 0
+            test_classes[className]["failCount"] = 0
+            test_classes[className]["passCount"] = 0
+            test_classes[className]["skipCount"] = 0
+
+        test_class = test_classes[className]
+        test_class['child'].append(one)
+        test_class['duration'] += float(one['duration'])
+        if one['status'].lower() in ('failed', 'error'):
+            test_class["failCount"] += 1
+        if one['status'].lower() in ('passed',):
+            test_class["passCount"] += 1
+        if one['skipped'] is True:
+            test_class["skipCount"] += 1
+
+    for klass in test_classes:
+        klass_result = test_classes[klass]
+        if len(klass_result['child']) == 1:
+            test = klass_result['child'][0]
+            if check_untested(test):
+                continue
+            check_blocked(test)
+            test_result = TestResult(
+                name=test['name'],
+                group=expand_test_group(test['className'],
+                                        systest_build['name'],
+                                        os),
+                status=test['status'].lower(),
+                duration='{0}s'.format(int(test['duration']) + 1),
+                url='{0}testReport/(root)/{1}/'.format(test_build.url,
+                                                       test['name']),
+                version='_'.join([test_build.build_data["id"]] +
+                                 (test_build.build_data["description"]
+                                  or test['name']).split()),
+                description=test_build.build_data["description"] or
+                test['name'],
+                comments=test['skippedMessage']
+            )
+        else:
+            case_steps = []
+            test_duration = sum(
+                [float(c['duration']) for c in klass_result['child']])
+            steps = [c for c in klass_result['child']
+                     if c['name'].startswith('Step')]
+            steps = sorted(steps, key=lambda k: k['name'])
+            test_name = steps[0]['className']
+            test_group = steps[0]['className']
+            test_comments = None
+            is_test_failed = any([s['status'].lower() in ('failed', 'error')
+                                  for s in steps])
+
+            for step in steps:
+                if step['status'].lower() in ('failed', 'error'):
+                    case_steps.append({
+                        "content": step['name'],
+                        "actual": step['errorStackTrace'] or
+                        step['errorDetails'],
+                        "status": step['status'].lower()})
+                    test_comments = "{err}\n\n\n{details}".format(
+                        err=step['errorDetails'],
+                        details=step['errorStackTrace'])
+                else:
+                    case_steps.append({
+                        "content": step['name'],
+                        "actual": "pass",
+                        "status": step['status'].lower()
+                    })
+            test_result = TestResult(
+                name=test_name,
+                group=expand_test_group(test_group,
+                                        systest_build['name'],
+                                        os),
+                status='failed' if is_test_failed else 'passed',
+                duration='{0}s'.format(int(test_duration) + 1),
+                url='{0}testReport/(root)/{1}/'.format(test_build.url,
+                                                       test_name),
+                version='_'.join([test_build.build_data["id"]] +
+                                 (test_build.build_data["description"]
+                                  or test_name).split()),
+                description=test_build.build_data["description"] or
+                test_name,
+                comments=test_comments,
+                steps=case_steps,
+            )
         tests_results.append(test_result)
     return tests_results
diff --git a/fuelweb_test/testrail/testrail_client.py b/fuelweb_test/testrail/testrail_client.py
index 9b5f226ea..dda4f70c5 100644
--- a/fuelweb_test/testrail/testrail_client.py
+++ b/fuelweb_test/testrail/testrail_client.py
@@ -365,6 +365,8 @@ class TestRailProject(object):
             'elapsed': test_results.duration,
             'version': test_results.version
         }
+        if test_results.steps:
+            new_results['custom_step_results'] = test_results.steps
         return self.client.send_post(add_results_test_uri, new_results)
 
     def add_results_for_cases(self, run_id, suite_id, tests_results):
@@ -373,10 +375,12 @@ class TestRailProject(object):
         new_results = {'results': []}
         tests_cases = self.get_cases(suite_id)
         for results in tests_results:
+            case = self.get_case_by_group(suite_id=suite_id,
+                                          group=results.group,
+                                          cases=tests_cases)
+            case_id = case['id']
             new_result = {
-                'case_id': self.get_case_by_group(suite_id=suite_id,
-                                                  group=results.group,
-                                                  cases=tests_cases)['id'],
+                'case_id': case_id,
                 'status_id': self.get_status(results.status)['id'],
                 'comment': '\n'.join(filter(lambda x: x is not None,
                                             [results.description,
@@ -386,6 +390,28 @@ class TestRailProject(object):
                 'version': results.version,
                 'custom_launchpad_bug': results.launchpad_bug
             }
+            if results.steps:
+                custom_step_results = []
+                steps = case.get('custom_test_case_steps', None)
+                if steps and len(steps) == len(results.steps):
+                    steps = zip(steps, results.steps)
+                    for s in steps:
+                        custom_step_results.append({
+                            "content": s[0]["content"],
+                            "expected": s[0]["expected"],
+                            "actual": s[1]['actual'],
+                            "status_id": self.get_status(s[1]['status'])['id']
+                        })
+                else:
+                    for s in results.steps:
+                        custom_step_results.append({
+                            "content": s['content'],
+                            "expected": 'pass',
+                            "actual": s['actual'],
+                            "status_id": self.get_status(s['status'])['id']
+                        })
+                new_result['custom_test_case_steps_results'] = \
+                    custom_step_results
             new_results['results'].append(new_result)
 
         return self.client.send_post(add_results_test_uri, new_results)
diff --git a/fuelweb_test/testrail/upload_cases_description.py b/fuelweb_test/testrail/upload_cases_description.py
index a0f49bb80..3ee8c0018 100644
--- a/fuelweb_test/testrail/upload_cases_description.py
+++ b/fuelweb_test/testrail/upload_cases_description.py
@@ -28,6 +28,7 @@ from testrail_client import TestRailProject
 
 def get_tests_descriptions(milestone_id, tests_include, tests_exclude, groups,
                            default_test_priority):
+    from system_test.tests.actions_base import ActionsBase
     import_tests()
 
     tests = []
@@ -37,22 +38,40 @@ def get_tests_descriptions(milestone_id, tests_include, tests_exclude, groups,
         for case in TestProgram(groups=[group]).cases:
             if not case.entry.info.enabled:
                 continue
+            home = case.entry.home
+            parent_home = case.entry.parent.home
+            case_state = case.state
+            if issubclass(parent_home, ActionsBase):
+                case_name = parent_home.__name__
+                test_group = parent_home.__name__
+                if any([x['custom_test_group'] == test_group for x in tests]):
+                    continue
+            else:
+                case_name = home.func_name
+                test_group = case.entry.home.func_name
             if tests_include:
-                if tests_include not in case.entry.home.func_name:
+                if tests_include not in case_name:
                     logger.debug("Skipping '{0}' test because it doesn't "
                                  "contain '{1}' in method name"
-                                 .format(case.entry.home.func_name,
+                                 .format(case_name,
                                          tests_include))
                     continue
             if tests_exclude:
-                if tests_exclude in case.entry.home.func_name:
+                if tests_exclude in case_name:
                     logger.debug("Skipping '{0}' test because it contains"
                                  " '{1}' in method name"
-                                 .format(case.entry.home.func_name,
+                                 .format(case_name,
                                          tests_exclude))
                     continue
 
-            docstring = case.entry.home.func_doc or ''
+            if issubclass(parent_home, ActionsBase):
+                docstring = parent_home.__doc__.split('\n')
+                configuration = case_state.instance.config_name
+                docstring[0] = "{0} on {1}".format(docstring[0], configuration)
+                docstring = '\n'.join(docstring)
+            else:
+                docstring = home.func_doc or ''
+                configuration = None
             docstring = '\n'.join([s.strip() for s in docstring.split('\n')])
 
             steps = [{"content": s, "expected": "pass"} for s in
             test_duration = re.search(r'Duration\s+(\d+[s,m])\b', docstring)
             title = docstring.split('\n')[0] or case.entry.home.func_name
 
-            test_group = case.entry.home.func_name
             if case.entry.home.func_name in GROUPS_TO_EXPAND:
                 """Expand specified test names with the group names that are
diff --git a/system_test/tests/actions_base.py b/system_test/tests/actions_base.py
index 4230bb495..a54e63f75 100644
--- a/system_test/tests/actions_base.py
+++ b/system_test/tests/actions_base.py
@@ -143,6 +143,7 @@ class ActionsBase(PrepareBase):
         stop_deploy - stop deploying of environment (NotImplemented)
 
     """
+    est_duration = None
     base_group = None
     actions_order = None
 
diff --git a/system_test/tests/base_actions_factory.py b/system_test/tests/base_actions_factory.py
index 3d889a61b..3b8a89359 100644
--- a/system_test/tests/base_actions_factory.py
+++ b/system_test/tests/base_actions_factory.py
@@ -46,7 +46,7 @@ class BaseActionsFactory(base_test_case.TestBasic):
 
         # Generate human readable class_name, if was method docstring not
         # described, use generated name
-        class_name = "Case_{}_{}".format(cls.__name__, case_group)
+        class_name = "Case_{}__Config_{}".format(cls.__name__, case_group)
 
         # Make methods for new testcase class, following by order
         scenario.append("  Scenario:")
@@ -108,8 +108,10 @@ class BaseActionsFactory(base_test_case.TestBasic):
             groups = cls.base_group + groups
 
         # Generate test case docstring
-        test_steps["__doc__"] = "{}\n{}".format(cls.__doc__.splitlines()[0],
-                                                '\n'.join(scenario))
+        test_steps["__doc__"] = "{}\n\n{}\n\nDuration {}".format(
+            cls.__doc__.splitlines()[0],
+            '\n'.join(scenario),
+            getattr(cls, 'est_duration', '180m') or '180m')
         ret = test(
             type(class_name, (cls,), test_steps),
             groups=groups)
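
Illustration (not part of the patch): a minimal, self-contained sketch of how the
per-step results built in report.py are expected to line up with a TestRail case's
custom_test_case_steps definitions inside add_results_for_cases(). The standalone
helper, the STATUS_IDS mapping and the sample data below are hypothetical; only the
field names (content, expected, actual, status) and the zip()/fallback behaviour
mirror the change above.

# Sketch only: pairs reported step outcomes with the case's step definitions,
# falling back to the raw reported steps when the case has no (or a different
# number of) step definitions.
STATUS_IDS = {'passed': 1, 'failed': 5}  # assumed status-id mapping for the example


def build_step_results(case_steps, result_steps):
    if case_steps and len(case_steps) == len(result_steps):
        # Case steps and reported steps match one-to-one: keep the case's
        # "content"/"expected" text and attach the reported outcome.
        return [{'content': case['content'],
                 'expected': case['expected'],
                 'actual': result['actual'],
                 'status_id': STATUS_IDS[result['status']]}
                for case, result in zip(case_steps, result_steps)]
    # Fallback: the case defines no steps, so report the raw steps as-is.
    return [{'content': result['content'],
             'expected': 'pass',
             'actual': result['actual'],
             'status_id': STATUS_IDS[result['status']]}
            for result in result_steps]


if __name__ == '__main__':
    case_steps = [
        {'content': 'Step 1. Create cluster', 'expected': 'pass'},
        {'content': 'Step 2. Deploy cluster', 'expected': 'pass'},
    ]
    result_steps = [
        {'content': 'Step 1. Create cluster',
         'actual': 'pass', 'status': 'passed'},
        {'content': 'Step 2. Deploy cluster',
         'actual': 'Traceback (most recent call last): ...',
         'status': 'failed'},
    ]
    for row in build_step_results(case_steps, result_steps):
        print(row)

With the sample data above the helper yields one passed and one failed row, which is
the shape posted under custom_test_case_steps_results in the patched client.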