Implement subunit_parser
The subunit2json module was implemented based on the subunit2html module, which causes several issues (see the attached bugs for more details). The main approach of the new parser: it is designed to be used from Python code, not as a live filter for a stream. Since such a parser can be used not only for verification, the implementation is moved to the common dir.

Also, this patch:
- removes the fake log, which was left over from the xml parser;
- removes all db calls (except the list call) from the cli.verify module.

Closes-Bug: #1456810
Closes-Bug: #1471400
Change-Id: Ifcfce827a0df1e75f58e198ee899d4bb85ca9376
parent 695abe2f63
commit c91b7e24ff
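For context, the new parser is meant to be imported and driven from Python code rather than run as a live stream filter. A minimal usage sketch, assuming a subunit v2 stream file on disk (the filename here is hypothetical), based on the API added below:

    from rally.common.io import subunit_v2

    # parse_results_file() reads the whole stream and returns a
    # SubunitV2StreamResult with .total, .tests and .filter_tests()
    results = subunit_v2.parse_results_file("subunit_v2.stream")
    print(results.total)                 # e.g. {"tests": 7, "failures": 3, ...}
    print(results.filter_tests("fail"))  # dict of only the failed tests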
@@ -179,7 +179,7 @@ class VerifyCommands(object):
        """

        try:
-           results = db.verification_result_get(verification_uuid)["data"]
+           results = objects.Verification.get(verification_uuid).get_results()
        except exceptions.NotFoundException as e:
            print(six.text_type(e))
            return 1
@@ -218,8 +218,8 @@ class VerifyCommands(object):
            return 1

        try:
-           verification = db.verification_get(verification_uuid)
-           tests = db.verification_result_get(verification_uuid)
+           verification = objects.Verification.get(verification_uuid)
+           tests = verification.get_results()
        except exceptions.NotFoundException as e:
            print(six.text_type(e))
            return 1
@@ -233,21 +233,19 @@ class VerifyCommands(object):
        fields = ["name", "time", "status"]

        values = [objects.Verification(test)
-                 for test in six.itervalues(tests.data["test_cases"])]
+                 for test in six.itervalues(tests["test_cases"])]
        cliutils.print_list(values, fields, sortby_index=sortby_index)

        if detailed:
-           for test in six.itervalues(tests.data["test_cases"]):
-               if test["status"] == "FAIL":
+           for test in six.itervalues(tests["test_cases"]):
+               if test["status"] == "fail":
                    header = cliutils.make_header(
                        "FAIL: %(name)s\n"
-                       "Time: %(time)s\n"
-                       "Type: %(type)s" % {"name": test["name"],
-                                           "time": test["time"],
-                                           "type": test["failure"]["type"]})
+                       "Time: %(time)s" % {"name": test["name"],
+                                           "time": test["time"]})
                    formatted_test = "%(header)s%(log)s\n" % {
                        "header": header,
-                       "log": test["failure"]["log"]}
+                       "log": test["traceback"]}
                    print (formatted_test)

    @cliutils.args("--uuid", dest="verification_uuid", type=str,
@@ -294,9 +292,9 @@ class VerifyCommands(object):
        """

        try:
-           results1 = db.verification_result_get(uuid1)["data"]["test_cases"]
-           results2 = db.verification_result_get(uuid2)["data"]["test_cases"]
-           _diff = diff.Diff(results1, results2, threshold)
+           res_1 = objects.Verification.get(uuid1).get_results()["test_cases"]
+           res_2 = objects.Verification.get(uuid2).get_results()["test_cases"]
+           _diff = diff.Diff(res_1, res_2, threshold)
        except exceptions.NotFoundException as e:
            print(six.text_type(e))
            return 1
@@ -331,7 +329,7 @@ class VerifyCommands(object):
        :param verification: a UUID of verification
        """
        print("Verification UUID: %s" % verification)
-       db.verification_get(verification)
+       objects.Verification.get(verification)
        fileutils.update_globals_file("RALLY_VERIFICATION", verification)

    @cliutils.args("--deployment", dest="deployment", type=str,
@@ -207,6 +207,8 @@ class Verification(BASE, RallyBase):
    set_name = sa.Column(sa.String(20))

    tests = sa.Column(sa.Integer, default=0)
+   # TODO(andreykurilin): remove this variable, when rally will support db
+   # migrations. Reason: It is not used anywhere :)
    errors = sa.Column(sa.Integer, default=0)
    failures = sa.Column(sa.Integer, default=0)
    time = sa.Column(sa.Float, default=0.0)
rally/common/io/__init__.py (new empty file)
rally/common/io/subunit_v2.py (new file, 188 lines)
@@ -0,0 +1,188 @@
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from oslo_utils import encodeutils
import six
from subunit import v2


def total_seconds(td):
    """Return the total number of seconds contained in the duration.

    NOTE(andreykurilin): python 2.6 compatible method
    """
    if hasattr(td, "total_seconds"):
        s = td.total_seconds()
    else:
        # NOTE(andreykurilin): next calculation is proposed in python docs
        # https://docs.python.org/2/library/datetime.html#datetime.timedelta.total_seconds
        s = (td.microseconds +
             (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6
    return "%.5f" % s


def preparse_input_args(func):
    def inner(self, test_id=None, test_status=None, test_tags=None,
              runnable=True, file_name=None, file_bytes=None, eof=False,
              mime_type=None, route_code=None, timestamp=None):
        # NOTE(andreykurilin): Variables 'runnable', 'eof', 'route_code' are
        # not used in parser. Variable 'test_tags' is used to store workers
        # info, which is not helpful in parser.

        if not test_id:
            return

        if (test_id.startswith("setUpClass (") or
                test_id.startswith("tearDown (")):
            test_id = test_id[test_id.find("(") + 1:-1]
        if test_id.find("[") > -1:
            test_id, tags = test_id.split("[")
            tags = tags[:-1].split(",")
        else:
            tags = []

        if mime_type:
            mime_type, charset = mime_type.split("; ")[:2]
            charset = charset.split("=")[1]
        else:
            charset = None

        func(self, test_id, test_status, tags, file_name, file_bytes,
             mime_type, timestamp, charset)
    return inner


class SubunitV2StreamResult(object):
    """A test result for reporting the activity of a test run."""

    def __init__(self):
        self._tests = {}
        self._total_counts = {
            "fail": 0,
            "skip": 0,
            "success": 0,
            "uxsuccess": 0,
            "xfail": 0}
        self._timestaps = {}
        # NOTE(andreykurilin): _first_timestamp and _last_timestamp vars are
        # designed to calculate total time of tests executions
        self._first_timestamp = None
        self._last_timestamp = None
        # let's save unknown entities and process them after main test case
        self._unknown_entities = {}
        self._is_parsed = False

    def _post_parse(self):
        # parse unknown entities
        for test_id in self._unknown_entities:
            # NOTE(andreykurilin): When whole TestCase is marked as skipped,
            # there is only one event with reason and status, so we should
            # modify all tests of TestCase manually.
            matcher = lambda i: i == test_id or i.startswith("%s." % test_id)
            known_ids = filter(matcher, self._tests)
            for id_ in known_ids:
                if self._tests[id_]["status"] == "init":
                    self._tests[id_]["status"] = (
                        self._unknown_entities[test_id]["status"])
                if self._unknown_entities[test_id].get("reason"):
                    self._tests[id_]["reason"] = (
                        self._unknown_entities[test_id]["reason"])
                elif self._unknown_entities[test_id].get("traceback"):
                    self._tests[id_]["traceback"] = (
                        self._unknown_entities[test_id]["traceback"])

        # decode data
        for test_id in self._tests:
            for file_name in ["traceback", "reason"]:
                # FIXME(andreykurilin): decode fields based on mime_type
                if file_name in self._tests[test_id]:
                    self._tests[test_id][file_name] = (
                        encodeutils.safe_decode(
                            self._tests[test_id][file_name]))
        self._is_parsed = True

    @property
    def tests(self):
        if not self._is_parsed:
            self._post_parse()
        return self._tests

    @property
    def total(self):
        return {"tests": len(self.tests),
                "time": total_seconds(
                    self._last_timestamp - self._first_timestamp),
                "failures": self._total_counts["fail"],
                "skipped": self._total_counts["skip"],
                "success": self._total_counts["success"],
                "unexpected_success": self._total_counts["uxsuccess"],
                "expected_failures": self._total_counts["xfail"]}

    @preparse_input_args
    def status(self, test_id=None, test_status=None, tags=None,
               file_name=None, file_bytes=None, mime_type=None,
               timestamp=None, charset=None):

        if test_status == "exists":
            self._tests[test_id] = {"status": "init",
                                    "name": test_id,
                                    "time": 0.0}
            if tags:
                self._tests[test_id]["tags"] = tags
        elif test_id in self._tests:
            if test_status == "inprogress":
                if not self._first_timestamp:
                    self._first_timestamp = timestamp
                self._timestaps[test_id] = timestamp
            elif test_status:
                self._tests[test_id]["time"] = total_seconds(
                    timestamp - self._timestaps[test_id])
                self._tests[test_id]["status"] = test_status
                self._total_counts[test_status] += 1
            else:
                if file_name in ["traceback", "reason"]:
                    if file_name not in self._tests[test_id]:
                        self._tests[test_id][file_name] = file_bytes
                    else:
                        self._tests[test_id][file_name] += file_bytes
        else:
            self._unknown_entities.setdefault(test_id, {"name": test_id})
            self._unknown_entities[test_id]["status"] = test_status
            if file_name in ["traceback", "reason"]:
                if file_name not in self._unknown_entities[test_id]:
                    self._unknown_entities[test_id][file_name] = file_bytes
                else:
                    self._unknown_entities[test_id][file_name] += file_bytes

        if timestamp:
            self._last_timestamp = timestamp

    def filter_tests(self, status):
        """Filter results by given status."""
        filtered_tests = {}
        for test in self.tests:
            if self.tests[test]["status"] == status:
                filtered_tests[test] = self.tests[test]

        return filtered_tests


def parse_results_file(filename):
    with open(filename, "rb") as source:
        results = SubunitV2StreamResult()
        v2.ByteStreamToStreamResult(
            source=source, non_subunit_name=six.text_type).run(results)
        return results
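To illustrate how the parser above consumes subunit v2 events: each test is announced with status "exists", started with "inprogress", and closed with a final status, and the result object derives timings from the event timestamps. A small hand-driven sketch (test name and times are made up for illustration):

    import datetime as dt

    r = SubunitV2StreamResult()
    t0 = dt.datetime(2015, 7, 14, 12, 0, 0)
    r.status(test_id="test_foo.T.test_ok", test_status="exists")
    r.status(test_id="test_foo.T.test_ok", test_status="inprogress",
             timestamp=t0)
    r.status(test_id="test_foo.T.test_ok", test_status="success",
             timestamp=t0 + dt.timedelta(seconds=1))
    print(r.total)  # {"tests": 1, "time": "1.00000", "success": 1, ...}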
@@ -18,6 +18,13 @@ from rally import consts
from rally import exceptions


+_MAP_OLD_TO_NEW_STATUSES = {
+    "OK": "success",
+    "FAIL": "fail",
+    "SKIP": "skip"
+}
+
+
class Verification(object):
    """Represents results of verification."""


@@ -57,7 +64,13 @@ class Verification(object):

    def finish_verification(self, total, test_cases):
        # update verification db object
-       self._update(status=consts.TaskStatus.FINISHED, **total)
+       self._update(status=consts.TaskStatus.FINISHED,
+                    tests=total["tests"],
+                    # Expected failures are still failures, so we should
+                    # merge them together in main info of Verification
+                    # (see db model for Verification for more details)
+                    failures=(total["failures"] + total["expected_failures"]),
+                    time=total["time"])

        # create db object for results
        data = total.copy()

@@ -66,6 +79,24 @@ class Verification(object):

    def get_results(self):
        try:
-           return db.verification_result_get(self.uuid)
+           results = db.verification_result_get(self.uuid).data
        except exceptions.NotFoundException:
            return None
+
+       if "errors" in results:
+           # NOTE(andreykurilin): there is no "error" status in verification
+           # and this key presents only in old format, so it can be used as
+           # an identifier for old format.
+           for test in results["test_cases"].keys():
+               old_status = results["test_cases"][test]["status"]
+               new_status = _MAP_OLD_TO_NEW_STATUSES.get(old_status,
+                                                         old_status.lower())
+               results["test_cases"][test]["status"] = new_status
+
+               if "failure" in results["test_cases"][test]:
+                   results["test_cases"][test]["traceback"] = results[
+                       "test_cases"][test]["failure"]["log"]
+                   results["test_cases"][test].pop("failure")
+           results["unexpected_success"] = 0
+           results["expected_failures"] = 0
+       return results
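As a concrete illustration of the old-to-new conversion in get_results() above, an old-format test entry (hypothetical name and traceback) is rewritten like this:

    # old format, identified by the presence of the "errors" key
    old = {"status": "FAIL",
           "failure": {"type": "SomeError", "log": "Traceback ..."}}
    # after conversion: the status is mapped (or lowercased), "failure"
    # is folded into "traceback", and the new counters default to zero
    new = {"status": "fail", "traceback": "Traceback ..."}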
@@ -15,11 +15,16 @@

  .nav { margin: 15px 0 }
  .nav span { padding:1px 15px; margin:0 2px 0 0; cursor:pointer; background:#f3f3f3;
-             color:#666; font-size:11px; border:2px #ddd solid; border-radius:10px }
+             color: black; font-size:12px; border:2px #ddd solid; border-radius:10px }
  .nav span.active { background:#cfe3ff; border-color:#ace; color:#369 }

  table td { padding:4px 8px; word-wrap:break-word; word-break:break-all }
  table.stat { width:auto; margin:0 0 15px }
+ td.not_break_column {word-break:keep-all}

  .status-success, .status-success td { color:green }
+ .status-uxsuccess, .status-uxsuccess td { color:orange }
+ .status-xfail, .status-xfail td { color:#CCCC00}
</%block>

<%block name="css_content_wrap">

@@ -46,28 +51,34 @@
      <thead>
        <tr>
          <th>Total
-         <th>Pass
-         <th>Fail
-         <th>Error
-         <th>Skip
+         <th>Total time
+         <th>Success
+         <th>Fails
+         <th>Unexpected Success
+         <th>Expected Fails
+         <th>Skipped
        </tr>
      </thead>
      <tbody>
        <tr>
          <td>${report['total']}
-         <td>${report['passed']}
-         <td>${report['failed']}
-         <td>${report['errors']}
+         <td>${report['time']}
+         <td>${report['success']}
+         <td>${report['failures']}
+         <td>${report['unexpected_success']}
+         <td>${report['expected_failures']}
          <td>${report['skipped']}
        </tr>
      </tbody>
    </table>

    <div class="nav">
-     <span data-navselector=".test-row">ALL</span>
-     <span data-navselector=".status-fail">FAILED</span>
-     <span data-navselector=".status-skip">SKIPPED</span>
-     <span data-navselector=".status-pass">PASSED</span>
+     <span data-navselector=".test-row">all</span>
+     <span data-navselector=".status-success">success</span>
+     <span data-navselector=".status-fail">failed</span>
+     <span data-navselector=".status-uxsuccess">uxsuccess</span>
+     <span data-navselector=".status-xfail">xfailed</span>
+     <span data-navselector=".status-skip">skipped</span>
    </div>

    <table id="tests">

@@ -81,9 +92,9 @@
      <tbody>
      % for test in report['tests']:
        <tr id="${test['id']}" class="test-row status-${test['status']}">
-         <td>${test['status']}
-         <td>${test['time']}
-         <td colspan="5">${test['desc']}
+         <td class="not_break_column">${test['status']}
+         <td class="not_break_column">${test['time']}
+         <td colspan="5">${test['name']}
        </tr>
      % if 'output' in test:
        <tr class="test-details-row">

@@ -110,7 +121,7 @@
    $navs.click(function(){
      var $this = $(this);
      $navs.removeClass("active").filter($this).addClass("active");
-     $("#tests tbody tr").hide().filter($this.attr("data-navselector")).show()
+     $("#tests tbody tr").hide().filter($this.attr("data-navselector")).show();
    }).first().click()
  }($(".nav [data-navselector]")));
})
@@ -42,11 +42,9 @@ class Diff(object):

    Typical test case json schema:
    "test_case_key": {
-       "failure": {
-           "log": ""
-       },
+       "traceback": "",  # exists only for "fail" status
+       "reason": "",  # exists only for "skip" status
        "name": "",
-       "output": "",
        "status": "",
        "time": 0.0
    }

@@ -69,15 +67,15 @@ class Diff(object):
        return diffs

    def _diff_values(self, name, result1, result2):
-       fields = ["status", "time", "output"]
+       fields = ["status", "time", "traceback", "reason"]
        diffs = []
        for field in fields:
-           val1 = result1[field]
-           val2 = result2[field]
+           val1 = result1.get(field, 0)
+           val2 = result2.get(field, 0)
            if val1 != val2:
                if field == "time":
-                   max_ = max(val1, val2)
-                   min_ = min(val1, val2)
+                   max_ = max(float(val1), float(val2))
+                   min_ = min(float(val1), float(val2))
                    time_threshold = ((max_ - min_) / (min_ or 1)) * 100
                    if time_threshold < self.threshold:
                        continue
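For clarity, the rewritten time comparison above only reports a timing difference when the relative change exceeds the configured threshold. Worked through with hypothetical timings:

    val1, val2 = 2.0, 2.5  # seconds for the same test in two runs
    max_ = max(float(val1), float(val2))
    min_ = min(float(val1), float(val2))
    time_threshold = ((max_ - min_) / (min_ or 1)) * 100  # 25.0 (% change)
    # with self.threshold = 30 this difference would be skipped;
    # with self.threshold = 10 it would be reported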
@@ -11,41 +11,44 @@
# under the License.

from rally.ui import utils as ui_utils
-from rally.verification.tempest import subunit2json
-
-
-STATUS_MAP = {subunit2json.STATUS_PASS: "pass",
-              subunit2json.STATUS_SKIP: "skip",
-              subunit2json.STATUS_FAIL: "fail",
-              subunit2json.STATUS_ERROR: "error"}


class HtmlOutput(object):
    """Output test results in HTML."""

    def __init__(self, results):
-       self.num_passed = results["success"]
-       self.num_failed = results["failures"]
-       self.num_errors = results["errors"]
-       self.num_skipped = results["skipped"]
-       self.num_total = results["tests"]
-       self.results = results["test_cases"]
+       self.results = results

    def _generate_report(self):
        tests = []
-       for i, name in enumerate(sorted(self.results)):
-           test = self.results[name]
-           log = test.get("failure", {}).get("log", "")
-           status = STATUS_MAP.get(test["status"])
+       for i, name in enumerate(sorted(self.results["test_cases"])):
+           test = self.results["test_cases"][name]
+           if "tags" in test:
+               name = "%(name)s [%(tags)s]" % {
+                   "name": name, "tags": ", ".join(test["tags"])}
+
+           if "traceback" in test:
+               output = test["traceback"]
+           elif "reason" in test:
+               output = test["reason"]
+           else:
+               output = ""
+
            tests.append({"id": i,
                          "time": test["time"],
-                         "desc": name,
-                         "output": test["output"] + log,
-                         "status": status})
+                         "name": name,
+                         "output": output,
+                         "status": test["status"]})

-       return dict(tests=tests, total=self.num_total,
-                   passed=self.num_passed, failed=self.num_failed,
-                   errors=self.num_errors, skipped=self.num_skipped)
+       return {
+           "tests": tests,
+           "total": self.results["tests"],
+           "time": self.results["time"],
+           "success": self.results["success"],
+           "failures": self.results["failures"],
+           "skipped": self.results["skipped"],
+           "expected_failures": self.results["expected_failures"],
+           "unexpected_success": self.results["unexpected_success"]}

    def create_report(self):
        template = ui_utils.get_template("verification/report.mako")
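A short sketch of driving the reworked HtmlOutput, assuming `results` is the new-format dict returned by Verification.get_results():

    output = HtmlOutput(results)
    html = output.create_report()  # renders verification/report.mako
    with open("report.html", "w") as f:
        f.write(html)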
@@ -1,181 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import collections
import datetime
import errno
import io
import os
import tempfile
import traceback

from oslo_serialization import jsonutils
from oslo_utils import timeutils
import subunit
import testtools


STATUS_PASS = "OK"
STATUS_SKIP = "SKIP"
STATUS_FAIL = "FAIL"
STATUS_ERROR = "ERROR"


class JsonOutput(testtools.TestResult):
    """Output test results in Json."""

    def __init__(self, results_file):
        super(JsonOutput, self).__init__()
        self.success_count = 0
        self.failure_count = 0
        self.error_count = 0
        self.skip_count = 0
        self.total_time = 0
        self.test_cases = {}
        self.results_file = results_file

    def _format_result(self, name, time, status, output, failure=None):
        # We do not need `setUpClass' in test name
        if name[:12] == "setUpClass (" and name[-1] == ")":
            name = name[12:-1]

        self.test_cases[name] = {"name": name, "status": status,
                                 "time": time, "output": output}
        if failure:
            self.test_cases[name].update({"failure": failure})

    def _test_time(self, before, after):
        return timeutils.delta_seconds(before, after)

    def addSuccess(self, test):
        self.success_count += 1
        test_time = self._test_time(test._timestamps[0],
                                    test._timestamps[1])
        self.total_time += test_time
        output = test.shortDescription()
        if output is None:
            output = test.id()
        self._format_result(test.id(), test_time, STATUS_PASS, output)

    def addSkip(self, test, err):
        output = test.shortDescription()
        test_time = self._test_time(test._timestamps[0],
                                    test._timestamps[1])
        self.total_time += test_time

        if output is None:
            output = test.id()
        self.skip_count += 1
        self._format_result(test.id(), test_time, STATUS_SKIP, output)

    def addError(self, test, err):
        output = test.shortDescription()
        test_time = self._test_time(test._timestamps[0],
                                    test._timestamps[1])
        self.total_time += test_time
        if output is None:
            output = test.id()
        else:
            self.error_count += 1
            _exc_str = self.formatErr(err)
            failure_type = "%s.%s" % (err[0].__module__, err[1].__name__)
            self._format_result(test.id(), test_time, STATUS_ERROR, output,
                                failure={"type": failure_type,
                                         "log": _exc_str})

    def addFailure(self, test, err):
        self.failure_count += 1
        test_time = self._test_time(test._timestamps[0],
                                    test._timestamps[1])
        self.total_time += test_time
        _exc_str = self.formatErr(err)
        output = test.shortDescription()
        if output is None:
            output = test.id()
        failure_type = "%s.%s" % (err[0].__module__, err[0].__name__)
        self._format_result(test.id(), test_time, STATUS_FAIL, output,
                            failure={"type": failure_type, "log": _exc_str})

    def formatErr(self, err):
        exctype, value, tb = err
        return "".join(traceback.format_exception(exctype, value, tb))

    def stopTestRun(self):
        super(JsonOutput, self).stopTestRun()
        self.stopTime = datetime.datetime.now()
        total_count = (self.success_count + self.failure_count +
                       self.error_count + self.skip_count)
        total = {"tests": total_count, "errors": self.error_count,
                 "skipped": self.skip_count, "success": self.success_count,
                 "failures": self.failure_count, "time": self.total_time}
        if self.results_file:
            with open(self.results_file, "wb") as results_file:
                output = jsonutils.dumps({"total": total,
                                          "test_cases": self.test_cases})
                results_file.write(output)

    def startTestRun(self):
        super(JsonOutput, self).startTestRun()


class FileAccumulator(testtools.StreamResult):

    def __init__(self):
        super(FileAccumulator, self).__init__()
        self.route_codes = collections.defaultdict(io.BytesIO)

    def status(self, **kwargs):
        if kwargs.get("file_name") != "stdout":
            return
        file_bytes = kwargs.get("file_bytes")
        if not file_bytes:
            return
        route_code = kwargs.get("route_code")
        stream = self.route_codes[route_code]
        stream.write(file_bytes)


def main(subunit_log_file):
    fd, results_file = tempfile.mkstemp()
    result = JsonOutput(results_file)
    stream = open(subunit_log_file, "rb")

    # Feed the subunit stream through both a V1 and V2 parser.
    # Depends on having the v2 capable libraries installed.
    # First V2.
    # Non-v2 content and captured non-test output will be presented as file
    # segments called stdout.
    suite = subunit.ByteStreamToStreamResult(stream, non_subunit_name="stdout")
    # The JSON output code is in legacy mode.
    raw_result = testtools.StreamToExtendedDecorator(result)
    # Divert non-test output
    accumulator = FileAccumulator()
    result = testtools.StreamResultRouter(raw_result)
    result.add_rule(accumulator, "test_id", test_id=None)
    result.startTestRun()
    suite.run(result)
    # Now reprocess any found stdout content as V1 subunit
    for bytes_io in accumulator.route_codes.values():
        bytes_io.seek(0)
        suite = subunit.ProtocolTestCase(bytes_io)
        suite.run(result)
    result.stopTestRun()
    with open(results_file, "rb") as temp_results_file:
        data = temp_results_file.read()
    try:
        os.unlink(results_file)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise

    return data
@@ -20,16 +20,16 @@ import subprocess
import sys
import tempfile

-from oslo_serialization import jsonutils
from oslo_utils import encodeutils

from rally.common import costilius
from rally.common.i18n import _
+from rally.common.io import subunit_v2
from rally.common import log as logging
from rally import consts
from rally import exceptions
from rally.verification.tempest import config
-from rally.verification.tempest import subunit2json


TEMPEST_SOURCE = "https://git.openstack.org/openstack/tempest"

@@ -378,19 +378,18 @@ class Tempest(object):
        """Parse subunit raw log file."""
        log_file_raw = log_file or self.log_file_raw
        if os.path.isfile(log_file_raw):
-           data = jsonutils.loads(subunit2json.main(log_file_raw))
-           return data["total"], data["test_cases"]
+           return subunit_v2.parse_results_file(log_file_raw)
        else:
            LOG.error("JSON-log file not found.")
-           return None, None
+           return None

    @logging.log_verification_wrapper(
        LOG.info, _("Saving verification results."))
    def _save_results(self, log_file=None):
-       total, test_cases = self.parse_results(log_file)
-       if total and test_cases and self.verification:
-           self.verification.finish_verification(total=total,
-                                                 test_cases=test_cases)
+       results = self.parse_results(log_file)
+       if results and self.verification:
+           self.verification.finish_verification(total=results.total,
+                                                 test_cases=results.tests)
        else:
            self.verification.set_failed()
@@ -153,103 +153,94 @@ class VerifyCommandsTestCase(test.TestCase):
                                                      "Created at"))

    @mock.patch("rally.cli.cliutils.print_list")
-   @mock.patch("rally.common.db.verification_get")
-   @mock.patch("rally.common.db.verification_result_get")
    @mock.patch("rally.common.objects.Verification")
-   def test_show(self, mock_objects_verification,
-                 mock_verification_result_get, mock_verification_get,
-                 mock_print_list):
+   def test_show(self, mock_verification, mock_print_list):
+       verification = mock_verification.get.return_value

-       class Test_dummy():
-           data = {"test_cases": {"test_a": {"name": "test_a", "time": 20,
-                                             "status": "PASS"},
-                                  "test_b": {"name": "test_b", "time": 20,
-                                             "status": "SKIP"},
-                                  "test_c": {"name": "test_c", "time": 20,
-                                             "status": "FAIL"}}}
+       tests = {"test_cases": {"test_a": {"name": "test_a", "time": 20,
+                                          "status": "success"},
+                               "test_b": {"name": "test_b", "time": 20,
+                                          "status": "skip"},
+                               "test_c": {"name": "test_c", "time": 20,
+                                          "status": "fail"}}}

        verification_id = "39121186-b9a4-421d-b094-6c6b270cf9e9"
        total_fields = ["UUID", "Deployment UUID", "Set name", "Tests",
                        "Failures", "Created at", "Status"]
        fields = ["name", "time", "status"]
-       verification = mock.MagicMock()
-       tests = Test_dummy()
-       mock_verification_result_get.return_value = tests
-       mock_verification_get.return_value = verification
-       mock_objects_verification.return_value = 1
+       verification.get_results.return_value = tests
        values = [objects.Verification(t)
-                 for t in six.itervalues(tests.data["test_cases"])]
+                 for t in six.itervalues(tests["test_cases"])]
        self.verify.show(verification_id)
        self.assertEqual([mock.call([verification], fields=total_fields),
                          mock.call(values, fields, sortby_index=0)],
                         mock_print_list.call_args_list)
-       mock_verification_get.assert_called_once_with(verification_id)
-       mock_verification_result_get.assert_called_once_with(verification_id)
+       mock_verification.get.assert_called_once_with(verification_id)
+       verification.get_results.assert_called_once_with()

-   @mock.patch("rally.common.db.verification_result_get",
-               return_value={"data": {}})
+   @mock.patch("rally.common.objects.Verification")
    @mock.patch("json.dumps")
-   def test_results(self, mock_json_dumps, mock_verification_result_get):
+   def test_results(self, mock_json_dumps, mock_verification):
+       mock_verification.get.return_value.get_results.return_value = {}
        verification_uuid = "a0231bdf-6a4e-4daf-8ab1-ae076f75f070"
        self.verify.results(verification_uuid, output_html=False,
                            output_json=True)

-       mock_verification_result_get.assert_called_once_with(verification_uuid)
+       mock_verification.get.assert_called_once_with(verification_uuid)
        mock_json_dumps.assert_called_once_with({}, sort_keys=True, indent=4)

-   @mock.patch("rally.common.db.verification_result_get")
+   @mock.patch("rally.common.objects.Verification.get")
    def test_results_verification_not_found(
-           self, mock_verification_result_get):
+           self, mock_verification_get):
        verification_uuid = "9044ced5-9c84-4666-8a8f-4b73a2b62acb"
-       mock_verification_result_get.side_effect = (
+       mock_verification_get.side_effect = (
            exceptions.NotFoundException()
        )
        self.assertEqual(self.verify.results(verification_uuid,
                                             output_html=False,
                                             output_json=True), 1)

-       mock_verification_result_get.assert_called_once_with(verification_uuid)
+       mock_verification_get.assert_called_once_with(verification_uuid)

    @mock.patch("rally.cli.commands.verify.open",
                side_effect=mock.mock_open(), create=True)
-   @mock.patch("rally.common.db.verification_result_get",
-               return_value={"data": {}})
+   @mock.patch("rally.common.objects.Verification")
    def test_results_with_output_json_and_output_file(
-           self, mock_verification_result_get, mock_open):
+           self, mock_verification, mock_open):
+       mock_verification.get.return_value.get_results.return_value = {}
        mock_open.side_effect = mock.mock_open()
        verification_uuid = "94615cd4-ff45-4123-86bd-4b0741541d09"
        self.verify.results(verification_uuid, output_file="results",
                            output_html=False, output_json=True)

-       mock_verification_result_get.assert_called_once_with(verification_uuid)
+       mock_verification.get.assert_called_once_with(verification_uuid)
        mock_open.assert_called_once_with("results", "wb")
        mock_open.side_effect().write.assert_called_once_with("{}")

    @mock.patch("rally.cli.commands.verify.open",
                side_effect=mock.mock_open(), create=True)
-   @mock.patch("rally.common.db.verification_result_get")
+   @mock.patch("rally.common.objects.Verification")
    @mock.patch("rally.verification.tempest.json2html.HtmlOutput")
    def test_results_with_output_html_and_output_file(
-           self, mock_html_output, mock_verification_result_get, mock_open):
+           self, mock_html_output, mock_verification, mock_open):

        verification_uuid = "7140dd59-3a7b-41fd-a3ef-5e3e615d7dfa"
-       fake_data = {}
-       results = {"data": fake_data}
-       mock_verification_result_get.return_value = results
        mock_create = mock.Mock(return_value="html_report")
        mock_html_output.return_value = mock.Mock(create_report=mock_create)
        self.verify.results(verification_uuid, output_html=True,
                            output_json=False, output_file="results")

-       mock_verification_result_get.assert_called_once_with(verification_uuid)
-       mock_html_output.assert_called_once_with(fake_data)
+       mock_verification.get.assert_called_once_with(verification_uuid)
+       mock_html_output.assert_called_once_with(
+           mock_verification.get.return_value.get_results.return_value)
        mock_open.assert_called_once_with("results", "wb")
        mock_open.side_effect().write.assert_called_once_with("html_report")

-   @mock.patch("rally.common.db.verification_result_get",
-               return_value={"data": {"test_cases": {}}})
+   @mock.patch("rally.common.objects.Verification")
    @mock.patch("json.dumps")
-   def test_compare(self, mock_json_dumps, mock_verification_result_get):
+   def test_compare(self, mock_json_dumps, mock_verification):
+       mock_verification.get.return_value.get_results.return_value = {
+           "test_cases": {}}
        uuid1 = "8eda1b10-c8a4-4316-9603-8468ff1d1560"
        uuid2 = "f6ef0a98-1b18-452f-a6a7-922555c2e326"
        self.verify.compare(uuid1, uuid2, output_csv=False, output_html=False,

@@ -258,14 +249,13 @@ class VerifyCommandsTestCase(test.TestCase):
        fake_data = []
        calls = [mock.call(uuid1),
                 mock.call(uuid2)]
-       mock_verification_result_get.assert_has_calls(calls, True)
+       mock_verification.get.assert_has_calls(calls, True)
        mock_json_dumps.assert_called_once_with(fake_data, sort_keys=True,
                                                indent=4)

-   @mock.patch("rally.common.db.verification_result_get",
+   @mock.patch("rally.common.objects.Verification.get",
                side_effect=exceptions.NotFoundException())
-   def test_compare_verification_not_found(self,
-                                           mock_verification_result_get):
+   def test_compare_verification_not_found(self, mock_verification_get):
        uuid1 = "f7dc82da-31a6-4d40-bbf8-6d366d58960f"
        uuid2 = "2f8a05f3-d310-4f02-aabf-e1165aaa5f9c"

@@ -273,14 +263,15 @@ class VerifyCommandsTestCase(test.TestCase):
                                            output_html=False,
                                            output_json=True), 1)

-       mock_verification_result_get.assert_called_once_with(uuid1)
+       mock_verification_get.assert_called_once_with(uuid1)

    @mock.patch("rally.cli.commands.verify.open",
                side_effect=mock.mock_open(), create=True)
-   @mock.patch("rally.common.db.verification_result_get",
-               return_value={"data": {"test_cases": {}}})
+   @mock.patch("rally.common.objects.Verification")
    def test_compare_with_output_csv_and_output_file(
-           self, mock_verification_result_get, mock_open):
+           self, mock_verification, mock_open):
+       mock_verification.get.return_value.get_results.return_value = {
+           "test_cases": {}}

        fake_string = "Type,Field,Value 1,Value 2,Test Name\r\n"
        uuid1 = "5e744557-4c3a-414f-9afb-7d3d8708028f"

@@ -291,16 +282,18 @@ class VerifyCommandsTestCase(test.TestCase):

        calls = [mock.call(uuid1),
                 mock.call(uuid2)]
-       mock_verification_result_get.assert_has_calls(calls, True)
+       mock_verification.get.assert_has_calls(calls, True)
        mock_open.assert_called_once_with("results", "wb")
        mock_open.side_effect().write.assert_called_once_with(fake_string)

    @mock.patch("rally.cli.commands.verify.open",
                side_effect=mock.mock_open(), create=True)
-   @mock.patch("rally.common.db.verification_result_get",
-               return_value={"data": {"test_cases": {}}})
+   @mock.patch("rally.common.objects.Verification")
    def test_compare_with_output_json_and_output_file(
-           self, mock_verification_result_get, mock_open):
+           self, mock_verification, mock_open):
+       mock_verification.get.return_value.get_results.return_value = {
+           "test_cases": {}}

        fake_json_string = "[]"
        uuid1 = "0505e33a-738d-4474-a611-9db21547d863"
        uuid2 = "b1908417-934e-481c-8d23-bc0badad39ed"

@@ -310,39 +303,39 @@ class VerifyCommandsTestCase(test.TestCase):

        calls = [mock.call(uuid1),
                 mock.call(uuid2)]
-       mock_verification_result_get.assert_has_calls(calls, True)
+       mock_verification.get.assert_has_calls(calls, True)
        mock_open.assert_called_once_with("results", "wb")
        mock_open.side_effect().write.assert_called_once_with(fake_json_string)

    @mock.patch("rally.cli.commands.verify.open",
                side_effect=mock.mock_open(), create=True)
-   @mock.patch("rally.common.db.verification_result_get")
-   @mock.patch(("rally.verification.tempest."
-                "compare2html.create_report"), return_value="")
+   @mock.patch("rally.common.objects.Verification")
+   @mock.patch("rally.verification.tempest.compare2html.create_report",
+               return_value="")
    def test_compare_with_output_html_and_output_file(
            self, mock_compare2html_create_report,
-           mock_verification_result_get, mock_open):
+           mock_verification, mock_open):
+       mock_verification.get.return_value.get_results.return_value = {
+           "test_cases": {}}

        uuid1 = "cdf64228-77e9-414d-9d4b-f65e9d62c61f"
        uuid2 = "39393eec-1b45-4103-8ec1-631edac4b8f0"
-       results = {"data": {"test_cases": {}}}

        fake_data = []
        self.verify.compare(uuid1, uuid2,
                            output_file="results",
                            output_csv=False, output_html=True,
                            output_json=False)
-       mock_verification_result_get.return_value = results
        calls = [mock.call(uuid1),
                 mock.call(uuid2)]
-       mock_verification_result_get.assert_has_calls(calls, True)
+       mock_verification.get.assert_has_calls(calls, True)
        mock_compare2html_create_report.assert_called_once_with(fake_data)

        mock_open.assert_called_once_with("results", "wb")
        mock_open.side_effect().write.assert_called_once_with("")

    @mock.patch("rally.common.fileutils._rewrite_env_file")
-   @mock.patch("rally.cli.commands.verify.db.verification_get",
-               return_value=True)
+   @mock.patch("rally.common.objects.Verification.get")
    def test_use(self, mock_verification_get, mock__rewrite_env_file):
        verification_id = "80422553-5774-44bd-98ac-38bd8c7a0feb"
        self.verify.use(verification_id)

@@ -350,7 +343,7 @@ class VerifyCommandsTestCase(test.TestCase):
            os.path.expanduser("~/.rally/globals"),
            ["RALLY_VERIFICATION=%s\n" % verification_id])

-   @mock.patch("rally.cli.commands.verify.db.verification_get")
+   @mock.patch("rally.common.objects.Verification.get")
    def test_use_not_found(self, mock_verification_get):
        verification_id = "ddc3f8ba-082a-496d-b18f-72cdf5c10a14"
        mock_verification_get.side_effect = exceptions.NotFoundException(
tests/unit/common/io/__init__.py (new empty file)
tests/unit/common/io/subunit_v2.stream (new binary file, not shown)
tests/unit/common/io/test_subunit_v2.py (new file, 108 lines)
@@ -0,0 +1,108 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

import mock

from rally.common.io import subunit_v2
from tests.unit import test


class SubunitParserTestCase(test.TestCase):
    fake_stream = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                               "subunit_v2.stream")

    def test_parse_results_file(self):
        result = subunit_v2.parse_results_file(self.fake_stream)

        self.assertEqual({"skipped": 1,
                          "success": 2,
                          "time": "5.00732",
                          "failures": 3,
                          "expected_failures": 0,
                          "tests": 7,
                          "unexpected_success": 1}, result.total)
        self.assertEqual(len(result.tests), result.total["tests"])

        skipped_tests = result.filter_tests("skip")
        skipped_test = "test_foo.SimpleTestCase.test_skip_something"

        self.assertEqual(result.total["skipped"], len(skipped_tests))
        self.assertSequenceEqual([skipped_test], skipped_tests.keys())
        self.assertEqual(
            {"status": "skip", "reason": "This should be skipped.",
             "time": "0.00007", "name": skipped_test},
            skipped_tests[skipped_test])

        failed_tests = result.filter_tests("fail")
        failed_test = "test_foo.SimpleTestCaseWithBrokenSetup.test_something"

        self.assertEqual(result.total["failures"], len(failed_tests))
        self.assertIn(failed_test, failed_tests)
        trace = """Traceback (most recent call last):
  File "test_foo.py", line 34, in setUp
    raise RuntimeError("broken setUp method")
RuntimeError: broken setUp method
"""
        self.assertEqual({"status": "fail", "traceback": trace,
                          "time": "0.00005", "name": failed_test},
                         failed_tests[failed_test])

    def test_filter_results(self):
        results = subunit_v2.SubunitV2StreamResult()
        results._tests = {
            "failed_test_1": {"status": "fail"},
            "failed_test_2": {"status": "fail"},
            "passed_test_1": {"status": "success"},
            "passed_test_2": {"status": "success"},
            "passed_test_3": {"status": "success"}}
        self.assertEqual({"failed_test_1": results.tests["failed_test_1"],
                          "failed_test_2": results.tests["failed_test_2"]},
                         results.filter_tests("fail"))
        self.assertEqual({"passed_test_1": results.tests["passed_test_1"],
                          "passed_test_2": results.tests["passed_test_2"],
                          "passed_test_3": results.tests["passed_test_3"]},
                         results.filter_tests("success"))

    def test_property_test(self):
        results = subunit_v2.SubunitV2StreamResult()
        results._tests = {
            "SkippedTestCase.test_1": {"status": "init"},
            "SkippedTestCase.test_2": {"status": "init"}}
        results._unknown_entities = {"SkippedTestCase": {"status": "skip",
                                                         "reason": ":("}}

        self.assertFalse(results._is_parsed)

        self.assertEqual(
            {"SkippedTestCase.test_1": {"status": "skip", "reason": ":("},
             "SkippedTestCase.test_2": {"status": "skip", "reason": ":("}},
            results.tests)

        self.assertTrue(results._is_parsed)

    def test_preparse_input_args(self):
        some_mock = mock.MagicMock()

        @subunit_v2.preparse_input_args
        def some_a(self_, test_id, test_status, test_tags, file_name,
                   file_bytes, mime_type, timestamp, charset):
            some_mock(test_id, test_tags)

        some_a("", "setUpClass (some_test[tag1,tag2])")
        some_mock.assert_called_once_with("some_test", ["tag1", "tag2"])

        some_mock.reset_mock()
        some_a("", "tearDown (some_test[tag1,tag2])")
        some_mock.assert_called_once_with("some_test", ["tag1", "tag2"])
@@ -27,8 +27,10 @@ class VerificationTestCase(test.TestCase):
            "id": 777,
            "uuid": "test_uuid",
            "failures": 0, "tests": 2, "errors": 0, "time": "0.54",
+           "expected_failures": 0,
            "details": {
                "failures": 0, "tests": 2, "errors": 0, "time": "0.54",
+               "expected_failures": 0,
                "test_cases": [
                    {"classname": "foo.Test",
                     "name": "foo_test[gate,negative]",

@@ -87,6 +89,8 @@ class VerificationTestCase(test.TestCase):

        expected_values = {"status": "finished"}
        expected_values.update(fake_results["total"])
+       # expected_failures should be merged with failures
+       expected_values.pop("expected_failures")
        mock_verification_update.assert_called_with(
            self.db_obj["uuid"], expected_values)
@@ -1,13 +0,0 @@
<testsuite errors="0" failures="2" name="" tests="2" time="1.412">
  <testcase classname="fake.failed.TestCase" name="with_StringException[gate,negative]" time="0.706">
    <failure type="testtools.testresult.real._StringException">_StringException: Empty attachments:
Oops...There was supposed to be fake traceback, but it is not.
</failure>
  </testcase>
  <testcase classname="fake.successful.TestCase" name="fake_test[gate,negative]" time="0.706" />
  <testcase classname="" name="process-returncode" time="0.000">
    <failure type="testtools.testresult.real._StringException">_StringException: Binary content:
traceback (test/plain; charset="utf8")
</failure>
  </testcase>
</testsuite>
@@ -19,24 +19,21 @@ def get_fake_test_case():
        "total": {
            "failures": 1,
            "tests": 2,
-           "errors": 0,
+           "expected_failures": 0,
            "time": 1.412},
        "test_cases": {
            "fake.failed.TestCase.with_StringException[gate,negative]": {
                "name":
                    "fake.failed.TestCase.with_StringException[gate,negative]",
-               "failure": {
-                   "type": "testtools.testresult.real._StringException",
-                   "log":
-                       ("_StringException: Empty attachments:\nOops...There "
-                        "was supposed to be fake traceback, but it is not.\n")
-               },
+               "traceback": ("_StringException: Empty attachments:\nOops..."
+                             "There was supposed to be fake traceback, but it"
+                             " is not.\n"),
                "time": 0.706,
-               "status": "FAIL"},
+               "status": "fail"},
            "fake.successful.TestCase.fake_test[gate,negative]": {
                "name": "fake.successful.TestCase.fake_test[gate,negative]",
                "time": 0.706,
-               "status": "OK"
+               "status": "success"
            }
        }
    }
@@ -23,71 +23,53 @@ class HtmlOutputTestCase(test.TestCase):
    results = {
+       "time": 22,
        "tests": 4,
-       "errors": 1,
        "success": 1,
        "skipped": 1,
        "failures": 1,
+       "expected_failures": 0,
+       "unexpected_success": 0,
        "test_cases": {
            "tp": {"name": "tp",
-                  "status": "OK",
-                  "output": "tp_ok",
+                  "status": "success",
                   "time": 2},
            "ts": {"name": "ts",
-                  "status": "SKIP",
-                  "output": "ts_skip",
+                  "status": "skip",
+                  "reason": "ts_skip",
                   "time": 4},
            "tf": {"name": "tf",
-                  "status": "FAIL",
-                  "output": "tf_fail",
+                  "status": "fail",
                   "time": 6,
-                  "failure": {"type": "tf", "log": "fail_log"}},
-           "te": {"name": "te",
-                  "time": 2,
-                  "status": "ERROR",
-                  "output": "te_error",
-                  "failure": {"type": "te", "log": "error+log"}}}}
-
-   def test__init(self):
-       obj = json2html.HtmlOutput(self.results)
-       self.assertEqual(obj.num_passed, self.results["success"])
-       self.assertEqual(obj.num_failed, self.results["failures"])
-       self.assertEqual(obj.num_skipped, self.results["skipped"])
-       self.assertEqual(obj.num_errors, self.results["errors"])
-       self.assertEqual(obj.num_total, self.results["tests"])
-       self.assertEqual(obj.results, self.results["test_cases"])
+                  "traceback": "fail_log"}}}

    def test__generate_report(self):
-
        obj = json2html.HtmlOutput(self.results)
        expected_report = {
-           "errors": 1,
-           "failed": 1,
-           "passed": 1,
+           "failures": 1,
+           "success": 1,
            "skipped": 1,
+           "expected_failures": 0,
+           "unexpected_success": 0,
            "total": 4,
-           "tests": [{"desc": "te",
+           "time": 22,
+           "tests": [{"name": "tf",
                       "id": 0,
-                      "output": "te_errorerror+log",
-                      "status": "error",
-                      "time": 2},
-                     {"desc": "tf",
-                      "id": 1,
-                      "output": "tf_failfail_log",
+                      "output": "fail_log",
                       "status": "fail",
                       "time": 6},
-                     {"desc": "tp",
-                      "id": 2,
-                      "output": "tp_ok",
-                      "status": "pass",
+                     {"name": "tp",
+                      "id": 1,
+                      "output": "",
+                      "status": "success",
                       "time": 2},
-                     {"desc": "ts",
-                      "id": 3,
+                     {"name": "ts",
+                      "id": 2,
                       "output": "ts_skip",
                       "status": "skip",
                       "time": 4}]}

        report = obj._generate_report()
-       self.assertEqual(report, expected_report)
+       self.assertEqual(expected_report, report)

    @mock.patch(BASE + ".json2html.ui_utils.get_template")
    @mock.patch(BASE + ".json2html.HtmlOutput._generate_report",
@@ -18,10 +18,8 @@ import os
import subprocess

import mock
-from oslo_serialization import jsonutils

from rally import exceptions
-from rally.verification.tempest import subunit2json
from rally.verification.tempest import tempest
from tests.unit import test

@@ -154,29 +152,31 @@ class TempestUtilsTestCase(BaseTestCase):
            "%s testr init" % self.verifier.venv_wrapper, shell=True,
            cwd=self.verifier.path())

-   @mock.patch.object(subunit2json, "main")
+   @mock.patch("%s.tempest.subunit_v2.parse_results_file" % TEMPEST_PATH)
    @mock.patch("os.path.isfile", return_value=False)
    def test__save_results_without_log_file(
-           self, mock_isfile, mock_main):
+           self, mock_isfile, mock_parse_results_file):

        self.verifier._save_results()
        mock_isfile.assert_called_once_with(self.verifier.log_file_raw)
-       self.assertEqual(0, mock_main.call_count)
+       self.assertEqual(0, mock_parse_results_file.call_count)

+   @mock.patch("%s.tempest.subunit_v2.parse_results_file" % TEMPEST_PATH)
    @mock.patch("os.path.isfile", return_value=True)
-   def test__save_results_with_log_file(self, mock_isfile):
-       with mock.patch.object(subunit2json, "main") as mock_main:
-           data = {"total": True, "test_cases": True}
-           mock_main.return_value = jsonutils.dumps(data)
-           self.verifier.log_file_raw = os.path.join(
-               os.path.dirname(__file__), "subunit.stream")
-           self.verifier._save_results()
-           mock_isfile.assert_called_once_with(self.verifier.log_file_raw)
-           mock_main.assert_called_once_with(
-               self.verifier.log_file_raw)
+   def test__save_results_with_log_file(self, mock_isfile,
+                                        mock_parse_results_file):
+       results = mock.MagicMock(total="some", tests=["some_test_1"])
+       mock_parse_results_file.return_value = results
+       self.verifier.log_file_raw = os.path.join(
+           os.path.dirname(__file__), "subunit.stream")
+       self.verifier._save_results()
+       mock_isfile.assert_called_once_with(self.verifier.log_file_raw)
+       mock_parse_results_file.assert_called_once_with(
+           self.verifier.log_file_raw)

-           verification = self.verifier.verification
-           verification.finish_verification.assert_called_once_with(**data)
+       verification = self.verifier.verification
+       verification.finish_verification.assert_called_once_with(
+           total="some", test_cases=["some_test_1"])


class TempestInstallAndUninstallTestCase(BaseTestCase):

@@ -335,7 +335,7 @@ class TempestVerifyTestCase(BaseTestCase):
                         "tempest_path": self.verifier.path()})

    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.parse_results",
-               return_value=(None, None))
+               return_value=None)
    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.env")
    @mock.patch(TEMPEST_PATH + ".tempest.subprocess")
    @mock.patch(TEMPEST_PATH + ".config.TempestResourcesContext")

@@ -366,7 +366,7 @@ class TempestVerifyTestCase(BaseTestCase):
        mock_tempest_parse_results.assert_called_once_with(None)

    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.parse_results",
-               return_value=(None, None))
+               return_value=None)
    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.env")
    @mock.patch(TEMPEST_PATH + ".tempest.subprocess")
    @mock.patch(TEMPEST_PATH + ".config.TempestResourcesContext")

@@ -394,7 +394,7 @@ class TempestVerifyTestCase(BaseTestCase):
        mock_tempest_parse_results.assert_called_once_with(None)

    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.parse_results",
-               return_value=(None, None))
+               return_value=None)
    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.env")
    @mock.patch(TEMPEST_PATH + ".tempest.subprocess")
    @mock.patch(TEMPEST_PATH + ".config.TempestResourcesContext")

@@ -424,7 +424,7 @@ class TempestVerifyTestCase(BaseTestCase):
        self.verifier.verification.set_failed.assert_called_once_with()

    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.parse_results",
-               return_value=(None, None))
+               return_value=None)
    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.env")
    @mock.patch(TEMPEST_PATH + ".tempest.subprocess")
    @mock.patch(TEMPEST_PATH + ".config.TempestResourcesContext")