Changes tempest subunit log handling

* Remove subunit2junitxml dependency and code from rally
* Convert the raw subunit stream from Tempest tests to JSON and store only
  the JSON in the DB (see the sketch below)
* Run tempest scenario (smoke) in rally gate

Change-Id: I6628375d83da9af3984819dfc5815ae0c06ec1aa
Implements: blueprint tempest-subunit-to-json
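
For reference, a minimal sketch of the JSON document that now gets stored in the DB, based on the fields written by the new JsonOutput class added in this diff; the test names, timings and counts below are invented for illustration only:

# Illustrative shape of the data returned by subunit2json.main() and passed
# to finish_verification(); all concrete values here are made up.
results = {
    "total": {
        "tests": 2,        # success + failures + errors + skipped
        "success": 1,
        "failures": 1,
        "errors": 0,
        "skipped": 0,
        "time": 12.4,      # sum of per-test run times, in seconds
    },
    "test_cases": {
        "tempest.api.example.ExampleTest.test_ok": {
            "name": "tempest.api.example.ExampleTest.test_ok",
            "time": 4.1,
            "status": "OK",
            "output": "tempest.api.example.ExampleTest.test_ok",
        },
        "tempest.api.example.ExampleTest.test_broken": {
            "name": "tempest.api.example.ExampleTest.test_broken",
            "time": 8.3,
            "status": "FAIL",
            "output": "tempest.api.example.ExampleTest.test_broken",
            "failure": {"type": "testtools.matchers.MismatchError",
                        "log": "Traceback (most recent call last): ..."},
        },
    },
}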
Rohan Kanade 2014-06-17 12:06:06 +02:00
parent 7d461b19ed
commit 3ed98ecec0
6 changed files with 209 additions and 63 deletions


@@ -0,0 +1,172 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import datetime
import errno
import io
import os
import tempfile
import traceback
from rally.openstack.common import jsonutils
from rally.openstack.common import timeutils
import subunit
import testtools
class JsonOutput(testtools.TestResult):
"""Output test results in Json."""
def __init__(self, results_file):
super(JsonOutput, self).__init__()
self.success_count = 0
self.failure_count = 0
self.error_count = 0
self.skip_count = 0
self.total_time = 0
self.test_cases = {}
self.results_file = results_file
def _format_result(self, name, time, status, output, failure=None):
self.test_cases[name] = {'name': name, 'time': time,
'status': status, 'output': output}
if failure:
self.test_cases[name].update({'failure': failure})
def _test_time(self, before, after):
return timeutils.delta_seconds(before, after)
def addSuccess(self, test):
self.success_count += 1
test_time = self._test_time(test._timestamps[0],
test._timestamps[1])
self.total_time += test_time
output = test.shortDescription()
if output is None:
output = test.id()
self._format_result(test.id(), test_time, 'OK', output)
def addSkip(self, test, err):
output = test.shortDescription()
test_time = self._test_time(test._timestamps[0],
test._timestamps[1])
self.total_time += test_time
if output is None:
output = test.id()
self.skip_count += 1
self._format_result(test.id(), test_time, 'SKIP', output)
    def addError(self, test, err):
        output = test.shortDescription()
        test_time = self._test_time(test._timestamps[0],
                                    test._timestamps[1])
        self.total_time += test_time
        if output is None:
            output = test.id()
        # Record every error, even when the test provides a short description.
        self.error_count += 1
        _exc_str = self.formatErr(err)
        failure_type = "%s.%s" % (err[0].__module__, err[0].__name__)
        self._format_result(test.id(), test_time, 'ERROR', output,
                            failure={'type': failure_type,
                                     'log': _exc_str})
def addFailure(self, test, err):
self.failure_count += 1
test_time = self._test_time(test._timestamps[0],
test._timestamps[1])
self.total_time += test_time
_exc_str = self.formatErr(err)
output = test.shortDescription()
if output is None:
output = test.id()
failure_type = "%s.%s" % (err[0].__module__, err[0].__name__)
self._format_result(test.id(), test_time, 'FAIL', output,
failure={'type': failure_type, 'log': _exc_str})
def formatErr(self, err):
exctype, value, tb = err
return ''.join(traceback.format_exception(exctype, value, tb))
def stopTestRun(self):
super(JsonOutput, self).stopTestRun()
self.stopTime = datetime.datetime.now()
total_count = (self.success_count + self.failure_count +
self.error_count + self.skip_count)
total = {"tests": total_count, "errors": self.error_count,
"skipped": self.skip_count, "success": self.success_count,
"failures": self.failure_count, "time": self.total_time}
if self.results_file:
with open(self.results_file, 'wb') as results_file:
output = jsonutils.dumps({'total': total,
'test_cases': self.test_cases})
results_file.write(output)
def startTestRun(self):
super(JsonOutput, self).startTestRun()
class FileAccumulator(testtools.StreamResult):
def __init__(self):
super(FileAccumulator, self).__init__()
self.route_codes = collections.defaultdict(io.BytesIO)
def status(self, **kwargs):
if kwargs.get('file_name') != 'stdout':
return
file_bytes = kwargs.get('file_bytes')
if not file_bytes:
return
route_code = kwargs.get('route_code')
stream = self.route_codes[route_code]
stream.write(file_bytes)
def main(subunit_log_file):
    # mkstemp() returns a (fd, path) pair; keep the path for JsonOutput and
    # close the descriptor, since the file is re-opened by name later on.
    fd, results_file = tempfile.mkstemp()
    os.close(fd)
    result = JsonOutput(results_file)
    stream = open(subunit_log_file, 'rb')
# Feed the subunit stream through both a V1 and V2 parser.
# Depends on having the v2 capable libraries installed.
# First V2.
# Non-v2 content and captured non-test output will be presented as file
# segments called stdout.
suite = subunit.ByteStreamToStreamResult(stream, non_subunit_name='stdout')
# The JSON output code is in legacy mode.
raw_result = testtools.StreamToExtendedDecorator(result)
# Divert non-test output
accumulator = FileAccumulator()
result = testtools.StreamResultRouter(raw_result)
result.add_rule(accumulator, 'test_id', test_id=None)
result.startTestRun()
suite.run(result)
# Now reprocess any found stdout content as V1 subunit
for bytes_io in accumulator.route_codes.values():
bytes_io.seek(0)
suite = subunit.ProtocolTestCase(bytes_io)
suite.run(result)
result.stopTestRun()
    with open(results_file, 'rb') as json_file:
        data = json_file.read()
    try:
        os.unlink(results_file)
except OSError as e:
if e.errno != errno.ENOENT:
raise
return data
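
A short usage sketch of the helper above; the subunit stream path is hypothetical, and the import paths match the ones used elsewhere in this change:

from rally.openstack.common import jsonutils
from rally.verification.verifiers.tempest import subunit2json

# Convert a raw subunit log (e.g. the one written by "testr run --subunit")
# into the JSON string described above, then unpack the two top-level keys.
raw_json = subunit2json.main('/tmp/subunit.stream')  # hypothetical path
data = jsonutils.loads(raw_json)
total, test_cases = data['total'], data['test_cases']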


@@ -18,12 +18,13 @@ import logging
import os
import shutil
import subprocess
from xml.dom import minidom as md
from rally import exceptions
from rally.openstack.common.gettextutils import _
from rally.openstack.common import jsonutils
from rally import utils
from rally.verification.verifiers.tempest import config
from rally.verification.verifiers.tempest import subunit2json
LOG = logging.getLogger(__name__)
@@ -39,7 +40,7 @@ class Tempest(object):
".rally/tempest",
"for-deployment-%s" % deploy_id)
self.config_file = os.path.join(self.tempest_path, "tempest.conf")
self.log_file = os.path.join(self.tempest_path, "testr_log.xml")
self.log_file_raw = os.path.join(self.tempest_path, "subunit.stream")
self.venv_wrapper = os.path.join(self.tempest_path,
"tools/with_venv.sh")
self.verification = verification
@@ -71,12 +72,6 @@ class Tempest(object):
os.path.join(self.tempest_path, ".venv"))
subprocess.check_call("python ./tools/install_venv.py", shell=True,
cwd=self.tempest_path)
# NOTE(akurilin): junitxml is required for subunit2junitxml filter.
# This library not in openstack/requirements, so we must install it
# by this way.
subprocess.check_call(
"%s pip install junitxml" % self.venv_wrapper,
shell=True, cwd=self.tempest_path)
subprocess.check_call(
"%s python setup.py install" % self.venv_wrapper,
shell=True, cwd=self.tempest_path)
@@ -176,8 +171,9 @@ class Tempest(object):
:param testr_arg: argument which will be transmitted into testr
:type testr_arg: str
:param log_file: file name for junitxml results of tests. If not
specified, value from "self.log_file" will be chosen.
:param log_file: file name for raw subunit results of tests. If not
specified, value from "self.log_file_raw"
will be chosen.
:type testr_arg: str
:raises: :class:`subprocess.CalledProcessError` if tests has been
@@ -186,14 +182,14 @@ class Tempest(object):
test_cmd = (
"%(venv)s testr run --parallel --subunit %(arg)s "
"| %(venv)s subunit2junitxml --forward --output-to=%(log_file)s "
"| tee %(log_file)s "
"| %(venv)s subunit-2to1 "
"| %(venv)s %(tempest_path)s/tools/colorizer.py" %
{
"venv": self.venv_wrapper,
"arg": testr_arg,
"tempest_path": self.tempest_path,
"log_file": log_file or self.log_file
"log_file": log_file or self.log_file_raw
})
LOG.debug("Test(s) started by the command: %s" % test_cmd)
subprocess.check_call(test_cmd, cwd=self.tempest_path,
@@ -221,48 +217,20 @@ class Tempest(object):
return tests
@staticmethod
def parse_results(log_file):
"""Parse junitxml file."""
def parse_results(log_file_raw):
"""Parse subunit raw log file."""
if os.path.isfile(log_file):
dom = md.parse(log_file).getElementsByTagName("testsuite")[0]
total = {
"tests": int(dom.getAttribute("tests")),
"errors": int(dom.getAttribute("errors")),
"failures": int(dom.getAttribute("failures")),
"time": float(dom.getAttribute("time")),
}
test_cases = {}
for test_elem in dom.getElementsByTagName('testcase'):
if test_elem.getAttribute('name') == 'process-returncode':
total['failures'] -= 1
else:
test = {
"name": ".".join((test_elem.getAttribute("classname"),
test_elem.getAttribute("name"))),
"time": float(test_elem.getAttribute("time"))
}
failure = test_elem.getElementsByTagName('failure')
if failure:
test["status"] = "FAIL"
test["failure"] = {
"type": failure[0].getAttribute("type"),
"log": failure[0].firstChild.nodeValue}
else:
test["status"] = "OK"
test_cases[test["name"]] = test
return total, test_cases
if os.path.isfile(log_file_raw):
data = jsonutils.loads(subunit2json.main(log_file_raw))
return data['total'], data['test_cases']
else:
LOG.error("XML-log file not found.")
LOG.error("JSON-log file not found.")
return None, None
@utils.log_verification_wrapper(
LOG.info, _("Saving verification results."))
def _save_results(self):
total, test_cases = self.parse_results(self.log_file)
total, test_cases = self.parse_results(self.log_file_raw)
if total and test_cases and self.verification:
self.verification.finish_verification(total=total,
test_cases=test_cases)
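
In other words, the save path now boils down to roughly the following (a sketch, not the literal rally code; 'verification' stands in for the DB-backed verification object):

# Sketch only: parse the raw subunit log and persist the resulting dicts.
total, test_cases = Tempest.parse_results('/tmp/subunit.stream')  # hypothetical path
if total and test_cases:
    verification.finish_verification(total=total, test_cases=test_cases)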


@@ -18,6 +18,7 @@ python-cinderclient>=1.0.6
python-heatclient>=0.2.9
python-ceilometerclient>=1.0.6
python-ironicclient
python-subunit>=0.0.18
requests>=1.1
SQLAlchemy>=0.7.8,<=0.9.99
six>=1.7.0


@@ -3,7 +3,6 @@ hacking>=0.8.0,<0.9
coverage>=3.6
discover
mock>=1.0
python-subunit>=0.0.18
testrepository>=0.0.18
testtools>=0.9.34


@@ -28,7 +28,7 @@ class TempestScenarioTestCase(test.TestCase):
def setUp(self):
super(TempestScenarioTestCase, self).setUp()
self.verifier = verifier.Tempest("fake_uuid")
self.verifier.log_file = "/dev/null"
self.verifier.log_file_raw = "/dev/null"
self.verifier.parse_results = mock.MagicMock()
self.verifier.parse_results.return_value = ({"fake": True},
{"have_results": True})
@@ -39,7 +39,7 @@ class TempestScenarioTestCase(test.TestCase):
def get_tests_launcher_cmd(self, tests):
return ("%(venv)s testr run --parallel --subunit %(tests)s "
"| %(venv)s subunit2junitxml --forward --output-to=/dev/null "
"| tee /dev/null "
"| %(venv)s subunit-2to1 "
"| %(venv)s %(tempest_path)s/tools/colorizer.py" %
{


@@ -17,6 +17,8 @@ import os
import mock
from rally.openstack.common import jsonutils
from rally.verification.verifiers.tempest import subunit2json
from rally.verification.verifiers.tempest import tempest
from tests import test
@@ -33,7 +35,7 @@ class TempestTestCase(test.TestCase):
self.verifier.tempest_path = '/tmp'
self.verifier.config_file = '/tmp/tempest.conf'
self.verifier.log_file = '/tmp/tests_log.xml'
self.verifier.log_file_raw = '/tmp/subunit.stream'
self.regex = None
@mock.patch('six.moves.builtins.open')
@@ -95,8 +97,7 @@ class TempestTestCase(test.TestCase):
self.verifier.run('tempest.api.image')
fake_call = (
'%(venv)s testr run --parallel --subunit tempest.api.image '
'| %(venv)s subunit2junitxml --forward '
'--output-to=%(tempest_path)s/tests_log.xml '
'| tee %(tempest_path)s/subunit.stream '
'| %(venv)s subunit-2to1 '
'| %(venv)s %(tempest_path)s/tools/colorizer.py' % {
'venv': self.verifier.venv_wrapper,
@@ -159,8 +160,6 @@ class TempestTestCase(test.TestCase):
mock_sp.assert_has_calls([
mock.call('python ./tools/install_venv.py', shell=True,
cwd=self.verifier.tempest_path),
mock.call('%s pip install junitxml' % self.verifier.venv_wrapper,
shell=True, cwd=self.verifier.tempest_path),
mock.call('%s python setup.py install' %
self.verifier.venv_wrapper, shell=True,
cwd=self.verifier.tempest_path)])
@@ -196,15 +195,22 @@ class TempestTestCase(test.TestCase):
self.verifier._save_results()
mock_isfile.assert_called_once_with(self.verifier.log_file)
mock_isfile.assert_called_once_with(self.verifier.log_file_raw)
self.assertEqual(0, mock_parse.call_count)
@mock.patch('os.path.isfile')
def test__save_results_with_log_file(self, mock_isfile):
mock_isfile.return_value = True
self.verifier.log_file = os.path.join(os.path.dirname(__file__),
'fake_log.xml')
self.verifier._save_results()
mock_isfile.assert_called_once_with(self.verifier.log_file)
self.assertEqual(
1, self.verifier.verification.finish_verification.call_count)
with mock.patch.object(subunit2json, 'main') as mock_main:
mock_isfile.return_value = True
data = {'total': True, 'test_cases': True}
mock_main.return_value = jsonutils.dumps(data)
self.verifier.log_file_raw = os.path.join(
os.path.dirname(__file__),
'subunit.stream')
self.verifier._save_results()
mock_isfile.assert_called_once_with(self.verifier.log_file_raw)
mock_main.assert_called_once_with(
self.verifier.log_file_raw)
self.assertEqual(
1, self.verifier.verification.finish_verification.call_count)