Add ability to compare two verification results

Currently there is no built-in way in Rally to see the differences
between two verification runs. This feature adds a compare sub-command
to rally verify that takes two verification IDs as input and outputs
the differences between the two verifications in the desired format:
HTML, CSV, JSON, or pretty print. The output may be printed to stdout
or written to a specified file.
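
For illustration, a hypothetical invocation of the new sub-command
(the flag names match those added in this change; the UUIDs and file
name are placeholders):

    rally verify compare --uuid-1 <first-verification-uuid> \
                         --uuid-2 <second-verification-uuid> \
                         --html --output-file compare.html --threshold 5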

Tasks
- Add compare sub-command to rally verify.
- Add ability to specify the output format and file name of the
  comparison report. Initial support allows output to HTML, CSV,
  or pretty print.
- Create rally/verification/verifiers/tempest/diff.py that handles all
  the comparing and output of results.
- Create the required mako template (compare.mako) and renderer class
  (compare2html.py) for HTML output.
- Add a percentage threshold argument so that only execution time
  differences exceeding the threshold are reported.

Change-Id: I0276e431d7d1ad89b1b28306cd0e0ea7e1174217
Implements: blueprint tempest-compare
David Paterson 2014-10-24 23:09:17 -04:00
parent f6c02ea246
commit 5910775a6c
9 changed files with 703 additions and 111 deletions

@ -32,7 +32,7 @@ LOG = log.getLogger(__name__)
def main():
# Initialize configuation and logging.
# Initialize configuration and logging.
CONF(sys.argv[1:], project='rally')
log.setup('rally')
# Prepare application and bind to the service socket.

@ -15,6 +15,7 @@
""" Rally command: verify """
import csv
import json
import os
@ -30,6 +31,7 @@ from rally.i18n import _
from rally import objects
from rally.openstack.common import cliutils as common_cliutils
from rally.orchestrator import api
from rally.verification.verifiers.tempest import diff
from rally.verification.verifiers.tempest import json2html
@ -71,30 +73,30 @@ class VerifyCommands(object):
return (1)
verification = api.verify(deploy_id, set_name, regex, tempest_config)
if do_use:
use.UseCommands().verification(verification['uuid'])
use.UseCommands().verification(verification["uuid"])
def list(self):
"""Display all verifications table, started and finished."""
fields = ['UUID', 'Deployment UUID', 'Set name', 'Tests', 'Failures',
'Created at', 'Status']
fields = ["UUID", "Deployment UUID", "Set name", "Tests", "Failures",
"Created at", "Status"]
verifications = db.verification_list()
if verifications:
common_cliutils.print_list(verifications, fields,
sortby_index=fields.index('Created at'))
sortby_index=fields.index("Created at"))
else:
print(_("There are no results from verifier. To run a verifier, "
"use:\nrally verify start"))
@cliutils.args('--uuid', type=str, dest='verification_uuid',
help='UUID of the verification')
@cliutils.args('--html', action='store_true', dest='output_html',
help=('Results will be in html format'))
@cliutils.args('--json', action='store_true', dest='output_json',
help=('Results will be in json format'))
@cliutils.args('--output-file', type=str, required=False,
dest='output_file',
help='If specified, output will be saved to given file')
@cliutils.args("--uuid", type=str, dest="verification_uuid",
help="UUID of the verification")
@cliutils.args("--html", action="store_true", dest="output_html",
help=("Results will be in html format"))
@cliutils.args("--json", action="store_true", dest="output_json",
help=("Results will be in json format"))
@cliutils.args("--output-file", type=str, required=False,
dest="output_file",
help="If specified, output will be saved to given file")
@envutils.with_default_verification_id
def results(self, verification_uuid=None, output_file=None,
output_html=None, output_json=None):
@ -107,13 +109,13 @@ class VerifyCommands(object):
"""
try:
results = db.verification_result_get(verification_uuid)['data']
results = db.verification_result_get(verification_uuid)["data"]
except exceptions.NotFoundException as e:
print(six.text_type(e))
return 1
result = ''
if len(filter(lambda x: bool(x), [output_json, output_html])) > 1:
result = ""
if output_json + output_html > 1:
print("Please specify only one output format.")
elif output_html:
result = json2html.main(results)
@ -122,24 +124,24 @@ class VerifyCommands(object):
if output_file:
output_file = os.path.expanduser(output_file)
with open(output_file, 'wb') as f:
with open(output_file, "wb") as f:
f.write(result)
else:
print(result)
@cliutils.args('--uuid', dest='verification_uuid', type=str,
@cliutils.args("--uuid", dest="verification_uuid", type=str,
required=False,
help='UUID of a verification')
@cliutils.args('--sort-by', dest='sort_by', type=str, required=False,
help='Tests can be sorted by "name" or "duration"')
@cliutils.args('--detailed', dest='detailed', action='store_true',
required=False, help='Prints traceback of failed tests')
help="UUID of a verification")
@cliutils.args("--sort-by", dest="sort_by", type=str, required=False,
help="Tests can be sorted by 'name' or 'duration'")
@cliutils.args("--detailed", dest="detailed", action="store_true",
required=False, help="Prints traceback of failed tests")
@envutils.with_default_verification_id
def show(self, verification_uuid=None, sort_by='name', detailed=False):
def show(self, verification_uuid=None, sort_by="name", detailed=False):
"""Display results table of the verification."""
try:
sortby_index = ('name', 'duration').index(sort_by)
sortby_index = ("name", "duration").index(sort_by)
except ValueError:
print("Sorry, but verification results can't be sorted "
"by '%s'." % sort_by)
@ -153,42 +155,104 @@ class VerifyCommands(object):
return 1
print ("Total results of verification:\n")
total_fields = ['UUID', 'Deployment UUID', 'Set name', 'Tests',
'Failures', 'Created at', 'Status']
total_fields = ["UUID", "Deployment UUID", "Set name", "Tests",
"Failures", "Created at", "Status"]
common_cliutils.print_list([verification], fields=total_fields)
print ("\nTests:\n")
fields = ['name', 'time', 'status']
fields = ["name", "time", "status"]
values = map(objects.Verification,
six.itervalues(tests.data['test_cases']))
six.itervalues(tests.data["test_cases"]))
common_cliutils.print_list(values, fields, sortby_index=sortby_index)
if detailed:
for test in six.itervalues(tests.data['test_cases']):
if test['status'] == 'FAIL':
for test in six.itervalues(tests.data["test_cases"]):
if test["status"] == "FAIL":
formatted_test = (
'====================================================='
'=================\n'
'FAIL: %(name)s\n'
'Time: %(time)s\n'
'Type: %(type)s\n'
'-----------------------------------------------------'
'-----------------\n'
'%(log)s\n'
"====================================================="
"=================\n"
"FAIL: %(name)s\n"
"Time: %(time)s\n"
"Type: %(type)s\n"
"-----------------------------------------------------"
"-----------------\n"
"%(log)s\n"
) % {
'name': test['name'], 'time': test['time'],
'type': test['failure']['type'],
'log': test['failure']['log']}
"name": test["name"], "time": test["time"],
"type": test["failure"]["type"],
"log": test["failure"]["log"]}
print (formatted_test)
@cliutils.args('--uuid', dest='verification_uuid', type=str,
@cliutils.args("--uuid", dest="verification_uuid", type=str,
required=False,
help='UUID of a verification')
@cliutils.args('--sort-by', dest='sort_by', type=str, required=False,
help='Tests can be sorted by "name" or "duration"')
help="UUID of a verification")
@cliutils.args("--sort-by", dest="sort_by", type=str, required=False,
help="Tests can be sorted by 'name' or 'duration'")
@envutils.with_default_verification_id
def detailed(self, verification_uuid=None, sort_by='name'):
def detailed(self, verification_uuid=None, sort_by="name"):
"""Display results table of verification with detailed errors."""
self.show(verification_uuid, sort_by, True)
@cliutils.args("--uuid-1", type=str, dest="uuid1",
help="UUID of the first verification")
@cliutils.args("--uuid-2", type=str, dest="uuid2",
help="UUID of the second verification")
@cliutils.args("--csv", action="store_true", dest="output_csv",
help=("Save results in csv format to specified file"))
@cliutils.args("--html", action="store_true", dest="output_html",
help=("Save results in html format to specified file"))
@cliutils.args("--json", action="store_true", dest="output_json",
help=("Save results in json format to specified file"))
@cliutils.args("--output-file", type=str, required=False,
dest="output_file",
help="If specified, output will be saved to given file")
@cliutils.args("--threshold", type=int, required=False,
dest="threshold", default=0,
help="If specified, timing differences must exceed this "
"percentage threshold to be included in output")
def compare(self, uuid1=None, uuid2=None,
output_file=None, output_csv=None, output_html=None,
output_json=None, threshold=0):
"""Compare two verification results.
:param uuid1: First Verification UUID
:param uuid2: Second Verification UUID
:param output_file: If specified, output will be saved to given file
:param output_csv: Save results in csv format to the specified file
:param output_html: Save results in html format to the specified file
:param output_json: Save results in json format to the specified file
(Default)
:param threshold: Timing difference threshold percentage
"""
try:
results1 = db.verification_result_get(uuid1)["data"]["test_cases"]
results2 = db.verification_result_get(uuid2)["data"]["test_cases"]
_diff = diff.Diff(results1, results2, threshold)
except exceptions.NotFoundException as e:
print(six.text_type(e))
return 1
result = ""
if output_json + output_html + output_csv > 1:
print("Please specify only one output format, either --json, "
"--html or --csv.")
return 1
elif output_html:
result = _diff.to_html()
elif output_csv:
result = _diff.to_csv()
else:
result = _diff.to_json()
if output_file:
with open(output_file, "wb") as f:
if output_csv:
writer = csv.writer(f, dialect="excel")
writer.writerows(result)
else:
f.write(result)
else:
print(result)

@ -0,0 +1,41 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Output verification comparison results in html."""
import os
import mako.template
__description__ = "List differences between two verification runs"
__title__ = "Verification Comparison"
__version__ = "0.1"
def create_report(results):
template_kw = {
"heading": {
"title": __title__,
"description": __description__,
"parameters": [("Difference Count", len(results))]
},
"generator": "compare2html %s" % __version__,
"results": results
}
template_path = os.path.join(os.path.dirname(__file__),
"report_templates",
"compare.mako")
with open(template_path) as f:
template = mako.template.Template(f.read(), strict_undefined=True)
output = template.render(**template_kw)
return output.encode('utf8')
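
A minimal usage sketch for the renderer (the input list mirrors the
diff dicts produced by the Diff class; the values shown are
illustrative):

    from rally.verification.verifiers.tempest import compare2html

    diffs = [{"type": "value_changed", "field": "time",
              "val1": 0.222, "val2": 0.111, "test_name": "test.one"}]
    html = compare2html.create_report(diffs)  # UTF-8 encoded HTML report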

@ -0,0 +1,108 @@
# Copyright 2014 Dell Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import compare2html
class Diff(object):
def __init__(self, test_cases1, test_cases2, threshold):
"""Compare two verification results.
Compares two verification results and emits
desired output, csv, html, json or pprint.
:param test_cases1: older verification json
:param test_cases2: newer verification json
:param threshold: test time difference percentage threshold
"""
self.threshold = threshold
self.diffs = self._compare(test_cases1, test_cases2)
def _compare(self, tc1, tc2):
"""Compare two verification results.
:param tc1: first verification test cases json
:param tc2: second verification test cases json
Typical test case json schema:
"test_case_key": {
"failure": {
"log": ""
},
"name": "",
"output": "",
"status": "",
"time": 0.0
}
"""
names1 = sorted(tc1)
names2 = sorted(tc2)
diffs = []
i = j = 0
while i < len(names1) and j < len(names2):
name1 = names1[i] if i < len(names1) else None
name2 = names2[j] if j < len(names2) else None
if name1 and name2 and name1 == name2:
diffs.extend(self._diff_values(name1, tc1[name1], tc2[name2]))
i += 1
j += 1
elif (not name1) or (name1 > name2):
diffs.append({"type": "new_test", "test_name": name2})
j += 1
else:
diffs.append({"type": "removed_test", "test_name": name1})
i += 1
return diffs
def _diff_values(self, name, result1, result2):
th = self.threshold
fields = ["status", "time", "output"]
diffs = []
for field in fields:
val1 = result1[field]
val2 = result2[field]
if val1 != val2 and not (field == "time"
and abs(((val2 - val1) / val1) * 100)
< th):
diffs.append({
"field": field,
"type": "value_changed",
"test_name": name,
"val1": val1,
"val2": val2
})
return diffs
def to_csv(self):
rows = (("Type", "Field", "Value 1", "Value 2", "Test Name"),)
for res in self.diffs:
row = (res.get("type"), res.get("field", ""),
res.get("val1", ""), res.get("val2", ""),
res.get("test_name"))
rows = rows + (row,)
return rows
def to_json(self):
return json.dumps(self.diffs, sort_keys=True, indent=4)
def to_html(self):
return compare2html.create_report(self.diffs)
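
A minimal sketch of how the Diff helper is driven (the test-case dicts
below are illustrative; in the CLI the real input comes from
db.verification_result_get(uuid)["data"]["test_cases"]):

    from rally.verification.verifiers.tempest import diff

    old = {"test.one": {"name": "test.one", "output": "",
                        "status": "OK", "time": 0.10}}
    new = {"test.one": {"name": "test.one", "output": "",
                        "status": "FAIL", "time": 0.25}}
    _diff = diff.Diff(old, new, threshold=0)
    print(_diff.to_json())  # default format used by "rally verify compare"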

@ -0,0 +1,165 @@
<?xml version="1.0" encoding="UTF-8"?>
<!doctype html>
<html>
<head>
<title>${heading["title"]}</title>
<meta name="generator" content="${generator}">
<meta charset="utf-8">
<script type="text/javascript">
var DOWN = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAsAAAAGCAYAAAAVMmT4AAAAJUlEQVQYlWNgYGD4TwJmYCBFIYYGFhYWvArx2YAXEK0QWQMGAADd8SPpeGzm9QAAAABJRU5ErkJggg==";
var NONE = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAsAAAAGCAYAAAAVMmT4AAAADUlEQVQYlWNgGAUIAAABDgAB6WzgmwAAAABJRU5ErkJggg==";
var UP = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAsAAAAGCAYAAAAVMmT4AAAAK0lEQVQYlWNgwA7+4xDHqhCGiVaIVwNcAQsLC14N2EzEqoEYhf8ZGBj+AwCZbyPp8zIdEAAAAABJRU5ErkJggg==";
function sort_table(table_id, col, sort){
var table = document.getElementById(table_id);
var tbody = table.tBodies[0];
var header_row = table.tHead.rows[0];
render_header(col, sort, header_row);
sort_results(tbody, col, sort);
}
function render_header(col, sort, header_row){
var h_cells = header_row.cells;
for(i = 0; i < h_cells.length; i++){
var cell = h_cells[i];
var img = cell.firstElementChild;
if (i == col){
if (sort == 1){
img.src = UP;
}else{
img.src = DOWN;
}
}else{ //spacer image
img.src = NONE;
}
}
}
function sort_results(tbody, col, sort) {
var rows = tbody.rows, rlen = rows.length, arr = new Array(), i, j, cells, clen;
// fill the array with values from the table
for(i = 0; i < rlen; i++){
cells = rows[i].cells;
clen = cells.length;
arr[i] = new Array();
for(j = 0; j < clen; j++){
arr[i][j] = cells[j].innerHTML;
}
}
// sort the array by the specified column number (col) and order (sort)
arr.sort(function(a, b){
return (a[col] == b[col]) ? 0 : ((a[col] > b[col]) ? sort : -1*sort);
});
for(i = 0; i < rlen; i++){
arr[i] = "<td>"+arr[i].join("</td><td>")+"</td>";
}
tbody.innerHTML = "<tr>"+arr.join("</tr><tr>")+"</tr>";
}
</script>
<style type="text/css" media="screen">
body {
font-family: verdana, arial, helvetica, sans-serif;
font-size: 80%;
}
table {
font-size: 100%; width: 100%;
}
h1 {
font-size: 16pt;
color: gray;
}
.heading {
margin-top: 0ex;
margin-bottom: 1ex;
}
.heading .attribute {
margin-top: 1ex;
margin-bottom: 0;
}
.heading .description {
margin-top: 4ex;
margin-bottom: 6ex;
}
#results_table {
width: 100%;
border-collapse: collapse;
border: 1px solid #777;
}
#header_row {
font-weight: bold;
color: white;
background-color: #777;
}
#results_table td {
border: 1px solid #777;
padding: 2px;
}
.testcase { margin-left: 2em;}
img.updown{
padding-left: 3px;
padding-bottom: 2px;
}
th:hover{
cursor:pointer;
}
.nowrap {white-space: nowrap;}
</style>
</head>
<body>
<div class="heading">
<h1>${heading["title"]}</h1>
% for name, value in heading["parameters"]:
<p class="attribute"><strong>${name}:</strong> ${value}</p>
% endfor
<p class="description">${heading["description"]}</p>
</div>
<table id="results_table">
<colgroup>
<col align="left" />
<col align="left" />
<col align="left" />
<col align="left" />
<col align="left" />
</colgroup>
<thead>
<tr id="header_row">
<th class="nowrap" onclick="sort_table('results_table', 0, col1_sort); col1_sort *= -1; col2_sort = 1; col3_sort = 1; col4_sort = 1; col5_sort = 1;">Type<img class="updown" src=NONE /></th>
<th class="nowrap" onclick="sort_table('results_table', 1, col2_sort); col2_sort *= -1; col1_sort = 1; col3_sort = 1; col4_sort = 1; col5_sort = 1;">Field<img class="updown" src=NONE /></th>
<th class="nowrap" onclick="sort_table('results_table', 2, col3_sort); col3_sort *= -1; col1_sort = 1; col2_sort = 1; col4_sort = 1; col5_sort = 1;">Value 1<img class="updown" src=NONE /></th>
<th class="nowrap" onclick="sort_table('results_table', 3, col4_sort); col4_sort *= -1; col1_sort = 1; col2_sort = 1; col3_sort = 1; col5_sort = 1;">Value 2<img class="updown" src=NONE /></th>
<th onclick="sort_table('results_table', 4, col5_sort); col5_sort *= -1; col1_sort = 1; col2_sort = 1; col3_sort = 1; col4_sort = 1;">Test Name<img class="updown" src=NONE /></th>
</tr>
</thead>
<tbody id="results">
% for diff in results:
<tr class="">
<td class="type">${diff.get("type")}</td>
<td class="field">${diff.get("field", "")}</td>
<td class="val">${diff.get("val1", "")}</td>
<td class="val">${diff.get("val2", "")}</td>
<td class="testname">${diff.get("test_name")}</td>
</tr>
% endfor
</table>
<script type="text/javascript">
var col1_sort = 1, col2_sort = 1, col3_sort = 1; col4_sort = 1; col5_sort = 1;
sort_table("results_table", 4, col5_sort);
col5_sort *= -1;
</script>
</body>
</html>

@ -31,41 +31,41 @@ class VerifyCommandsTestCase(test.TestCase):
self.verify = verify.VerifyCommands()
self.image1 = mock.Mock()
self.image1.name = 'cirros-1'
self.image1.id = 'fake_image_id_1'
self.image1.name = "cirros-1"
self.image1.id = "fake_image_id_1"
self.image2 = mock.Mock()
self.image2.id = 'fake_image_id_2'
self.image2.name = 'cirros-2'
self.image2.id = "fake_image_id_2"
self.image2.name = "cirros-2"
self.flavor1 = mock.Mock()
self.flavor2 = mock.Mock()
self.flavor1.id = 'fake_flavor_id_1'
self.flavor2.id = 'fake_flavor_id_2'
self.flavor1.id = "fake_flavor_id_1"
self.flavor2.id = "fake_flavor_id_2"
self.flavor1.ram = 128
self.flavor2.ram = 64
@mock.patch('rally.osclients.Clients')
@mock.patch('rally.orchestrator.api.verify')
@mock.patch("rally.osclients.Clients")
@mock.patch("rally.orchestrator.api.verify")
def test_start(self, mock_verify, mock_clients):
deploy_id = '0fba91c6-82d5-4ce1-bd00-5d7c989552d9'
deploy_id = "0fba91c6-82d5-4ce1-bd00-5d7c989552d9"
mock_clients().glance().images.list.return_value = [
self.image1, self.image2]
mock_clients().nova().flavors.list.return_value = [
self.flavor1, self.flavor2]
self.verify.start(deploy_id=deploy_id)
default_set_name = 'smoke'
default_set_name = "smoke"
default_regex = None
mock_verify.assert_called_once_with(deploy_id,
default_set_name, default_regex,
None)
@mock.patch('rally.osclients.Clients')
@mock.patch('rally.orchestrator.api.verify')
@mock.patch("rally.osclients.Clients")
@mock.patch("rally.orchestrator.api.verify")
def test_start_with_user_specified_tempest_config(self, mock_verify,
mock_clients):
deploy_id = '0fba91c6-82d5-4ce1-bd00-5d7c989552d9'
deploy_id = "0fba91c6-82d5-4ce1-bd00-5d7c989552d9"
mock_clients().glance().images.list.return_value = [
self.image1, self.image2]
mock_clients().nova().flavors.list.return_value = [
@ -73,7 +73,7 @@ class VerifyCommandsTestCase(test.TestCase):
tempest_config = tempfile.NamedTemporaryFile()
self.verify.start(deploy_id=deploy_id,
tempest_config=tempest_config.name)
default_set_name = 'smoke'
default_set_name = "smoke"
default_regex = None
mock_verify.assert_called_once_with(deploy_id,
@ -81,114 +81,212 @@ class VerifyCommandsTestCase(test.TestCase):
tempest_config.name)
tempest_config.close()
@mock.patch('rally.orchestrator.api.verify')
@mock.patch("rally.orchestrator.api.verify")
def test_start_with_wrong_set_name(self, mock_verify):
deploy_id = 'f2009aae-6ef3-468e-96b2-3c987d584010'
deploy_id = "f2009aae-6ef3-468e-96b2-3c987d584010"
wrong_set_name = 'unexpected_value'
wrong_set_name = "unexpected_value"
self.verify.start(deploy_id, wrong_set_name)
self.assertNotIn(wrong_set_name, consts.TEMPEST_TEST_SETS)
self.assertFalse(mock_verify.called)
@mock.patch('rally.openstack.common.cliutils.print_list')
@mock.patch('rally.db.verification_list')
@mock.patch("rally.openstack.common.cliutils.print_list")
@mock.patch("rally.db.verification_list")
def test_list(self, mock_db_verification_list, mock_print_list):
fields = ['UUID', 'Deployment UUID', 'Set name', 'Tests', 'Failures',
'Created at', 'Status']
verifications = {'dummy': []}
fields = ["UUID", "Deployment UUID", "Set name", "Tests", "Failures",
"Created at", "Status"]
verifications = {"dummy": []}
mock_db_verification_list.return_value = verifications
self.verify.list()
mock_db_verification_list.assert_called_once_with()
mock_print_list.assert_called_once_with(verifications, fields,
sortby_index=fields.index(
'Created at'))
"Created at"))
@mock.patch('rally.openstack.common.cliutils.print_list')
@mock.patch('rally.db.verification_get')
@mock.patch('rally.db.verification_result_get')
@mock.patch('rally.objects.Verification')
@mock.patch("rally.openstack.common.cliutils.print_list")
@mock.patch("rally.db.verification_get")
@mock.patch("rally.db.verification_result_get")
@mock.patch("rally.objects.Verification")
def test_show(self, mock_obj_verification,
mock_verification_result_get, mock_verification_get,
mock_print_list):
class Test_dummy():
data = {'test_cases': {'test_a': {'name': 'test_a', 'time': 20,
'status': 'PASS'},
'test_b': {'name': 'test_b', 'time': 20,
'status': 'SKIP'},
'test_c': {'name': 'test_c', 'time': 20,
'status': 'FAIL'}}}
data = {"test_cases": {"test_a": {"name": "test_a", "time": 20,
"status": "PASS"},
"test_b": {"name": "test_b", "time": 20,
"status": "SKIP"},
"test_c": {"name": "test_c", "time": 20,
"status": "FAIL"}}}
verification_id = '39121186-b9a4-421d-b094-6c6b270cf9e9'
total_fields = ['UUID', 'Deployment UUID', 'Set name', 'Tests',
'Failures', 'Created at', 'Status']
fields = ['name', 'time', 'status']
verification_id = "39121186-b9a4-421d-b094-6c6b270cf9e9"
total_fields = ["UUID", "Deployment UUID", "Set name", "Tests",
"Failures", "Created at", "Status"]
fields = ["name", "time", "status"]
verification = mock.MagicMock()
tests = Test_dummy()
mock_verification_result_get.return_value = tests
mock_verification_get.return_value = verification
mock_obj_verification.return_value = 1
values = map(objects.Verification,
six.itervalues(tests.data['test_cases']))
six.itervalues(tests.data["test_cases"]))
self.verify.show(verification_id)
mock_print_list.assert_any_call(
[verification], fields=total_fields)
mock_print_list.assert_any_call([verification], fields=total_fields)
mock_verification_get.assert_called_once_with(verification_id)
mock_verification_result_get.assert_called_once_with(verification_id)
mock_print_list.assert_any_call(values, fields, sortby_index=0)
@mock.patch('rally.db.verification_result_get', return_value={'data': {}})
@mock.patch('json.dumps')
@mock.patch("rally.db.verification_result_get", return_value={"data": {}})
@mock.patch("json.dumps")
def test_results(self, mock_json_dumps, mock_db_result_get):
verification_uuid = 'a0231bdf-6a4e-4daf-8ab1-ae076f75f070'
self.verify.results(verification_uuid, output_json=True)
verification_uuid = "a0231bdf-6a4e-4daf-8ab1-ae076f75f070"
self.verify.results(verification_uuid, output_html=False,
output_json=True)
mock_db_result_get.assert_called_once_with(verification_uuid)
mock_json_dumps.assert_called_once_with({}, sort_keys=True, indent=4)
@mock.patch('rally.db.verification_result_get')
@mock.patch("rally.db.verification_result_get")
def test_results_verification_not_found(self, mock_db_result_get):
verification_uuid = '9044ced5-9c84-4666-8a8f-4b73a2b62acb'
verification_uuid = "9044ced5-9c84-4666-8a8f-4b73a2b62acb"
mock_db_result_get.side_effect = exceptions.NotFoundException()
self.assertEqual(self.verify.results(verification_uuid), 1)
self.assertEqual(self.verify.results(verification_uuid,
output_html=False,
output_json=True), 1)
mock_db_result_get.assert_called_once_with(verification_uuid)
@mock.patch('rally.cmd.commands.verify.open', create=True)
@mock.patch('rally.db.verification_result_get', return_value={'data': {}})
@mock.patch("rally.cmd.commands.verify.open", create=True)
@mock.patch("rally.db.verification_result_get", return_value={"data": {}})
def test_results_with_output_json_and_output_file(self,
mock_db_result_get,
mock_open):
mock_open.return_value = mock.MagicMock()
verification_uuid = '94615cd4-ff45-4123-86bd-4b0741541d09'
self.verify.results(verification_uuid, output_file='results',
output_json=True)
verification_uuid = "94615cd4-ff45-4123-86bd-4b0741541d09"
self.verify.results(verification_uuid, output_file="results",
output_html=False, output_json=True)
mock_db_result_get.assert_called_once_with(verification_uuid)
mock_open.assert_called_once_with('results', 'wb')
mock_open.assert_called_once_with("results", "wb")
fake_file = mock_open.return_value.__enter__.return_value
fake_file.write.assert_called_once_with('{}')
fake_file.write.assert_called_once_with("{}")
@mock.patch('rally.cmd.commands.verify.open', create=True)
@mock.patch('rally.db.verification_result_get')
@mock.patch('rally.verification.verifiers.tempest.json2html.main',
return_value='')
@mock.patch("rally.cmd.commands.verify.open", create=True)
@mock.patch("rally.db.verification_result_get")
@mock.patch("rally.verification.verifiers.tempest.json2html.main",
return_value="")
def test_results_with_output_html_and_output_file(self,
mock_json2html_main,
mock_db_result_get,
mock_open):
mock_open.return_value = mock.MagicMock()
verification_uuid = '7140dd59-3a7b-41fd-a3ef-5e3e615d7dfa'
verification_uuid = "7140dd59-3a7b-41fd-a3ef-5e3e615d7dfa"
fake_data = {}
results = {'data': fake_data}
results = {"data": fake_data}
mock_db_result_get.return_value = results
self.verify.results(verification_uuid, output_html=True,
output_file='results')
output_json=False, output_file="results")
mock_db_result_get.assert_called_once_with(verification_uuid)
mock_json2html_main.assert_called_once_with(fake_data)
mock_open.assert_called_once_with('results', 'wb')
mock_open.assert_called_once_with("results", "wb")
fake_file = mock_open.return_value.__enter__.return_value
fake_file.write.assert_called_once_with('')
fake_file.write.assert_called_once_with("")
@mock.patch("rally.db.verification_result_get",
return_value={"data": {"test_cases": {}}})
@mock.patch("json.dumps")
def test_compare(self, mock_json_dumps, mock_db_result_get):
uuid1 = "8eda1b10-c8a4-4316-9603-8468ff1d1560"
uuid2 = "f6ef0a98-1b18-452f-a6a7-922555c2e326"
self.verify.compare(uuid1, uuid2, output_csv=False, output_html=False,
output_json=True)
fake_data = []
calls = [mock.call(uuid1),
mock.call(uuid2)]
mock_db_result_get.assert_has_calls(calls, True)
mock_json_dumps.assert_called_once_with(fake_data, sort_keys=True,
indent=4)
@mock.patch("rally.db.verification_result_get",
side_effect=exceptions.NotFoundException())
def test_compare_verification_not_found(self, mock_db_result_get):
uuid1 = "f7dc82da-31a6-4d40-bbf8-6d366d58960f"
uuid2 = "2f8a05f3-d310-4f02-aabf-e1165aaa5f9c"
self.assertEqual(self.verify.compare(uuid1, uuid2, output_csv=False,
output_html=False,
output_json=True), 1)
mock_db_result_get.assert_called_once_with(uuid1)
@mock.patch("rally.cmd.commands.verify.open", create=True)
@mock.patch("rally.db.verification_result_get",
return_value={"data": {"test_cases": {}}})
def test_compare_with_output_csv_and_output_file(self,
mock_db_result_get,
mock_open):
fake_string = "Type,Field,Value 1,Value 2,Test Name\r\n"
uuid1 = "5e744557-4c3a-414f-9afb-7d3d8708028f"
uuid2 = "efe1c74d-a632-476e-bb6a-55a9aa9cf76b"
self.verify.compare(uuid1, uuid2, output_file="results",
output_csv=True, output_html=False,
output_json=False)
calls = [mock.call(uuid1),
mock.call(uuid2)]
mock_db_result_get.assert_has_calls(calls, True)
mock_open.assert_called_once_with("results", "wb")
fake_file = mock_open.return_value.__enter__.return_value
fake_file.write.assert_called_once_with(fake_string)
@mock.patch("rally.cmd.commands.verify.open", create=True)
@mock.patch("rally.db.verification_result_get",
return_value={"data": {"test_cases": {}}})
def test_compare_with_output_json_and_output_file(self,
mock_db_result_get,
mock_open):
fake_json_string = "[]"
uuid1 = "0505e33a-738d-4474-a611-9db21547d863"
uuid2 = "b1908417-934e-481c-8d23-bc0badad39ed"
self.verify.compare(uuid1, uuid2, output_file="results",
output_csv=False, output_html=False,
output_json=True)
calls = [mock.call(uuid1),
mock.call(uuid2)]
mock_db_result_get.assert_has_calls(calls, True)
mock_open.assert_called_once_with("results", "wb")
fake_file = mock_open.return_value.__enter__.return_value
fake_file.write.assert_called_once_with(fake_json_string)
@mock.patch("rally.cmd.commands.verify.open", create=True)
@mock.patch("rally.db.verification_result_get")
@mock.patch(("rally.verification.verifiers.tempest."
"compare2html.create_report"), return_value="")
def test_compare_with_output_html_and_output_file(self,
mock_compare2html_create,
mock_db_result_get,
mock_open):
uuid1 = "cdf64228-77e9-414d-9d4b-f65e9d62c61f"
uuid2 = "39393eec-1b45-4103-8ec1-631edac4b8f0"
results = {"data": {"test_cases": {}}}
fake_data = []
self.verify.compare(uuid1, uuid2,
output_file="results",
output_csv=False, output_html=True,
output_json=False)
mock_db_result_get.return_value = results
calls = [mock.call(uuid1),
mock.call(uuid2)]
mock_db_result_get.assert_has_calls(calls, True)
mock_compare2html_create.assert_called_once_with(fake_data)
mock_open.assert_called_once_with("results", "wb")
fake_file = mock_open.return_value.__enter__.return_value
fake_file.write.assert_called_once_with("")

@ -0,0 +1,39 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.verification.verifiers.tempest import compare2html
from tests.unit import test
class Compare2HtmlTestCase(test.TestCase):
def test_main(self):
results = [{'val2': 0.0111, 'field': u'time', 'val1': 0.0222,
'type': 'CHANGED', 'test_name': u'test.one'},
{'val2': 0.111, 'field': u'time', 'val1': 0.222,
'type': 'CHANGED', 'test_name': u'test.two'},
{'val2': 1.11, 'field': u'time', 'val1': 2.22,
'type': 'CHANGED', 'test_name': u'test.three'}]
fake_kw = {"heading":
{"title": compare2html.__title__,
"description": compare2html.__description__,
"parameters": [("Difference Count", len(results))]
},
"generator": "compare2html %s" % compare2html.__version__,
"results": results}
with mock.patch('mako.template.Template') as mock_mako:
compare2html.create_report(results)
mock_mako().render.assert_called_once_with(**fake_kw)

@ -0,0 +1,76 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.verification.verifiers.tempest import diff
from tests.unit import test
class DiffTestCase(test.TestCase):
def test_main(self):
results1 = {'test.NONE': {'name': 'test.NONE',
'output': 'test.NONE',
'status': 'SKIPPED',
'time': 0.000},
'test.one': {'name': 'test.one',
'output': 'test.one',
'status': 'OK',
'time': 0.111},
'test.two': {'name': 'test.two',
'output': 'test.two',
'status': 'OK',
'time': 0.222},
'test.three': {'name': 'test.three',
'output': 'test.three',
'status': 'FAILED',
'time': 0.333},
'test.four': {'name': 'test.four',
'output': 'test.four',
'status': 'OK',
'time': 0.444},
'test.five': {'name': 'test.five',
'output': 'test.five',
'status': 'OK',
'time': 0.555}
}
results2 = {'test.one': {'name': 'test.one',
'output': 'test.one',
'status': 'FAIL',
'time': 0.1111},
'test.two': {'name': 'test.two',
'output': 'test.two',
'status': 'OK',
'time': 0.222},
'test.three': {'name': 'test.three',
'output': 'test.three',
'status': 'OK',
'time': 0.3333},
'test.four': {'name': 'test.four',
'output': 'test.four',
'status': 'FAIL',
'time': 0.4444},
'test.five': {'name': 'test.five',
'output': 'test.five',
'status': 'OK',
'time': 0.555},
'test.six': {'name': 'test.six',
'output': 'test.six',
'status': 'OK',
'time': 0.666}
}
diff_ = diff.Diff(results1, results2, 0)
assert len(diff_.diffs) == 8
assert diff_.to_csv() != ''
assert diff_.to_html() != ''
assert diff_.to_json() != ''

@ -27,6 +27,7 @@ _rally()
OPTS["show_keypairs"]="--deploy-id"
OPTS["show_networks"]="--deploy-id"
OPTS["show_secgroups"]="--deploy-id"
OPTS["verify_compare"]="--uuid-1 --uuid-2 --csv --html --json --output-file --threshold"
OPTS["verify_detailed"]="--uuid --sort-by"
OPTS["verify_list"]=""
OPTS["verify_results"]="--uuid --html --json --output-file"