Check verification component refactoring

Change-Id: Icbfc6173e5c2a45cc7779e0ae3b37b9910b803a5
Author: Yaroslav Lobankov
Date:   2016-12-07 15:57:28 +04:00
parent 01a1b5fc6e
commit 3821640bf2
2 changed files with 180 additions and 198 deletions

File 1 of 2: verification job results page template (HTML/Jinja2)

@@ -1,6 +1,6 @@
{% extends "/base.html" %}
{% block title_text %}Rally Verification job results{% endblock %}
{% block title_text %}Rally Verification Job Results{% endblock %}
{% block css %}
li { margin:2px 0 }
@@ -12,8 +12,8 @@
.columns li { position:relative }
.columns li > :first-child { display:block }
.columns li > :nth-child(2) { display:block; position:static; left:165px; top:0; white-space:nowrap }
.fail {color: red; text-transform: uppercase}
.pass {color: green; display: none; text-transform: uppercase}
.fail {color: red}
.success {color: green}
{% endblock %}
{% block css_content_wrap %}margin:0 auto; padding:0 5px{% endblock %}
@@ -25,108 +25,92 @@
@media only screen and (min-width: 720px) { .content-wrap { width:70% } }
{% endblock %}
{% block header_text %}Verify job results{% endblock %}
{% block header_text %}Verify Job Results{% endblock %}
{% block content %}
<h2>Job Logs and Job Result files</h2>
<h2>Logs and Results Files</h2>
<ul class="columns">
<li><a href="console.html" class="rich">Job logs</a> <code>console.html</code>
<li><a href="console.html">Job logs</a> <code>console.html</code>
<li><a href="logs/">Logs of all services</a> <code>logs/</code>
<li><a href="rally-verify/">Results files</a> <code>rally-verify/</code>
</ul>
<h2>Job Steps and Results</h2>
<h3>Table</h3>
<ul>
<li>Tempest Management</li>
<li>Launch verification(s)</li>
{% if compare %}
<li>Compare two verification results</li>
{% endif %}
<li>List all verifications</li>
</ul>
<h2>Steps</h2>
Each job step has output in all supported formats.
<span class="{{ list_plugins.status }}">[{{ list_plugins.status }}]</span>
<a href="{{ list_plugins.stdout_file }}">List plugins for verifiers management</a>
<code>$ {{ list_plugins.cmd }}</code><br>
<h4>Details</h4>
<span class="{{ install.status }}">[{{ install.status }}]</span>
<a href="{{ install.stdout_file }}">Tempest installation</a>
<code>$ {{ install.cmd }}</code>
<span class="{{ create_verifier.status }}">[{{ create_verifier.status }}]</span>
<a href="{{ create_verifier.stdout_file }}">Create a verifier</a>
<code>$ {{ create_verifier.cmd }}</code>
<span class="{{ list_verifiers.status }}">[{{ list_verifiers.status }}]</span>
<a href="{{ list_verifiers.stdout_file }}">List verifiers</a>
<code>$ {{ list_verifiers.cmd }}</code>
<span class="{{ update_verifier.status }}">[{{ update_verifier.status }}]</span>
<a href="{{ update_verifier.stdout_file }}">Switch the verifier to the penultimate version </a>
<code>$ {{ update_verifier.cmd }}</code>
<span class="{{ configure_verifier.status }}">[{{ configure_verifier.status }}]</span>
<a href="{{ configure_verifier.stdout_file }}">Generate and show the verifier config file</a>
<code>$ {{ configure_verifier.cmd }}</code><br>
<span class="{{ reinstall.status }}">[{{ reinstall.status }}]</span>
<a href="{{ reinstall.stdout_file }}">Tempest re-installation</a>
<code>$ {{ reinstall.cmd }}</code>
<span class="{{ add_verifier_ext.status }}">[{{ add_verifier_ext.status }}]</span>
<a href="{{ add_verifier_ext.stdout_file }}">Add a verifier extension</a>
<code>$ {{ add_verifier_ext.cmd }}</code>
<span class="{{ list_verifier_exts.status }}">[{{ list_verifier_exts.status }}]</span>
<a href="{{ list_verifier_exts.stdout_file }}">List verifier extensions</a>
<code>$ {{ list_verifier_exts.cmd }}</code><br>
<span class="{{ installplugin.status }}">[{{ installplugin.status }}]</span>
<a href="{{ installplugin.stdout_file }}">Tempest plugin installation</a>
<code>$ {{ installplugin.cmd }}</code>
<span class="{{ listplugins.status }}">[{{ listplugins.status }}]</span>
<a href="{{ listplugins.stdout_file }}">List installed Tempest plugins</a>
<code>$ {{ listplugins.cmd }}</code>
<span class="{{ discover.status }}">[{{ discover.status }}]</span>
<a href="{{ discover.stdout_file }}">Discovering tests</a>
<code>$ {{ discover.cmd }}</code>
<span class="{{ genconfig.status }}">[{{ genconfig.status }}]</span>
<a href="{{ genconfig.stdout_file }}">Tempest config generation</a>
<code>$ {{ genconfig.cmd }}</code>
<span class="{{ showconfig.status }}">[{{ showconfig.status}}]</span>
<a href="{{ showconfig.stdout_file }}">Show tempest config</a>
<code>$ {{ showconfig.cmd }}</code>
<span class="{{ list_verifier_tests.status }}">[{{ list_verifier_tests.status }}]</span>
<a href="{{ list_verifier_tests.stdout_file }}">List verifier tests</a>
<code>$ {{ list_verifier_tests.cmd }}</code><br>
{% for i in range(verifications|length) %}
{% if verifications|length > 1 %}
<br>Verification # {{ i + 1}}
<b>Verification # {{ i + 1}}</b><p>
<div style="padding-left:2em">
{% endif %}
<span class="{{ verifications[i].status }}">[{{ verifications[i].status }}]</span>
<a href="{{ verifications[i].stdout_file }}">Start verification</a>
<code>$ {{ verifications[i].cmd }}</code><br>
<span class="{{ verifications[i].show.status }}">[{{ verifications[i].show.status }}]</span>
<a href="{{ verifications[i].show.stdout_file }}">Show verification results</a>
<code>$ {{ verifications[i].show.cmd }}</code>
<span class="{{ verifications[i].show_detailed.status }}">[{{ verifications[i].show_detailed.status }}]</span>
<a href="{{ verifications[i].show_detailed.stdout_file }}">Show verification results with details</a>
<code>$ {{ verifications[i].show_detailed.cmd }}</code><br>
<span class="{{ verifications[i].json.status }}">[{{ verifications[i].json.status }}]</span>
<a href="{{ verifications[i].json.output_file }}">Generate the verification report in JSON format</a> [<a href="{{ verifications[i].json.stdout_file }}">Output from CLI</a>]
<code>$ {{ verifications[i].json.cmd }}</code>
<span class="{{ verifications[i].html.status }}">[{{ verifications[i].html.status }}]</span>
<a href="{{ verifications[i].html.output_file }}">Generate the verification report in HTML format</a> [<a href="{{ verifications[i].html.stdout_file }}">Output from CLI</a>]
<code>$ {{ verifications[i].html.cmd }}</code><br>
{% if verifications|length > 1 %}
</div>
{% endif %}
<ol>
<li>
<span class="{{ verifications[i].status }}">[{{ verifications[i].status }}]</span>
<a href="{{ verifications[i].stdout_file }}">Launch of verification</a>
<code>$ {{ verifications[i].cmd }}</code>
</li>
<li>
<span class="{{ verifications[i].result_in_html.status }}">[{{ verifications[i].result_in_html.status }}]</span>
<a href="{{ verifications[i].result_in_html.output_file }}">Display raw results in HTML</a> [<a href="{{ verifications[i].result_in_html.stdout_file }}">Output from CLI</a>]
<code>$ {{ verifications[i].result_in_html.cmd }}</code>
</li>
<li>
<span class="{{ verifications[i].result_in_json.status }}">[{{ verifications[i].result_in_json.status }}]</span>
<a href="{{ verifications[i].result_in_json.output_file }}">Display raw results in JSON</a> [<a href="{{ verifications[i].result_in_json.stdout_file }}">Output from CLI</a>]
<code>$ {{ verifications[i].result_in_json.cmd }}</code>
</li>
<li>
<span class="{{ verifications[i].show.status }}">[{{ verifications[i].show.status }}]</span>
<a href="{{ verifications[i].show.stdout_file }}">Display results table of the verification</a>
<code>$ {{ verifications[i].show.cmd }}</code>
</li>
<li>
<span class="{{ verifications[i].show_detailed.status }}">[{{ verifications[i].show_detailed.status }}]</span>
<a href="{{ verifications[i].show_detailed.stdout_file }}">Display results table of the verification with detailed errors</a><br />
<code>$ {{ verifications[i].show_detailed.cmd }}</code>
</li>
</ol>
{% endfor %}
{% if compare %}
<span class="{{ compare.html.status }}">[{{ compare.html.status }}]</span>
<a href="{{ compare.html.output_file }}">Compare two verifications and display results in HTML</a> [<a href="{{ compare.html.stdout_file }}">Output from CLI</a>]
<code>$ rally verify results --uuid &lt;uuid-1&gt; &lt;uuid-2&gt; --html</code>
<span class="{{ compare.json.status }}">[{{ compare.json.status }}]</span>
<a href="{{ compare.json.output_file }}">Compare two verifications and display results in JSON</a> [<a href="{{ compare.json.stdout_file }}">Output from CLI</a>]
<code>$ rally verify results --uuid &lt;uuid-1&gt; &lt;uuid-2&gt; --json</code>
<span class="{{ compare.csv.status }}">[{{ compare.csv.status }}]</span>
<a href="{{ compare.csv.output_file }}">Compare two verifications and display results in CSV</a> [<a href="{{ compare.csv.stdout_file }}">Output from CLI</a>]
<code>$ rally verify results --uuid &lt;uuid-1&gt; &lt;uuid-2&gt; --csv</code>
<a href="{{ compare.json.output_file }}">Generate the trends report for two verifications in JSON format</a> [<a href="{{ compare.json.stdout_file }}">Output from CLI</a>]
<code>$ {{ compare.json.cmd }}</code>
<span class="{{ compare.html.status }}">[{{ compare.html.status }}]</span>
<a href="{{ compare.html.output_file }}">Generate the trends report for two verifications in HTML format</a> [<a href="{{ compare.html.stdout_file }}">Output from CLI</a>]
<code>$ {{ compare.html.cmd }}</code><br>
{% endif %}
<span class="{{ list.status }}">[{{ list.status }}]</span>
<a href="{{ list.stdout_file }}">List of all verifications</a>
<code>$ {{ list.cmd }}</code>
<a href="{{ list.stdout_file }}">List verifications</a>
<code>$ {{ list.cmd }}</code><br>
<span class="{{ delete_verifier_ext.status }}">[{{ delete_verifier_ext.status }}]</span>
<a href="{{ delete_verifier_ext.stdout_file }}">Delete the verifier extension</a>
<code>$ {{ delete_verifier_ext.cmd }}</code>
<span class="{{ delete_verifier.status }}">[{{ delete_verifier.status }}]</span>
<a href="{{ delete_verifier.stdout_file }}">Delete the verifier and all verifications</a>
<code>$ {{ delete_verifier.cmd }}</code>
<h2>About Rally</h2>
<p>Rally is a benchmarking and verification system for OpenStack:</p>
@@ -136,6 +120,5 @@
<li><a href="https://rally.readthedocs.org/en/latest/">Documentation</a>
<li><a href="https://wiki.openstack.org/wiki/Rally/HowTo">How to use Rally (locally)</a>
<li><a href="https://wiki.openstack.org/wiki/Rally/RallyGates">How to add Rally job to your project</a>
<li><a href="https://www.mirantis.com/blog/rally-openstack-tempest-testing-made-simpler/">Rally: OpenStack Tempest Testing Made Simple(r) [a little outdated blog-post, but contains basic of Rally verification]</a>
</ul>
{% endblock %}
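
The refactored template above expects a single flat render context: one dictionary per job step (each with "status", "cmd" and "stdout_file", plus "output_file" for report steps), a "verifications" list and an optional "compare" entry. Below is a minimal, trimmed-down sketch of such a context; the keys follow the template, while the UUID, commands and file names are placeholders only.

    # Hypothetical render context for the template above; values are illustrative.
    step = {"status": "success",
            "cmd": "rally --rally-debug verify list-plugins",
            "stdout_file": "rally-verify/1_verify_list-plugins.txt.gz"}
    render_vars = {
        "list_plugins": step,
        "verifications": [{
            "status": "success",
            "uuid": "c0ffee00-0000-4000-8000-000000000000",
            "cmd": "rally --rally-debug verify start --pattern set=smoke",
            "stdout_file": "rally-verify/10_verify_start_--pattern_set=smoke.txt.gz",
            "show": dict(step, cmd="rally --rally-debug verify show"),
            "show_detailed": dict(step, cmd="rally --rally-debug verify show --detailed"),
            "json": dict(step, output_file="rally-verify/13_verify_report.json.gz"),
            "html": dict(step, output_file="rally-verify/14_verify_report.html.gz"),
        }],
        # "compare" is only added when the job runs two verifications.
    }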

File 2 of 2: rally-verify job script (Python)

@@ -21,8 +21,6 @@ import subprocess
import sys
import uuid
import yaml
from rally.cli import envutils
from rally.common import objects
from rally import osclients
@@ -31,22 +29,22 @@ from rally.ui import utils
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
MODES_PARAMETERS = {
"full": "--set full",
"light": "--set smoke"
}
BASE_DIR = "rally-verify"
EXPECTED_FAILURES_FILE = "expected_failures.yaml"
EXPECTED_FAILURES = {
MODES = {"full": "--pattern set=full", "light": "--pattern set=smoke"}
DEPLOYMENT_NAME = "devstack"
VERIFIER_TYPE = "tempest"
VERIFIER_SOURCE = "https://git.openstack.org/openstack/tempest"
VERIFIER_EXT_REPO = "https://git.openstack.org/openstack/keystone"
VERIFIER_EXT_NAME = "keystone_tests"
SKIPPED_TESTS = (
"tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON."
"test_get_flavor[id-1f12046b-753d-40d2-abb6-d8eb8b30cb2f,smoke]: "
"This test was skipped intentionally")
XFAILED_TESTS = (
"tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON."
"test_get_vnc_console[id-c6bc11bf-592e-4015-9319-1c98dc64daf5]":
"This test fails because 'novnc' console type is unavailable."
}
TEMPEST_PLUGIN = "https://git.openstack.org/openstack/keystone"
"test_get_vnc_console[id-c6bc11bf-592e-4015-9319-1c98dc64daf5]: "
"This test fails because 'novnc' console type is unavailable")
# NOTE(andreykurilin): this variable is used to generate output file names
# with prefix ${CALL_COUNT}_ .
@@ -57,45 +55,40 @@ _return_status = 0
def call_rally(cmd, print_output=False, output_type=None):
"""Execute a Rally command and write result in files."""
global _return_status
global _call_count
_call_count += 1
data = {"cmd": "rally --rally-debug %s" % cmd}
stdout_file = "{base}/{prefix}_{cmd}.txt.gz"
stdout_file = "{base_dir}/{prefix}_{cmd}.txt.gz"
if "--xfails-file" in cmd or "--source" in cmd:
cmd_items = cmd.split()
for num, item in enumerate(cmd_items):
if EXPECTED_FAILURES_FILE in item or TEMPEST_PLUGIN in item:
cmd_items[num] = os.path.basename(item)
break
cmd = " ".join(cmd_items)
data.update({"stdout_file": stdout_file.format(base=BASE_DIR,
cmd = cmd.replace("/", "_")
data.update({"stdout_file": stdout_file.format(base_dir=BASE_DIR,
prefix=_call_count,
cmd=cmd.replace(" ", "_"))})
if output_type:
data["output_file"] = data["stdout_file"].replace(
".txt.", ".%s." % output_type)
data["cmd"] += " --%(type)s --output-file %(file)s" % {
"type": output_type, "file": data["output_file"]}
data["cmd"] += " --file %s" % data["output_file"]
if output_type == "html":
data["cmd"] += " --html"
try:
LOG.info("Try to launch `%s`." % data["cmd"])
stdout = subprocess.check_output(data["cmd"], shell=True,
LOG.info("Try to execute `%s`." % data["cmd"])
stdout = subprocess.check_output(data["cmd"].split(),
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
LOG.error("Command `%s` is failed." % data["cmd"])
LOG.error("Command `%s` failed." % data["cmd"])
stdout = e.output
data["status"] = "fail"
_return_status = 1
else:
data["status"] = "pass"
data["status"] = "success"
if output_type:
# lets gzip results
# let's gzip results
with open(data["output_file"]) as f:
output = f.read()
with gzip.open(data["output_file"], "wb") as f:
@@ -112,39 +105,34 @@ def call_rally(cmd, print_output=False, output_type=None):
return data
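
# Illustrative sketch (not from the patch): the file-name bookkeeping done by
# call_rally() above, shown standalone. Slashes and spaces in the command become
# underscores, the call counter becomes a prefix, and report calls get a matching
# output file plus the "--file" (and, for HTML, "--html") suffix.
base_dir, call_count = "rally-verify", 7            # example values
cmd = "verify report --uuid 123 456"                # example command
safe = cmd.replace("/", "_").replace(" ", "_")
stdout_file = "%s/%d_%s.txt.gz" % (base_dir, call_count, safe)
output_file = stdout_file.replace(".txt.", ".json.")          # output_type="json"
full_cmd = "rally --rally-debug %s --file %s" % (cmd, output_file)
# stdout_file -> rally-verify/7_verify_report_--uuid_123_456.txt.gz
# full_cmd    -> rally --rally-debug verify report --uuid 123 456
#                    --file rally-verify/7_verify_report_--uuid_123_456.json.gz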
def create_file_with_xfails():
"""Create a YAML file with a list of tests that are expected to fail."""
with open(os.path.join(BASE_DIR, EXPECTED_FAILURES_FILE), "wb") as f:
yaml.dump(EXPECTED_FAILURES, f, default_flow_style=False)
return os.path.join(os.getcwd(), BASE_DIR, EXPECTED_FAILURES_FILE)
def launch_verification_once(launch_parameters):
"""Launch verification and show results in different formats."""
results = call_rally("verify start %s" % launch_parameters)
def start_verification(args):
"""Start a verification, show results and generate reports."""
results = call_rally("verify start %s" % args)
results["uuid"] = envutils.get_global(envutils.ENV_VERIFICATION)
results["result_in_html"] = call_rally("verify results",
output_type="html")
results["result_in_json"] = call_rally("verify results",
output_type="json")
results["show"] = call_rally("verify show")
results["show_detailed"] = call_rally("verify show --detailed")
for ot in ("json", "html"):
results[ot] = call_rally("verify report", output_type=ot)
# NOTE(andreykurilin): we need to clean the verification UUID from the global
# environment to be able to load it next time (for another verification).
envutils.clear_global(envutils.ENV_VERIFICATION)
return results
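
# Illustrative sketch (not from the patch): the Rally calls that one
# start_verification() invocation translates into, in order; call_rally()
# prepends "rally --rally-debug" and appends the per-call output files.
for step_cmd in ("verify start <run args>",
                 "verify show",
                 "verify show --detailed",
                 "verify report",     # JSON report: --file <...>.json.gz
                 "verify report"):    # HTML report: --file <...>.html.gz --html
    print("rally --rally-debug " + step_cmd)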
def do_compare(uuid_1, uuid_2):
"""Compare and save results in different formats."""
def write_file(filename, data):
"""Create a file and write some data to it."""
path = os.path.join(BASE_DIR, filename)
with open(path, "wb") as f:
f.write(data)
return path
def generate_trends_reports(uuid_1, uuid_2):
"""Generate trends reports."""
results = {}
for output_format in ("csv", "html", "json"):
cmd = "verify results --uuid %(uuid-1)s %(uuid-2)s" % {
"uuid-1": uuid_1,
"uuid-2": uuid_2
}
results[output_format] = call_rally(cmd, output_type=output_format)
for ot in ("json", "html"):
results[ot] = call_rally(
"verify report --uuid %s %s" % (uuid_1, uuid_2), output_type=ot)
return results
@@ -155,35 +143,29 @@ def render_page(**render_vars):
def main():
# NOTE(andreykurilin): We need to stop checking the verification component so
# that the forthcoming redesign can be split into several patches.
return 0
parser = argparse.ArgumentParser(description="Launch rally-verify job.")
parser.add_argument(
"--mode",
type=str,
default="light",
help="Mode of job. The 'full' mode corresponds to the full set of "
"Tempest tests. The 'light' mode corresponds to the smoke set "
"of Tempest tests.",
choices=MODES_PARAMETERS.keys())
parser.add_argument(
"--compare",
action="store_true",
help="Launch 2 verifications and compare them.")
parser.add_argument(
"--ctx-create-resources",
action="store_true",
help="Make Tempest context create needed resources for the tests.")
parser.add_argument("--mode", type=str, default="light",
help="Mode of job. The 'full' mode corresponds to the "
"full set of verifier tests. The 'light' mode "
"corresponds to the smoke set of verifier tests.",
choices=MODES.keys())
parser.add_argument("--compare", action="store_true",
help="Start the second verification to generate a "
"trends report for two verifications.")
# TODO(ylobankov): Remove hard-coded Tempest related things and make it
# configurable.
parser.add_argument("--ctx-create-resources", action="store_true",
help="Make Tempest context create needed resources "
"for the tests.")
args = parser.parse_args()
if not os.path.exists("%s/extra" % BASE_DIR):
os.makedirs("%s/extra" % BASE_DIR)
# Check deployment
call_rally("deployment use --deployment devstack", print_output=True)
call_rally("deployment check", print_output=True)
# Choose and check the deployment
call_rally("deployment use --deployment %s" % DEPLOYMENT_NAME)
call_rally("deployment check")
config = json.loads(
subprocess.check_output(["rally", "deployment", "config"]))
@@ -234,58 +216,75 @@ def main():
"= %dMB, VCPUs = 1, disk = 0GB" % (params["name"], flv_ram))
clients.nova().flavors.create(**params)
render_vars = {"verifications": []}
render_vars = dict(verifications=[])
# Install the latest Tempest version
render_vars["install"] = call_rally("verify install")
# List plugins for verifiers management
render_vars["list_plugins"] = call_rally("verify list-plugins")
# Get Rally deployment ID
rally_deployment_id = envutils.get_global(envutils.ENV_DEPLOYMENT)
# Get the penultimate Tempest commit ID
tempest_dir = (
"/home/jenkins/.rally/tempest/for-deployment-%s" % rally_deployment_id)
tempest_commit_id = subprocess.check_output(
["git", "log", "-n", "1", "--pretty=format:'%H'"],
cwd=tempest_dir).strip()
# Install the penultimate Tempest version
render_vars["reinstall"] = call_rally(
"verify reinstall --version %s" % tempest_commit_id)
# Create a verifier
render_vars["create_verifier"] = call_rally(
"verify create-verifier --type %s --name my-verifier --source %s"
% (VERIFIER_TYPE, VERIFIER_SOURCE))
# Install a Tempest plugin
render_vars["installplugin"] = call_rally(
"verify installplugin --source %s" % TEMPEST_PLUGIN)
# List verifiers
render_vars["list_verifiers"] = call_rally("verify list-verifiers")
# List installed Tempest plugins
render_vars["listplugins"] = call_rally("verify listplugins")
# Get verifier ID
verifier_id = envutils.get_global(envutils.ENV_VERIFIER)
# Get the penultimate verifier commit ID
repo_dir = os.path.join(
os.path.expanduser("~"),
".rally/verification/verifier-%s/repo" % verifier_id)
p_commit_id = subprocess.check_output(
["git", "log", "-n", "1", "--pretty=format:%H"], cwd=repo_dir).strip()
# Switch the verifier to the penultimate version
render_vars["update_verifier"] = call_rally(
"verify update-verifier --version %s --update-venv" % p_commit_id)
# Discover tests depending on Tempest suite
discover_cmd = "verify discover"
if args.mode == "light":
discover_cmd += " --pattern smoke"
render_vars["discover"] = call_rally(discover_cmd)
# Generate and show the verifier config file
render_vars["configure_verifier"] = call_rally(
"verify configure-verifier --show")
# Generate and show Tempest config file
render_vars["genconfig"] = call_rally("verify genconfig")
render_vars["showconfig"] = call_rally("verify showconfig")
# Add a verifier extension
render_vars["add_verifier_ext"] = call_rally(
"verify add-verifier-ext --source %s" % VERIFIER_EXT_REPO)
# Create a file with a list of tests that are expected to fail
xfails_file_path = create_file_with_xfails()
# List verifier extensions
render_vars["list_verifier_exts"] = call_rally("verify list-verifier-exts")
# Launch verification
launch_params = "%s --xfails-file %s" % (
MODES_PARAMETERS[args.mode], xfails_file_path)
render_vars["verifications"].append(
launch_verification_once(launch_params))
# List verifier tests
render_vars["list_verifier_tests"] = call_rally(
"verify list-verifier-tests %s" % MODES[args.mode])
# Start a verification, show results and generate reports
skip_list_path = write_file("skip-list.yaml", SKIPPED_TESTS)
xfail_list_path = write_file("xfail-list.yaml", XFAILED_TESTS)
run_args = ("%s --skip-list %s --xfail-list %s"
% (MODES[args.mode], skip_list_path, xfail_list_path))
render_vars["verifications"].append(start_verification(run_args))
if args.compare:
render_vars["verifications"].append(
launch_verification_once(launch_params))
render_vars["compare"] = do_compare(
# Start another verification, show results and generate reports
with gzip.open(render_vars["list_verifier_tests"]["stdout_file"]) as f:
load_list_path = write_file("load-list.txt", f.read())
run_args = "--load-list %s" % load_list_path
render_vars["verifications"].append(start_verification(run_args))
# Generate trends reports for two verifications
render_vars["compare"] = generate_trends_reports(
render_vars["verifications"][-2]["uuid"],
render_vars["verifications"][-1]["uuid"])
# List verifications
render_vars["list"] = call_rally("verify list")
# Delete the verifier extension
render_vars["delete_verifier_ext"] = call_rally(
"verify delete-verifier-ext --name %s" % VERIFIER_EXT_NAME)
# Delete the verifier and all verifications
render_vars["delete_verifier"] = call_rally(
"verify delete-verifier --force")
render_page(**render_vars)
return _return_status
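
# Illustrative sketch (not from the patch): how a CI job might invoke this
# script with the argparse options defined in main(); the script name below is
# a placeholder, since the file's path is not part of this diff.
import subprocess
subprocess.check_call(
    ["python", "rally_verify_job.py",    # placeholder script name
     "--mode", "light",                   # or "full" for the whole test set
     "--compare"])                        # second verification + trends reports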