Configure gate-rally-dsvm-verify

Recently, we added new job "gate-rally-dsvm-verify" for Rally[1].
This job implies functional testing of Rally's integration with Tempest[2].

This patch adds tests/ci/rally-verify.sh script, which is used by new job.

rally-verify.sh does:
- tempest installation
- run "rally verify start" twice and print results
- compare results of two verifications
- list verifications
- generate html page based on results

To implement gate-rally-dsvm-verify some changes were required to
existing Rally code:
- Added ability for rally/ui/utils.py to accept arguments to render html-pages
- Fixed logging debug-messages in tempest verifier
- Fixed check "is debug mode turned on or not"(also, added hacking rule for it)

TODO for future patches:
- add launch of rally task for Tempest
- add launch of random test set
- add check for successful tests

[1] https://review.openstack.org/#/c/137232
[2] https://www.mirantis.com/blog/rally-openstack-tempest-testing-made-simpler

Closes-Bug: #1400465
Closes-Bug: #1400518

Change-Id: I8e1fbab22c2da109bbc442f040fe259e5d22a62a
This commit is contained in:
Andrey Kurilin 2014-12-05 01:19:36 +02:00
parent cce2f680d2
commit 6c5e07e31c
20 changed files with 315 additions and 76 deletions

View File

@ -1,4 +1,3 @@
from oslo.config import cfg
from rally.benchmark.context import base
from rally import log as logging
@ -6,7 +5,6 @@ from rally import osclients
from rally import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@base.context(name="create_flavor", order=1000)
@ -56,7 +54,7 @@ class CreateFlavorContext(base.Context):
LOG.debug("Flavor with id '%s'" % self.context["flavor"]["id"])
except Exception as e:
msg = "Can't create flavor: %s" % e.message
if CONF.debug:
if logging.is_debug():
LOG.exception(msg)
else:
LOG.warning(msg)
@ -69,7 +67,7 @@ class CreateFlavorContext(base.Context):
LOG.debug("Flavor '%s' deleted" % self.context["flavor"]["id"])
except Exception as e:
msg = "Can't delete flavor: %s" % e.message
if CONF.debug:
if logging.is_debug():
LOG.exception(msg)
else:
LOG.warning(msg)

View File

@ -13,11 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import pecan
CONF = cfg.CONF
from rally import log as logging
def setup_app(config):
@ -29,19 +27,19 @@ def setup_app(config):
:return: A normal WSGI application, an instance of
:class:`pecan.Pecan`.
"""
app = pecan.Pecan(config.app.root, debug=CONF.debug)
app = pecan.Pecan(config.app.root, debug=logging.is_debug())
return app
def make_app():
config = {
'app': {
'root': 'rally.aas.rest.controllers.root.RootController',
'modules': ['rally.aas.rest'],
'debug': CONF.debug,
"app": {
"root": "rally.aas.rest.controllers.root.RootController",
"modules": ["rally.aas.rest"],
"debug": logging.is_debug(),
},
'wsme': {
'debug': CONF.debug,
"wsme": {
"debug": logging.is_debug(),
},
}
app = pecan.load_app(config)

View File

@ -15,8 +15,6 @@
import time
from oslo.config import cfg
from rally.benchmark.context.cleanup import base
from rally import broker
from rally.i18n import _
@ -25,7 +23,6 @@ from rally import osclients
from rally import utils as rutils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -87,7 +84,7 @@ class SeekAndDestroy(object):
_("Resource deletion failed, max retries exceeded for "
"%(service)s.%(resource)s: %(uuid)s. Reason: %(reason)s")
% msg_kw)
if CONF.debug:
if logging.is_debug():
LOG.exception(e)
else:
started = time.time()

View File

@ -18,7 +18,6 @@ import collections
import random
import jsonschema
from oslo.config import cfg
from rally.benchmark.scenarios import base as scenario_base
from rally.benchmark import types
@ -73,7 +72,7 @@ def _run_scenario_once(args):
method_name)(**kwargs) or scenario_output
except Exception as e:
error = utils.format_exc(e)
if cfg.CONF.debug:
if logging.is_debug():
LOG.exception(e)
finally:
status = "Error %s: %s" % tuple(error[0:2]) if error else "OK"

View File

@ -17,13 +17,10 @@ import collections
import threading
import time
from oslo.config import cfg
from rally.i18n import _
from rally import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -50,7 +47,7 @@ def _consumer(consume, queue, is_published):
except Exception as e:
LOG.warning(_("Failed to consume a task from the queue: "
"%s") % e)
if CONF.debug:
if logging.is_debug():
LOG.exception(e)
elif is_published.isSet():
break
@ -73,7 +70,7 @@ def _publisher(publish, queue, is_published):
publish(queue)
except Exception as e:
LOG.warning(_("Failed to publish a task to the queue: %s") % e)
if CONF.debug:
if logging.is_debug():
LOG.exception(e)
finally:
is_published.set()

View File

@ -298,12 +298,12 @@ def run(argv, categories):
ret = fn(*fn_args, **fn_kwargs)
return(ret)
except (IOError, TypeError, exceptions.DeploymentNotFound) as e:
if CONF.debug:
if logging.is_debug():
raise
print(e)
return 1
except exceptions.TaskNotFound as e:
if CONF.debug:
if logging.is_debug():
LOG.exception(e)
print(e)
return 1

View File

@ -21,7 +21,6 @@ import os
import pprint
import webbrowser
from oslo.config import cfg
import yaml
from rally.benchmark.processing import plot
@ -33,6 +32,7 @@ from rally import consts
from rally import db
from rally import exceptions
from rally.i18n import _
from rally import log as logging
from rally import objects
from rally.openstack.common import cliutils as common_cliutils
from rally.orchestrator import api
@ -205,7 +205,7 @@ class TaskCommands(object):
print("-" * 80)
verification = yaml.safe_load(task["verification_log"])
if not cfg.CONF.debug:
if not logging.is_debug():
print(verification[0])
print(verification[1])
print()

View File

@ -65,3 +65,7 @@ class RallyContextAdapter(oslogging.ContextAdapter):
def debug(self, msg, *args, **kwargs):
self.log(logging.RDEBUG, msg, *args, **kwargs)
def is_debug():
    """Return whether debug logging is enabled.

    True if either the standard debug mode (CONF.debug) or the
    Rally-specific debug mode (CONF.rally_debug) is turned on.
    """
    return CONF.debug or CONF.rally_debug

View File

@ -14,7 +14,6 @@
# under the License.
import jsonschema
from oslo.config import cfg
from rally.benchmark import engine
from rally import consts
@ -25,7 +24,6 @@ from rally import log as logging
from rally import objects
from rally.verification.verifiers.tempest import tempest
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -39,7 +37,7 @@ def create_deploy(config, name):
try:
deployment = objects.Deployment(name=name, config=config)
except exceptions.DeploymentNameExists as e:
if CONF.debug:
if logging.is_debug():
LOG.exception(e)
raise

View File

@ -34,6 +34,7 @@ from zaqarclient.queues import client as zaqar
from rally import consts
from rally import exceptions
from rally import log as logging
CONF = cfg.CONF
@ -144,7 +145,7 @@ class Clients(object):
region_name=self.endpoint.region_name)
client = nova.Client(version,
auth_token=kc.auth_token,
http_log_debug=CONF.debug,
http_log_debug=logging.is_debug(),
timeout=CONF.openstack_client_http_timeout,
insecure=CONF.https_insecure,
cacert=CONF.https_cacert)
@ -203,7 +204,7 @@ class Clients(object):
def cinder(self, version='1'):
"""Return cinder client."""
client = cinder.Client(version, None, None,
http_log_debug=CONF.debug,
http_log_debug=logging.is_debug(),
timeout=CONF.openstack_client_http_timeout,
insecure=CONF.https_insecure,
cacert=CONF.https_cacert)

View File

@ -35,10 +35,15 @@ def get_template(template_path):
def main(*args):
    """Render a mako template and print the result to stdout.

    :param args: CLI-style arguments: ("render", <template path>,
        "key1=value1", "key2=value2", ...). The key=value pairs are
        passed to the template as render kwargs.

    Exits with a usage message on malformed arguments, or with the mako
    lookup error if the template cannot be found.
    """
    if len(args) < 2 or args[0] != "render":
        exit("Usage: \n\t"
             "utils.py render <lookup/path/to/template.mako> "
             "<key-1>=<value-1> <key-2>=<value-2>\n"
             "where key-1,value-1 and key-2,value-2 are key pairs of template")
    try:
        # maxsplit=1 so template values may themselves contain "=".
        render_kwargs = dict(arg.split("=", 1) for arg in args[2:])
        # Render the template given in *our* argument list (was
        # sys.argv[2], which silently ignored the function's own args
        # and broke any caller not invoked as "utils.py render ...").
        print(get_template(args[1]).render(**render_kwargs))
    except mako.exceptions.TopLevelLookupException as e:
        exit(e)

View File

@ -23,7 +23,6 @@ import StringIO
import sys
import time
from oslo.config import cfg
from oslo.utils import importutils
from sphinx.util import docstrings
@ -31,7 +30,6 @@ from rally import exceptions
from rally.i18n import _
from rally import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
JSON_SCHEMA = 'http://json-schema.org/draft-04/schema'
@ -201,7 +199,7 @@ def load_plugins(directory):
LOG.warning(
"\t Failed to load module with plugins %(path)s.py: %(e)s"
% {"path": fullpath, "e": e})
if CONF.debug:
if logging.is_debug():
LOG.exception(e)

View File

@ -45,8 +45,7 @@ class JsonOutput(testtools.TestResult):
self.results_file = results_file
def _format_result(self, name, time, status, output, failure=None):
if status == STATUS_SKIP:
# We do not need `setUpClass' in skipped test name
# We do not need `setUpClass' in test name
if name[:12] == "setUpClass (" and name[-1] == ")":
name = name[12:-1]

View File

@ -44,7 +44,8 @@ def check_output(*args, **kwargs):
LOG.debug("error output: '%s'" % e.output)
raise
LOG.debug(output)
if logging.is_debug():
print(output)
class Tempest(object):

View File

@ -0,0 +1,136 @@
## -*- coding: utf-8 -*-
<%inherit file="/base.mako"/>
<%block name="title_text">Rally Verification job results</%block>
<%block name="css">
li { margin:2px 0 }
a, a:visited { color:#039 }
code { padding:0 15px; color:#888; display: block }
.columns li { position:relative }
.columns li > :first-child { display:block }
.columns li > :nth-child(2) { display:block; position:static; left:165px; top:0; white-space:nowrap }
.fail {color: red; text-transform: uppercase}
.pass {color: green; display: none; text-transform: uppercase}
</%block>
<%block name="css_content_wrap">margin:0 auto; padding:0 5px</%block>
<%block name="media_queries">
@media only screen and (min-width: 320px) { .content-wrap { width:400px } }
@media only screen and (min-width: 520px) { .content-wrap { width:500px } }
@media only screen and (min-width: 620px) { .content-wrap { width:90% } .columns li > :nth-child(2) { position:absolute } }
@media only screen and (min-width: 720px) { .content-wrap { width:70% } }
</%block>
<%block name="header_text">Verify job results</%block>
<%block name="content">
<h2>Job Logs and Job Result files</h2>
<ul class="columns">
<li><a href="console.html" class="rich">Job logs</a> <code>console.html</code>
<li><a href="logs/">Logs of all services</a> <code>logs/</code>
<li><a href="rally-verify/">Results files</a> <code>rally-verify/</code>
</ul>
<h2>Job Steps and Results</h2>
<h3>Introduction</h3>
<ul>
<li>Install tempest</li>
<li>Launch two verifications ("compute" set is used)</li>
<li>List all verifications</li>
<li>Compare two verification results</li>
</ul>
Each job step has output in all supported formats.
<h3>Details</h3>
<span class="${install}">[${install}]</span>
<a href="rally-verify/tempest_installation.txt.gz">Tempest installation</a>
<code>$ rally-manage tempest install</code>
<br>First verification run
<ol>
<li>
<span class="${v1}">[${v1}]</span>
<a href="rally-verify/1_verification_compute_set.txt.gz">Launch of verification</a>
<code>$ rally verify start --set compute</code>
</li>
<li>
<span class="${vr_1_html}">[${vr_1_html}]</span>
<a href="rally-verify/1_verify_results.html.gz">Display raw results in HTML</a>
<code>$ rally verify results --html</code>
</li>
<li>
<span class="${vr_1_json}">[${vr_1_json}]</span>
<a href="rally-verify/1_verify_results.json.gz">Display raw results in JSON</a>
<code>$ rally verify results --json</code>
</li>
<li>
<span class="${vs_1}">[${vs_1}]</span>
<a href="rally-verify/1_verify_show.txt.gz">Display results table of the verification</a>
<code>$ rally verify show</code>
</li>
<li>
<span class="${vsd_1}">[${vsd_1}]</span>
<a href="rally-verify/1_verify_show_detailed.txt.gz">Display results table of the verification with detailed errors</a><br />
<code style="display: inline">$ rally verify show --detailed</code> or <code style="display: inline">$ rally verify detailed</code>
</li>
</ol>
Second verification run
<ol>
<li>
<span class="${v2}">[${v2}]</span>
<a href="rally-verify/2_verification_compute_set.txt.gz">Launch of verification</a>
<code>$ rally verify start --set compute</code>
</li>
<li>
<span class="${vr_2_html}">[${vr_2_html}]</span>
<a href="rally-verify/2_verify_results.html.gz">Display results in HTML</a>
<code>$ rally verify results --html</code>
</li>
<li>
<span class="${vr_2_json}">[${vr_2_json}]</span>
<a href="rally-verify/2_verify_results.json.gz">Display results in JSON</a>
<code>$ rally verify results --json</code>
</li>
<li>
<span class="${vs_2}">[${vs_2}]</span>
<a href="rally-verify/2_verify_show.txt.gz">Display table results of the verification</a>
<code>$ rally verify show</code>
</li>
<li>
<span class="${vsd_2}">[${vsd_2}]</span>
<a href="rally-verify/2_verify_show_detailed.txt.gz">Display table results of the verification with detailed errors</a><br />
<code style="display: inline">$ rally verify show --detailed</code> or <code style="display: inline">$ rally verify detailed</code>
</li>
</ol>
<span class="${l}">[${l}]</span>
<a href="rally-verify/verify_list.txt.gz">List of all verifications</a>
<code>$ rally verify list</code>
<span class="${c_html}">[${c_html}]</span>
<a href="rally-verify/compare_results.html.gz">Compare two verifications and display results in HTML</a>
<code>$ rally verify compare --uuid-1 &lt;uuid-1&gt; --uuid-2 &lt;uuid-2&gt; --html</code>
<span class="${c_json}">[${c_json}]</span>
<a href="rally-verify/compare_results.json.gz">Compare two verifications and display results in JSON</a>
<code>$ rally verify compare --uuid-1 &lt;uuid-1&gt; --uuid-2 &lt;uuid-2&gt; --json</code>
<span class="${c_csv}">[${c_csv}]</span>
<a href="rally-verify/compare_results.csv.gz">Compare two verifications and display results in CSV</a>
<code>$ rally verify compare --uuid-1 &lt;uuid-1&gt; --uuid-2 &lt;uuid-2&gt; --csv</code>
<h2>About Rally</h2>
<p>Rally is benchmarking and verification system for OpenStack:</p>
<ul>
<li><a href="https://github.com/stackforge/rally">Git repository</a>
<li><a href="https://rally.readthedocs.org/en/latest/">Documentation</a>
<li><a href="https://wiki.openstack.org/wiki/Rally/HowTo">How to use Rally (locally)</a>
<li><a href="https://wiki.openstack.org/wiki/Rally/RallyGates">How to add Rally job to your project</a>
<li><a href="https://www.mirantis.com/blog/rally-openstack-tempest-testing-made-simpler/">Rally: OpenStack Tempest Testing Made Simple(r) [a little outdated blog-post, but contains basic of Rally verification]</a>
</ul>
</%block>

96
tests/ci/rally-verify.sh Executable file
View File

@ -0,0 +1,96 @@
#!/bin/bash -ex
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script is executed by post_test_hook function in devstack gate.
# Directory where all job artifacts (step outputs, results, html) go.
RESULTS_DIR="rally-verify"

# Dump the environment for debugging the gate job.
env

# Fail a pipeline if any command in it fails, but keep going after
# individual failures -- each step's status is recorded separately.
set -o pipefail
set +e

mkdir -p ${RESULTS_DIR}/extra

# Check deployment
rally use deployment --deployment devstack
rally deployment check
# Translate a command exit code into a status word for the results page:
# prints "pass" for exit code 0 and "fail" for anything else.
function do_status {
    if [[ ${1} == 0 ]]
    then
        echo "pass"
    else
        echo "fail"
    fi
}
# Accumulated "key=pass|fail" pairs, later handed to the html renderer.
# NOTE(review): RESULTS is declared as an array, but '+=' with a plain
# string concatenates onto element 0, so it effectively behaves as one
# space-separated string -- confirm this is intended.
declare -a RESULTS

# Install tempest, capturing all output as a job artifact.
rally-manage --rally-debug tempest install > ${RESULTS_DIR}/tempest_installation.txt 2>&1
RESULTS+="install=$(do_status $?) "
gzip -9 ${RESULTS_DIR}/tempest_installation.txt

# Run two verifications for one SET_NAME and then compare them.
SET_NAME="compute"
# Run one tempest verification and save its results in several formats.
#   $1 -- ordinal number of the run (1 or 2); used in output file names
#         and in the RESULTS status keys (v1/v2, vr_1_html, ...).
function do_verification {
    OUTPUT_FILE=${RESULTS_DIR}/${1}_verification_${SET_NAME}_set.txt

    # Launch the verification itself, capturing all output.
    rally --rally-debug verify start --set ${SET_NAME} > ${OUTPUT_FILE} 2>&1
    RESULTS+="v${1}=$(do_status $?) "
    gzip -9 ${OUTPUT_FILE}

    # Remember this run's UUID (rally exports RALLY_VERIFICATION into
    # ~/.rally/globals) so the two runs can be compared afterwards.
    source ~/.rally/globals && VERIFICATIONS[${1}]=${RALLY_VERIFICATION}

    # Check different "rally verify" commands, which displays verification results
    for OUTPUT_FORMAT in "html" "json"
    do
        OUTPUT_FILE=${RESULTS_DIR}/${1}_verify_results.${OUTPUT_FORMAT}
        rally verify results --uuid ${RALLY_VERIFICATION} --${OUTPUT_FORMAT} --output-file ${OUTPUT_FILE}
        RESULTS+="vr_${1}_${OUTPUT_FORMAT}=$(do_status $?) "
        gzip -9 ${OUTPUT_FILE}
    done

    # Tabular views of the verification: plain, then with detailed errors.
    rally verify show --uuid ${RALLY_VERIFICATION} > ${RESULTS_DIR}/${1}_verify_show.txt
    RESULTS+="vs_${1}=$(do_status $?) "
    gzip -9 ${RESULTS_DIR}/${1}_verify_show.txt

    rally verify show --uuid ${RALLY_VERIFICATION} --detailed > ${RESULTS_DIR}/${1}_verify_show_detailed.txt
    RESULTS+="vsd_${1}=$(do_status $?) "
    gzip -9 ${RESULTS_DIR}/${1}_verify_show_detailed.txt
}
# Two runs of the same set, so their results can be compared.
do_verification 1
do_verification 2

# List all verifications known to rally.
rally verify list > ${RESULTS_DIR}/verify_list.txt
RESULTS+="l=$(do_status $?) "
gzip -9 ${RESULTS_DIR}/verify_list.txt

# Compare and save results in different formats
for OUTPUT_FORMAT in "csv" "html" "json"
do
    OUTPUT_FILE=${RESULTS_DIR}/compare_results.${OUTPUT_FORMAT}
    rally --rally-debug verify compare --uuid-1 ${VERIFICATIONS[1]} --uuid-2 ${VERIFICATIONS[2]} --${OUTPUT_FORMAT} --output-file ${OUTPUT_FILE}
    RESULTS+="c_${OUTPUT_FORMAT}=$(do_status $?) "
    gzip -9 ${OUTPUT_FILE}
done

# Render the job's index.html from the collected pass/fail statuses.
python $BASE/new/rally/rally/ui/utils.py render\
 tests/ci/rally-gate/index_verify.mako ${RESULTS[*]}> ${RESULTS_DIR}/extra/index.html

# Fail the job if any recorded step failed.
# NOTE(review): 'return' is only valid when this script is *sourced*; if
# the gate hook executes it directly this should be 'exit 1' -- confirm
# how post_test_hook invokes the script.
if [[ ${RESULTS[*]} == *"fail"* ]]
then
    return 1
fi

View File

@ -13,35 +13,17 @@
# License for the specific language governing permissions and limitations
# under the License.
import json
import unittest
##############################################################################
#
# THIS MODULE IS DEPRECATED.
# DON'T ADD TESTS FOR "rally verify" HERE.
#
# This module is no longer used for testing "rally verify" command.
# Functional testing for this command is moved to separate job.
# https://review.openstack.org/#/c/137232
#
# Please look at tests/ci/rally-verify.sh for more details.
#
##############################################################################
from tests.functional import utils
class VerifyTestCase(unittest.TestCase):
def setUp(self):
super(VerifyTestCase, self).setUp()
self.rally = utils.Rally()
def _verify_start_and_get_results_in_json(self, set_name):
self.rally("verify start %s" % set_name)
results = json.loads(self.rally("verify results --json"))
failed_tests = results["failures"] * 100.0 / results["tests"]
if failed_tests >= 50:
self.fail("Number of failed tests more than 50%.")
show_output = self.rally("verify show")
total_raw = show_output.split("\n").pop(5)[1:-1].replace(" ", "")
total = total_raw.split('|')
self.assertEqual(set_name, total[2])
self.assertEqual(results["tests"], int(total[3]))
self.assertEqual(results["failures"], int(total[4]))
self.assertEqual("finished", total[6])
def test_image_set(self):
self._verify_start_and_get_results_in_json("image")
pass

View File

@ -14,6 +14,7 @@ Rally Specific Commandments
* [N310-N314] - Reserved for rules related to logging
* [N310] - Ensure that ``rally.log`` is used instead of ``rally.openstack.common.log``
* [N311] - Validate that debug level logs are not translated
* [N312] - Validate correctness of debug-mode checks.
* [N32x] - Reserved for rules related to assert* methods
* [N320] - Ensure that ``assertTrue(isinstance(A, B))`` is not used
* [N321] - Ensure that ``assertEqual(type(A), B)`` is not used

View File

@ -138,6 +138,24 @@ def no_translate_debug_logs(logical_line):
yield(0, "N311 Don't translate debug level logs")
def no_use_conf_debug_check(logical_line, filename):
    """Check that 'CONF.debug' is not used directly.

    Rally has two DEBUG levels:
    - full DEBUG, which includes debug messages from all OpenStack services
    - Rally DEBUG, which includes only Rally debug messages
    so the custom helper rally.log.is_debug() must be used to detect
    debug mode instead of reading CONF.debug.

    N312
    """
    # rally/log.py itself legitimately reads CONF.debug inside is_debug().
    excluded_files = ["./rally/log.py"]

    point = logical_line.find("CONF.debug")
    if point != -1 and filename not in excluded_files:
        yield(point, "N312 Don't use `CONF.debug`. "
              "Function `rally.log.is_debug` should be used instead.")
def assert_true_instance(logical_line):
"""Check for assertTrue(isinstance(a, b)) sentences
@ -209,6 +227,7 @@ def factory(register):
register(check_assert_methods_from_mock)
register(check_import_of_logging)
register(no_translate_debug_logs)
register(no_use_conf_debug_check)
register(assert_true_instance)
register(assert_equal_type)
register(assert_equal_none)

View File

@ -93,6 +93,16 @@ class HackingTestCase(test.TestCase):
self.assertEqual(len(list(checks.no_translate_debug_logs(
"LOG.info(_('foo'))"))), 0)
def test_no_use_conf_debug_check(self):
    # Direct use of CONF.debug -- bare or via the cfg module -- must be
    # flagged with exactly one N312 warning; the sanctioned helper
    # logging.is_debug() must produce no warnings.
    self.assertEqual(len(list(checks.no_use_conf_debug_check(
        "if CONF.debug:", "fakefile"))), 1)
    self.assertEqual(len(list(checks.no_use_conf_debug_check(
        "if cfg.CONF.debug", "fakefile"))), 1)
    self.assertEqual(len(list(checks.no_use_conf_debug_check(
        "if logging.is_debug()", "fakefile"))), 0)
def test_assert_true_instance(self):
self.assertEqual(len(list(checks.assert_true_instance(
"self.assertTrue(isinstance(e, "