Merge "Implement SLA support and subunit output"

This commit is contained in:
Jenkins 2015-03-24 10:02:33 +00:00 committed by Gerrit Code Review
commit 4914c361e6
11 changed files with 170 additions and 8 deletions

View File

@ -3,7 +3,7 @@ usage: shaker-report [-h] [--config-dir DIR] [--config-file PATH] [--debug]
[--log-date-format DATE_FORMAT] [--log-dir LOG_DIR]
[--log-file PATH] [--log-format FORMAT] [--nodebug]
[--nouse-syslog] [--nouse-syslog-rfc-format]
[--noverbose] [--report REPORT]
[--noverbose] [--report REPORT] [--subunit SUBUNIT]
[--syslog-log-facility SYSLOG_LOG_FACILITY]
[--use-syslog] [--use-syslog-rfc-format] [--verbose]
[--version] [--report-template REPORT_TEMPLATE]
@ -49,6 +49,8 @@ optional arguments:
--noverbose The inverse of --verbose
--report REPORT Report file name, defaults to env[SHAKER_REPORT]. If
no value provided the report is printed to stdout.
--subunit SUBUNIT Subunit stream file name, defaults to
env[SHAKER_SUBUNIT].
--syslog-log-facility SYSLOG_LOG_FACILITY
Syslog facility to receive log lines.
--use-syslog Use syslog for logging. Existing syslog format is

View File

@ -10,8 +10,8 @@ usage: shaker [-h] [--config-dir DIR] [--config-file PATH] [--debug]
[--os-username <auth-username>] [--output OUTPUT]
[--polling-interval POLLING_INTERVAL] [--report REPORT]
[--report-template REPORT_TEMPLATE] [--scenario SCENARIO]
[--syslog-log-facility SYSLOG_LOG_FACILITY] [--use-syslog]
[--use-syslog-rfc-format] [--verbose] [--version]
[--subunit SUBUNIT] [--syslog-log-facility SYSLOG_LOG_FACILITY]
[--use-syslog] [--use-syslog-rfc-format] [--verbose] [--version]
[--server-endpoint SERVER_ENDPOINT]
optional arguments:
@ -82,6 +82,8 @@ optional arguments:
--report-template REPORT_TEMPLATE
Report template in Jinja format
--scenario SCENARIO Scenario file name, defaults to env[SHAKER_SCENARIO].
--subunit SUBUNIT Subunit stream file name, defaults to
env[SHAKER_SUBUNIT].
--syslog-log-facility SYSLOG_LOG_FACILITY
Syslog facility to receive log lines.
--use-syslog Use syslog for logging. Existing syslog format is

View File

@ -136,6 +136,9 @@
# report is printed to stdout. (string value)
#report = <None>
# Subunit stream file name, defaults to env[SHAKER_SUBUNIT]. (string value)
#subunit = <None>
# File to read test results from, defaults to env[SHAKER_INPUT]. (string value)
#input = <None>

View File

@ -17,6 +17,7 @@ python-keystoneclient>=1.1.0
python-neutronclient>=2.3.11,<3
python-novaclient>=2.18.0,!=2.21.0
python-heatclient>=0.3.0
python-subunit>=0.0.18
PyYAML>=3.1.0
pyzmq>=14.3.1 # LGPL+BSD
six>=1.9.0

View File

@ -12,6 +12,8 @@ execution:
title: Iperf TCP
class: iperf_graph
time: 60
sla:
- bandwidth.mean > 100
-
title: Iperf UDP 5 threads
class: iperf

View File

@ -90,6 +90,10 @@ REPORT_OPTS = [
default=utils.env('SHAKER_REPORT'),
help='Report file name, defaults to env[SHAKER_REPORT]. '
'If no value provided the report is printed to stdout.'),
cfg.StrOpt('subunit',
default=utils.env('SHAKER_SUBUNIT'),
help='Subunit stream file name, defaults to '
'env[SHAKER_SUBUNIT].'),
]
INPUT_OPTS = [

View File

@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import functools
import json
import sys
@ -20,6 +21,7 @@ import sys
import jinja2
from oslo_config import cfg
from oslo_log import log as logging
from subunit import v2 as subunit_v2
import yaml
from shaker.engine import aggregators
@ -43,11 +45,80 @@ def calculate_stats(data):
aggregator.test_summary(test_result)
def generate_report(report_template, report_filename, data):
# One record per evaluated SLA term: the term itself, whether it held,
# where it was measured (test/iteration/agent path) and the stats it was
# evaluated against.
SLARecord = collections.namedtuple('SLARecord',
                                   ['sla', 'status', 'location', 'stats'])


def _verify_stats_against_sla(sla, stats, location):
    """Evaluate every SLA expression against a stats mapping.

    :param sla: list of SLA expression strings (e.g. 'bandwidth.mean > 100')
    :param stats: mapping of metric names to values the expressions refer to
    :param location: dotted path identifying where the stats were collected
    :returns: list of SLARecord, one per expression, in input order
    """
    records = []
    for expression in sla:
        record = SLARecord(sla=expression,
                           status=utils.eval_expr(expression, stats),
                           location=location,
                           stats=stats)
        LOG.debug('SLA: %s', record)
        records.append(record)
    return records
def verify_sla(data):
    """Check SLA terms for every test in the report data.

    SLA expressions are verified twice: once against the aggregated
    per-iteration stats and once against each individual agent's stats.
    The resulting records are attached in place under the 'sla_info' key
    of each iteration/agent result, and also returned as one flat list.

    :param data: report data; only tests whose definition carries an
                 'sla' entry are inspected
    :returns: flat list of SLARecord for all verified terms
    """
    all_records = []
    for test_result in data['result']:
        definition = test_result['definition']
        test_name = definition.get('title') or definition.get('class')
        sla = definition.get('sla')
        if not sla:
            continue

        for iteration in test_result['results_per_iteration']:
            # the iteration is identified by its concurrency (agent count)
            size = str(len(iteration['results_per_agent']))

            records = _verify_stats_against_sla(
                sla, iteration['stats'], '%s.%s' % (test_name, size))
            all_records += records
            iteration['sla_info'] = records

            for agent_result in iteration['results_per_agent']:
                location = '%s.%s.%s' % (test_name, size,
                                         agent_result['agent']['id'])
                records = _verify_stats_against_sla(
                    sla, agent_result['stats'], location)
                all_records += records
                agent_result['sla_info'] = records
    return all_records
def save_to_subunit(sla_res, subunit_filename):
    """Serialize SLA verification results into a subunit v2 stream file.

    Each SLARecord becomes one test run whose id is '<location>:<sla>';
    failed records additionally carry the raw stats as a YAML attachment
    so they can be diagnosed from the stream alone.

    :param sla_res: list of SLARecord produced by verify_sla()
    :param subunit_filename: path of the stream file to (over)write
    """
    # The subunit v2 protocol is binary, so the target file must be opened
    # in binary mode ('w' breaks under Python 3); 'with' guarantees the
    # descriptor is closed even if serialization raises.
    with open(subunit_filename, 'wb') as fd:
        output = subunit_v2.StreamResultToBytes(fd)

        for item in sla_res:
            output.startTestRun()
            test_id = item.location + ':' + item.sla

            if not item.status:
                # attach the stats the failed expression was evaluated on
                output.status(test_id=test_id, file_name='results',
                              mime_type='text/plain; charset="utf8"', eof=True,
                              file_bytes=yaml.safe_dump(
                                  item.stats, default_flow_style=False))

            output.status(test_id=test_id,
                          test_status='success' if item.status else 'fail')
            output.stopTestRun()
def generate_report(data, report_template, report_filename, subunit_filename):
LOG.debug('Generating report, template: %s, output: %s',
report_template, report_filename or 'stdout')
calculate_stats(data)
sla_res = verify_sla(data)
if subunit_filename:
save_to_subunit(sla_res, subunit_filename)
# add more filters to jinja
jinja_env = jinja2.Environment()
@ -75,7 +146,8 @@ def main():
LOG.debug('Reading JSON data from: %s', cfg.CONF.input)
report_data = json.loads(utils.read_file(cfg.CONF.input))
generate_report(cfg.CONF.report_template, cfg.CONF.report, report_data)
generate_report(report_data, cfg.CONF.report_template, cfg.CONF.report,
cfg.CONF.subunit)
if __name__ == "__main__":

View File

@ -213,8 +213,8 @@ def main():
if cfg.CONF.output:
utils.write_file(json.dumps(report_data), cfg.CONF.output)
report.generate_report(cfg.CONF.report_template, cfg.CONF.report,
report_data)
report.generate_report(report_data, cfg.CONF.report_template,
cfg.CONF.report, cfg.CONF.subunit)
if __name__ == "__main__":

View File

@ -13,6 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import operator as op
import logging as std_logging
import os
import random
@ -119,3 +122,63 @@ def random_string(length=6):
def copy_dict_kv(source):
return dict((k, v) for k, v in source.items())
def flatten_dict(d, prefix='', sep='.'):
    """Flatten a nested dict into a list of (dotted_path, value) pairs.

    :param d: dict to flatten; nested dicts are descended recursively
    :param prefix: string prepended to every produced key
    :param sep: separator joining nesting levels (default '.')
    :returns: list of (path, value) tuples, suitable for dict()

    Fix: the recursive call previously omitted ``sep``, so a custom
    separator was silently replaced by '.' beyond the first nesting level.
    """
    res = []
    for k, v in d.items():
        path = prefix + k
        if isinstance(v, dict):
            # propagate the caller's separator to deeper levels
            res.extend(flatten_dict(v, path + sep, sep))
        else:
            res.append((path, v))
    return res
# AST operator node type -> concrete implementation; anything outside this
# table is rejected, which is what makes eval_expr safe on untrusted input.
operators = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,
             ast.Div: op.truediv, ast.Pow: op.pow, ast.BitXor: op.xor,
             ast.USub: op.neg, ast.Lt: op.lt, ast.Gt: op.gt, ast.LtE: op.le,
             ast.GtE: op.ge, ast.Eq: op.eq}


def eval_expr(expr, ctx=None):
    """Safely evaluate an arithmetic/comparison expression string.

    Names are resolved against *ctx*; attribute access drills into nested
    dicts (``a.b`` reads ``ctx['a']['b']``).

    Usage examples:

    >>> eval_expr('2^6')
    4
    >>> eval_expr('2**6')
    64
    >>> eval_expr('1 + 2*3**(4^5) / (6 + -7)')
    -5.0
    >>> eval_expr('11 > a > 5', {'a': 7})
    True
    >>> eval_expr('2 + a.b', {'a': {'b': 2.2}})
    4.2
    """
    tree = ast.parse(expr, mode='eval')
    return _eval(tree.body, ctx or {})


def _eval(node, ctx):
    """Recursively evaluate one AST node against the context mapping."""
    if isinstance(node, ast.Num):
        return node.n
    if isinstance(node, ast.Name):
        # unknown names resolve to None rather than raising
        return ctx.get(node.id)
    if isinstance(node, ast.BinOp):
        apply = operators[type(node.op)]
        return apply(_eval(node.left, ctx), _eval(node.right, ctx))
    if isinstance(node, ast.UnaryOp):
        return operators[type(node.op)](_eval(node.operand, ctx))
    if isinstance(node, ast.Compare):
        # chained comparison: a < b < c holds iff every adjacent pair holds
        result = True
        left = _eval(node.left, ctx)
        for cmp_op, comparator in zip(node.ops, node.comparators):
            right = _eval(comparator, ctx)
            result &= operators[type(cmp_op)](left, right)
            left = right
        return result
    if isinstance(node, ast.Attribute):
        # attribute access maps onto nested dict lookup
        return _eval(node.value, ctx).get(node.attr)
    raise TypeError(node)

View File

@ -6,7 +6,6 @@
coverage>=3.6
hacking>=0.8.0,<0.9
mock>=1.0
python-subunit>=0.0.18
sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3
sphinxcontrib-httpdomain
testrepository>=0.0.18

View File

@ -28,3 +28,17 @@ class TestUtils(testtools.TestCase):
def test_split_address_invalid(self):
# A string without a ':' separator cannot be split into (host, port)
# and must raise ValueError.
self.assertRaises(ValueError, utils.split_address, 'erroneous')
def test_flatten_dict(self):
    # an empty mapping flattens to no pairs at all
    self.assertEqual({}, dict(utils.flatten_dict({})))

    # custom prefix and separator are honored on the first level
    flattened = dict(utils.flatten_dict({'a': {'b': 1}},
                                        prefix='p', sep='_'))
    self.assertEqual({'pa_b': 1}, flattened)

    # nested keys are joined with the default '.' separator
    source = {'a': 1, 'b': {'c': 2, 'd': 3}}
    self.assertEqual({'a': 1, 'b.c': 2, 'b.d': 3},
                     dict(utils.flatten_dict(source)))
def test_eval(self):
    # plain arithmetic
    self.assertEqual(64, utils.eval_expr('2**6'))
    # chained comparison with a name bound via the context
    self.assertEqual(True, utils.eval_expr('11 > a > 5', {'a': 7}))
    # attribute access resolves into nested dicts
    self.assertEqual(42, utils.eval_expr('2 + a.b', {'a': {'b': 40}}))