Merge "Add graph command to graph daily test count over time"
subunit2sql/analysis/dailycount.py (new file, 52 lines)
@@ -0,0 +1,52 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import matplotlib
import matplotlib.pyplot as plt
from oslo.config import cfg
import pandas as pd

from subunit2sql.db import api

CONF = cfg.CONF

matplotlib.style.use('ggplot')


def set_cli_opts(parser):
    pass


def generate_series():
    session = api.get_session()
    test_starts = api.get_test_run_series(CONF.start_date, CONF.stop_date,
                                          session)
    session.close()
    daily_count = pd.Series(test_starts)
    mean = pd.rolling_mean(daily_count, 20)
    rolling_std = pd.rolling_std(daily_count, 20)
    plt.figure()
    title = CONF.title or 'Number of Tempest tests run in the Gate'
    plt.title(title)
    plt.ylabel('Number of tests')
    plt.plot(daily_count.index, daily_count, 'k', label='Daily Test Count')
    plt.plot(mean.index, mean, 'b', label='Avg. Daily Test Count')
    upper_std_dev = mean + 2 * rolling_std
    lower_std_dev = mean - 2 * rolling_std
    # Set negative numbers to 0
    lower_std_dev[lower_std_dev < 0] = 0
    plt.fill_between(rolling_std.index, lower_std_dev, upper_std_dev,
                     color='b', alpha=0.2, label='std dev')
    plt.legend()
    plt.savefig(CONF.output)
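Note: pd.rolling_mean() and pd.rolling_std() match the pandas API of the time; later pandas releases removed them in favour of Series.rolling(). A minimal sketch of the same 20-sample rolling band with the newer API, assuming a pandas Series like the one built above (illustrative only, not part of this change):

import pandas as pd


def rolling_band(daily_count, window=20):
    # The statistics generate_series() plots: a rolling mean plus a band two
    # rolling standard deviations wide on either side, floored at zero.
    mean = daily_count.rolling(window).mean()
    std = daily_count.rolling(window).std()
    upper = mean + 2 * std
    lower = (mean - 2 * std).clip(lower=0)
    return mean, upper, lower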
@@ -19,6 +19,7 @@ import sys
from oslo_config import cfg

import subunit2sql.analysis.agg_count
import subunit2sql.analysis.dailycount
import subunit2sql.analysis.failures
import subunit2sql.analysis.run_time
from subunit2sql import shell
@@ -42,7 +43,7 @@ SHELL_OPTS = [
def add_command_parsers(subparsers):
-    for name in ['failures', 'run_time', 'agg_count']:
+    for name in ['failures', 'run_time', 'agg_count', 'dailycount']:
        parser = subparsers.add_parser(name)
        getattr(subunit2sql.analysis, name).set_cli_opts(parser)
        parser.set_defaults(
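For context, the loop above treats every module under subunit2sql.analysis as a plugin exposing set_cli_opts(parser) and generate_series(); the new dailycount module only has to provide those two hooks to become a subcommand. A small standalone sketch of that registration pattern with plain argparse (hypothetical names, not code from this change):

import argparse


def build_parser(modules):
    # Each analysis module gets a subcommand named after it and may attach
    # extra CLI options via its set_cli_opts() hook (dailycount's is a no-op).
    top = argparse.ArgumentParser(prog='subunit2sql-graph')
    subparsers = top.add_subparsers(dest='command')
    for name, module in modules.items():
        parser = subparsers.add_parser(name)
        module.set_cli_opts(parser)
    return top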
@@ -586,6 +586,45 @@ def get_test_run_time_series(test_id, session=None):
    return time_series


def get_test_run_series(start_date=None, stop_date=None, session=None):
    """Returns a time series dict of total daily run counts

    :param str start_date: Optional start date to filter results on
    :param str stop_date: Optional stop date to filter results on
    :param session: Optional session object; if one isn't provided a new
        session will be acquired for the duration of this operation

    :return dict: A dictionary with the dates as the keys and the values
                  being the total run count for that day. (The sum of
                  successes and failures from all runs that started that day)
    """
    session = session or get_session()
    full_query = db_utils.model_query(models.Run, session=session).join(
        models.RunMetadata).filter_by(key='build_queue', value='gate')

    # Process date bounds
    if isinstance(start_date, str):
        start_date = datetime.datetime.strptime(start_date, '%b %d %Y')
    if isinstance(stop_date, str):
        stop_date = datetime.datetime.strptime(stop_date, '%b %d %Y')
    if start_date:
        full_query = full_query.filter(models.Run.run_at >= start_date)
    if stop_date:
        full_query = full_query.filter(models.Run.run_at <= stop_date)

    query = full_query.values(models.Run.run_at, models.Run.passes,
                              models.Run.fails)
    time_series = {}
    for test_run in query:
        start_time = test_run[0]
        # Sum of passes and failures is the count for the run
        local_run_count = test_run[1] + test_run[2]
        if start_time in time_series:
            time_series[start_time] = time_series[start_time] + local_run_count
        else:
            time_series[start_time] = local_run_count
    return time_series


def get_test_status_time_series(test_id, session=None):
    """Returns a time series dict of test_run statuses of a single test
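The new helper returns a plain dict keyed by Run.run_at (a full timestamp) with passes + fails as the value, and dailycount.generate_series() wraps that dict in a pandas Series. A hedged sketch of the hand-off with made-up sample data; the resample step is not part of this change, it just shows how the per-run points could be collapsed to one count per calendar day:

import datetime

import pandas as pd

# Shaped like get_test_run_series() output: run start time -> passes + fails.
test_starts = {
    datetime.datetime(2015, 6, 1, 9, 30): 1200,
    datetime.datetime(2015, 6, 1, 14, 5): 1185,
    datetime.datetime(2015, 6, 2, 10, 0): 1210,
}

counts = pd.Series(test_starts).sort_index()
per_day = counts.resample('D').sum()  # one aggregate count per calendar day
print(per_day)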