Add support for keeping a moving avg of test run_time
This commit adds tracking of a moving average of individual test run times. It adds a new run_time column to the tests table and updates the test's row each time the test appears in a subunit stream being processed.
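The update is the standard cumulative moving average: fold one new sample into the previous average, weighted by how many samples that average already covers. A minimal sketch of the arithmetic (names here are illustrative, not the project's API):

    def update_avg(prev_avg, count, sample):
        # prev_avg covers `count` earlier samples; fold in one new sample
        if prev_avg is None:
            return sample  # the first observation seeds the average
        return ((count * prev_avg) + sample) / (count + 1)

    # An average of 2.0s over 4 runs plus a single 10.0s outlier moves the
    # average to 3.6s instead of letting one slow run dominate.
    assert update_avg(2.0, 4, 10.0) == 3.6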
TODO.rst
@@ -6,8 +6,6 @@ Short Term
 * Add a new metadata table for each existing table (run_metadata,
   test_metadata, test_run_metadata) to store extra info from stream like
   tags, or attrs and other information about runs like job name.
-* Add average runtime column to tests table to keep running average of
-  how long the test takes to run.
 * Add artifacts option to CLI on subunit2sql to store log links in runs table
 * Add unit tests
 
@@ -55,6 +55,7 @@ class Test(BASE, SubunitBase):
     run_count = sa.Column(sa.Integer())
     success = sa.Column(sa.Integer())
     failure = sa.Column(sa.Integer())
+    run_time = sa.Column(sa.Float())
 
 
 class Run(BASE, SubunitBase):
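With the column in place, the running average reads back like any other attribute on the Test model. A hypothetical query, assuming a SQLAlchemy session from the project's configured session factory:

    # Find tests averaging over a minute per run (illustrative only).
    slow_tests = session.query(Test).filter(Test.run_time > 60.0).all()
    for t in slow_tests:
        print(t.test_id, t.run_time)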
@@ -0,0 +1,37 @@
+# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Create avg runtime column in test table
+
+Revision ID: 163fd5aa1380
+Revises: 3db7b49816d5
+Create Date: 2014-06-16 15:45:19.221576
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '163fd5aa1380'
+down_revision = '3db7b49816d5'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+    op.add_column('tests', sa.Column('run_time', sa.Float(), nullable=True))
+
+
+def downgrade():
+    op.drop_column('tests', 'run_time')
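The migration is a plain Alembic revision, so it can be applied or reverted with Alembic's programmatic API as well as the CLI; a sketch, assuming an alembic.ini configured for the target database (the path is an assumption, not this repo's layout):

    from alembic import command
    from alembic.config import Config

    cfg = Config('alembic.ini')
    command.upgrade(cfg, '163fd5aa1380')
    # command.downgrade(cfg, '3db7b49816d5')  # back out just this revision

Because the new column is nullable, rows that exist before the upgrade simply carry NULL until their test next appears in a processed stream.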
@@ -20,6 +20,16 @@ import testtools
 DAY_SECONDS = 60 * 60 * 24
+
+
+def get_duration(start, end):
+    if not start or not end:
+        duration = ''
+    else:
+        delta = end - start
+        duration = '%d.%06ds' % (
+            delta.days * DAY_SECONDS + delta.seconds, delta.microseconds)
+    return duration
 
 
 class ReadSubunit(object):
 
     def __init__(self, stream_file):
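Given the definition above, get_duration renders a timedelta as whole seconds padded to six microsecond digits with a trailing 's', or an empty string when either timestamp is missing:

    from datetime import datetime

    start = datetime(2014, 6, 16, 15, 45, 19, 0)
    end = datetime(2014, 6, 16, 15, 45, 20, 500000)

    print(get_duration(start, end))  # '1.500000s'
    print(get_duration(None, end))   # ''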
@@ -84,18 +94,9 @@ class ReadSubunit(object):
             name = newname
         return name
 
-    def get_duration(self, start, end):
-        if not start or not end:
-            duration = ''
-        else:
-            delta = end - start
-            duration = '%d.%06ds' % (
-                delta.days * DAY_SECONDS + delta.seconds, delta.microseconds)
-        return duration
-
     def run_time(self):
         runtime = 0.0
         for name, data in self.results.items():
-            runtime += float(self.get_duration(data['start_time'],
-                                               data['end_time']).strip('s'))
+            runtime += float(get_duration(data['start_time'],
+                                          data['end_time']).strip('s'))
         return runtime
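run_time simply sums those per-test durations after stripping the trailing 's'. A contrived illustration that bypasses stream parsing, since results is normally populated while reading the subunit stream:

    from datetime import datetime, timedelta

    t0 = datetime(2014, 6, 16, 15, 45, 19)
    rs = ReadSubunit.__new__(ReadSubunit)  # skip __init__ purely for illustration
    rs.results = {
        'test_a': {'start_time': t0, 'end_time': t0 + timedelta(seconds=2)},
        'test_b': {'start_time': t0, 'end_time': t0 + timedelta(seconds=3.5)},
    }
    print(rs.run_time())  # 5.5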
@@ -50,9 +50,24 @@ def parse_args(argv, default_config_files=None):
                   default_config_files=default_config_files)
 
 
-def increment_counts(run, test, status, session=None):
+def running_avg(test, values, result):
+    count = test.success
+    avg_prev = test.run_time
+    curr_runtime = float(subunit.get_duration(result['start_time'],
+                                              result['end_time']).strip('s'))
+    if isinstance(avg_prev, float):
+        # Using a smoothed moving avg to limit the effect of a single outlier
+        new_avg = ((count * avg_prev) + curr_runtime) / (count + 1)
+        values['run_time'] = new_avg
+    else:
+        values['run_time'] = curr_runtime
+    return values
+
+
+def increment_counts(run, test, results, session=None):
     test_values = {'run_count': test.run_count + 1}
     run_values = {}
+    status = results.get('status')
     run = api.get_run_by_id(run.id, session)
     if status == 'success':
         test_values['success'] = test.success + 1
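Note that running_avg weights the previous average by test.success, the count of prior successful runs, and that the isinstance(avg_prev, float) check handles rows predating the migration, whose run_time is NULL (None): the first runtime seen seeds the average. A quick illustration with a stand-in test object rather than the ORM model (running_avg and its subunit import are assumed to be in scope):

    from collections import namedtuple
    from datetime import datetime, timedelta

    FakeTest = namedtuple('FakeTest', ['success', 'run_time'])
    start = datetime(2014, 6, 16, 15, 45, 19)
    result = {'start_time': start,
              'end_time': start + timedelta(seconds=2.5)}

    # Pre-migration row: run_time is None, so the 2.5s sample seeds the average.
    print(running_avg(FakeTest(success=4, run_time=None), {}, result))
    # {'run_time': 2.5}

    # Existing average of 2.0s over 4 successes: (4 * 2.0 + 2.5) / 5 = 2.1
    print(running_avg(FakeTest(success=4, run_time=2.0), {}, result))
    # {'run_time': 2.1}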
@@ -66,6 +81,7 @@ def increment_counts(run, test, status, session=None):
     else:
         msg = "Unknown test status %s" % status
         raise exceptions.UnknownStatus(msg)
+    test_values = running_avg(test, test_values, results)
     if test_values:
         api.update_test(test_values, test.id)
     api.update_run(run_values, run.id)
@@ -78,7 +94,7 @@ def process_results(results):
         db_test = api.get_test_by_test_id(test, session)
         if not db_test:
             db_test = api.create_test(test)
-        increment_counts(db_run, db_test, results[test]['status'], session)
+        increment_counts(db_run, db_test, results[test], session)
         api.create_test_run(db_test.id, db_run.id, results[test]['status'],
                             results[test]['start_time'],
                             results[test]['end_time'])
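For context, increment_counts now receives the whole per-test dict instead of just the status string, so running_avg can reach the timestamps. From the calls above, each entry of results looks roughly like this (illustrative test id and values):

    from datetime import datetime, timedelta

    t0 = datetime(2014, 6, 16, 15, 45, 19)
    results = {
        'some.test.id': {
            'status': 'success',                  # read by increment_counts and create_test_run
            'start_time': t0,                     # used for the running-average runtime
            'end_time': t0 + timedelta(seconds=2),
        },
    }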