Initial support for tempest providers.

Instead of indexes, test runs are now identified by a source (e.g. a test repository, a subunit stream file, or stdin), and any number of these sources can be mixed and matched. The export script now accepts any number of inputs from these different sources. For now only the timeline has been patched with provider support; the other tempest views are currently broken. (A sketch of how mixed inputs are configured follows the commit metadata below.)
Tim Buckley 2015-08-19 14:05:17 -06:00
parent e141c24158
commit 4cf81555ee
10 changed files with 312 additions and 100 deletions
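
For context, a rough sketch (not part of this commit) of how mixed inputs end up configured. The placeholder paths are illustrative, and assigning to the stackviz settings module directly mirrors what the export script in this diff does with its parsed arguments:

```python
# Roughly equivalent to invoking the exporter with:
#   -r ./repo-a -r ./repo-b -f ./run.subunit      (placeholder paths)
from stackviz import settings
from stackviz.parser import tempest_subunit

settings.TEST_REPOSITORIES = ['./repo-a', './repo-b']  # -r/--repository, repeatable
settings.TEST_STREAMS = ['./run.subunit']              # -f/--stream-file, repeatable
settings.TEST_STREAM_STDIN = False                     # -i/--stdin reads one stream from stdin

# get_providers() builds (and caches) one provider per loadable source;
# unreadable repositories and missing stream files are skipped silently.
for name, provider in sorted(tempest_subunit.get_providers().items()):
    print(name, provider.description, provider.count)
```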

View File

@@ -76,8 +76,16 @@ def init_django(args):
settings.USE_GZIP = args.gzip
settings.OFFLINE = True
print(repr(args))
if args.repository:
settings.TEST_REPOSITORIES = (args.repository,)
settings.TEST_REPOSITORIES = args.repository
if args.stream_file:
settings.TEST_STREAMS = args.stream_file
if args.stdin:
settings.TEST_STREAM_STDIN = True
if args.dstat:
settings.DSTAT_CSV = args.dstat
@@ -95,13 +103,21 @@ def main():
parser.add_argument("--ignore-bower",
help="Ignore missing Bower components.",
action="store_true")
parser.add_argument("--gzip",
parser.add_argument("-z", "--gzip",
help="Enable gzip compression for data files.",
action="store_true")
parser.add_argument("--repository",
help="The directory containing the `.testrepository` "
"to export. If not provided, the `settings.py` "
"configured value will be used.")
parser.add_argument("-f", "--stream-file",
action="append",
help="Include the given direct subunit stream.")
parser.add_argument("-r", "--repository",
action="append",
help="A directory containing a `.testrepository` to "
"include. If not provided, the `settings.py` "
"configured values will be used.")
parser.add_argument("-i", "--stdin",
help="Read a direct subunit stream from standard "
"input.",
action="store_true")
parser.add_argument("--dstat",
help="The path to the DStat log file (CSV-formatted) "
"to include. If not provided, the `settings.py` "
@@ -132,23 +148,23 @@ def main():
print("Rendering:", path)
export_single_page(path, args.path)
repos = tempest_subunit.get_repositories()
if repos:
for run_id in range(repos[0].count()):
print("Rendering views for tempest run #%d" % (run_id))
export_single_page('/tempest_timeline_%d.html' % run_id, args.path)
export_single_page('/tempest_results_%d.html' % run_id, args.path)
for provider in tempest_subunit.get_providers().values():
for i in range(provider.count):
param = (provider.name, i)
print("Exporting data for tempest run #%d" % (run_id))
export_single_page('/tempest_api_tree_%d.json' % run_id,
print("Rendering views for tempest run %s #%d" % param)
export_single_page('/tempest_timeline_%s_%d.html' % param,
args.path)
export_single_page('/tempest_results_%s_%d.html' % param,
args.path)
print("Exporting data for tempest run %s #%d" % param)
export_single_page('/tempest_api_tree_%s_%d.json' % param,
args.path, args.gzip)
export_single_page('/tempest_api_raw_%d.json' % run_id,
export_single_page('/tempest_api_raw_%s_%d.json' % param,
args.path, args.gzip)
export_single_page('/tempest_api_details_%d.json' % run_id,
export_single_page('/tempest_api_details_%s_%d.json' % param,
args.path, args.gzip)
else:
print("Warning: no test repository could be loaded, no data will "
"be available!")
print("Exporting DStat log: dstat_log.csv")
export_single_page('/dstat_log.csv', args.path, args.gzip)

View File

@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from stackviz.parser.tempest_subunit import get_repositories
from stackviz.parser.tempest_subunit import get_providers
from stackviz.settings import OFFLINE
from stackviz.settings import USE_GZIP
@@ -20,14 +20,16 @@ from stackviz.settings import USE_GZIP
def inject_extra_context(request):
ret = {
'use_gzip': USE_GZIP,
'offline' : OFFLINE
'offline': OFFLINE
}
repos = get_repositories()
if repos:
providers = get_providers()
if providers:
default = providers.values()[0]
ret.update({
'tempest_latest_run': get_repositories()[0].latest_id(),
'tempest_runs': range(get_repositories()[0].count()),
'tempest_providers': providers.values(),
'tempest_default_provider': default,
})
return ret

View File

@@ -12,12 +12,15 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import shutil
import subunit
import sys
from functools import partial
from io import BytesIO
from subunit import ByteStreamToStreamResult
from testtools import CopyStreamResult
from testtools import StreamResult
from testtools import StreamSummary
@@ -33,29 +36,188 @@ NAME_SCENARIO_PATTERN = re.compile(r'^(.+) \((.+)\)$')
NAME_TAGS_PATTERN = re.compile(r'^(.+)\[(.+)\]$')
def get_repositories():
"""Loads all test repositories from locations configured in settings
_provider_cache = None
Where settings is found in`settings.TEST_REPOSITORIES`. Only locations
with a valid `.testrepository` subdirectory containing valid test entries
will be returned.
:return: a list of loaded :class:`Repository` instances
:rtype: list[Repository]
class InvalidSubunitProvider(Exception):
pass
class SubunitProvider(object):
@property
def name(self):
"""Returns a unique name for this provider, such that a valid URL
fragment pointing to a particular stream from this provider is
`name_index`, applicable for paths to pages and data files making use
of the stream.
:return: a path fragment referring to the stream at `index` from this
provider
"""
raise NotImplementedError()
@property
def description(self):
"""Returns a user-facing description for this provider.
This description may be used in UI contexts, but will not be used
within paths or other content-sensitive contexts.
:return: a description for this provider
"""
raise NotImplementedError()
@property
def count(self):
raise NotImplementedError()
def describe(self, index):
"""Returns a short, user-visible description for the contents of this
subunit stream provider.
:return: a description that can apply to all streams returned by this
provider
"""
raise NotImplementedError()
def get_stream(self, index):
"""Returns a file-like object representing the subunit stream at the
given index.
:param index: the index of the stream; must be between `0` and
`count - 1` (inclusive)
"""
raise NotImplementedError()
@property
def indexes(self):
# for the benefit of django templates
return range(self.count)
@property
def streams(self):
"""Creates a generator that iterates over each stream available in
this provider.
:return: each stream available from this generator
"""
for i in range(self.count):
yield self.get_stream(i)
class RepositoryProvider(SubunitProvider):
def __init__(self, repository_path):
self.repository_path = repository_path
self.repository = RepositoryFactory().open(repository_path)
@property
def name(self):
return "repo_%s" % os.path.basename(self.repository_path)
@property
def description(self):
return "Repository: %s" % os.path.basename(self.repository_path)
@property
def count(self):
return self.repository.count()
def describe(self, index):
return "Repository (%s): #%d" % (
os.path.basename(self.repository_path),
index
)
def get_stream(self, index):
return self.repository.get_latest_run().get_subunit_stream()
class FileProvider(SubunitProvider):
def __init__(self, path):
if not os.path.exists(path):
raise InvalidSubunitProvider("Stream doesn't exist: %s" % path)
self.path = path
@property
def name(self):
return "file_%s" % os.path.basename(self.path)
@property
def description(self):
return "Subunit File: %s" % os.path.basename(self.path)
@property
def count(self):
return 1
def describe(self, index):
return "File: %s" % os.path.basename(self.path)
def get_stream(self, index):
if index != 0:
raise IndexError("Index out of bounds: %d" % index)
return open(self.path, "r")
class StandardInputProvider(SubunitProvider):
def __init__(self):
self.buffer = BytesIO()
shutil.copyfileobj(sys.stdin, self.buffer)
@property
def name(self):
return "stdin"
@property
def description(self):
return "Subunit Stream (stdin)"
@property
def count(self):
return 1
def get_stream(self, index):
if index != 0:
raise IndexError()
return self.buffer
def get_providers():
"""Loads all test providers from locations configured in settings.
:return: a dict of loaded provider names and their associated
:class:`SubunitProvider` instances
:rtype: dict[str, SubunitProvider]
"""
global _provider_cache
factory = RepositoryFactory()
if _provider_cache is not None:
return _provider_cache
ret = []
_provider_cache = {}
for path in settings.TEST_REPOSITORIES:
try:
ret.append(factory.open(path))
p = RepositoryProvider(path)
_provider_cache[p.name] = p
except (ValueError, RepositoryNotFound):
# skip
continue
return ret
for path in settings.TEST_STREAMS:
try:
p = FileProvider(path)
_provider_cache[p.name] = p
except InvalidSubunitProvider:
continue
if settings.TEST_STREAM_STDIN:
p = StandardInputProvider()
_provider_cache[p.name] = p
return _provider_cache
def _clean_name(name):
@@ -122,7 +284,6 @@ def convert_stream(stream_file, strip_details=False):
return ret
def convert_run(test_run, strip_details=False):
"""Converts the given test run into a raw list of test dicts.

View File

@@ -101,10 +101,19 @@ TEMPLATE_DIRS = [
os.path.join(BASE_DIR, 'stackviz', 'templates')
]
# If True, read a stream from stdin (only valid for exported sites)
TEST_STREAM_STDIN = False
# A list of files containing directly-accessible subunit streams.
TEST_STREAMS = []
# A list of test repositories containing (potentially) multiple subunit
# streams.
TEST_REPOSITORIES = [
os.path.join(BASE_DIR, 'test_data')
]
# The input dstat file
DSTAT_CSV = 'dstat.log'
# If true, AJAX calls should attempt to load `*.json.gz` files rather than
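
With the defaults above, and assuming the bundled test_data directory contains a loadable `.testrepository`, the provider registry would surface a single entry keyed by the directory's basename (a sketch, not output from this commit):

```python
from stackviz.parser.tempest_subunit import get_providers

providers = get_providers()
print(sorted(providers))                         # ['repo_test_data']
print(providers['repo_test_data'].description)   # Repository: test_data
```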

View File

@@ -22,10 +22,14 @@
<a href="#"><i class="fa fa-bar-chart-o fa-fw"></i> Tempest<span class="fa arrow"></span></a>
<ul class="nav nav-second-level">
<li>
<a href="tempest_results_{{ tempest_latest_run }}.html"><i class="fa fa-clock-o fa-fw"></i> Sunburst</a>
<a href="tempest_results_{{ tempest_default_provider.name }}_{{ tempest_default_provider.count | add:'-1' }}.html">
<i class="fa fa-clock-o fa-fw"></i> Sunburst
</a>
</li>
<li>
<a href="tempest_timeline_{{ tempest_latest_run }}.html"><i class="fa fa-calendar fa-fw"></i> Timeline</a>
<a href="tempest_timeline_{{ tempest_default_provider.name }}_{{ tempest_default_provider.count | add:'-1' }}.html">
<i class="fa fa-calendar fa-fw"></i> Timeline
</a>
</li>
<!--<li>
<a href="/tempest/"><i class="fa fa-database fa-fw"></i> Compare</a>

View File

@@ -44,8 +44,11 @@
<span class="caret"></span>
</button>
<ul class="dropdown-menu pull-right" role="menu">
{% for run_id in tempest_runs %}
<li><a href="tempest_timeline_{{run_id}}.html">Run #{{run_id}}</a></li>
{% for provider in tempest_providers %}
<li class="dropdown-header">{{ provider.description }}</li>
{% for index in provider.indexes %}
<li><a href="tempest_timeline_{{ provider.name }}_{{ index }}.html">Run #{{ index }}</a></li>
{% endfor %}
{% endfor %}
</ul>
</div>
@@ -131,7 +134,7 @@ var loadDetails = function(callback) {
detailsWaiting.push(callback);
if (!detailsInProgress) {
var url = "tempest_api_details_{{run_id}}.json";
var url = "tempest_api_details_{{provider_name}}_{{run_id}}.json";
if ("{{use_gzip}}" === "True") {
url += ".gz";
}
@@ -186,7 +189,7 @@ window.addEventListener('load', function() {
var selectedItem = null;
var selectedValue = null;
var url = "tempest_api_raw_{{run_id}}.json";
var url = "tempest_api_raw_{{provider_name}}_{{run_id}}.json";
if ("{{use_gzip}}" === "True") {
url += ".gz";
}

View File

@@ -15,8 +15,8 @@
from django.http import Http404
from restless.views import Endpoint
from stackviz.parser.tempest_subunit import convert_run
from stackviz.parser.tempest_subunit import get_repositories
from stackviz.parser.tempest_subunit import convert_stream
from stackviz.parser.tempest_subunit import get_providers
from stackviz.parser.tempest_subunit import reorganize
#: Cached results from loaded subunit logs indexed by their run number
@@ -34,6 +34,10 @@ class NoRunDataException(Http404):
pass
class ProviderNotFoundException(Http404):
pass
class RunNotFoundException(Http404):
pass
@@ -42,61 +46,71 @@ class TestNotFoundException(Http404):
pass
def _load_run(run_id):
if run_id in _cached_run:
return _cached_run[run_id]
def _load_run(provider_name, run_id):
if (provider_name, run_id) in _cached_run:
return _cached_run[provider_name, run_id]
repos = get_repositories()
if not repos:
raise NoRunDataException("No test repositories could be loaded")
providers = get_providers()
if not providers:
raise NoRunDataException("No test providers could be loaded")
if provider_name not in providers:
raise ProviderNotFoundException("Requested subunit provider could not "
"be found")
p = providers[provider_name]
try:
# assume first repo for now
run = repos[0].get_test_run(run_id)
stream = p.get_stream(run_id)
# strip details for now
# TODO(provide method for getting details on demand)
# (preferably for individual tests to avoid bloat)
converted_run = convert_run(run, strip_details=True)
_cached_run[run_id] = converted_run
converted_run = convert_stream(stream, strip_details=True)
_cached_run[provider_name, run_id] = converted_run
return converted_run
except KeyError:
raise RunNotFoundException("Requested test run could not be found")
def _load_tree(run_id):
if run_id in _cached_tree:
return _cached_tree[run_id]
def _load_tree(provider, run_id):
if (provider, run_id) in _cached_tree:
return _cached_tree[provider, run_id]
run = _load_run(run_id)
run = _load_run(provider, run_id)
tree = reorganize(run)
_cached_tree[run_id] = tree
_cached_tree[provider, run_id] = tree
return tree
def _load_details(run_id, test_name):
if run_id not in _cached_details:
repos = get_repositories()
if not repos:
raise NoRunDataException("No test repositories could be loaded")
def _load_details(provider_name, run_id, test_name):
if (provider_name, run_id) not in _cached_details:
providers = get_providers()
if not providers:
raise NoRunDataException("No test providers could be loaded")
if provider_name not in providers:
raise ProviderNotFoundException("Requested subunit provider could "
"not be found")
provider = providers[provider_name]
try:
# assume first repo for now
run = repos[0].get_test_run(run_id)
converted_run = convert_run(run, strip_details=False)
stream = provider.get_stream(run_id)
converted_run = convert_stream(stream, strip_details=False)
# remap dict to allow direct access to details via test name
dest = {}
for entry in converted_run:
dest[entry['name']] = entry['details']
_cached_details[run_id] = dest
except KeyError:
_cached_details[provider_name, run_id] = dest
except (KeyError, IndexError):
raise RunNotFoundException("Requested test run could not be found")
details_map = _cached_details[run_id]
details_map = _cached_details[provider_name, run_id]
if test_name is None:
return details_map
else:
@@ -108,15 +122,15 @@ def _load_details(run_id, test_name):
class TempestRunRawEndpoint(Endpoint):
def get(self, request, run_id):
return _load_run(run_id)
def get(self, request, provider_name, run_id):
return _load_run(provider_name, int(run_id))
class TempestRunTreeEndpoint(Endpoint):
def get(self, request, run_id):
return _load_tree(run_id)
def get(self, request, provider_name, run_id):
return _load_tree(provider_name, int(run_id))
class TempestRunDetailsEndpoint(Endpoint):
def get(self, request, run_id, test_name=None):
return _load_details(run_id, test_name)
def get(self, request, run_id, provider_name, test_name=None):
return _load_details(int(run_id), provider_name, test_name)

View File

@@ -20,6 +20,7 @@ class ResultsView(TemplateView):
def get_context_data(self, **kwargs):
context = super(ResultsView, self).get_context_data(**kwargs)
context['provider_name'] = self.kwargs['provider_name']
context['run_id'] = self.kwargs['run_id']
return context

View File

@@ -20,6 +20,7 @@ class TimelineView(TemplateView):
def get_context_data(self, **kwargs):
context = super(TimelineView, self).get_context_data(**kwargs)
context['provider_name'] = self.kwargs['provider_name']
context['run_id'] = self.kwargs['run_id']
return context

View File

@@ -25,27 +25,28 @@ from api import TempestRunRawEndpoint
from api import TempestRunTreeEndpoint
urlpatterns = patterns('',
url(r'^results_(?P<run_id>\d+).html$',
ResultsView.as_view(),
name='tempest_results'),
url(r'^timeline_(?P<run_id>\d+).html$',
TimelineView.as_view(),
name='tempest_timeline'),
urlpatterns = patterns(
'',
url(r'^results_(?P<provider_name>[\w_\.]+)_(?P<run_id>\d+).html$',
ResultsView.as_view(),
name='tempest_results'),
url(r'^timeline_(?P<provider_name>[\w_\.]+)_(?P<run_id>\d+).html$',
TimelineView.as_view(),
name='tempest_timeline'),
url(r'^api_tree_(?P<run_id>\d+).json$',
TempestRunTreeEndpoint.as_view(),
name='tempest_api_tree'),
url(r'^api_raw_(?P<run_id>\d+).json$',
TempestRunRawEndpoint.as_view(),
name='tempest_api_raw'),
url(r'^api_details_(?P<run_id>\d+).json$',
TempestRunDetailsEndpoint.as_view()),
url(r'^api_details_(?P<run_id>\d+)_(?P<test_name>[^/]+)'
r'.json$',
TempestRunDetailsEndpoint.as_view()),
url(r'^api_tree_(?P<provider_name>[\w_\.]+)_(?P<run_id>\d+).json$',
TempestRunTreeEndpoint.as_view(),
name='tempest_api_tree'),
url(r'^api_raw_(?P<provider_name>[\w_\.]+)_(?P<run_id>\d+).json$',
TempestRunRawEndpoint.as_view(),
name='tempest_api_raw'),
url(r'^api_details_(?P<provider_name>[\w_\.]+)_(?P<run_id>\d+).json$',
TempestRunDetailsEndpoint.as_view()),
url(r'^api_details_(?P<provider_name>[\w_\.]+)_(?P<run_id>\d+)_'
r'(?P<test_name>[^/]+).json$',
TempestRunDetailsEndpoint.as_view()),
url(r'^aggregate.html$',
AggregateResultsView.as_view(),
name='tempest_aggregate_results'),
)
url(r'^aggregate.html$',
AggregateResultsView.as_view(),
name='tempest_aggregate_results'),
)
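
A sketch of what the provider-qualified patterns resolve to. The 'repo_test_data' key comes from the settings sketch above, and the exact prefix depends on how this URLconf is included by the project, which is not shown in this diff:

```python
from django.core.urlresolvers import reverse  # Django 1.x import path

print(reverse('tempest_results',
              kwargs={'provider_name': 'repo_test_data', 'run_id': 0}))
# ...results_repo_test_data_0.html -- matching the tempest_results_<provider>_<run>.html
# pages written by the export script

print(reverse('tempest_api_tree',
              kwargs={'provider_name': 'repo_test_data', 'run_id': 0}))
# ...api_tree_repo_test_data_0.json
```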