Merge "Remove subunit-trace fork"

Jenkins · 2 years ago · commit 50be1aeb69
4 changed files with 2 additions and 313 deletions
  1. test-requirements.txt    (+1, -0)
  2. tools/pretty_tox.sh      (+0, -6)
  3. tools/subunit-trace.py   (+0, -306)
  4. tox.ini                  (+1, -1)

test-requirements.txt  (+1, -0)

@@ -6,6 +6,7 @@
 coverage>=4.0 # Apache-2.0
 ddt>=1.0.1 # MIT
 hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
+os-testr>=0.8.0 # Apache-2.0
 oslosphinx>=4.7.0 # Apache-2.0
 oslotest>=1.10.0 # Apache-2.0
 python-subunit>=0.0.18 # Apache-2.0/BSD

tools/pretty_tox.sh  (+0, -6)

@@ -1,6 +0,0 @@
-#! /bin/sh
-
-TESTRARGS=$1
-
-exec 3>&1
-status=$(exec 4>&1 >&3; ( python setup.py testr --slowest --testr-args="--subunit $TESTRARGS"; echo $? >&4 ) | $(dirname $0)/subunit-trace.py -f) && exit $status
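
Note (not part of the change): the removed wrapper relied on a non-obvious file-descriptor trick to show subunit-trace's human-readable output on the terminal while still exiting with the test runner's status. A minimal, self-contained sketch of that pattern is below; run_tests and format_stream are hypothetical stand-ins for the testr invocation and subunit-trace.py -f.

#!/bin/sh
# Sketch of the exit-status trick used by the removed pretty_tox.sh.
run_tests()     { echo "test_foo ... ok"; return 1; }  # pretend the runner failed
format_stream() { sed 's/^/| /'; }                     # stand-in for subunit-trace.py -f

exec 3>&1                 # fd 3 := the real stdout (the terminal)
status=$(
  exec 4>&1 >&3           # fd 4 := the $() capture pipe; stdout := the terminal again
  ( run_tests; echo $? >&4 ) | format_stream
  # run_tests' output feeds the formatter through the pipe; the formatter
  # prints to the terminal via fd 3; run_tests' exit code goes to fd 4 and
  # is therefore what $() captures.
) && exit "$status"       # formatter succeeded => exit with the runner's code (1 here)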

tools/subunit-trace.py  (+0, -306)

@@ -1,306 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2014 Hewlett-Packard Development Company, L.P.
-# Copyright 2014 Samsung Electronics
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Trace a subunit stream in reasonable detail and high accuracy."""
-
-import argparse
-import functools
-import os
-import re
-import sys
-
-import mimeparse
-import subunit
-import testtools
-
-DAY_SECONDS = 60 * 60 * 24
-FAILS = []
-RESULTS = {}
-
-
-class Starts(testtools.StreamResult):
-
-    def __init__(self, output):
-        super(Starts, self).__init__()
-        self._output = output
-
-    def startTestRun(self):
-        self._neednewline = False
-        self._emitted = set()
-
-    def status(self, test_id=None, test_status=None, test_tags=None,
-               runnable=True, file_name=None, file_bytes=None, eof=False,
-               mime_type=None, route_code=None, timestamp=None):
-        super(Starts, self).status(
-            test_id, test_status,
-            test_tags=test_tags, runnable=runnable, file_name=file_name,
-            file_bytes=file_bytes, eof=eof, mime_type=mime_type,
-            route_code=route_code, timestamp=timestamp)
-        if not test_id:
-            if not file_bytes:
-                return
-            if not mime_type or mime_type == 'test/plain;charset=utf8':
-                mime_type = 'text/plain; charset=utf-8'
-            primary, sub, parameters = mimeparse.parse_mime_type(mime_type)
-            content_type = testtools.content_type.ContentType(
-                primary, sub, parameters)
-            content = testtools.content.Content(
-                content_type, lambda: [file_bytes])
-            text = content.as_text()
-            if text and text[-1] not in '\r\n':
-                self._neednewline = True
-            self._output.write(text)
-        elif test_status == 'inprogress' and test_id not in self._emitted:
-            if self._neednewline:
-                self._neednewline = False
-                self._output.write('\n')
-            worker = ''
-            for tag in test_tags or ():
-                if tag.startswith('worker-'):
-                    worker = '(' + tag[7:] + ') '
-            if timestamp:
-                timestr = timestamp.isoformat()
-            else:
-                timestr = ''
-                self._output.write('%s: %s%s [start]\n' %
-                                   (timestr, worker, test_id))
-            self._emitted.add(test_id)
-
-
-def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
-    """Clean up the test name for display.
-
-    By default we strip out the tags in the test because they don't help us
-    in identifying the test that is run to it's result.
-
-    Make it possible to strip out the testscenarios information (not to
-    be confused with tempest scenarios) however that's often needed to
-    indentify generated negative tests.
-    """
-    if strip_tags:
-        tags_start = name.find('[')
-        tags_end = name.find(']')
-        if tags_start > 0 and tags_end > tags_start:
-            newname = name[:tags_start]
-            newname += name[tags_end + 1:]
-            name = newname
-
-    if strip_scenarios:
-        tags_start = name.find('(')
-        tags_end = name.find(')')
-        if tags_start > 0 and tags_end > tags_start:
-            newname = name[:tags_start]
-            newname += name[tags_end + 1:]
-            name = newname
-
-    return name
-
-
-def get_duration(timestamps):
-    start, end = timestamps
-    if not start or not end:
-        duration = ''
-    else:
-        delta = end - start
-        duration = '%d.%06ds' % (
-            delta.days * DAY_SECONDS + delta.seconds, delta.microseconds)
-    return duration
-
-
-def find_worker(test):
-    for tag in test['tags']:
-        if tag.startswith('worker-'):
-            return int(tag[7:])
-    return 'NaN'
-
-
-# Print out stdout/stderr if it exists, always
-def print_attachments(stream, test, all_channels=False):
-    """Print out subunit attachments.
-
-    Print out subunit attachments that contain content. This
-    runs in 2 modes, one for successes where we print out just stdout
-    and stderr, and an override that dumps all the attachments.
-    """
-    channels = ('stdout', 'stderr')
-    for name, detail in test['details'].items():
-        # NOTE(sdague): the subunit names are a little crazy, and actually
-        # are in the form pythonlogging:'' (with the colon and quotes)
-        name = name.split(':')[0]
-        if detail.content_type.type == 'test':
-            detail.content_type.type = 'text'
-        if (all_channels or name in channels) and detail.as_text():
-            title = "Captured %s:" % name
-            stream.write("\n%s\n%s\n" % (title, ('~' * len(title))))
-            # indent attachment lines 4 spaces to make them visually
-            # offset
-            for line in detail.as_text().split('\n'):
-                stream.write("    %s\n" % line)
-
-
-def show_outcome(stream, test, print_failures=False, failonly=False):
-    global RESULTS
-    status = test['status']
-    # TODO(sdague): ask lifeless why on this?
-    if status == 'exists':
-        return
-
-    worker = find_worker(test)
-    name = cleanup_test_name(test['id'])
-    duration = get_duration(test['timestamps'])
-
-    if worker not in RESULTS:
-        RESULTS[worker] = []
-    RESULTS[worker].append(test)
-
-    # don't count the end of the return code as a fail
-    if name == 'process-returncode':
-        return
-
-    if status == 'fail':
-        FAILS.append(test)
-        stream.write('{%s} %s [%s] ... FAILED\n' % (
-            worker, name, duration))
-        if not print_failures:
-            print_attachments(stream, test, all_channels=True)
-    elif not failonly:
-        if status == 'success':
-            stream.write('{%s} %s [%s] ... ok\n' % (
-                worker, name, duration))
-            print_attachments(stream, test)
-        elif status == 'skip':
-            stream.write('{%s} %s ... SKIPPED: %s\n' % (
-                worker, name, test['details']['reason'].as_text()))
-        else:
-            stream.write('{%s} %s [%s] ... %s\n' % (
-                worker, name, duration, test['status']))
-            if not print_failures:
-                print_attachments(stream, test, all_channels=True)
-
-    stream.flush()
-
-
-def print_fails(stream):
-    """Print summary failure report.
-
-    Currently unused, however there remains debate on inline vs. at end
-    reporting, so leave the utility function for later use.
-    """
-    if not FAILS:
-        return
-    stream.write("\n==============================\n")
-    stream.write("Failed %s tests - output below:" % len(FAILS))
-    stream.write("\n==============================\n")
-    for f in FAILS:
-        stream.write("\n%s\n" % f['id'])
-        stream.write("%s\n" % ('-' * len(f['id'])))
-        print_attachments(stream, f, all_channels=True)
-    stream.write('\n')
-
-
-def count_tests(key, value):
-    count = 0
-    for k, v in RESULTS.items():
-        for item in v:
-            if key in item:
-                if re.search(value, item[key]):
-                    count += 1
-    return count
-
-
-def run_time():
-    runtime = 0.0
-    for k, v in RESULTS.items():
-        for test in v:
-            runtime += float(get_duration(test['timestamps']).strip('s'))
-    return runtime
-
-
-def worker_stats(worker):
-    tests = RESULTS[worker]
-    num_tests = len(tests)
-    delta = tests[-1]['timestamps'][1] - tests[0]['timestamps'][0]
-    return num_tests, delta
-
-
-def print_summary(stream):
-    stream.write("\n======\nTotals\n======\n")
-    stream.write("Run: %s in %s sec.\n" % (count_tests('status', '.*'),
-                                           run_time()))
-    stream.write(" - Passed: %s\n" % count_tests('status', 'success'))
-    stream.write(" - Skipped: %s\n" % count_tests('status', 'skip'))
-    stream.write(" - Failed: %s\n" % count_tests('status', 'fail'))
-
-    # we could have no results, especially as we filter out the process-codes
-    if RESULTS:
-        stream.write("\n==============\nWorker Balance\n==============\n")
-
-        for w in range(max(RESULTS.keys()) + 1):
-            if w not in RESULTS:
-                stream.write(
-                    " - WARNING: missing Worker %s! "
-                    "Race in testr accounting.\n" % w)
-            else:
-                num, time = worker_stats(w)
-                stream.write(" - Worker %s (%s tests) => %ss\n" %
-                             (w, num, time))
-
-
-def parse_args():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--no-failure-debug', '-n', action='store_true',
-                        dest='print_failures', help='Disable printing failure '
-                        'debug information in realtime')
-    parser.add_argument('--fails', '-f', action='store_true',
-                        dest='post_fails', help='Print failure debug '
-                        'information after the stream is processed')
-    parser.add_argument('--failonly', action='store_true',
-                        dest='failonly', help="Don't print success items",
-                        default=(
-                            os.environ.get('TRACE_FAILONLY', False)
-                            is not False))
-    return parser.parse_args()
-
-
-def main():
-    args = parse_args()
-    stream = subunit.ByteStreamToStreamResult(
-        sys.stdin, non_subunit_name='stdout')
-    starts = Starts(sys.stdout)
-    outcomes = testtools.StreamToDict(
-        functools.partial(show_outcome, sys.stdout,
-                          print_failures=args.print_failures,
-                          failonly=args.failonly))
-    summary = testtools.StreamSummary()
-    result = testtools.CopyStreamResult([starts, outcomes, summary])
-    result.startTestRun()
-    try:
-        stream.run(result)
-    finally:
-        result.stopTestRun()
-    if count_tests('status', '.*') == 0:
-        print("The test run didn't actually run any tests")
-        return 1
-    if args.post_fails:
-        print_fails(sys.stdout)
-    print_summary(sys.stdout)
-    return (0 if summary.wasSuccessful() else 1)
-
-
-if __name__ == '__main__':
-    sys.exit(main())
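
Note (not part of the change): the removed tracer read a subunit v2 stream on stdin (main() wraps sys.stdin in ByteStreamToStreamResult) and exposed only the flags defined in parse_args(). A usage sketch; the testr invocations are assumptions about testrepository's CLI, not something this repo documented:

# Feed a live run through the tracer:
testr run --subunit | python tools/subunit-trace.py --fails
# Or replay the most recent run:
testr last --subunit | python tools/subunit-trace.py -f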

tox.ini  (+1, -1)

@@ -18,7 +18,7 @@ whitelist_externals = sh
                       find
 commands = find . -type f -name "*.py[c|o]" -delete
            find . -type d -name "__pycache__" -delete
-           sh tools/pretty_tox.sh '{posargs}'
+           ostestr '{posargs}'
 
 [testenv:fullstack]
 basepython = python2.7
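
Note (not part of the commit): os-testr ships its own subunit-trace along with the ostestr wrapper, which is why the in-tree fork and the shell wrapper can be dropped once os-testr>=0.8.0 is in test-requirements.txt. Roughly, the new tox command stands in for the old pipeline; the commented expansion below is a sketch, not an exact equivalence:

# What tox now runs:
ostestr '{posargs}'
# roughly replacing the removed wrapper's pipeline:
#   python setup.py testr --slowest --testr-args="--subunit {posargs}" \
#       | tools/subunit-trace.py -f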
