Retire Packaging Deb project repos
This commit is part of a series to retire the Packaging Deb project. Step 2 is to remove all content from the project repos, replacing it with a README that notes where to find ongoing work and how to recover the repo if needed at some future point (as in https://docs.openstack.org/infra/manual/drivers.html#retiring-a-project).

Change-Id: Ia5a15a7e460cb5c23a7f74818b0f916da6ecaa85
@@ -1,7 +0,0 @@
[run]
branch = True
source = os_testr
omit = os_testr/tests/*,os_testr/openstack/*

[report]
ignore_errors = True
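
For context (an editorial sketch, not part of the original tree): with this
coverage configuration in place, a branch-coverage report for the os_testr
package could be produced along these lines (the test discovery path is an
assumption)::

    $ python -m coverage run -m unittest discover os_testr/tests
    $ python -m coverage report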

.gitignore (54 lines removed, vendored)
@@ -1,54 +0,0 @@
*.py[cod]

# C extensions
*.so

# Packages
*.egg*
dist
build
eggs
parts
bin
var
sdist
develop-eggs
.installed.cfg
lib
lib64

# Installer logs
pip-log.txt

# Unit test / coverage reports
cover/
.coverage*
!.coveragerc
.tox
nosetests.xml
.testrepository
.venv

# Translations
*.mo

# Mr Developer
.mr.developer.cfg
.project
.pydevproject

# Complexity
output/*.html
output/*/index.html

# Sphinx
doc/build

# pbr generates these
AUTHORS
ChangeLog

# Editors
*~
.*.swp
.*sw?
@@ -1,4 +0,0 @@
[gerrit]
host=review.openstack.org
port=29418
project=openstack/os-testr.git
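
This [gerrit] section is the configuration consumed by the git-review tool.
With it in place, submitting a change for review is a one-liner (a usage
sketch, assuming git-review is installed)::

    $ git review

which pushes the current topic branch to review.openstack.org:29418 for the
openstack/os-testr project.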

.mailmap (3 lines removed)
@@ -1,3 +0,0 @@
# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>
@@ -1,7 +0,0 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
             OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
             OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
             ${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
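
For illustration (an approximation added here; the actual substitution is
performed by testr), with the defaults above a test-listing invocation
expands to roughly::

    $ OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=60 \
          python -m subunit.run discover -t ./ . --list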

@@ -1,16 +0,0 @@
If you would like to contribute to the development of OpenStack,
you must follow the steps in this page:

   http://docs.openstack.org/infra/manual/developers.html

Once those steps have been completed, changes to OpenStack
should be submitted for review via the Gerrit tool, following
the workflow documented at:

   http://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.

Bugs should be filed on Launchpad, not GitHub:

   https://bugs.launchpad.net/os-testr
@@ -1,4 +0,0 @@
os-testr Style Commandments
===============================================

Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/

LICENSE (176 lines removed)
@@ -1,176 +0,0 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

@@ -1,6 +0,0 @@
include AUTHORS
include ChangeLog
exclude .gitignore
exclude .gitreview

global-exclude *.pyc
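
These MANIFEST.in rules take effect when a source distribution is built; a
quick way to check the resulting file list (a sketch, assuming a standard
pbr/setuptools layout)::

    $ python setup.py sdist
    $ tar -tzf dist/os-testr-*.tar.gz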

README (14 lines, new file)
@@ -0,0 +1,14 @@
This project is no longer maintained.

The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".

For ongoing work on maintaining OpenStack packages in the Debian
distribution, please see the Debian OpenStack packaging team at
https://wiki.debian.org/OpenStack/.

For any further questions, please email
openstack-dev@lists.openstack.org or join #openstack-dev on
Freenode.

README.rst (28 lines removed)
@@ -1,28 +0,0 @@
========
os-testr
========

.. image:: https://img.shields.io/pypi/v/os-testr.svg
    :target: https://pypi.python.org/pypi/os-testr/
    :alt: Latest Version

.. image:: https://img.shields.io/pypi/dm/os-testr.svg
    :target: https://pypi.python.org/pypi/os-testr/
    :alt: Downloads

A testr wrapper to provide functionality for OpenStack projects.

* Free software: Apache license
* Documentation: http://docs.openstack.org/os-testr/
* Source: http://git.openstack.org/cgit/openstack/os-testr
* Bugs: http://bugs.launchpad.net/os-testr

Features
--------

* ``ostestr``: a testr wrapper that uses subunit-trace for output and builds
  some helpful extra functionality around testr
* ``subunit-trace``: an output filter for a subunit stream which provides
  useful information about the run
* ``subunit2html``: generates a test results html page from a subunit stream
* ``generate-subunit``: generate a subunit stream for a single test
@@ -1,94 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    # 'sphinx.ext.intersphinx',
    'openstackdocstheme'
]

# openstackdocstheme options
repository_name = 'openstack/os-testr'
bug_project = 'os-testr'
bug_tag = ''

# Must set this variable to include year, month, day, hours, and minutes.
html_last_updated_fmt = '%Y-%m-%d %H:%M'

# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'os-testr'
copyright = u'2015, Matthew Treinish'

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages.  Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
html_theme = 'openstackdocs'
# html_static_path = ['static']

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = False

# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index',
     '%s.tex' % project,
     u'%s Documentation' % project,
     u'Matthew Treinish', 'manual'),
]

man_pages = [('ostestr', 'ostestr', 'tooling to run OpenStack tests',
             ['Matthew Treinish'], 1),
             ('subunit_trace', 'subunit-trace', 'pretty output filter for '
              'subunit streams', ['Matthew Treinish'], 1),
             ('subunit2html', 'subunit2html', 'generate a html results page '
              'from a subunit stream', ['Matthew Treinish'], 1)]

# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'http://docs.python.org/': None}
@@ -1,4 +0,0 @@
============
Contributing
============
.. include:: ../../../CONTRIBUTING.rst
@@ -1,35 +0,0 @@
====================================
Welcome to os-testr's documentation!
====================================

.. image:: https://img.shields.io/pypi/v/os-testr.svg
    :target: https://pypi.python.org/pypi/os-testr/
    :alt: Latest Version

.. image:: https://img.shields.io/pypi/dm/os-testr.svg
    :target: https://pypi.python.org/pypi/os-testr/
    :alt: Downloads

A testr wrapper to provide functionality for OpenStack projects.

* Free software: Apache license
* Documentation: http://docs.openstack.org/os-testr/
* Source: http://git.openstack.org/cgit/openstack/os-testr
* Bugs: http://bugs.launchpad.net/os-testr

Contents:

.. toctree::
   :maxdepth: 2

   install/index
   contributor/index
   user/index

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

@@ -1,12 +0,0 @@
============
Installation
============

At the command line::

    $ pip install os-testr

Or, if you have virtualenvwrapper installed::

    $ mkvirtualenv os-testr
    $ pip install os-testr
@@ -1,51 +0,0 @@
.. generate_subunit:

generate-subunit
================

generate-subunit is a simple tool to, as its name implies, generate a subunit
stream. It will generate a stream with a single test result to STDOUT. The
subunit protocol lets you concatenate multiple streams together, so if you
want to generate a stream with multiple results, just append the output of
multiple executions of generate-subunit.
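
For instance, a two-result stream could be assembled by appending runs into
one file (an illustrative sketch; test_a and test_b are placeholder test
IDs)::

    $ generate-subunit $(date +%s) 10 success test_a > combined_stream
    $ generate-subunit $(date +%s) 5 fail test_b >> combined_stream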

Summary
-------

    generate-subunit timestamp secs [status] [test_id]

Usage
-----

generate-subunit has 2 mandatory arguments. These are needed to specify when
the "test" started running and how long it took. The first argument is a POSIX
timestamp (which can be returned by the date util using ``date +%s``) for when
it started running. The second argument is the number of seconds it took for
the execution to finish. For example::

    $ generate-subunit $(date +%s) 42

will generate a stream with the test_id 'devstack' successfully running for 42
secs starting when the command was executed. This leads into the 2 optional
arguments. The first optional argument is for specifying the status. This must
be the 3rd argument when calling generate-subunit. Valid status options can
be found in the `testtools documentation`_. If status is not specified it will
default to success. For example::

    $ generate-subunit $(date +%s) 42 fail

will be the same as the previous example except that it marks the test as
failing.

.. _testtools documentation: http://testtools.readthedocs.io/en/latest/api.html#testtools.StreamResult.status

The other optional argument is the test_id (aka test name) and is used to
identify the "test" being run. For better or worse this defaults to *devstack*
(which is an artifact of why this tool was originally created). Note, this
must be the 4th argument when calling generate-subunit. This means you also
must specify a status if you want to set your own test_id. For example::

    $ generate-subunit $(date +%s) 42 fail my_little_test

will generate a subunit stream as before except the test will be named
my_little_test.
@@ -1 +0,0 @@
.. include:: ../../../ChangeLog
@@ -1,14 +0,0 @@
=====
Usage
=====

This section contains the documentation for each of the tools packaged in
os-testr.

.. toctree::
   :maxdepth: 2

   ostestr
   subunit_trace
   subunit2html
   generate_subunit
   history
@@ -1,256 +0,0 @@
.. _ostestr:

ostestr
=======

The ostestr command provides a wrapper around the testr command included in
the testrepository package. It's designed to build on the functionality
included in testr and work around several UI bugs in the short term. By
default it also has output that is much more useful for OpenStack's test
suites, which are lengthy in both runtime and number of tests. Please note
that the CLI semantics are still a work in progress as the project is quite
young, so default behavior might change in future versions.

Summary
-------

    ostestr [-b|--blacklist-file <blacklist_file>] [-r|--regex REGEX]
            [-w|--whitelist-file <whitelist_file>]
            [-p|--pretty] [--no-pretty] [-s|--subunit] [-l|--list]
            [-n|--no-discover <test_id>] [--slowest] [--no-slowest]
            [--pdb <test_id>] [--parallel] [--serial]
            [-c|--concurrency <workers>] [--until-failure] [--print-exclude]

Options
-------

  --blacklist-file BLACKLIST_FILE, -b BLACKLIST_FILE
                        Path to a blacklist file; this file contains a
                        separate regex exclude on each newline
  --whitelist-file WHITELIST_FILE, -w WHITELIST_FILE
                        Path to a whitelist file; this file contains a
                        separate regex on each newline
  --regex REGEX, -r REGEX
                        A normal testr selection regex.

  --black-regex BLACK_REGEX, -B BLACK_REGEX
                        Test rejection regex. If a test case matches during a
                        search operation, it will be removed from the final
                        test list.
  --pretty, -p
                        Print pretty output from subunit-trace. This is
                        mutually exclusive with --subunit
  --no-pretty
                        Disable the pretty output with subunit-trace
  --subunit, -s
                        Output the raw subunit v2 from the test run; this is
                        mutually exclusive with --pretty
  --list, -l
                        List all the tests which will be run.
  --no-discover TEST_ID, -n TEST_ID
                        Takes in a single test to bypass test discovery and
                        just execute the test specified
  --slowest
                        After the test run print the slowest tests
  --no-slowest
                        After the test run don't print the slowest tests
  --pdb TEST_ID
                        Run a single test that has pdb traces added
  --parallel
                        Run tests in parallel (this is the default)
  --serial
                        Run tests serially
  --concurrency WORKERS, -c WORKERS
                        The number of workers to use when running in parallel.
                        By default this is the number of CPUs
  --until-failure
                        Run the tests in a loop until a failure is
                        encountered. Running with subunit or pretty output
                        enabled will force the loop to run tests serially
  --print-exclude
                        If an exclude file is used this option will print the
                        comment from the same line and all skipped tests
                        before the test run

Running Tests
-------------

os-testr is primarily for running tests. At its most basic level you just
invoke ostestr to run a test suite for a project (assuming it's already set
up to run tests using testr). For example::

    $ ostestr

This will run tests in parallel (with the number of workers matching the
number of CPUs) and with subunit-trace output. If you need to run tests in
serial you can use the serial option::

    $ ostestr --serial

Or if you need to adjust the concurrency but still run in parallel you can use
-c/--concurrency::

    $ ostestr --concurrency 2

If you only want to run an individual test module or something more specific
(a single class, or test) and parallel execution doesn't matter, you can use
-n/--no-discover to skip test discovery and just directly call subunit.run on
the tests under the covers. Bypassing discovery is desirable when running a
small subset of tests in a larger test suite because the discovery time can
often far exceed the total run time of the tests.

For example::

    $ ostestr --no-discover test.test_thing.TestThing.test_thing_method

Additionally, if you need to run a single test module, class, or single test
with pdb enabled you can use --pdb to directly call testtools.run under the
covers, which works with pdb. For example::

    $ ostestr --pdb tests.test_thing.TestThing.test_thing_method


Test Selection
--------------

ostestr was initially designed to build on top of the test selection in testr.
testr only exposed a regex option to select tests. This functionality is
exposed via the --regex option. For example::

    $ ostestr --regex 'magic\.regex'

This will do a straight passthrough of the provided regex to testr.
When ostestr is asked to do more complex test selection than a single regex,
it will ask testr for a full list of tests and then pass the filtered test
list back to testr.
ostestr allows you to do simple test exclusion by passing a rejection/black
regex::

    $ ostestr --black-regex 'slow_tests|bad_tests'

ostestr also allows you to combine these arguments::

    $ ostestr --regex ui\.interface --black-regex 'slow_tests|bad_tests'

Here we first select all tests which match 'ui\.interface', then drop all
tests which match 'slow_tests|bad_tests' from the final list.

ostestr also allows you to specify a blacklist file to define a set
of regexes to exclude. You can specify a blacklist file with the
--blacklist_file/-b option, for example::

    $ ostestr --blacklist_file $path_to_file

The format for the file is line-separated regexes, with '#' used to signify
the start of a comment on a line. For example::

    # Blacklist File
    ^regex1 # Excludes these tests
    .*regex2 # exclude those tests

Each regex in the blacklist file, or passed as an argument, is used to drop
tests from the initial selection list; the file above will generate a list
which excludes any test matching either '^regex1' or '.*regex2'. If a
blacklist file is used in conjunction with the --regex option the regex
specified with --regex will be used for the initial test selection. Also it's
worth noting that the regex test selection options can not be used in
conjunction with the --no-discover or --pdb options described in the previous
section. This is because the regex selection requires using testr under the
covers to actually do the filtering, and those 2 options do not use testr.

The dual of the blacklist file is the whitelist file, which alters the initial
test selection regex by joining the whitelist elements with '|'.
You can specify the path to the file with --whitelist_file/-w, for example::

    $ ostestr --whitelist_file $path_to_file

The format for the file is more or less identical to the blacklist file::

    # Whitelist File
    ^regex1 # Include these tests
    .*regex2 # include those tests

However, instead of excluding the matches it will include them.
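
In other words (an illustrative equivalence, using the example entries above),
the whitelist file behaves roughly like passing the joined regex directly::

    $ ostestr --regex '^regex1|.*regex2'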

It's also worth noting that you can use the test list option to dry run any
selection arguments you are using. You just need to use --list/-l with your
selection options to do this, for example::

    $ ostestr --regex 'regex3.*' --blacklist_file blacklist.txt --list

This will list all the tests which will be run by ostestr using that
combination of arguments.

Please note that all of this selection functionality will be expanded on in
the future and a default grammar for selecting multiple tests will be chosen
in a future release. However as of right now all current arguments (which have
guarantees on always remaining in place) are still required to perform any
selection logic while this functionality is still under development.


Output Options
--------------

By default ostestr will use subunit-trace as the output filter on the test
run. It will also print the slowest tests from the run after the run is
concluded. You can disable printing the slowest tests with the --no-slowest
flag, for example::

    $ ostestr --no-slowest

If you'd like to disable the subunit-trace output you can do this using
--no-pretty::

    $ ostestr --no-pretty

ostestr also provides the option to just output the raw subunit stream on
STDOUT with --subunit/-s. Note if you want to use this you also have to
specify --no-pretty, as the subunit-trace output and the raw subunit output
are mutually exclusive. For example, to get raw subunit output the arguments
would be::

    $ ostestr --no-pretty --subunit

An additional option on top of the blacklist file is the --print-exclude
option. When this option is specified together with a blacklist file, ostestr
will print all the tests it will be excluding from the blacklist file before
the tests are run. If a line in the blacklist file has a comment, that comment
will be printed before listing the tests which will be excluded by that line's
regex. If no comment is present on a line, the regex from that line will be
used instead. For example, if you were using the example blacklist file from
the previous section the output before the regular test run output would be::

    $ ostestr -b blacklist.txt --print-exclude
    Excludes these tests
    regex1_match
    regex1_exclude

    exclude those tests
    regex2_match
    regex2_exclude

    ...

Notes for running with tox
--------------------------

If you use `tox`_ for running your tests and call ostestr as the test command,
it's recommended that you put {posargs} after ostestr in the commands stanza.
For example::

    [testenv]
    commands = ostestr {posargs}

.. _tox: https://tox.readthedocs.org/en/latest/

This will enable end users to pass args to configure the output, use the
selection logic, or any other options directly from the tox CLI. This will let
tox take care of the venv management and the environment separation but enable
direct access to all of the ostestr options to easily customize your test run.
For example, assuming the above posargs usage you would be able to do::

    $ tox -epy34 -- --regex ^regex1

or to skip discovery::

    $ tox -epy34 -- -n test.test_thing.TestThing.test_thing_method
@@ -1,33 +0,0 @@
.. _subunit2html:

subunit2html
============

subunit2html is a tool that takes in a subunit stream file and will output an
html page.

Summary
-------

    subunit2html subunit_stream [output]

Usage
-----

subunit2html takes in 1 mandatory argument. This is used to specify the
location of the subunit stream file. For example::

    $ subunit2html subunit_stream

By default subunit2html will store the generated html results file as
results.html in the current working directory.

An optional second argument can be provided to set the output path of the html
results file that is generated. If it is provided this will be the output path
for saving the generated file, otherwise results.html in the current working
directory will be used. For example::

    $ subunit2html subunit_stream test_results.html

will write the generated html results file to test_results.html in the current
working directory.
@@ -1,112 +0,0 @@
.. _subunit_trace:

subunit-trace
=============

subunit-trace is an output filter for subunit streams. It is often used in
conjunction with test runners that emit subunit to enable a consistent and
useful realtime output from a test run.

Summary
-------

subunit-trace [--fails|-f] [--failonly] [--perc-diff|-d] [--no-summary]
              [--diff-threshold|-t <threshold>] [--color]

Options
-------

  --no-failure-debug, -n
                        Disable printing failure debug information in realtime
  --fails, -f
                        Print failure debug information after the stream is
                        processed
  --failonly
                        Don't print success items
  --perc-diff, -d
                        Print percent change in run time on each test
  --diff-threshold THRESHOLD, -t THRESHOLD
                        Threshold to use for displaying percent change from
                        the avg run time. If one is not specified the percent
                        change will always be displayed.
  --no-summary
                        Don't print the summary of the test run after it
                        completes
  --color
                        Print results with colors

Usage
-----

subunit-trace will take a subunit stream in via STDIN. This is the only input
into the tool. It will then print on STDOUT the formatted test result output
for the test run information contained in the stream.

A subunit v2 stream must be passed into subunit-trace. If only a subunit v1
stream is available you must use the subunit-1to2 utility to convert it before
passing the stream into subunit-trace. For example this can be done by
chaining pipes::

    $ cat subunit_v1 | subunit-1to2 | subunit-trace

Adjusting per test output
^^^^^^^^^^^^^^^^^^^^^^^^^

subunit-trace provides several options to customize its output. This allows
users to customize the output from subunit-trace to suit their needs. The
output from subunit-trace basically comes in 2 parts, the per test output and
the summary at the end. By default subunit-trace will print failure messages
during the per test output, meaning when a test fails it will also print the
message and any traceback and other attachments at that time. However this can
be disabled by using --no-failure-debug/-n. For example::

    $ testr run --subunit | subunit-trace --no-failure-debug

There is also the option to print all failures together at the end of the test
run before the summary view. This is done using the --fails/-f option. For
example::

    $ testr run --subunit | subunit-trace --fails

Often the --fails and --no-failure-debug options are used in conjunction to
only print failures at the end of a test run. This is useful for large test
suites where an error message might be lost in the noise. To do this::

    $ testr run --subunit | subunit-trace --fails --no-failure-debug

By default subunit-trace will print a line for each test after it completes
with the test status. However, if you only want to see the run time output for
failures and not any other test status you can use the --failonly option. For
example::

    $ testr run --subunit | subunit-trace --failonly

The last output option provided by subunit-trace is to disable the summary
view of the test run which is normally displayed at the end of a run. You can
do this using the --no-summary option. For example::

    $ testr run --subunit | subunit-trace --no-summary


Show per test run time percent change
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

subunit-trace provides an option to display the percent change in run time
from the previous run. To do this subunit-trace leverages the testr internals
a bit. It uses the times.dbm database, which the file repository type in
testrepository creates, to get the previous run time for a test. If testr
hasn't ever been used before, or for whatever reason subunit-trace is unable
to find the times.dbm file from testr, no percentages will be displayed even
if the option is enabled. Additionally, a test which does not have an entry in
the times.dbm file will not have a percentage printed for it.

To enable this feature you use --perc-diff/-d, for example::

    $ testr run --subunit | subunit-trace --perc-diff

There is also the option to set a threshold value for this option. If used it
acts as an absolute value and only percentage changes that exceed it will be
printed. Use the --diff-threshold/-t option to set a threshold, for example::

    $ testr run --subunit | subunit-trace --perc-diff --diff-threshold 45

This will only display percent differences when the change in run time is
either at least 45% faster or at least 45% slower.
@@ -1,19 +0,0 @@
# -*- coding: utf-8 -*-

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pbr.version


__version__ = pbr.version.VersionInfo(
    'os_testr').version_string()
@@ -1,58 +0,0 @@
#!/usr/bin/env python2
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import datetime
import sys

import pbr.version
import subunit
from subunit import iso8601


__version__ = pbr.version.VersionInfo('os_testr').version_string()


def main():
    if '--version' in sys.argv:
        print(__version__)
        exit(0)

    start_time = datetime.datetime.fromtimestamp(float(sys.argv[1])).replace(
        tzinfo=iso8601.UTC)
    elapsed_time = datetime.timedelta(seconds=int(sys.argv[2]))
    stop_time = start_time + elapsed_time

    if len(sys.argv) > 3:
        status = sys.argv[3]
    else:
        status = 'success'

    if len(sys.argv) > 4:
        test_id = sys.argv[4]
    else:
        test_id = 'devstack'

    # Write the subunit test
    output = subunit.v2.StreamResultToBytes(sys.stdout)
    output.startTestRun()
    output.status(timestamp=start_time, test_id=test_id)
    # Write the end of the test
    output.status(test_status=status, timestamp=stop_time, test_id=test_id)
    output.stopTestRun()


if __name__ == '__main__':
    main()
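
Once installed as a console script, the stream this entry point emits can be
checked end to end by piping it into subunit-trace (a usage sketch)::

    $ generate-subunit $(date +%s) 42 success my_test | subunit-trace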
@@ -1,315 +0,0 @@
#!/usr/bin/env python2
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import argparse
import atexit
import copy
import io
import os
import subprocess
import sys
import tempfile

import pbr.version

from subunit import run as subunit_run
from testtools import run as testtools_run

from os_testr import regex_builder as rb
from os_testr import testlist_builder as tlb


__version__ = pbr.version.VersionInfo('os_testr').version_string()


def get_parser(args):
    parser = argparse.ArgumentParser(
        description='Tool to run openstack tests')
    parser.add_argument('--version', action='version',
                        version='%s' % __version__)
    parser.add_argument('--blacklist-file', '-b', '--blacklist_file',
                        help='Path to a blacklist file, this file '
                             'contains a separate regex exclude on each '
                             'newline')
    parser.add_argument('--whitelist-file', '-w', '--whitelist_file',
                        help='Path to a whitelist file, this file '
                             'contains a separate regex on each newline.')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--regex', '-r',
                       help='A normal testr selection regex.')
    group.add_argument('--path', metavar='FILE_OR_DIRECTORY',
                       help='A file name or directory of tests to run.')
    group.add_argument('--no-discover', '-n', metavar='TEST_ID',
                       help="Takes in a single test to bypasses test "
 | 
			
		||||
                            "discover and just execute the test specified. "
 | 
			
		||||
                            "A file name may be used in place of a test "
 | 
			
		||||
                            "name.")
 | 
			
		||||
    parser.add_argument('--black-regex', '-B',
 | 
			
		||||
                        help='Test rejection regex. If a test cases name '
 | 
			
		||||
                        'matches on re.search() operation , '
 | 
			
		||||
                        'it will be removed from the final test list. '
 | 
			
		||||
                        'Effectively the black-regex is added to '
 | 
			
		||||
                        ' black regex list, but you do need to edit a file. '
 | 
			
		||||
                        'The black filtering happens after the initial '
 | 
			
		||||
                        ' white selection, which by default is everything.')
 | 
			
		||||
    pretty = parser.add_mutually_exclusive_group()
    pretty.add_argument('--pretty', '-p', dest='pretty', action='store_true',
                        help='Print pretty output from subunit-trace. This is '
                             'mutually exclusive with --subunit')
    pretty.add_argument('--no-pretty', dest='pretty', action='store_false',
                        help='Disable the pretty output with subunit-trace')
    parser.add_argument('--subunit', '-s', action='store_true',
                        help='Output the raw subunit v2 from the test run. '
                             'This is mutually exclusive with --pretty')
    parser.add_argument('--list', '-l', action='store_true',
                        help='List all the tests which will be run.')
    parser.add_argument('--color', action='store_true',
                        help='Use color in the pretty output')
    slowest = parser.add_mutually_exclusive_group()
    slowest.add_argument('--slowest', dest='slowest', action='store_true',
                         help="after the test run print the slowest tests")
    slowest.add_argument('--no-slowest', dest='slowest', action='store_false',
                         help="after the test run don't print the slowest "
                              "tests")
    parser.add_argument('--pdb', metavar='TEST_ID',
                        help='Run a single test that has pdb traces added')
    parallel = parser.add_mutually_exclusive_group()
    parallel.add_argument('--parallel', dest='parallel', action='store_true',
                          help='Run tests in parallel (this is the default)')
    parallel.add_argument('--serial', dest='parallel', action='store_false',
                          help='Run tests serially')
    parser.add_argument('--concurrency', '-c', type=int, metavar='WORKERS',
                        help='The number of workers to use when running in '
                             'parallel. By default this is the number of cpus')
    parser.add_argument('--until-failure', action='store_true',
                        help='Run the tests in a loop until a failure is '
                             'encountered. Running with subunit or pretty '
                             'output enabled will force the loop to run '
                             'tests serially')
    parser.add_argument('--print-exclude', action='store_true',
                        help='If an exclude file is used this option will '
                             'print the comment from the same line and all '
                             'skipped tests before the test run')
    parser.set_defaults(pretty=True, slowest=True, parallel=True)
    return parser.parse_known_args(args)


def call_testr(regex, subunit, pretty, list_tests, slowest, parallel, concur,
               until_failure, color, list_of_tests=None, others=None):
    others = others or []
    if parallel:
        cmd = ['testr', 'run', '--parallel']
        if concur:
            cmd.append('--concurrency=%s' % concur)
    else:
        cmd = ['testr', 'run']
    if list_tests:
        cmd = ['testr', 'list-tests']
    elif (subunit or pretty) and not until_failure:
        cmd.append('--subunit')
    elif not (subunit or pretty) and until_failure:
        cmd.append('--until-failure')
    if list_of_tests:
        test_fd, test_file_name = tempfile.mkstemp()
        atexit.register(os.remove, test_file_name)
        test_file = os.fdopen(test_fd, 'w')
        test_file.write('\n'.join(list_of_tests) + '\n')
        test_file.close()
        cmd.extend(('--load-list', test_file_name))
    elif regex:
        cmd.append(regex)

    env = copy.deepcopy(os.environ)

    if pretty:
        subunit_trace_cmd = ['subunit-trace', '--no-failure-debug', '-f']
        if color:
            subunit_trace_cmd.append('--color')

    # This workaround is necessary because of lp bug 1411804; it's super hacky
    # and makes tons of unfounded assumptions, but it works for the most part
    if (subunit or pretty) and until_failure:
        test_list = rb._get_test_list(regex, env)
        count = 0
        failed = False
        if not test_list:
            print("No tests to run")
            return 1
        # If pretty or subunit output is desired, manually loop forever over
        # the tests individually and generate the desired output in a linear
        # series; this avoids 1411804 while retaining most of the desired
        # behavior
        while True:
            for test in test_list:
                if pretty:
                    cmd = ['python', '-m', 'subunit.run', test]
                    ps = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE)
                    subunit_trace_cmd.append('--no-summary')
                    proc = subprocess.Popen(subunit_trace_cmd,
                                            env=env,
                                            stdin=ps.stdout)
                    ps.stdout.close()
                    proc.communicate()
                    if proc.returncode > 0:
                        failed = True
                        break
                else:
                    try:
                        subunit_run.main([sys.argv[0], test], sys.stdout)
                    except SystemExit as e:
                        # SystemExit stores the exit status on e.code; the
                        # original compared the exception object itself to an
                        # int, which is not a meaningful test.
                        if e.code and e.code > 0:
                            print("Ran %s tests without failure" % count)
                            return 1
                        else:
                            raise
                count = count + 1
            if failed:
                print("Ran %s tests without failure" % count)
                # A failure occurred, so propagate a non-zero exit code.
                return 1
    # If not until-failure special case call testr like normal
    elif pretty and not list_tests:
        cmd.extend(others)
        ps = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE)
        proc = subprocess.Popen(subunit_trace_cmd,
                                env=env, stdin=ps.stdout)
        ps.stdout.close()
    else:
        cmd.extend(others)
        proc = subprocess.Popen(cmd, env=env)
    proc.communicate()
    return_code = proc.returncode
    if slowest and not list_tests:
        print("\nSlowest Tests:\n")
        slow_proc = subprocess.Popen(['testr', 'slowest'], env=env)
        slow_proc.communicate()
    return return_code
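
# The pretty path above is roughly equivalent to this shell pipeline
# (an illustrative sketch, not part of the original file):
#
#   testr run --parallel --subunit <regex> | \
#       subunit-trace --no-failure-debug -f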


def call_subunit_run(test_id, pretty, subunit):
    env = copy.deepcopy(os.environ)
    cmd_save_results = ['testr', 'load', '--subunit']

    if pretty:
        # Use subunit run module
        cmd = ['python', '-m', 'subunit.run', test_id]
        ps = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE)
        # Save subunit results via testr
        pfile = subprocess.Popen(cmd_save_results, env=env,
                                 stdin=ps.stdout, stdout=subprocess.PIPE)
        ps.stdout.close()
        # Transform output via subunit-trace
        proc = subprocess.Popen(['subunit-trace', '--no-failure-debug', '-f'],
                                env=env, stdin=pfile.stdout)
        pfile.stdout.close()
        proc.communicate()
        return proc.returncode
    elif subunit:
        sstdout = io.BytesIO()
        subunit_run.main([sys.argv[0], test_id], sstdout)
        pfile = subprocess.Popen(cmd_save_results, env=env,
                                 stdin=subprocess.PIPE)
        pfile.communicate(input=sstdout.getvalue())
    else:
        testtools_run.main([sys.argv[0], test_id], sys.stdout)


def _ensure_testr():
    if not os.path.isdir('.testrepository'):
        subprocess.call(['testr', 'init'])


def _select_and_call_runner(opts, exclude_regex, others):
    ec = 1
    _ensure_testr()

    if not opts.no_discover and not opts.pdb:
        ec = call_testr(exclude_regex, opts.subunit, opts.pretty, opts.list,
                        opts.slowest, opts.parallel, opts.concurrency,
                        opts.until_failure, opts.color, None, others)
    else:
        if others:
            print('Unexpected arguments: ' + ' '.join(others))
            return 2
        test_to_run = opts.no_discover or opts.pdb
        if test_to_run.find('/') != -1:
            test_to_run = rb.path_to_regex(test_to_run)
        ec = call_subunit_run(test_to_run, opts.pretty, opts.subunit)
    return ec


def _call_testr_with_list(opts, test_list, others):
    ec = 1
    _ensure_testr()

    if opts.list:
        print("\n".join(test_list))
        return 0

    ec = call_testr(None, opts.subunit, opts.pretty, opts.list,
                    opts.slowest, opts.parallel, opts.concurrency,
                    opts.until_failure, opts.color, test_list, others)
    return ec


def ostestr(args):
    opts, others = get_parser(args)
    if opts.pretty and opts.subunit:
        msg = ('Subunit output and pretty output cannot be specified at the '
               'same time')
        print(msg)
        return 2
    if opts.list and opts.no_discover:
        msg = ('you can not list tests when you are bypassing discovery to '
               'run a single test')
        print(msg)
        return 3
    if not opts.parallel and opts.concurrency:
        msg = "You can't specify a concurrency to use when running serially"
        print(msg)
        return 4
    if (opts.pdb or opts.no_discover) and opts.until_failure:
        msg = "You can not use until_failure mode with pdb or no-discover"
        print(msg)
        return 5
    if ((opts.pdb or opts.no_discover) and
            (opts.blacklist_file or opts.whitelist_file)):
        msg = "You can not use blacklist or whitelist with pdb or no-discover"
        print(msg)
        return 6
    if ((opts.pdb or opts.no_discover) and (opts.black_regex)):
        msg = "You can not use black-regex with pdb or no-discover"
        print(msg)
        return 7

    if opts.path:
        regex = rb.path_to_regex(opts.path)
    else:
        regex = opts.regex

    if opts.blacklist_file or opts.whitelist_file or opts.black_regex:
        list_of_tests = tlb.construct_list(opts.blacklist_file,
                                           opts.whitelist_file,
                                           regex,
                                           opts.black_regex,
                                           opts.print_exclude)
        return (_call_testr_with_list(opts, list_of_tests, others))
    else:
        return (_select_and_call_runner(opts, regex, others))


def main():
    exit(ostestr(sys.argv[1:]))

if __name__ == '__main__':
    main()
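
# Typical invocations (illustrative examples, not from the original file):
#
#   ostestr --regex tests.test_foo                 # discover, run in parallel
#   ostestr --no-discover tests.test_foo.TestFoo.test_bar
#   ostestr --blacklist-file exclude.txt --list    # show the filtered list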
@@ -1,103 +0,0 @@
# Copyright 2016 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import os
import subprocess


def _get_test_list(regex, env=None):
    env = env or copy.deepcopy(os.environ)
    testr_args = ['testr', 'list-tests']
    if regex:
        testr_args.append(regex)
    proc = subprocess.Popen(testr_args, env=env,
                            stdout=subprocess.PIPE, universal_newlines=True)
    out = proc.communicate()[0]
    raw_test_list = out.split('\n')
    bad = False
    test_list = []
    exclude_list = ['OS_', 'CAPTURE', 'TEST_TIMEOUT', 'PYTHON',
                    'subunit.run discover']
    for line in raw_test_list:
        for exclude in exclude_list:
            if exclude in line or not line:
                bad = True
                break
        if not bad:
            test_list.append(line)
        bad = False
    return test_list


def print_skips(regex, message):
    test_list = _get_test_list(regex)
    if test_list:
        if message:
            print(message)
        else:
            print('Skipped because of regex %s:' % regex)
        for test in test_list:
            print(test)
        # Extra whitespace to separate
        print('\n')


def path_to_regex(path):
    root, _ = os.path.splitext(path)
    return root.replace('/', '.')
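
# For example (illustrative; the path is a hypothetical one):
#   path_to_regex('os_testr/tests/test_return.py')
#   -> 'os_testr.tests.test_return'
# which testr accepts as a selection regex.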


def get_regex_from_whitelist_file(file_path):
    lines = []
    for line in open(file_path).read().splitlines():
        split_line = line.strip().split('#')
        # Before the # is the regex
        line_regex = split_line[0].strip()
        if line_regex:
            lines.append(line_regex)
    return '|'.join(lines)


def construct_regex(blacklist_file, whitelist_file, regex, print_exclude):
    """Deprecated, please use testlist_builder.construct_list instead."""
    if not blacklist_file:
        exclude_regex = ''
    else:
        black_file = open(blacklist_file, 'r')
        exclude_regex = ''
        for line in black_file:
            raw_line = line.strip()
            split_line = raw_line.split('#')
            # Before the # is the regex
            line_regex = split_line[0].strip()
            if len(split_line) > 1:
                # After the # is a comment
                comment = split_line[1].strip()
            else:
                comment = ''
            if line_regex:
                if print_exclude:
                    print_skips(line_regex, comment)
                if exclude_regex:
                    exclude_regex = '|'.join([line_regex, exclude_regex])
                else:
                    exclude_regex = line_regex
        if exclude_regex:
            exclude_regex = "^((?!" + exclude_regex + ").)*$"
    if regex:
        exclude_regex += regex
    if whitelist_file:
        exclude_regex += '%s' % get_regex_from_whitelist_file(whitelist_file)
    return exclude_regex
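
# Example of the regex this deprecated helper builds (illustrative): with a
# blacklist file containing the single entry 'slow' and no whitelist or
# regex, construct_regex returns '^((?!slow).)*$', i.e. every test id that
# does not match 'slow'.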
@@ -1,765 +0,0 @@
#!/usr/bin/python
#
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility to convert a subunit stream to an html results file.
Code is adapted from the pyunit Html test runner at
http://tungwaiyip.info/software/HTMLTestRunner.html

Takes two arguments. First argument is path to subunit log file, second
argument is path of desired output file. Second argument is optional,
defaults to 'results.html'.

Original HTMLTestRunner License:
------------------------------------------------------------------------
Copyright (c) 2004-2007, Wai Yip Tung
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright notice,
  this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the following disclaimer in the
  documentation and/or other materials provided with the distribution.
* Neither the name Wai Yip Tung nor the names of its contributors may be
  used to endorse or promote products derived from this software without
  specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""

import collections
import datetime
import io
import sys
import traceback
from xml.sax import saxutils

import pbr.version
import subunit
import testtools


__version__ = pbr.version.VersionInfo('os_testr').version_string()


class TemplateData(object):
    """Define an HTML template for report customization and generation.

    Overall structure of an HTML report

    HTML
    +------------------------+
    |<html>                  |
    |  <head>                |
    |                        |
    |   STYLESHEET           |
    |   +----------------+   |
    |   |                |   |
    |   +----------------+   |
    |                        |
    |  </head>               |
    |                        |
    |  <body>                |
    |                        |
    |   HEADING              |
    |   +----------------+   |
    |   |                |   |
    |   +----------------+   |
    |                        |
    |   REPORT               |
    |   +----------------+   |
    |   |                |   |
    |   +----------------+   |
    |                        |
    |   ENDING               |
    |   +----------------+   |
    |   |                |   |
    |   +----------------+   |
    |                        |
    |  </body>               |
    |</html>                 |
    +------------------------+
    """

    STATUS = {
        0: 'pass',
        1: 'fail',
        2: 'error',
        3: 'skip',
    }

    DEFAULT_TITLE = 'Unit Test Report'
    DEFAULT_DESCRIPTION = ''

    # ------------------------------------------------------------------------
    # HTML Template

    HTML_TMPL = r"""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
     "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
    <title>%(title)s</title>
    <meta name="generator" content="%(generator)s"/>
    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
    %(stylesheet)s
</head>
<body>
<script language="javascript" type="text/javascript"><!--
output_list = Array();

/* level - 0:Summary; 1:Failed; 2:All */
function showCase(level) {
    trs = document.getElementsByTagName("tr");
    for (var i = 0; i < trs.length; i++) {
        tr = trs[i];
        id = tr.id;
        if (id.substr(0,2) == 'ft') {
            if (level < 1) {
                tr.className = 'hiddenRow';
            }
            else {
                tr.className = '';
            }
        }
        if (id.substr(0,2) == 'pt') {
            if (level > 1) {
                tr.className = '';
            }
            else {
                tr.className = 'hiddenRow';
            }
        }
    }
}


function showClassDetail(cid, count) {
    var id_list = Array(count);
    var toHide = 1;
    for (var i = 0; i < count; i++) {
        tid0 = 't' + cid.substr(1) + '.' + (i+1);
        tid = 'f' + tid0;
        tr = document.getElementById(tid);
        if (!tr) {
            tid = 'p' + tid0;
            tr = document.getElementById(tid);
        }
        id_list[i] = tid;
        if (tr.className) {
            toHide = 0;
        }
    }
    for (var i = 0; i < count; i++) {
        tid = id_list[i];
        if (toHide) {
            document.getElementById('div_'+tid).style.display = 'none'
            document.getElementById(tid).className = 'hiddenRow';
        }
        else {
            document.getElementById(tid).className = '';
        }
    }
}


function showTestDetail(div_id){
    var details_div = document.getElementById(div_id)
    var displayState = details_div.style.display
    // alert(displayState)
    if (displayState != 'block' ) {
        displayState = 'block'
        details_div.style.display = 'block'
    }
    else {
        details_div.style.display = 'none'
    }
}


function html_escape(s) {
    s = s.replace(/&/g,'&amp;');
    s = s.replace(/</g,'&lt;');
    s = s.replace(/>/g,'&gt;');
    return s;
}

/* obsoleted by detail in <div>
function showOutput(id, name) {
    var w = window.open("", //url
                    name,
                    "resizable,scrollbars,status,width=800,height=450");
    d = w.document;
    d.write("<pre>");
    d.write(html_escape(output_list[id]));
    d.write("\n");
    d.write("<a href='javascript:window.close()'>close</a>\n");
    d.write("</pre>\n");
    d.close();
}
*/
--></script>

%(heading)s
%(report)s
%(ending)s

</body>
</html>
"""
    # variables: (title, generator, stylesheet, heading, report, ending)

    # ------------------------------------------------------------------------
    # Stylesheet
    #
    # alternatively use a <link> for external style sheet, e.g.
    #   <link rel="stylesheet" href="$url" type="text/css">

    STYLESHEET_TMPL = """
<style type="text/css" media="screen">
body        { font-family: verdana, arial, helvetica, sans-serif;
    font-size: 80%; }
table       { font-size: 100%; width: 100%;}
pre         { font-size: 80%; }

/* -- heading -------------------------------------------------------------- */
h1 {
        font-size: 16pt;
        color: gray;
}
.heading {
    margin-top: 0ex;
    margin-bottom: 1ex;
}

.heading .attribute {
    margin-top: 1ex;
    margin-bottom: 0;
}

.heading .description {
    margin-top: 4ex;
    margin-bottom: 6ex;
}

/* -- css div popup -------------------------------------------------------- */
a.popup_link {
}

a.popup_link:hover {
    color: red;
}

.popup_window {
    display: none;
    overflow-x: scroll;
    /*border: solid #627173 1px; */
    padding: 10px;
    background-color: #E6E6D6;
    font-family: "Ubuntu Mono", "Lucida Console", "Courier New", monospace;
    text-align: left;
    font-size: 8pt;
}
/* -- report --------------------------------------------------------------- */
#show_detail_line {
    margin-top: 3ex;
    margin-bottom: 1ex;
}
#result_table {
    width: 100%;
    border-collapse: collapse;
    border: 1px solid #777;
}
#header_row {
    font-weight: bold;
    color: white;
    background-color: #777;
}
#result_table td {
    border: 1px solid #777;
    padding: 2px;
}
#total_row  { font-weight: bold; }
.passClass  { background-color: #6c6; }
.failClass  { background-color: #c60; }
.errorClass { background-color: #c00; }
.passCase   { color: #6c6; }
.failCase   { color: #c60; font-weight: bold; }
.errorCase  { color: #c00; font-weight: bold; }
.hiddenRow  { display: none; }
.testcase   { margin-left: 2em; }
td.testname {width: 40%}
td.small {width: 40px}

/* -- ending --------------------------------------------------------------- */
#ending {
}

</style>
"""

    # ------------------------------------------------------------------------
    # Heading
    #

    HEADING_TMPL = """<div class='heading'>
<h1>%(title)s</h1>
%(parameters)s
<p class='description'>%(description)s</p>
</div>

"""  # variables: (title, parameters, description)

    HEADING_ATTRIBUTE_TMPL = """
<p class='attribute'><strong>%(name)s:</strong> %(value)s</p>
"""  # variables: (name, value)

    # ------------------------------------------------------------------------
    # Report
    #

    REPORT_TMPL = """
<p id='show_detail_line'>Show
<a href='javascript:showCase(0)'>Summary</a>
<a href='javascript:showCase(1)'>Failed</a>
<a href='javascript:showCase(2)'>All</a>
</p>
<table id='result_table'>
<colgroup>
<col align='left' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
</colgroup>
<tr id='header_row'>
    <td>Test Group/Test case</td>
    <td>Count</td>
    <td>Pass</td>
    <td>Fail</td>
    <td>Error</td>
    <td>Skip</td>
    <td>View</td>
    <td> </td>
</tr>
%(test_list)s
<tr id='total_row'>
    <td>Total</td>
    <td>%(count)s</td>
    <td>%(Pass)s</td>
    <td>%(fail)s</td>
    <td>%(error)s</td>
    <td>%(skip)s</td>
    <td> </td>
    <td> </td>
</tr>
</table>
"""  # variables: (test_list, count, Pass, fail, error)

    REPORT_CLASS_TMPL = r"""
<tr class='%(style)s'>
    <td class="testname">%(desc)s</td>
    <td class="small">%(count)s</td>
    <td class="small">%(Pass)s</td>
    <td class="small">%(fail)s</td>
    <td class="small">%(error)s</td>
    <td class="small">%(skip)s</td>
    <td class="small"><a href="javascript:showClassDetail('%(cid)s',%(count)s)"
>Detail</a></td>
    <td> </td>
</tr>
"""  # variables: (style, desc, count, Pass, fail, error, cid)

    REPORT_TEST_WITH_OUTPUT_TMPL = r"""
<tr id='%(tid)s' class='%(Class)s'>
    <td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
    <td colspan='7' align='left'>

    <!--css div popup start-->
    <a class="popup_link" onfocus='this.blur();'
    href="javascript:showTestDetail('div_%(tid)s')" >
        %(status)s</a>

    <div id='div_%(tid)s' class="popup_window">
        <div style='text-align: right; color:red;cursor:pointer'>
        <a onfocus='this.blur();'
onclick="document.getElementById('div_%(tid)s').style.display = 'none' " >
           [x]</a>
        </div>
        <pre>
        %(script)s
        </pre>
    </div>
    <!--css div popup end-->

    </td>
</tr>
"""  # variables: (tid, Class, style, desc, status)

    REPORT_TEST_NO_OUTPUT_TMPL = r"""
<tr id='%(tid)s' class='%(Class)s'>
    <td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
    <td colspan='6' align='center'>%(status)s</td>
</tr>
"""  # variables: (tid, Class, style, desc, status)

    REPORT_TEST_OUTPUT_TMPL = r"""
%(id)s: %(output)s
"""  # variables: (id, output)

    # ------------------------------------------------------------------------
    # ENDING
    #

    ENDING_TMPL = """<div id='ending'> </div>"""

# -------------------- The end of the Template class -------------------


class ClassInfoWrapper(object):
    def __init__(self, name, mod):
        self.name = name
        self.mod = mod

    def __repr__(self):
        return "%s" % (self.name)


class HtmlOutput(testtools.TestResult):
    """Output test results in html."""

    def __init__(self, html_file='result.html'):
        super(HtmlOutput, self).__init__()
        self.success_count = 0
        self.failure_count = 0
        self.error_count = 0
        self.skip_count = 0
        self.result = []
        self.html_file = html_file

    def addSuccess(self, test):
        self.success_count += 1
        output = test.shortDescription()
        if output is None:
            output = test.id()
        self.result.append((0, test, output, ''))

    def addSkip(self, test, err):
        output = test.shortDescription()
        if output is None:
            output = test.id()
        self.skip_count += 1
        self.result.append((3, test, output, ''))

    def addError(self, test, err):
        output = test.shortDescription()
        if output is None:
            output = test.id()
        # Skipped tests are handled by SkipTest Exceptions.
        # if err[0] == SkipTest:
        #     self.skip_count += 1
        #     self.result.append((3, test, output, ''))
        # NOTE: the original 'else:' here attached to the 'if output is None'
        # check above (the SkipTest branch is commented out), so errors from
        # tests with a short description were silently dropped; count errors
        # unconditionally instead.
        self.error_count += 1
        _exc_str = self.formatErr(err)
        self.result.append((2, test, output, _exc_str))

    def addFailure(self, test, err):
        print(test)
        self.failure_count += 1
        _exc_str = self.formatErr(err)
        output = test.shortDescription()
        if output is None:
            output = test.id()
        self.result.append((1, test, output, _exc_str))

    def formatErr(self, err):
        exctype, value, tb = err
        return ''.join(traceback.format_exception(exctype, value, tb))

    def stopTestRun(self):
        super(HtmlOutput, self).stopTestRun()
        self.stopTime = datetime.datetime.now()
        report_attrs = self._getReportAttributes()
        generator = 'subunit2html %s' % __version__
        heading = self._generate_heading(report_attrs)
        report = self._generate_report()
        ending = self._generate_ending()
        output = TemplateData.HTML_TMPL % dict(
            title=saxutils.escape(TemplateData.DEFAULT_TITLE),
            generator=generator,
            stylesheet=TemplateData.STYLESHEET_TMPL,
            heading=heading,
            report=report,
            ending=ending,
        )
        if self.html_file:
            with open(self.html_file, 'wb') as html_file:
                html_file.write(output.encode('utf8'))

    def _getReportAttributes(self):
        """Return report attributes as a list of (name, value)."""
        status = []
        if self.success_count:
            status.append('Pass %s' % self.success_count)
        if self.failure_count:
            status.append('Failure %s' % self.failure_count)
        if self.error_count:
            status.append('Error %s' % self.error_count)
        if self.skip_count:
            status.append('Skip %s' % self.skip_count)
        if status:
            status = ' '.join(status)
        else:
            status = 'none'
        return [
            ('Status', status),
        ]

    def _generate_heading(self, report_attrs):
        a_lines = []
        for name, value in report_attrs:
            line = TemplateData.HEADING_ATTRIBUTE_TMPL % dict(
                name=saxutils.escape(name),
                value=saxutils.escape(value),
            )
            a_lines.append(line)
        heading = TemplateData.HEADING_TMPL % dict(
            title=saxutils.escape(TemplateData.DEFAULT_TITLE),
            parameters=''.join(a_lines),
            description=saxutils.escape(TemplateData.DEFAULT_DESCRIPTION),
        )
        return heading

    def _generate_report(self):
        rows = []
        sortedResult = self._sortResult(self.result)
        for cid, (cls, cls_results) in enumerate(sortedResult):
            # subtotal for a class
            np = nf = ne = ns = 0
            for n, t, o, e in cls_results:
                if n == 0:
                    np += 1
                elif n == 1:
                    nf += 1
                elif n == 2:
                    ne += 1
                else:
                    ns += 1

            # format class description
            if cls.mod == "__main__":
                name = cls.name
            else:
                name = "%s" % (cls.name)
            doc = cls.__doc__ and cls.__doc__.split("\n")[0] or ""
            desc = doc and '%s: %s' % (name, doc) or name

            row = TemplateData.REPORT_CLASS_TMPL % dict(
                style=(ne > 0 and 'errorClass' or nf > 0
                       and 'failClass' or 'passClass'),
                desc = desc,
                count = np + nf + ne + ns,
                Pass = np,
                fail = nf,
                error = ne,
                skip = ns,
                cid = 'c%s' % (cid + 1),
            )
            rows.append(row)

            for tid, (n, t, o, e) in enumerate(cls_results):
                self._generate_report_test(rows, cid, tid, n, t, o, e)

        report = TemplateData.REPORT_TMPL % dict(
            test_list=''.join(rows),
            count=str(self.success_count + self.failure_count +
                      self.error_count + self.skip_count),
            Pass=str(self.success_count),
            fail=str(self.failure_count),
            error=str(self.error_count),
            skip=str(self.skip_count),
        )
        return report

    def _sortResult(self, result_list):
        # unittest does not seem to run in any particular order.
        # Here at least we want to group them together by class.
        rmap = {}
        classes = []
        # Differentiate between classes that have test failures so we can sort
        # them at the top of the html page for easier troubleshooting
        clsmap_has_failure = collections.defaultdict(bool)

        def track_has_failure(name, n):
            if n == 1 or n == 2:
                clsmap_has_failure[name] = True

        for n, t, o, e in result_list:
            if hasattr(t, '_tests'):
                for inner_test in t._tests:
                    name = self._add_cls(rmap, classes, inner_test,
                                         (n, inner_test, o, e))
                    track_has_failure(name, n)
            else:
                name = self._add_cls(rmap, classes, t, (n, t, o, e))
                track_has_failure(name, n)

        failclasses = []
        passclasses = []
        for cls in classes:
            append_to = (failclasses if clsmap_has_failure[str(cls)]
                         else passclasses)
            append_to.append(cls)
        classort = lambda s: str(s)
        sortedfailclasses = sorted(failclasses, key=classort)
        sortedpassclasses = sorted(passclasses, key=classort)
        sortedclasses = sortedfailclasses + sortedpassclasses
        r = [(cls, rmap[str(cls)]) for cls in sortedclasses]
        return r

    def _add_cls(self, rmap, classes, test, data_tuple):
        if hasattr(test, 'test'):
            test = test.test
        if test.__class__ == subunit.RemotedTestCase:
            cl = test._RemotedTestCase__description.rsplit('.', 1)[0]
        else:
            cl = test.id().rsplit('.', 1)[0]
        mod = cl.rsplit('.', 1)[0]
        cls = ClassInfoWrapper(cl, mod)
        if not str(cls) in rmap:
            rmap[str(cls)] = []
            classes.append(cls)
        rmap[str(cls)].append(data_tuple)
        return str(cls)

    def _generate_report_test(self, rows, cid, tid, n, t, o, e):
        # e.g. 'pt1.1', 'ft1.1', etc
        # ptx.x for passed/skipped tests and ftx.x for failed/errored tests.
        has_output = bool(o or e)
        tid = ((n == 0 or n == 3) and
               'p' or 'f') + 't%s.%s' % (cid + 1, tid + 1)
        name = t.id().split('.')[-1]
        # if shortDescription is not the function name, use it; guard against
        # shortDescription() returning None, which would crash .find()
        short_desc = t.shortDescription()
        if short_desc and short_desc.find(name) == -1:
            doc = short_desc
        else:
            doc = None
        desc = doc and ('%s: %s' % (name, doc)) or name
        tmpl = (has_output and TemplateData.REPORT_TEST_WITH_OUTPUT_TMPL
                or TemplateData.REPORT_TEST_NO_OUTPUT_TMPL)

        script = TemplateData.REPORT_TEST_OUTPUT_TMPL % dict(
            id=tid,
            output=saxutils.escape(o + e),
        )

        row = tmpl % dict(
            tid=tid,
            Class=((n == 0 or n == 3) and 'hiddenRow' or 'none'),
            style=(n == 2 and 'errorCase' or
                   (n == 1 and 'failCase' or 'none')),
            desc=desc,
            script=script,
            status=TemplateData.STATUS[n],
        )
        rows.append(row)
        if not has_output:
            return

    def _generate_ending(self):
        return TemplateData.ENDING_TMPL

    def startTestRun(self):
        super(HtmlOutput, self).startTestRun()


class FileAccumulator(testtools.StreamResult):

    def __init__(self):
        super(FileAccumulator, self).__init__()
        self.route_codes = collections.defaultdict(io.BytesIO)

    def status(self, **kwargs):
        if kwargs.get('file_name') != 'stdout':
            return
        file_bytes = kwargs.get('file_bytes')
        if not file_bytes:
            return
        route_code = kwargs.get('route_code')
        stream = self.route_codes[route_code]
        stream.write(file_bytes)


def main():
    if '--version' in sys.argv:
        print(__version__)
        exit(0)

    if len(sys.argv) < 2:
        print("Need at least one argument: path to subunit log.")
        exit(1)
    subunit_file = sys.argv[1]
    if len(sys.argv) > 2:
        html_file = sys.argv[2]
    else:
        html_file = 'results.html'

    html_result = HtmlOutput(html_file)
    stream = open(subunit_file, 'rb')

    # Feed the subunit stream through both a V1 and V2 parser.
    # Depends on having the v2 capable libraries installed.
    # First V2.
    # Non-v2 content and captured non-test output will be presented as file
    # segments called stdout.
    suite = subunit.ByteStreamToStreamResult(stream, non_subunit_name='stdout')
    # The HTML output code is in legacy mode.
    result = testtools.StreamToExtendedDecorator(html_result)
    # Divert non-test output
    accumulator = FileAccumulator()
    result = testtools.StreamResultRouter(result)
    result.add_rule(accumulator, 'test_id', test_id=None)
    result.startTestRun()
    suite.run(result)
    # Now reprocess any found stdout content as V1 subunit
    for bytes_io in accumulator.route_codes.values():
        bytes_io.seek(0)
        suite = subunit.ProtocolTestCase(bytes_io)
        suite.run(html_result)
    result.stopTestRun()


if __name__ == '__main__':
    main()
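
# Example invocation (illustrative; file names are assumptions): convert a
# captured stream to an HTML report.
#
#   python subunit2html.py testrepository.subunit results.html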
 | 
			
		||||
@@ -1,403 +0,0 @@
#!/usr/bin/env python

# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Samsung Electronics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Trace a subunit stream in reasonable detail and high accuracy."""
from __future__ import absolute_import

import argparse
import datetime
import functools
import os
import re
import sys

import pbr.version
import subunit
import testtools

from os_testr.utils import colorizer

# NOTE(mtreinish) on python3 anydbm was renamed dbm and the python2 dbm module
# was renamed to dbm.ndbm; this block takes that into account
try:
    import anydbm as dbm
except ImportError:
    import dbm

DAY_SECONDS = 60 * 60 * 24
FAILS = []
RESULTS = {}


def total_seconds(timedelta):
    # NOTE(mtreinish): This method is built-in to the timedelta class in
    # python >= 2.7; it is here to enable its use on older versions
    return ((timedelta.days * DAY_SECONDS + timedelta.seconds) * 10 ** 6 +
            timedelta.microseconds) / 10 ** 6


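# Worked example (illustrative, not part of the original module): a timedelta
# of 1 day and 30 seconds flattens to (1 * 86400 + 30) * 10**6 microseconds,
# divided back down to 86430 seconds.
def _example_total_seconds():
    assert total_seconds(datetime.timedelta(days=1, seconds=30)) == 86430

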
def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
    """Clean up the test name for display.

    By default we strip out the tags in the test because they don't help us
    in matching the test that was run to its result.

    Make it possible to strip out the testscenarios information (not to
    be confused with tempest scenarios), however that's often needed to
    identify generated negative tests.
    """
    if strip_tags:
        tags_start = name.find('[')
        tags_end = name.find(']')
        if tags_start > 0 and tags_end > tags_start:
            newname = name[:tags_start]
            newname += name[tags_end + 1:]
            name = newname

    if strip_scenarios:
        tags_start = name.find('(')
        tags_end = name.find(')')
        if tags_start > 0 and tags_end > tags_start:
            newname = name[:tags_start]
            newname += name[tags_end + 1:]
            name = newname

    return name


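# Illustrative sketch (not part of the original module): the bracketed tag
# block is stripped by default, while the parenthesised scenario suffix goes
# only with strip_scenarios=True. The test names are made-up examples.
def _example_cleanup_test_name():
    name = 'tempest.api.TestFoo.test_bar[id-1234,smoke]'
    assert cleanup_test_name(name) == 'tempest.api.TestFoo.test_bar'
    name = 'suite.TestFoo.test_neg(scenario_1)'
    assert cleanup_test_name(name,
                             strip_scenarios=True) == 'suite.TestFoo.test_neg'

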
def get_duration(timestamps):
    start, end = timestamps
    if not start or not end:
        duration = ''
    else:
        delta = end - start
        duration = '%d.%06ds' % (
            delta.days * DAY_SECONDS + delta.seconds, delta.microseconds)
    return duration


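# Illustrative sketch (not part of the original module): a (start, end) pair
# renders as seconds.microseconds with an 's' suffix, and a missing timestamp
# yields ''. The timestamps are made-up example values.
def _example_get_duration():
    start = datetime.datetime(2017, 1, 1, 0, 0, 0)
    end = datetime.datetime(2017, 1, 1, 0, 0, 1, 500000)
    assert get_duration((start, end)) == '1.500000s'
    assert get_duration((start, None)) == ''

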
def find_worker(test):
    """Get the worker number.

    If there are no workers because we aren't in a concurrent environment,
    assume the worker number is 0.
    """
    for tag in test['tags']:
        if tag.startswith('worker-'):
            return int(tag[7:])
    return 0


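# Illustrative sketch (not part of the original module): the worker number
# comes from a 'worker-N' tag attached by the parallel runner; a minimal
# made-up test dict is enough to show both branches.
def _example_find_worker():
    assert find_worker({'tags': ['worker-3', 'smoke']}) == 3
    assert find_worker({'tags': []}) == 0

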
# Print out stdout/stderr if it exists, always
def print_attachments(stream, test, all_channels=False):
    """Print out subunit attachments.

    Print out subunit attachments that contain content. This
    runs in 2 modes, one for successes where we print out just stdout
    and stderr, and an override that dumps all the attachments.
    """
    channels = ('stdout', 'stderr')
    for name, detail in test['details'].items():
        # NOTE(sdague): the subunit names are a little crazy, and actually
        # are in the form pythonlogging:'' (with the colon and quotes)
        name = name.split(':')[0]
        if detail.content_type.type == 'test':
            detail.content_type.type = 'text'
        if (all_channels or name in channels) and detail.as_text():
            title = "Captured %s:" % name
            stream.write("\n%s\n%s\n" % (title, ('~' * len(title))))
            # indent attachment lines 4 spaces to make them visually
            # offset
            for line in detail.as_text().split('\n'):
                line = line.encode('utf8')
                stream.write("    %s\n" % line)


def find_test_run_time_diff(test_id, run_time):
    times_db_path = os.path.join(os.path.join(os.getcwd(), '.testrepository'),
                                 'times.dbm')
    if os.path.isfile(times_db_path):
        try:
            test_times = dbm.open(times_db_path)
        except Exception:
            return False
        try:
            avg_runtime = float(test_times.get(str(test_id), False))
        except Exception:
            try:
                avg_runtime = float(test_times[str(test_id)])
            except Exception:
                avg_runtime = False

        if avg_runtime and avg_runtime > 0:
            run_time = float(run_time.rstrip('s'))
            perc_diff = ((run_time - avg_runtime) / avg_runtime) * 100
            return perc_diff
    return False


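# Worked example (illustrative): with an average runtime of 2.0s stored in
# .testrepository/times.dbm and a current run of '3.0s', the function returns
# ((3.0 - 2.0) / 2.0) * 100 = +50.0 (percent); if the times database cannot
# be opened or holds no entry for the test, it returns False instead.

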
def show_outcome(stream, test, print_failures=False, failonly=False,
                 enable_diff=False, threshold='0', abbreviate=False,
                 enable_color=False):
    global RESULTS
    status = test['status']
    # TODO(sdague): ask lifeless why on this?
    if status == 'exists':
        return

    worker = find_worker(test)
    name = cleanup_test_name(test['id'])
    duration = get_duration(test['timestamps'])

    if worker not in RESULTS:
        RESULTS[worker] = []
    RESULTS[worker].append(test)

    # don't count the end of the return code as a fail
    if name == 'process-returncode':
        return

    for color in [colorizer.AnsiColorizer, colorizer.NullColorizer]:
        if not enable_color:
            color = colorizer.NullColorizer(stream)
            break
        if color.supported():
            color = color(stream)
            break

    if status == 'fail' or status == 'uxsuccess':
        FAILS.append(test)
        if abbreviate:
            color.write('F', 'red')
        else:
            stream.write('{%s} %s [%s] ... ' % (
                worker, name, duration))
            color.write('FAILED', 'red')
            stream.write('\n')
            if not print_failures:
                print_attachments(stream, test, all_channels=True)
    elif not failonly:
        if status == 'success' or status == 'xfail':
            if abbreviate:
                color.write('.', 'green')
            else:
                out_string = '{%s} %s [%s' % (worker, name, duration)
                perc_diff = find_test_run_time_diff(test['id'], duration)
                if enable_diff:
                    if perc_diff and abs(perc_diff) >= abs(float(threshold)):
                        if perc_diff > 0:
                            out_string = out_string + ' +%.2f%%' % perc_diff
                        else:
                            out_string = out_string + ' %.2f%%' % perc_diff
                stream.write(out_string + '] ... ')
                color.write('ok', 'green')
                stream.write('\n')
                print_attachments(stream, test)
        elif status == 'skip':
            if abbreviate:
                color.write('S', 'blue')
            else:
                reason = test['details'].get('reason', '')
                if reason:
                    reason = ': ' + reason.as_text()
                stream.write('{%s} %s ... ' % (
                    worker, name))
                color.write('SKIPPED', 'blue')
                stream.write('%s' % (reason))
                stream.write('\n')
        else:
            if abbreviate:
                stream.write('%s' % test['status'][0])
            else:
                stream.write('{%s} %s [%s] ... %s\n' % (
                    worker, name, duration, test['status']))
                if not print_failures:
                    print_attachments(stream, test, all_channels=True)

    stream.flush()


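# Illustrative output (made-up values): in the default mode each outcome is
# written on one line as
#
#     {0} tempest.api.TestFoo.test_bar [0.123456s] ... ok
#
# while --abbreviate collapses it to a single character ('.', 'F', 'S').

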
def print_fails(stream):
    """Print summary failure report.

    Currently unused; however, there remains debate on inline vs. at-end
    reporting, so leave the utility function for later use.
    """
    if not FAILS:
        return
    stream.write("\n==============================\n")
    stream.write("Failed %s tests - output below:" % len(FAILS))
    stream.write("\n==============================\n")
    for f in FAILS:
        stream.write("\n%s\n" % f['id'])
        stream.write("%s\n" % ('-' * len(f['id'])))
        print_attachments(stream, f, all_channels=True)
    stream.write('\n')


def count_tests(key, value):
    count = 0
    for k, v in RESULTS.items():
        for item in v:
            if key in item:
                if re.search(value, item[key]):
                    count += 1
    return count


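# Illustrative sketch (not part of the original module): count_tests matches
# a regex against one field of every recorded result, so statuses can be
# tallied exactly ('^fail$') or loosely ('.*'). It reads the module-global
# RESULTS, populated here with made-up entries.
def _example_count_tests():
    RESULTS[0] = [{'status': 'success'}, {'status': 'fail'}]
    assert count_tests('status', '^fail$') == 1
    assert count_tests('status', '.*') == 2
    RESULTS.clear()

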
def run_time():
    runtime = 0.0
    for k, v in RESULTS.items():
        for test in v:
            test_dur = get_duration(test['timestamps']).strip('s')
            # NOTE(toabctl): get_duration() can return an empty string
            # which leads to a ValueError when casting to float
            if test_dur:
                runtime += float(test_dur)
    return runtime


def worker_stats(worker):
    tests = RESULTS[worker]
    num_tests = len(tests)
    stop_time = tests[-1]['timestamps'][1]
    start_time = tests[0]['timestamps'][0]
    if not start_time or not stop_time:
        delta = 'N/A'
    else:
        delta = stop_time - start_time
    return num_tests, str(delta)


def print_summary(stream, elapsed_time):
    stream.write("\n======\nTotals\n======\n")
    stream.write("Ran: %s tests in %.4f sec.\n" % (
        count_tests('status', '.*'), total_seconds(elapsed_time)))
    stream.write(" - Passed: %s\n" % count_tests('status', '^success$'))
    stream.write(" - Skipped: %s\n" % count_tests('status', '^skip$'))
    stream.write(" - Expected Fail: %s\n" % count_tests('status', '^xfail$'))
    stream.write(" - Unexpected Success: %s\n" % count_tests('status',
                                                             '^uxsuccess$'))
    stream.write(" - Failed: %s\n" % count_tests('status', '^fail$'))
    stream.write("Sum of execute time for each test: %.4f sec.\n" % run_time())

    # we could have no results, especially as we filter out the process-codes
    if RESULTS:
        stream.write("\n==============\nWorker Balance\n==============\n")

        for w in range(max(RESULTS.keys()) + 1):
            if w not in RESULTS:
                stream.write(
                    " - WARNING: missing Worker %s! "
                    "Race in testr accounting.\n" % w)
            else:
                num, time = worker_stats(w)
                out_str = " - Worker %s (%s tests) => %s" % (w, num, time)
                if time.isdigit():
                    out_str += 's'
                out_str += '\n'
                stream.write(out_str)


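# Illustrative output (made-up numbers) as produced by print_summary:
#
#     ======
#     Totals
#     ======
#     Ran: 10 tests in 4.2000 sec.
#      - Passed: 8
#      - Skipped: 1
#      - Expected Fail: 0
#      - Unexpected Success: 0
#      - Failed: 1
#     Sum of execute time for each test: 3.9000 sec.

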
__version__ = pbr.version.VersionInfo('os_testr').version_string()


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', action='version',
                        version='%s' % __version__)
    parser.add_argument('--no-failure-debug', '-n', action='store_true',
                        dest='print_failures', help='Disable printing failure '
                        'debug information in realtime')
    parser.add_argument('--fails', '-f', action='store_true',
                        dest='post_fails', help='Print failure debug '
                        'information after the stream is processed')
    parser.add_argument('--failonly', action='store_true',
                        dest='failonly', help="Don't print success items",
                        default=(
                            os.environ.get('TRACE_FAILONLY', False)
                            is not False))
    parser.add_argument('--abbreviate', '-a', action='store_true',
                        dest='abbreviate', help='Print one character status '
                                                'for each test')
    parser.add_argument('--perc-diff', '-d', action='store_true',
                        dest='enable_diff',
                        help="Print percent change in run time on each test")
    parser.add_argument('--diff-threshold', '-t', dest='threshold',
                        help="Threshold to use for displaying percent change "
                             "from the avg run time. If one is not specified "
                             "the percent change will always be displayed")
    parser.add_argument('--no-summary', action='store_true',
                        help="Don't print the summary of the test run after "
                             "it completes")
    parser.add_argument('--color', action='store_true',
                        help="Print results with colors")
    return parser.parse_args()


def trace(stdin, stdout, print_failures=False, failonly=False,
          enable_diff=False, abbreviate=False, color=False, post_fails=False,
          no_summary=False):
    stream = subunit.ByteStreamToStreamResult(
        stdin, non_subunit_name='stdout')
    outcomes = testtools.StreamToDict(
        functools.partial(show_outcome, stdout,
                          print_failures=print_failures,
                          failonly=failonly,
                          enable_diff=enable_diff,
                          abbreviate=abbreviate,
                          enable_color=color))
    summary = testtools.StreamSummary()
    result = testtools.CopyStreamResult([outcomes, summary])
    result = testtools.StreamResultRouter(result)
    cat = subunit.test_results.CatFiles(stdout)
    result.add_rule(cat, 'test_id', test_id=None)
    start_time = datetime.datetime.utcnow()
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    stop_time = datetime.datetime.utcnow()
    elapsed_time = stop_time - start_time

    if count_tests('status', '.*') == 0:
        print("The test run didn't actually run any tests")
        return 1
    if post_fails:
        print_fails(stdout)
    if not no_summary:
        print_summary(stdout, elapsed_time)

    # NOTE(mtreinish): Ideally this should live in testtools StreamSummary;
    # this is just in place until the behavior lands there (if it ever does)
    if count_tests('status', '^success$') == 0:
        print("\nNo tests were successful during the run")
        return 1
    return 0 if summary.wasSuccessful() else 1


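# Illustrative sketch (not part of the original module): trace() accepts any
# readable byte stream, not just stdin; the file name below is an example.
def _example_trace():
    with open('results.subunit', 'rb') as stream:
        return trace(stream, sys.stdout, abbreviate=True)

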
def main():
    args = parse_args()
    exit(trace(sys.stdin, sys.stdout, args.print_failures, args.failonly,
               args.enable_diff, args.abbreviate, args.color, args.post_fails,
               args.no_summary))


if __name__ == '__main__':
    main()
@@ -1,107 +0,0 @@
# Copyright 2016 RedHat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from os_testr import regex_builder
import re


def black_reader(blacklist_file):
    black_file = open(blacklist_file, 'r')
    regex_comment_lst = []  # list of (regex_compiled, msg, skipped_lst) tuples
    for line in black_file:
        raw_line = line.strip()
        split_line = raw_line.split('#')
        # Before the # is the regex
        line_regex = split_line[0].strip()
        if len(split_line) > 1:
            # After the # is a comment
            comment = ''.join(split_line[1:]).strip()
        else:
            comment = 'Skipped because of regex %s:' % line_regex
        if not line_regex:
            continue
        regex_comment_lst.append((re.compile(line_regex), comment, []))
    return regex_comment_lst


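# Illustrative sketch (not part of the original module): a blacklist file
# holds one regex per line with an optional '# comment' suffix, e.g.
#
#     tempest.api.compute.*  # too slow for this gate
#     .*test_quota.*
#
# black_reader returns one (compiled_regex, comment, []) record per line; the
# second record above would get the default 'Skipped because of regex ...'
# message since it carries no comment.

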
def print_skips(regex, message, test_list):
    for test in test_list:
        print(test)
    # Extra whitespace to separate
    print('\n')


def construct_list(blacklist_file, whitelist_file, regex, black_regex,
                   print_exclude):
    """Filter the discovered test cases.

    :return: iterable of strings. The strings are full
        test case names, including tags, e.g.:
        "project.api.TestClass.test_case[positive]"
    """

    if not regex:
        regex = ''  # handle the other false things

    if whitelist_file:
        white_re = regex_builder.get_regex_from_whitelist_file(whitelist_file)
    else:
        white_re = ''

    if not regex and white_re:
        regex = white_re
    elif regex and white_re:
        regex = '|'.join((regex, white_re))

    if blacklist_file:
        black_data = black_reader(blacklist_file)
    else:
        black_data = None

    if black_regex:
        msg = "Skipped because of regex provided as a command line argument:"
        record = (re.compile(black_regex), msg, [])
        if black_data:
            black_data.append(record)
        else:
            black_data = [record]

    search_filter = re.compile(regex)

    # NOTE(afazekas): we do not want to pass a giant re
    # to an external application due to the arg length limitations
    list_of_test_cases = [test_case for test_case in
                          regex_builder._get_test_list('')
                          if search_filter.search(test_case)]
    set_of_test_cases = set(list_of_test_cases)

    if not black_data:
        return set_of_test_cases

    # NOTE(afazekas): We might use a faster logic when the
    # print option is not requested
    for (rex, msg, s_list) in black_data:
        for test_case in list_of_test_cases:
            if rex.search(test_case):
                # NOTE(mtreinish): In the case of overlapping regex the test
                # case might have already been removed from the set of tests
                if test_case in set_of_test_cases:
                    set_of_test_cases.remove(test_case)
                    s_list.append(test_case)

    if print_exclude:
        for (rex, msg, s_list) in black_data:
            if s_list:
                print_skips(rex, msg, s_list)
    return set_of_test_cases
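# Illustrative note (not part of the original module): the explicit regex and
# the whitelist are OR-ed together before filtering, so regex='smoke' plus a
# whitelist yielding 'api' selects with 'smoke|api'; blacklist records then
# prune matching cases from the resulting set.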
@@ -1,23 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright 2010-2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslotest import base


class TestCase(base.BaseTestCase):

    """Test case base class for all unit tests."""
@@ -1,23 +0,0 @@
# Copyright 2013 IBM Corp.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import testtools

class FakeTestClass(testtools.TestCase):
    def test_pass(self):
        self.assertTrue(False)

    def test_pass_list(self):
        test_list = ['test', 'a', 'b']
        self.assertIn('fail', test_list)
@@ -1,23 +0,0 @@
# Copyright 2013 IBM Corp.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import testtools

class FakeTestClass(testtools.TestCase):
    def test_pass(self):
        self.assertTrue(True)

    def test_pass_list(self):
        test_list = ['test', 'a', 'b']
        self.assertIn('test', test_list)
@@ -1,20 +0,0 @@
[metadata]
name = tempest_unit_tests
version = 1
summary = Fake Project for testing wrapper scripts
author = OpenStack
author-email = openstack-dev@lists.openstack.org
home-page = http://www.openstack.org/
classifier =
    Intended Audience :: Information Technology
    Intended Audience :: System Administrators
    Intended Audience :: Developers
    License :: OSI Approved :: Apache Software License
    Operating System :: POSIX :: Linux
    Programming Language :: Python
    Programming Language :: Python :: 2
    Programming Language :: Python :: 2.7

[global]
setup-hooks =
    pbr.hooks.setup_hook
@@ -1,5 +0,0 @@
[DEFAULT]
test_command=${PYTHON:-python} -m subunit.run discover -t ./ ./tests $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
group_regex=([^\.]*\.)*
Binary file not shown.
Binary file not shown.
@@ -1,187 +0,0 @@
# -*- coding: utf-8 -*-

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
test_os_testr
----------------------------------

Tests for `os_testr` module.
"""
import io
import mock

from os_testr import ostestr as os_testr
from os_testr.tests import base


class TestGetParser(base.TestCase):
    def test_pretty(self):
        namespace = os_testr.get_parser(['--pretty'])
        self.assertEqual(True, namespace[0].pretty)
        namespace = os_testr.get_parser(['--no-pretty'])
        self.assertEqual(False, namespace[0].pretty)
        self.assertRaises(SystemExit, os_testr.get_parser,
                          ['--no-pretty', '--pretty'])

    def test_slowest(self):
        namespace = os_testr.get_parser(['--slowest'])
        self.assertEqual(True, namespace[0].slowest)
        namespace = os_testr.get_parser(['--no-slowest'])
        self.assertEqual(False, namespace[0].slowest)
        self.assertRaises(SystemExit, os_testr.get_parser,
                          ['--no-slowest', '--slowest'])

    def test_parallel(self):
        namespace = os_testr.get_parser(['--parallel'])
        self.assertEqual(True, namespace[0].parallel)
        namespace = os_testr.get_parser(['--serial'])
        self.assertEqual(False, namespace[0].parallel)
        self.assertRaises(SystemExit, os_testr.get_parser,
                          ['--parallel', '--serial'])


class TestCallers(base.TestCase):
    def test_no_discover(self):
        namespace = os_testr.get_parser(['-n', 'project.tests.foo'])

        def _fake_exit(arg):
            self.assertTrue(arg)

        def _fake_run(*args, **kwargs):
            return 'project.tests.foo' in args

        with mock.patch.object(os_testr, 'exit', side_effect=_fake_exit), \
                mock.patch.object(os_testr, 'get_parser', return_value=namespace), \
                mock.patch.object(os_testr,
                                  'call_subunit_run',
                                  side_effect=_fake_run):
            os_testr.main()

    def test_no_discover_path(self):
        namespace = os_testr.get_parser(['-n', 'project/tests/foo'])

        def _fake_exit(arg):
            self.assertTrue(arg)

        def _fake_run(*args, **kwargs):
            return 'project.tests.foo' in args

        with mock.patch.object(os_testr, 'exit', side_effect=_fake_exit), \
                mock.patch.object(os_testr, 'get_parser', return_value=namespace), \
                mock.patch.object(os_testr,
                                  'call_subunit_run',
                                  side_effect=_fake_run):
            os_testr.main()

    def test_pdb(self):
        namespace = os_testr.get_parser(['--pdb', 'project.tests.foo'])

        def _fake_exit(arg):
            self.assertTrue(arg)

        def _fake_run(*args, **kwargs):
            return 'project.tests.foo' in args

        with mock.patch.object(os_testr, 'exit', side_effect=_fake_exit), \
                mock.patch.object(os_testr, 'get_parser', return_value=namespace), \
                mock.patch.object(os_testr,
                                  'call_subunit_run',
                                  side_effect=_fake_run):
            os_testr.main()

    def test_pdb_path(self):
        namespace = os_testr.get_parser(['--pdb', 'project/tests/foo'])

        def _fake_exit(arg):
            self.assertTrue(arg)

        def _fake_run(*args, **kwargs):
            return 'project.tests.foo' in args

        with mock.patch.object(os_testr, 'exit', side_effect=_fake_exit), \
                mock.patch.object(os_testr, 'get_parser', return_value=namespace), \
                mock.patch.object(os_testr,
                                  'call_subunit_run',
                                  side_effect=_fake_run):
            os_testr.main()

    def test_call_subunit_run_pretty(self):
        '''Test call_subunit_run

        Test ostestr call_subunit_run function when:
        Pretty is True
        '''
        pretty = True
        subunit = False

        with mock.patch('subprocess.Popen', autospec=True) as mock_popen:
            mock_popen.return_value.returncode = 0
            mock_popen.return_value.stdout = io.BytesIO()

            os_testr.call_subunit_run('project.tests.foo', pretty, subunit)

            # Validate Popen was called three times
            self.assertTrue(mock_popen.called, 'Popen was never called')
            count = mock_popen.call_count
            self.assertEqual(3, count, 'Popen was called %s'
                             ' instead of 3 times' % count)

            # Validate Popen called the right functions
            called = mock_popen.call_args_list
            msg = "Function %s not called"
            function = ['python', '-m', 'subunit.run', 'project.tests.foo']
            self.assertIn(function, called[0][0], msg % 'subunit.run')
            function = ['testr', 'load', '--subunit']
            self.assertIn(function, called[1][0], msg % 'testr load')
            function = ['subunit-trace', '--no-failure-debug', '-f']
            self.assertIn(function, called[2][0], msg % 'subunit-trace')

    def test_call_subunit_run_sub(self):
        '''Test call_subunit_run

        Test ostestr call_subunit_run function when:
        Pretty is False and Subunit is True
        '''
        pretty = False
        subunit = True

        with mock.patch('subprocess.Popen', autospec=True) as mock_popen:
            os_testr.call_subunit_run('project.tests.foo', pretty, subunit)

            # Validate Popen was called once
            self.assertTrue(mock_popen.called, 'Popen was never called')
            count = mock_popen.call_count
            self.assertEqual(1, count, 'Popen was called more than once')

            # Validate Popen called the right function
            called = mock_popen.call_args
            function = ['testr', 'load', '--subunit']
            self.assertIn(function, called[0], "testr load not called")

    def test_call_subunit_run_testtools(self):
        '''Test call_subunit_run

        Test ostestr call_subunit_run function when:
        Pretty is False and Subunit is False
        '''
        pretty = False
        subunit = False

        with mock.patch('testtools.run.main', autospec=True) as mock_run:
            os_testr.call_subunit_run('project.tests.foo', pretty, subunit)

            # Validate testtools.run was called once
            self.assertTrue(mock_run.called, 'testtools.run was never called')
            count = mock_run.call_count
            self.assertEqual(1, count, 'testtools.run called more than once')
@@ -1,189 +0,0 @@
# -*- coding: utf-8 -*-

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

import six

from os_testr import regex_builder as os_testr
from os_testr.tests import base


class TestPathToRegex(base.TestCase):

    def test_file_name(self):
        result = os_testr.path_to_regex("tests/network/v2/test_net.py")
        self.assertEqual("tests.network.v2.test_net", result)
        result = os_testr.path_to_regex("openstack/tests/network/v2")
        self.assertEqual("openstack.tests.network.v2", result)


class TestConstructRegex(base.TestCase):
    def test_regex_passthrough(self):
        result = os_testr.construct_regex(None, None, 'fake_regex', False)
        self.assertEqual(result, 'fake_regex')

    def test_blacklist_regex_with_comments(self):
        blacklist_file = six.StringIO()
        for i in range(4):
            blacklist_file.write('fake_regex_%s # A Comment\n' % i)
        blacklist_file.seek(0)
        with mock.patch('six.moves.builtins.open',
                        return_value=blacklist_file):
            result = os_testr.construct_regex('fake_path', None, None, False)
        self.assertEqual(
            result,
            "^((?!fake_regex_3|fake_regex_2|fake_regex_1|fake_regex_0).)*$")

    def test_whitelist_regex_with_comments(self):
        whitelist_file = six.StringIO()
        for i in range(4):
            whitelist_file.write('fake_regex_%s # A Comment\n' % i)
        whitelist_file.seek(0)
        with mock.patch('six.moves.builtins.open',
                        return_value=whitelist_file):
            result = os_testr.construct_regex(None, 'fake_path', None, False)
        self.assertEqual(
            result,
            "fake_regex_0|fake_regex_1|fake_regex_2|fake_regex_3")

    def test_blacklist_regex_without_comments(self):
        blacklist_file = six.StringIO()
        for i in range(4):
            blacklist_file.write('fake_regex_%s\n' % i)
        blacklist_file.seek(0)
        with mock.patch('six.moves.builtins.open',
                        return_value=blacklist_file):
            result = os_testr.construct_regex('fake_path', None, None, False)
        self.assertEqual(
            result,
            "^((?!fake_regex_3|fake_regex_2|fake_regex_1|fake_regex_0).)*$")

    def test_blacklist_regex_with_comments_and_regex(self):
        blacklist_file = six.StringIO()
        for i in range(4):
            blacklist_file.write('fake_regex_%s # Comments\n' % i)
        blacklist_file.seek(0)
        with mock.patch('six.moves.builtins.open',
                        return_value=blacklist_file):
            result = os_testr.construct_regex('fake_path', None,
                                              'fake_regex', False)

            expected_regex = ("^((?!fake_regex_3|fake_regex_2|fake_regex_1|"
                              "fake_regex_0).)*$fake_regex")
            self.assertEqual(result, expected_regex)

    def test_blacklist_regex_without_comments_and_regex(self):
        blacklist_file = six.StringIO()
        for i in range(4):
            blacklist_file.write('fake_regex_%s\n' % i)
        blacklist_file.seek(0)
        with mock.patch('six.moves.builtins.open',
                        return_value=blacklist_file):
            result = os_testr.construct_regex('fake_path', None,
                                              'fake_regex', False)

            expected_regex = ("^((?!fake_regex_3|fake_regex_2|fake_regex_1|"
                              "fake_regex_0).)*$fake_regex")
            self.assertEqual(result, expected_regex)

    @mock.patch.object(os_testr, 'print_skips')
    def test_blacklist_regex_with_comment_print_skips(self, print_mock):
        blacklist_file = six.StringIO()
        for i in range(4):
            blacklist_file.write('fake_regex_%s # Comment\n' % i)
        blacklist_file.seek(0)
        with mock.patch('six.moves.builtins.open',
                        return_value=blacklist_file):
            result = os_testr.construct_regex('fake_path', None,
                                              None, True)

        expected_regex = ("^((?!fake_regex_3|fake_regex_2|fake_regex_1|"
                          "fake_regex_0).)*$")
        self.assertEqual(result, expected_regex)
        calls = print_mock.mock_calls
        self.assertEqual(len(calls), 4)
        args = list(map(lambda x: x[1], calls))
        self.assertIn(('fake_regex_0', 'Comment'), args)
        self.assertIn(('fake_regex_1', 'Comment'), args)
        self.assertIn(('fake_regex_2', 'Comment'), args)
        self.assertIn(('fake_regex_3', 'Comment'), args)

    @mock.patch.object(os_testr, 'print_skips')
    def test_blacklist_regex_without_comment_print_skips(self, print_mock):
        blacklist_file = six.StringIO()
        for i in range(4):
            blacklist_file.write('fake_regex_%s\n' % i)
        blacklist_file.seek(0)
        with mock.patch('six.moves.builtins.open',
                        return_value=blacklist_file):
            result = os_testr.construct_regex('fake_path', None,
                                              None, True)

        expected_regex = ("^((?!fake_regex_3|fake_regex_2|"
                          "fake_regex_1|fake_regex_0).)*$")
        self.assertEqual(result, expected_regex)
        calls = print_mock.mock_calls
        self.assertEqual(len(calls), 4)
        args = list(map(lambda x: x[1], calls))
        self.assertIn(('fake_regex_0', ''), args)
        self.assertIn(('fake_regex_1', ''), args)
        self.assertIn(('fake_regex_2', ''), args)
        self.assertIn(('fake_regex_3', ''), args)


class TestWhitelistFile(base.TestCase):
    def test_read_whitelist_file(self):
        file_contents = """regex_a
regex_b"""
        whitelist_file = six.StringIO()
        whitelist_file.write(file_contents)
        whitelist_file.seek(0)
        with mock.patch('six.moves.builtins.open',
                        return_value=whitelist_file):
            regex = os_testr.get_regex_from_whitelist_file('/path/to/not_used')
        self.assertEqual('regex_a|regex_b', regex)

    def test_whitelist_regex_without_comments_and_regex(self):
        file_contents = """regex_a
regex_b"""
        whitelist_file = six.StringIO()
        whitelist_file.write(file_contents)
        whitelist_file.seek(0)
        with mock.patch('six.moves.builtins.open',
                        return_value=whitelist_file):
            result = os_testr.construct_regex(None, 'fake_path',
                                              None, False)

            expected_regex = 'regex_a|regex_b'
            self.assertEqual(result, expected_regex)


class TestGetTestList(base.TestCase):
    def test__get_test_list(self):
        test_list = os_testr._get_test_list('test__get_test_list')
        self.assertIn('test__get_test_list', test_list[0])

    def test__get_test_list_regex_is_empty(self):
        test_list = os_testr._get_test_list('')
        self.assertIn('', test_list[0])

    def test__get_test_list_regex_is_none(self):
        test_list = os_testr._get_test_list(None)
        # NOTE(masayukig): We should get all of the tests. So we should have
        # more than one test case.
        self.assertGreater(len(test_list), 1)
        self.assertIn('os_testr.tests.test_regex_builder.'
                      'TestGetTestList.test__get_test_list_regex_is_none',
                      test_list)
@@ -1,104 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import os
import shutil
import subprocess
import tempfile

import testtools

from os_testr.tests import base
from six import StringIO

DEVNULL = open(os.devnull, 'wb')


class TestReturnCodes(base.TestCase):
    def setUp(self):
        super(TestReturnCodes, self).setUp()
        # Setup test dirs
        self.directory = tempfile.mkdtemp(prefix='ostestr-unit')
        self.addCleanup(shutil.rmtree, self.directory)
        self.test_dir = os.path.join(self.directory, 'tests')
        os.mkdir(self.test_dir)
        # Setup Test files
        self.testr_conf_file = os.path.join(self.directory, '.testr.conf')
        self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg')
        self.passing_file = os.path.join(self.test_dir, 'test_passing.py')
        self.failing_file = os.path.join(self.test_dir, 'test_failing.py')
        self.init_file = os.path.join(self.test_dir, '__init__.py')
        self.setup_py = os.path.join(self.directory, 'setup.py')
        shutil.copy('os_testr/tests/files/testr-conf', self.testr_conf_file)
        shutil.copy('os_testr/tests/files/passing-tests', self.passing_file)
        shutil.copy('os_testr/tests/files/failing-tests', self.failing_file)
        shutil.copy('setup.py', self.setup_py)
        shutil.copy('os_testr/tests/files/setup.cfg', self.setup_cfg_file)
        shutil.copy('os_testr/tests/files/__init__.py', self.init_file)

        self.stdout = StringIO()
        self.stderr = StringIO()
        # Change directory, run wrapper and check result
        self.addCleanup(os.chdir, os.path.abspath(os.curdir))
        os.chdir(self.directory)

    def assertRunExit(self, cmd, expected, subunit=False):
        p = subprocess.Popen(
            "%s" % cmd, shell=True,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()

        if not subunit:
            self.assertEqual(
                p.returncode, expected,
                "Stdout: %s; Stderr: %s" % (out, err))
        else:
            self.assertEqual(p.returncode, expected,
                             "Expected return code: %s doesn't match actual "
                             "return code of: %s" % (expected, p.returncode))

    def test_default_passing(self):
        self.assertRunExit('ostestr --regex passing', 0)

    def test_default_fails(self):
        self.assertRunExit('ostestr', 1)

    def test_default_passing_no_slowest(self):
        self.assertRunExit('ostestr --no-slowest --regex passing', 0)

    def test_default_fails_no_slowest(self):
        self.assertRunExit('ostestr --no-slowest', 1)

    def test_default_serial_passing(self):
        self.assertRunExit('ostestr --serial --regex passing', 0)

    def test_default_serial_fails(self):
        self.assertRunExit('ostestr --serial', 1)

    def test_testr_subunit_passing(self):
        self.assertRunExit('ostestr --no-pretty --subunit --regex passing', 0,
                           subunit=True)

    @testtools.skip('Skipped because of testrepository lp bug #1411804')
    def test_testr_subunit_fails(self):
        self.assertRunExit('ostestr --no-pretty --subunit', 1, subunit=True)

    def test_testr_no_pretty_passing(self):
        self.assertRunExit('ostestr --no-pretty --regex passing', 0)

    def test_testr_no_pretty_fails(self):
        self.assertRunExit('ostestr --no-pretty', 1)

    def test_list(self):
        self.assertRunExit('ostestr --list', 0)
@@ -1,78 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sys

from ddt import data
from ddt import ddt
from subunit import RemotedTestCase
from testtools import PlaceHolder

from os_testr import subunit2html
from os_testr.tests import base


@ddt
class TestSubunit2html(base.TestCase):
    @data(RemotedTestCase, PlaceHolder)
    def test_class_parsing(self, test_cls):
        """Tests that the class paths are parsed for v1 & v2 tests"""
        test_ = test_cls("example.path.to.test.method")
        obj_ = subunit2html.HtmlOutput()
        cls_ = []
        obj_._add_cls({}, cls_, test_, ())
        self.assertEqual("example.path.to.test", cls_[0].name)

    @data(RemotedTestCase, PlaceHolder)
    def test_result_sorting(self, test_cls):
        tests = []
        for i in range(9):
            tests.append(test_cls('example.path.to.test%d.method' % i))
        # addFailure, addError, and addSkip need the real exc_info
        try:
            raise Exception('fake')
        except Exception:
            err = sys.exc_info()
        obj = subunit2html.HtmlOutput()
        obj.addSuccess(tests[3])
        obj.addSuccess(tests[1])
        # example.path.to.test2 has a failure
        obj.addFailure(tests[2], err)
        obj.addSkip(tests[0], err)
        obj.addSuccess(tests[8])
        # example.path.to.test5 has a failure (error)
        obj.addError(tests[5], err)
        # example.path.to.test4 has a failure
        obj.addFailure(tests[4], err)
        obj.addSuccess(tests[7])
        # example.path.to.test6 has a success, a failure, and a success
        obj.addSuccess(tests[6])
        obj.addFailure(tests[6], err)
        obj.addSuccess(tests[6])
        sorted_result = obj._sortResult(obj.result)
        # _sortResult returns a list of results of format:
        #   [(class, [test_result_tuple, ...]), ...]
        # sorted by str(class)
        #
        # Classes with failures (2, 4, 5, and 6) should be sorted separately
        # at the top. The rest of the classes should be in sorted order after.
        expected_class_order = ['example.path.to.test2',
 | 
			
		||||
                                'example.path.to.test4',
 | 
			
		||||
                                'example.path.to.test5',
 | 
			
		||||
                                'example.path.to.test6',
 | 
			
		||||
                                'example.path.to.test0',
 | 
			
		||||
                                'example.path.to.test1',
 | 
			
		||||
                                'example.path.to.test3',
 | 
			
		||||
                                'example.path.to.test7',
 | 
			
		||||
                                'example.path.to.test8']
 | 
			
		||||
        for i, r in enumerate(sorted_result):
 | 
			
		||||
            self.assertEqual(expected_class_order[i], str(r[0]))
 | 
			
		||||
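The expected_class_order above encodes a two-level rule: classes containing at least one failure or error sort first, and each group is ordered by class name. A compact restatement of that rule (illustrative only; the real _sortResult works on the richer result tuples described in the comment above):

    def sort_classes(class_names, failing):
        # failing classes first (False sorts before True), then by name
        return sorted(class_names, key=lambda name: (name not in failing, name))

    print(sort_classes(
        ['example.path.to.test%d' % i for i in range(9)],
        {'example.path.to.test2', 'example.path.to.test4',
         'example.path.to.test5', 'example.path.to.test6'}))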
@@ -1,96 +0,0 @@
# Copyright 2015 SUSE Linux GmbH
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from datetime import datetime as dt
import io
import os
import subprocess
import sys

from ddt import data
from ddt import ddt
from ddt import unpack
from mock import patch
import six

from os_testr import subunit_trace
from os_testr.tests import base


@ddt
class TestSubunitTrace(base.TestCase):

    @data(([dt(2015, 4, 17, 22, 23, 14, 111111),
            dt(2015, 4, 17, 22, 23, 14, 111111)],
           "0.000000s"),
          ([dt(2015, 4, 17, 22, 23, 14, 111111),
            dt(2015, 4, 17, 22, 23, 15, 111111)],
           "1.000000s"),
          ([dt(2015, 4, 17, 22, 23, 14, 111111),
            None],
           ""))
    @unpack
    def test_get_duration(self, timestamps, expected_result):
        self.assertEqual(subunit_trace.get_duration(timestamps),
                         expected_result)

    @data(([dt(2015, 4, 17, 22, 23, 14, 111111),
            dt(2015, 4, 17, 22, 23, 14, 111111)],
           0.0),
          ([dt(2015, 4, 17, 22, 23, 14, 111111),
            dt(2015, 4, 17, 22, 23, 15, 111111)],
           1.0),
          ([dt(2015, 4, 17, 22, 23, 14, 111111),
            None],
           0.0))
    @unpack
    def test_run_time(self, timestamps, expected_result):
        patched_res = {
            0: [
                {'timestamps': timestamps}
            ]
        }
        with patch.dict(subunit_trace.RESULTS, patched_res, clear=True):
            self.assertEqual(subunit_trace.run_time(), expected_result)

    def test_return_code_all_skips(self):
        skips_stream = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            'sample_streams/all_skips.subunit')
        p = subprocess.Popen(['subunit-trace'], stdin=subprocess.PIPE)
        with open(skips_stream, 'rb') as stream:
            p.communicate(stream.read())
        self.assertEqual(1, p.returncode)

    def test_return_code_normal_run(self):
        regular_stream = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            'sample_streams/successful.subunit')
        p = subprocess.Popen(['subunit-trace'], stdin=subprocess.PIPE)
        with open(regular_stream, 'rb') as stream:
            p.communicate(stream.read())
        self.assertEqual(0, p.returncode)

    def test_trace(self):
        regular_stream = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            'sample_streams/successful.subunit')
        bytes_ = io.BytesIO()
        with open(regular_stream, 'rb') as stream:
            bytes_.write(six.binary_type(stream.read()))
        bytes_.seek(0)
        stdin = io.TextIOWrapper(io.BufferedReader(bytes_))
        returncode = subunit_trace.trace(stdin, sys.stdout)
        self.assertEqual(0, returncode)
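The first @data block above pins down get_duration()'s contract: it takes a [start, stop] timestamp pair, formats the difference to microsecond precision, and returns an empty string when the stop timestamp is missing. A minimal sketch of that behavior, inferred from the assertions only (not the module's actual implementation):

    from datetime import datetime as dt

    def get_duration_sketch(timestamps):
        start, stop = timestamps
        if not start or not stop:
            # incomplete test result: no duration to report
            return ''
        return '%.6fs' % (stop - start).total_seconds()

    # prints '1.000000s', matching the second data case above
    print(get_duration_sketch([dt(2015, 4, 17, 22, 23, 14, 111111),
                               dt(2015, 4, 17, 22, 23, 15, 111111)]))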
@@ -1,138 +0,0 @@
# -*- coding: utf-8 -*-

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re

import mock
import six

from os_testr import testlist_builder as list_builder
from os_testr.tests import base


class TestBlackReader(base.TestCase):
    def test_black_reader(self):
        blacklist_file = six.StringIO()
        for i in range(4):
            blacklist_file.write('fake_regex_%s\n' % i)
            blacklist_file.write('fake_regex_with_note_%s # note\n' % i)
        blacklist_file.seek(0)
        with mock.patch('six.moves.builtins.open',
                        return_value=blacklist_file):
            result = list_builder.black_reader('fake_path')
        self.assertEqual(2 * 4, len(result))
        note_cnt = 0
        # not assuming ordering, mainly just testing the type
        for r in result:
            self.assertEqual(r[2], [])
            if r[1] == 'note':
                note_cnt += 1
            self.assertIn('search', dir(r[0]))  # like a compiled regex
        self.assertEqual(note_cnt, 4)
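The blacklist format driven through the mocked open() above is one regular expression per line, optionally followed by a '# note' suffix, and black_reader() hands back (compiled_regex, note, []) triples. A minimal parsing sketch consistent with those assertions (illustrative, not the module's real code):

    import re

    def black_reader_sketch(path):
        entries = []
        with open(path) as f:
            for line in f:
                raw, _, note = line.partition('#')
                raw = raw.strip()
                if raw:
                    # the third element collects matched test ids later on
                    entries.append((re.compile(raw), note.strip(), []))
        return entries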


class TestConstructList(base.TestCase):
    def test_simple_re(self):
        test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])']
        with mock.patch('os_testr.regex_builder._get_test_list',
                        return_value=test_lists):
            result = list_builder.construct_list(None,
                                                 None,
                                                 'foo',
                                                 None,
                                                 False)
        self.assertEqual(list(result), ['fake_test(scen)[egg,foo])'])

    def test_simple_black_re(self):
        test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])']
        with mock.patch('os_testr.regex_builder._get_test_list',
                        return_value=test_lists):
            result = list_builder.construct_list(None,
                                                 None,
                                                 None,
                                                 'foo',
                                                 False)
        self.assertEqual(list(result), ['fake_test(scen)[tag,bar])'])

    def test_blacklist(self):
        black_list = [(re.compile('foo'), 'foo not liked', [])]
        test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])']
        with mock.patch('os_testr.regex_builder._get_test_list',
                        return_value=test_lists):
            with mock.patch('os_testr.testlist_builder.black_reader',
                            return_value=black_list):
                result = list_builder.construct_list('file',
                                                     None,
                                                     'fake_test',
                                                     None,
                                                     False)
        self.assertEqual(list(result), ['fake_test(scen)[tag,bar])'])

    def test_whitelist(self):
        white_list = 'fake_test1|fake_test2'
        test_lists = ['fake_test1[tg]', 'fake_test2[tg]', 'fake_test3[tg]']
        white_getter = 'os_testr.regex_builder.get_regex_from_whitelist_file'
        with mock.patch('os_testr.regex_builder._get_test_list',
                        return_value=test_lists):
            with mock.patch(white_getter,
                            return_value=white_list):
                result = list_builder.construct_list(None,
                                                     'file',
                                                     None,
                                                     None,
                                                     False)
        self.assertEqual(set(result),
                         set(('fake_test1[tg]', 'fake_test2[tg]')))

    def test_whitelist_blacklist_re(self):
        white_list = 'fake_test1|fake_test2'
        test_lists = ['fake_test1[tg]', 'fake_test2[spam]',
                      'fake_test3[tg,foo]', 'fake_test4[spam]']
        black_list = [(re.compile('spam'), 'spam not liked', [])]
        white_getter = 'os_testr.regex_builder.get_regex_from_whitelist_file'
        with mock.patch('os_testr.regex_builder._get_test_list',
                        return_value=test_lists):
            with mock.patch(white_getter,
                            return_value=white_list):
                with mock.patch('os_testr.testlist_builder.black_reader',
                                return_value=black_list):
                    result = list_builder.construct_list('black_file',
                                                         'white_file',
                                                         'foo',
                                                         None,
                                                         False)
        self.assertEqual(set(result),
                         set(('fake_test1[tg]', 'fake_test3[tg,foo]')))

    def test_overlapping_black_regex(self):
        black_list = [(re.compile('compute.test_keypairs.KeypairsTestV210'),
                       '', []),
                      (re.compile('compute.test_keypairs.KeypairsTestV21'),
                       '', [])]
        test_lists = [
            'compute.test_keypairs.KeypairsTestV210.test_create_keypair',
            'compute.test_keypairs.KeypairsTestV21.test_create_keypair',
            'compute.test_fake.FakeTest.test_fake_test']
        with mock.patch('os_testr.regex_builder._get_test_list',
                        return_value=test_lists):
            with mock.patch('os_testr.testlist_builder.black_reader',
                            return_value=black_list):
                result = list_builder.construct_list('file',
                                                     None,
                                                     'fake_test',
                                                     None,
                                                     False)
        self.assertEqual(
            list(result), ['compute.test_fake.FakeTest.test_fake_test'])
@@ -1,76 +0,0 @@
# Copyright 2016 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import sys

from ddt import data
from ddt import ddt
from ddt import unpack
import six

from os_testr.tests import base
from os_testr.utils import colorizer


@ddt
class TestNullColorizer(base.TestCase):

    @data(None, "foo", sys.stdout)
    def test_supported_always_true(self, stream):
        self.assertTrue(colorizer.NullColorizer.supported(stream))

    @data(("foo", "red"), ("foo", "bar"))
    @unpack
    def test_write_string_ignore_color(self, text, color):
        output = six.StringIO()
        c = colorizer.NullColorizer(output)
        c.write(text, color)
        self.assertEqual(text, output.getvalue())

    @data((None, "red"), (None, None))
    @unpack
    def test_write_none_exception(self, text, color):
        c = colorizer.NullColorizer(sys.stdout)
        self.assertRaises(TypeError, c.write, text, color)


@ddt
class TestAnsiColorizer(base.TestCase):

    def test_supported_false(self):
        # NOTE(masayukig): This returns False because our unittest env isn't
        # interactive
        self.assertFalse(colorizer.AnsiColorizer.supported(sys.stdout))

    @data(None, "foo")
    def test_supported_error(self, stream):
        self.assertRaises(AttributeError,
                          colorizer.AnsiColorizer.supported, stream)

    @data(("foo", "red", "31"), ("foo", "blue", "34"))
    @unpack
    def test_write_string_valid_color(self, text, color, color_code):
        output = six.StringIO()
        c = colorizer.AnsiColorizer(output)
        c.write(text, color)
        self.assertIn(text, output.getvalue())
        self.assertIn(color_code, output.getvalue())

    @data(("foo", None), ("foo", "invalid_color"))
    @unpack
    def test_write_string_invalid_color(self, text, color):
        output = six.StringIO()
        c = colorizer.AnsiColorizer(output)
        self.assertRaises(KeyError, c.write, text, color)
@@ -1,98 +0,0 @@
# Copyright 2015 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Colorizer Code is borrowed from Twisted:
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
#
#    Permission is hereby granted, free of charge, to any person obtaining
#    a copy of this software and associated documentation files (the
#    "Software"), to deal in the Software without restriction, including
#    without limitation the rights to use, copy, modify, merge, publish,
#    distribute, sublicense, and/or sell copies of the Software, and to
#    permit persons to whom the Software is furnished to do so, subject to
#    the following conditions:
#
#    The above copyright notice and this permission notice shall be
#    included in all copies or substantial portions of the Software.
#
#    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
#    LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#    OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
#    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import sys


class AnsiColorizer(object):
    """A colorizer is an object that loosely wraps around a stream

    allowing callers to write text to the stream in a particular color.

    Colorizer classes must implement C{supported()} and C{write(text, color)}.
    """
    _colors = dict(black=30, red=31, green=32, yellow=33,
                   blue=34, magenta=35, cyan=36, white=37)

    def __init__(self, stream):
        self.stream = stream

    @classmethod
    def supported(cls, stream=sys.stdout):
        """Check whether the current platform supports colored terminal output

        A class method that returns True if the current platform supports
        coloring terminal output using this method. Returns False otherwise.
        """
        if not stream.isatty():
            return False  # auto color only on TTYs
        try:
            import curses
        except ImportError:
            return False
        else:
            try:
                try:
                    return curses.tigetnum("colors") > 2
                except curses.error:
                    curses.setupterm()
                    return curses.tigetnum("colors") > 2
            except Exception:
                # guess false in case of error
                return False

    def write(self, text, color):
        """Write the given text to the stream in the given color.

        @param text: Text to be written to the stream.

        @param color: A string label for a color. e.g. 'red', 'white'.
        """
        color = self._colors[color]
        self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))


class NullColorizer(object):
    """See the AnsiColorizer docstring; this variant writes text unchanged."""
    def __init__(self, stream):
        self.stream = stream

    @classmethod
    def supported(cls, stream=sys.stdout):
        return True

    def write(self, text, color):
        self.stream.write(text)
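Choosing between the two classes is left to callers. A minimal usage sketch (pick_colorizer is illustrative and not part of this module; note that AnsiColorizer.supported() raises AttributeError for objects without isatty(), as the colorizer tests above assert):

    import sys

    def pick_colorizer(stream=sys.stdout):
        # Prefer ANSI colors on capable TTYs; otherwise fall back to the
        # pass-through NullColorizer.
        try:
            if AnsiColorizer.supported(stream):
                return AnsiColorizer(stream)
        except AttributeError:
            pass
        return NullColorizer(stream)

    pick_colorizer().write('PASSED', 'green')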
@@ -1,8 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.

pbr!=2.1.0,>=2.0.0 # Apache-2.0
testrepository>=0.0.18 # Apache-2.0/BSD
python-subunit>=0.0.18 # Apache-2.0/BSD
testtools>=1.4.0 # MIT
setup.cfg
@@ -1,40 +0,0 @@
[metadata]
name = os-testr
summary = A testr wrapper to provide functionality for OpenStack projects
description-file =
    README.rst
author = OpenStack
author-email = openstack-dev@lists.openstack.org
home-page = http://docs.openstack.org/developer/os-testr/
classifier =
    Environment :: OpenStack
    Intended Audience :: Information Technology
    Intended Audience :: System Administrators
    License :: OSI Approved :: Apache Software License
    Operating System :: POSIX :: Linux
    Programming Language :: Python
    Programming Language :: Python :: 2
    Programming Language :: Python :: 2.7
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.4
    Programming Language :: Python :: 3.5

[files]
packages =
    os_testr

[entry_points]
console_scripts =
    subunit-trace = os_testr.subunit_trace:main
    ostestr = os_testr.ostestr:main
    subunit2html = os_testr.subunit2html:main
    generate-subunit = os_testr.generate_subunit:main

[build_sphinx]
source-dir = doc/source
build-dir = doc/build
all_files = 1
warning-is-error = 1

[upload_sphinx]
upload-dir = doc/build/html
setup.py
@@ -1,29 +0,0 @@
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools

# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass

setuptools.setup(
    setup_requires=['pbr>=2.0.0'],
    pbr=True)
@@ -1,13 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.

hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0

coverage!=4.4,>=4.0 # Apache-2.0
sphinx>=1.6.2 # BSD
openstackdocstheme>=1.11.0 # Apache-2.0
oslotest>=1.10.0 # Apache-2.0
testscenarios>=0.4 # Apache-2.0/BSD
ddt>=1.0.1 # MIT
six>=1.9.0 # MIT
@@ -1,30 +0,0 @@
#!/usr/bin/env bash

# Client constraint file contains this client version pin that is in conflict
# with installing the client from source. We should remove the version pin in
# the constraints file before applying it for from-source installation.

CONSTRAINTS_FILE="$1"
shift 1

set -e

# NOTE(tonyb): Place this in the tox environment's log dir so it will get
# published to logs.openstack.org for easy debugging.
localfile="$VIRTUAL_ENV/log/upper-constraints.txt"

if [[ "$CONSTRAINTS_FILE" != http* ]]; then
    CONSTRAINTS_FILE="file://$CONSTRAINTS_FILE"
fi
# NOTE(tonyb): need to add curl to bindep.txt if the project supports bindep
curl "$CONSTRAINTS_FILE" --insecure --progress-bar --output "$localfile"

pip install -c"$localfile" openstack-requirements

# This is the main purpose of the script: Allow local installation of
# the current repo. It is listed in constraints file and thus any
# install will be constrained and we need to unconstrain it.
edit-constraints "$localfile" -- "$CLIENT_NAME"

pip install -c"$localfile" -U "$@"
exit $?
tox.ini
@@ -1,41 +0,0 @@
[tox]
minversion = 2.0
envlist = py35,py34,py27,pypy,pep8
skipsdist = True

[testenv]
usedevelop = True
install_command = {toxinidir}/tools/tox_install.sh {env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
setenv =
   VIRTUAL_ENV={envdir}
   BRANCH_NAME=master
   CLIENT_NAME=os-testr
whitelist_externals = find
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
commands =
         find . -type f -name "*.pyc" -delete
         ostestr {posargs}

[testenv:pep8]
commands = flake8

[testenv:venv]
commands = {posargs}

[testenv:cover]
commands = python setup.py test --coverage --coverage-package-name='os_testr' --testr-args='{posargs}'

[testenv:docs]
commands = python setup.py build_sphinx

[testenv:debug]
commands = oslo_debug_helper {posargs}

[flake8]
# E123, E125 skipped as they are invalid PEP-8.

show-source = True
ignore = E123,E125
builtins = _
exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build