Testing OpenStack upgrades
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

203 lines
7.5 KiB

# Copyright 2018 Red Hat
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import os
import sys
import typing # noqa
import testtools
from tobiko.common import _exception
# Ensure child processes spawned during test discovery/run use the same
# Python interpreter that is executing this module (only set when unset).
os.environ.setdefault('PYTHON', sys.executable)
class TestCasesFinder(object):
    """Discover test case IDs by driving the stestr test lister."""

    def __init__(self, config=None, repo_type=None, repo_url=None,
                 test_path=None, top_dir=None, group_regex=None,
                 blacklist_file=None, whitelist_file=None, black_regex=None,
                 filters=None):
        """
        :param str config: The path to the stestr config file. Must be a
            string.
        :param str repo_type: This is the type of repository to use. Valid
            choices are 'file' and 'sql'.
        :param str repo_url: The url of the repository to use.
        :param str test_path: Set the test path to use for unittest discovery.
            If both this and the corresponding config file option are set, this
            value will be used.
        :param str top_dir: The top dir to use for unittest discovery. This
            takes precedence over the value in the config file. (if one is
            present in the config file)
        :param str group_regex: Set a group regex to use for grouping tests
            together in the stestr scheduler. If both this and the
            corresponding config file option are set this value will be used.
        :param str blacklist_file: Path to a blacklist file, this file contains
            a separate regex exclude on each newline.
        :param str whitelist_file: Path to a whitelist file, this file contains
            a separate regex on each newline.
        :param str black_regex: Test rejection regex. If a test cases name
            matches on re.search() operation, it will be removed from the final
            test list.
        :param list filters: A list of string regex filters to initially apply
            on the test list. Tests that match any of the regexes will be used.
            (assuming any other filtering specified also uses it)
        """
        self.config = config or '.stestr.conf'
        self.repo_type = repo_type or 'file'
        self.repo_url = repo_url
        self.test_path = test_path
        self.top_dir = top_dir
        self.group_regex = group_regex
        self.blacklist_file = blacklist_file
        self.whitelist_file = whitelist_file
        self.black_regex = black_regex
        self.filters = filters

    def discover_test_cases(self, **kwargs):
        """Iterate over test_ids for a project

        This method will print the test_ids for tests in a project. You can
        filter the output just like with the run command to see exactly what
        will be run.

        Keyword arguments override the values configured on this finder.

        :return: the sorted list of discovered test case IDs.
        :raises RuntimeError: when the stestr command aborts with SystemExit.
        """
        # Imported lazily so that importing this module does not require
        # stestr to be installed.
        from stestr import config_file

        params = dict(config=self.config, repo_type=self.repo_type,
                      repo_url=self.repo_url, test_path=self.test_path,
                      top_dir=self.top_dir, group_regex=self.group_regex,
                      blacklist_file=self.blacklist_file,
                      whitelist_file=self.whitelist_file,
                      black_regex=self.black_regex, filters=self.filters)
        if kwargs:
            params.update(kwargs)

        ids = None
        config = params.pop('config')
        conf = config_file.TestrConf(config)
        filters = params.pop('filters')
        blacklist_file = params.pop('blacklist_file')
        whitelist_file = params.pop('whitelist_file')
        black_regex = params.pop('black_regex')
        cmd = conf.get_run_command(
            regexes=filters, repo_type=params['repo_type'],
            repo_url=params['repo_url'], group_regex=params['group_regex'],
            blacklist_file=blacklist_file, whitelist_file=whitelist_file,
            black_regex=black_regex, test_path=params['test_path'],
            top_dir=params['top_dir'])
        not_filtered = filters is None and blacklist_file is None \
            and whitelist_file is None and black_regex is None
        try:
            # List tests if the fixture has not already needed to filter.
            if not_filtered:
                ids = cmd.list_tests()
            else:
                ids = cmd.test_ids
        except SystemExit as ex:
            raise RuntimeError("Error discovering test cases IDs with "
                               f"parameters: {params}") from ex
        return sorted(ids)
# Module-wide default finder instance used when callers do not pass one.
FINDER = TestCasesFinder()


def discover_test_cases(finder=FINDER, **kwargs):
    """Discover test case IDs with the given finder (default: module-wide).

    Keyword arguments override the finder's configured discovery parameters.
    """
    return finder.discover_test_cases(**kwargs)
class TestCasesManager(object):
    """Keep a LIFO stack of the test cases currently being executed."""

    def __init__(self):
        # Stack of pushed test cases; the last element is the current one.
        self._test_cases: typing.List[testtools.TestCase] = []

    def get_test_case(self) -> testtools.TestCase:
        """Return the current (most recently pushed) test case.

        Falls back to the module-level DUMMY_TEST_CASE when nothing has
        been pushed yet, so callers always get a usable test case.
        """
        try:
            return self._test_cases[-1]
        except IndexError:
            return DUMMY_TEST_CASE

    def pop_test_case(self) -> testtools.TestCase:
        """Remove and return the most recently pushed test case."""
        return self._test_cases.pop()

    def push_test_case(self, test_case: testtools.TestCase):
        """Make the given test case the current one.

        :raises TypeError: when test_case is not a testtools.TestCase
            (via _exception.check_valid_type).
        """
        _exception.check_valid_type(test_case, testtools.TestCase)
        self._test_cases.append(test_case)
# Module-wide default manager used when callers do not pass one.
TEST_CASES = TestCasesManager()


def push_test_case(test_case: testtools.TestCase,
                   manager: TestCasesManager = TEST_CASES):
    """Make the given test case the current one on the given manager."""
    return manager.push_test_case(test_case=test_case)


def pop_test_case(manager: TestCasesManager = TEST_CASES) -> \
        testtools.TestCase:
    """Remove and return the most recently pushed test case."""
    return manager.pop_test_case()


def get_test_case(manager: TestCasesManager = TEST_CASES) -> \
        testtools.TestCase:
    """Return the current test case without removing it."""
    return manager.get_test_case()
class DummyTestCase(testtools.TestCase):
    """Placeholder test case returned when no real test case is current."""

    def runTest(self):
        # Intentionally empty: this case exists only so that code which
        # requires a current testtools.TestCase always has one to use.
        pass


# Shared no-op test case instance.
DUMMY_TEST_CASE = DummyTestCase()
def run_test(test_case: testtools.TestCase,
             test_result: testtools.TestResult = None) -> testtools.TestResult:
    """Run the given test case and return the result it was collected into.

    :param test_case: the test case to execute.
    :param test_result: optional result object to collect outcomes into;
        a fresh testtools.TestResult is created when not given.
    :return: the result object the test was run against.
    """
    test_result = test_result or testtools.TestResult()
    # Without this call the function would return an empty result without
    # ever executing the test.
    test_case.run(test_result)
    return test_result
def assert_in(needle, haystack, message: typing.Optional[str] = None,
              manager: TestCasesManager = TEST_CASES):
    """Assert membership using the current test case's assertIn.

    :param needle: object expected to be found in haystack.
    :param haystack: container searched for needle.
    :param message: optional failure message forwarded to assertIn.
    :param manager: manager providing the current test case.
    """
    get_test_case(manager=manager).assertIn(needle, haystack, message)
def get_skipped_test_cases(test_result: testtools.TestResult,
                           skip_reason: typing.Optional[str] = None):
    """Return the test cases recorded as skipped in the given result.

    :param test_result: the result object to inspect (its skip_reasons
        maps reason strings to lists of skipped test cases).
    :param skip_reason: when given, assert the reason was recorded and
        return only the test cases skipped for that reason; otherwise
        return the test cases skipped for any reason.
    """
    if skip_reason is not None:
        assert_in(skip_reason, test_result.skip_reasons)
        return test_result.skip_reasons[skip_reason]
    else:
        skipped_test_cases = list()
        for cases in test_result.skip_reasons.values():
            skipped_test_cases.extend(cases)
        return skipped_test_cases
def assert_test_case_was_skipped(test_case: testtools.TestCase,
                                 test_result: testtools.TestResult,
                                 skip_reason: str = None,
                                 manager: TestCasesManager = TEST_CASES):
    """Assert the given test case was skipped during the given result run.

    :param test_case: the test case expected to have been skipped.
    :param test_result: the result object to inspect.
    :param skip_reason: optional reason the case must have been skipped for.
    :param manager: manager providing the current test case for assertions.
    """
    skipped_tests = get_skipped_test_cases(test_result=test_result,
                                           skip_reason=skip_reason)
    assert_in(test_case, skipped_tests, manager=manager)