Fix unit tests to detect import failures
This commit builds on the existing unit test, which simply runs 'testr list-tests' to perform discovery on tempest. This is done to ensure that, starting from a bare tempest repo with all the requirements present, we can actually import and list all the tests. However, there was previously no check of the command's return code. Historically, discovery failures have been a recurring issue, so adding a check to enforce a successful return code should prevent such failures from slipping back in. As part of this, the test is also fixed so that it runs discovery on the correct test suite. To make the unit test pass, a config dependency in the scenario test test_basic_server_ops — triggered during the testscenarios call in load_tests — had to be removed. Instead, if a configuration error is raised while running load_tests on this module, it will simply return the base test class rather than failing. This is acceptable because the test itself will fail on the same configuration dependencies that load_tests does; the difference is that it will no longer fail silently during discovery. Change-Id: Ie74c020de7c9c27adc4ff68dddbae5d4481d1224 (changes/10/121910/8)
parent
912cb93310
commit
a0f820f372
|
@ -25,6 +25,7 @@ from tempest import auth
|
|||
from tempest import clients
|
||||
from tempest.common.utils import misc
|
||||
from tempest import config
|
||||
from tempest import exceptions
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
@ -151,9 +152,12 @@ def load_tests_input_scenario_utils(*args):
|
|||
loader, standard_tests, pattern = args
|
||||
else:
|
||||
standard_tests, module, loader = args
|
||||
scenario_utils = InputScenarioUtils()
|
||||
scenario_flavor = scenario_utils.scenario_flavors
|
||||
scenario_image = scenario_utils.scenario_images
|
||||
try:
|
||||
scenario_utils = InputScenarioUtils()
|
||||
scenario_flavor = scenario_utils.scenario_flavors
|
||||
scenario_image = scenario_utils.scenario_images
|
||||
except exceptions.InvalidConfiguration:
|
||||
return standard_tests
|
||||
for test in testtools.iterate_tests(standard_tests):
|
||||
setattr(test, 'scenarios', testscenarios.multiply_scenarios(
|
||||
scenario_image,
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
|
||||
|
@ -20,16 +21,23 @@ from tempest.tests import base
|
|||
|
||||
class TestTestList(base.TestCase):
|
||||
|
||||
def test_no_import_errors(self):
|
||||
def test_testr_list_tests_no_errors(self):
|
||||
# Remove unit test discover path from env to test tempest tests
|
||||
test_env = os.environ.copy()
|
||||
test_env.pop('OS_TEST_PATH')
|
||||
import_failures = []
|
||||
p = subprocess.Popen(['testr', 'list-tests'], stdout=subprocess.PIPE)
|
||||
ids = p.stdout.read()
|
||||
p = subprocess.Popen(['testr', 'list-tests'], stdout=subprocess.PIPE,
|
||||
env=test_env)
|
||||
ids, err = p.communicate()
|
||||
self.assertEqual(0, p.returncode,
|
||||
"test discovery failed, one or more files cause an "
|
||||
"error on import")
|
||||
ids = ids.split('\n')
|
||||
for test_id in ids:
|
||||
if re.match('(\w+\.){3}\w+', test_id):
|
||||
if not test_id.startswith('tempest.'):
|
||||
fail_id = test_id.split('unittest.loader.ModuleImport'
|
||||
'Failure.')[1]
|
||||
parts = test_id.partition('tempest')
|
||||
fail_id = parts[1] + parts[2]
|
||||
import_failures.append(fail_id)
|
||||
error_message = ("The following tests have import failures and aren't"
|
||||
" being run with test filters %s" % import_failures)
|
||||
|
|
Loading…
Reference in New Issue