Add benchmark for tempest. Part 2
Several new scenarios were added to the tempest benchmark:

* a scenario that launches all tests
* a scenario that launches all tests from a given set (a dedicated set_name validator was added for this scenario)
* a scenario that launches all tests from a given list of test names (test coverage for the `tempest_tests_exists` validator was also extended to cover this case)
* a scenario that launches all tests matching a given regular expression

bp benchmark-scenarios-based-on-tempest

Change-Id: I3e715cb360dec3d5d8683a9001c4f2221b49d1ac
This commit is contained in:
parent e4258b1e3b
commit 769aeb366c
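The sample task files added below all share one shape: a scenario name mapped to a list of runs, each with optional args and a runner block. A minimal sketch (not part of this commit) of loading and inspecting one of them; the path assumes the repository layout introduced below:

import json

# Load one of the sample task files added by this commit.
with open("doc/samples/tasks/tempest/set.json") as f:
    task = json.load(f)

# Each top-level key names a scenario; each entry carries optional args
# and a runner configuration.
for scenario_name, runs in task.items():
    for run in runs:
        print(scenario_name, run.get("args", {}), run["runner"])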
doc/samples/tasks/tempest/all_tests.json (new file, 11 lines)
@@ -0,0 +1,11 @@
+{
+    "TempestScenario.all": [
+        {
+            "runner": {
+                "type": "constant",
+                "times": 1,
+                "concurrency": 1
+            }
+        }
+    ]
+}
doc/samples/tasks/tempest/all_tests.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
+---
+  TempestScenario.all:
+    -
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
doc/samples/tasks/tempest/list_of_tests.json (new file, 17 lines)
@@ -0,0 +1,17 @@
+{
+    "TempestScenario.list_of_tests": [
+        {
+            "args": {
+                "test_names": [
+                    "tempest.api.image.v2.test_images.ListImagesTest.test_index_no_params",
+                    "tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_status"
+                ]
+            },
+            "runner": {
+                "type": "constant",
+                "times": 10,
+                "concurrency": 1
+            }
+        }
+    ]
+}
doc/samples/tasks/tempest/list_of_tests.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
+---
+  TempestScenario.list_of_tests:
+    -
+      args:
+        test_names:
+          - "tempest.api.image.v2.test_images.ListImagesTest.test_index_no_params"
+          - "tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_status"
+      runner:
+        type: "constant"
+        times: 10
+        concurrency: 1
doc/samples/tasks/tempest/set.json (new file, 12 lines)
@@ -0,0 +1,12 @@
+{
+    "TempestScenario.set": [
+        {
+            "args": {"set_name": "image"},
+            "runner": {
+                "type": "constant",
+                "times": 1,
+                "concurrency": 1
+            }
+        }
+    ]
+}
doc/samples/tasks/tempest/set.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
+---
+  TempestScenario.set:
+    -
+      args:
+        set_name: "image"
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
doc/samples/tasks/tempest/specific_regex.json (new file, 12 lines)
@@ -0,0 +1,12 @@
+{
+    "TempestScenario.specific_regex": [
+        {
+            "args": {"regex": "^tempest.*image.*Server.*$"},
+            "runner": {
+                "type": "constant",
+                "times": 1,
+                "concurrency": 1
+            }
+        }
+    ]
+}
doc/samples/tasks/tempest/specific_regex.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
+---
+  TempestScenario.specific_regex:
+    -
+      args:
+        "regex": "^tempest.*image.*Server.*$"
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
rally/benchmark/context/tempest.py
@@ -13,7 +13,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import os
+import shutil
 import subprocess
+import tempfile
 
 from rally.benchmark.context import base
 from rally import exceptions
@@ -51,6 +54,12 @@ class Tempest(base.Context):
 
         self.context["verifier"] = self.verifier
 
+        # Create temporary directory for xml-results.
+        self.results_dir = os.path.join(
+            tempfile.gettempdir(), "%s-results" % self.task.task.uuid)
+        os.mkdir(self.results_dir)
+        self.context["tmp_results_dir"] = self.results_dir
+
     @utils.log_task_wrapper(LOG.info, _("Exit context: `tempest`"))
     def cleanup(self):
         try:
@@ -65,3 +74,4 @@ class Tempest(base.Context):
                 cwd=self.verifier.tempest_path)
         except subprocess.CalledProcessError:
             LOG.error("Tempest cleanup failed.")
+        shutil.rmtree(self.results_dir)
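The context changes above give every task its own temporary directory for junitxml results: created in setup(), removed in cleanup(). A standalone sketch of that lifecycle; "fake-uuid" is a placeholder for the real task uuid:

import os
import shutil
import tempfile

# setup(): one "<uuid>-results" directory per task
results_dir = os.path.join(tempfile.gettempdir(), "%s-results" % "fake-uuid")
os.mkdir(results_dir)
try:
    pass  # scenarios drop their junitxml files here
finally:
    shutil.rmtree(results_dir)  # cleanup(): removed together with the context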
rally/benchmark/scenarios/tempest/tempest.py
@@ -14,6 +14,7 @@
 # under the License.
 
 from rally.benchmark.scenarios import base
+from rally.benchmark.scenarios.tempest import utils
 from rally.benchmark import validation as valid
 from rally import consts
 
@@ -22,13 +23,66 @@ class TempestScenario(base.Scenario):
 
     @valid.add_validator(valid.tempest_tests_exists())
     @base.scenario(context={"tempest": {}})
-    def single_test(self, test_name):
+    @utils.tempest_log_wrapper
+    def single_test(self, test_name, log_file):
         """Launch a single test
 
         :param test_name: name of tempest scenario for launching
+        :param log_file: name of file for junitxml results
         """
         if (not test_name.startswith("tempest.api.")
                 and test_name.split('.')[0] in consts.TEMPEST_TEST_SETS):
             test_name = "tempest.api." + test_name
 
-        self.context()["verifier"].run(test_name)
+        self.context()["verifier"].run(test_name, log_file)
+
+    @base.scenario(context={"tempest": {}})
+    @utils.tempest_log_wrapper
+    def all(self, log_file):
+        """Launch all discovered tests
+
+        :param log_file: name of file for junitxml results
+        """
+
+        self.context()["verifier"].run("", log_file)
+
+    @valid.add_validator(valid.tempest_set_exists())
+    @base.scenario(context={"tempest": {}})
+    @utils.tempest_log_wrapper
+    def set(self, set_name, log_file):
+        """Launch one by one methods from the set
+
+        :param set_name: set name of tempest scenarios for launching
+        :param log_file: name of file for junitxml results
+        """
+
+        if set_name == "full":
+            testr_arg = ""
+        elif set_name == "smoke":
+            testr_arg = "smoke"
+        else:
+            testr_arg = "tempest.api.%s" % set_name
+
+        self._context["verifier"].run(testr_arg, log_file)
+
+    @valid.add_validator(valid.tempest_tests_exists())
+    @base.scenario(context={"tempest": {}})
+    @utils.tempest_log_wrapper
+    def list_of_tests(self, test_names, log_file):
+        """Launch all tests from given list
+
+        :param test_names: list of tempest scenarios for launching
+        :param log_file: name of file for junitxml results
+        """
+
+        self._context["verifier"].run(" ".join(test_names), log_file)
+
+    @base.scenario(context={"tempest": {}})
+    @utils.tempest_log_wrapper
+    def specific_regex(self, regex, log_file):
+        """Launch all tests which match given regex
+
+        :param log_file: name of file for junitxml results
+        """
+
+        self._context["verifier"].run(regex, log_file)
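The dispatch inside TempestScenario.set is the only non-trivial logic above. Here is the same mapping as a standalone function, for illustration only (in the real code the set names come from rally.consts.TEMPEST_TEST_SETS):

# Restatement of the set_name -> testr argument mapping from the diff above.
def testr_arg_for_set(set_name):
    if set_name == "full":
        return ""  # empty argument: testr runs every discovered test
    elif set_name == "smoke":
        return "smoke"  # testr filters on the smoke tag directly
    else:
        return "tempest.api.%s" % set_name  # e.g. "image" -> "tempest.api.image"


assert testr_arg_for_set("full") == ""
assert testr_arg_for_set("smoke") == "smoke"
assert testr_arg_for_set("image") == "tempest.api.image"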
rally/benchmark/scenarios/tempest/utils.py (new file, 52 lines)
@@ -0,0 +1,52 @@
+# Copyright 2014: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import six
+import subprocess
+import tempfile
+
+from rally import exceptions
+from rally.openstack.common.gettextutils import _
+
+
+def tempest_log_wrapper(func):
+    def inner_func(scenario_obj, *args, **kwargs):
+        if "log_file" not in kwargs:
+            # set temporary log file
+            kwargs["log_file"] = os.path.join(
+                scenario_obj.context()["tmp_results_dir"],
+                os.path.basename(tempfile.NamedTemporaryFile().name))
+
+        # run target scenario
+        try:
+            func(scenario_obj, *args, **kwargs)
+        except subprocess.CalledProcessError:
+            pass
+
+        # parse and save results
+        total, tests = scenario_obj.context()["verifier"].parse_results(
+            kwargs["log_file"])
+        if total and tests:
+            scenario_obj._add_atomic_actions("test_execution",
+                                             total.get("time"))
+            if total.get("errors") or total.get("failures"):
+                raise exceptions.TempestBenchmarkFailure([
+                    test for test in six.itervalues(tests)
+                    if test["status"] == "FAIL"])
+        else:
+            raise exceptions.TempestBenchmarkFailure(_("No information"))
+
+    return inner_func
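The decorator's contract is easiest to see in isolation: if the caller passes no log_file, one is generated under the context's tmp_results_dir. A minimal, self-contained imitation, with FakeScenario and run() as purely illustrative stand-ins (result parsing omitted):

import os
import tempfile


class FakeScenario(object):
    def context(self):
        return {"tmp_results_dir": tempfile.gettempdir()}


def log_wrapper(func):
    def inner_func(scenario_obj, *args, **kwargs):
        if "log_file" not in kwargs:
            # same defaulting rule as tempest_log_wrapper above
            kwargs["log_file"] = os.path.join(
                scenario_obj.context()["tmp_results_dir"],
                os.path.basename(tempfile.NamedTemporaryFile().name))
        return func(scenario_obj, *args, **kwargs)
    return inner_func


@log_wrapper
def run(scenario_obj, log_file):
    print("junitxml results go to %s" % log_file)


run(FakeScenario())                     # log_file generated automatically
run(FakeScenario(), log_file="/tmp/x")  # caller-supplied path wins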
rally/benchmark/validation.py
@@ -224,7 +224,7 @@ def image_valid_on_flavor(flavor_name, image_name):
 def tempest_tests_exists():
     """Returns validator for tempest test."""
     def tempest_test_exists_validator(**kwargs):
-        verifier = tempest.Tempest(kwargs['task'].task.deployment_uuid)
+        verifier = tempest.Tempest(kwargs["task"].task.deployment_uuid)
         if not verifier.is_installed():
             verifier.install()
         if not verifier.is_configured():
@@ -232,15 +232,15 @@ def tempest_tests_exists():
 
         allowed_tests = verifier.discover_tests()
 
-        if 'test_name' in kwargs:
-            tests = [kwargs['test_name']]
+        if "test_name" in kwargs:
+            tests = [kwargs["test_name"]]
         else:
-            tests = kwargs['test_names']
+            tests = kwargs["test_names"]
 
         for test in tests:
             if (not test.startswith("tempest.api.")
-                    and test.split('.')[0] in consts.TEMPEST_TEST_SETS):
-                tests[tests.index(test)] = 'tempest.api.' + test
+                    and test.split(".")[0] in consts.TEMPEST_TEST_SETS):
+                tests[tests.index(test)] = "tempest.api." + test
 
         wrong_tests = set(tests) - allowed_tests
 
@@ -248,11 +248,23 @@ def tempest_tests_exists():
             return ValidationResult()
         else:
             message = (_("One or more tests not found: '%s'") %
-                       "', '".join(wrong_tests))
+                       "', '".join(sorted(wrong_tests)))
             return ValidationResult(False, message)
     return tempest_test_exists_validator
 
 
+def tempest_set_exists():
+    """Returns validator for tempest set."""
+    def tempest_set_exists_validator(**kwargs):
+        if kwargs["set_name"] not in consts.TEMPEST_TEST_SETS:
+            message = _("Set name '%s' not found.") % kwargs["set_name"]
+            return ValidationResult(False, message)
+        else:
+            return ValidationResult()
+
+    return tempest_set_exists_validator
+
+
 def required_parameters(params):
     """Returns validator for required parameters
 
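The new tempest_set_exists validator reduces to a membership test against the known set names. An illustrative sketch; the tuple below is an assumed stand-in for rally.consts.TEMPEST_TEST_SETS, not its actual value:

# Assumed subset of test set names, for illustration only.
TEMPEST_TEST_SETS = ("full", "smoke", "compute", "identity", "image",
                     "network", "object_storage", "volume")


def set_name_is_valid(set_name):
    return set_name in TEMPEST_TEST_SETS


assert set_name_is_valid("image")
assert not set_name_is_valid("no_such_set")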
rally/exceptions.py
@@ -215,6 +215,10 @@ class TempestSetupFailure(RallyException):
     msg_fmt = _("Unable to setup tempest: '%(message)s'")
 
 
+class TempestBenchmarkFailure(RallyException):
+    msg_fmt = _("Failed tempest test(s): '%(message)s'")
+
+
 class BenchmarkSetupFailure(RallyException):
     msg_fmt = _("Unable to setup benchmark: '%(message)s'")
 
rally/verification/verifiers/tempest/tempest.py
@@ -171,11 +171,14 @@ class Tempest(object):
             print("Test set %s has been finished with error. "
                   "Check log for details" % set_name)
 
-    def run(self, testr_arg=None):
+    def run(self, testr_arg=None, log_file=None):
         """Launch tempest with given arguments
 
         :param testr_arg: argument which will be transmitted into testr
         :type testr_arg: str
+        :param log_file: file name for junitxml results of tests. If not
+                         specified, value from "self.log_file" will be chosen.
+        :type log_file: str
 
         :raises: :class:`subprocess.CalledProcessError` if tests has been
                  finished with error.
@@ -190,7 +193,7 @@
             "venv": self.venv_wrapper,
             "arg": testr_arg,
             "tempest_path": self.tempest_path,
-            "log_file": self.log_file
+            "log_file": log_file or self.log_file
         })
         LOG.debug("Test(s) started by the command: %s" % test_cmd)
         subprocess.check_call(test_cmd, cwd=self.tempest_path,
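A hedged usage sketch for the new optional log_file parameter of Tempest.run(); the uuid and paths are placeholders, and the run() calls are left commented out because they would actually launch tempest:

from rally.verification.verifiers.tempest import tempest

verifier = tempest.Tempest("fake-deployment-uuid")

# Without log_file, junitxml output goes to verifier.log_file, as before:
# verifier.run("tempest.api.image")

# With log_file, each call can redirect its results independently:
# verifier.run("tempest.api.image", log_file="/tmp/image-results.xml")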
tests/benchmark/context/test_tempest.py
@@ -32,11 +32,13 @@ class TempestContextTestCase(test.TestCase):
         task.task.deployment_uuid.return_value = "fake_uuid"
         self.context = {"task": task}
 
+    @mock.patch(CONTEXT + ".os.mkdir")
     @mock.patch(TEMPEST + ".Tempest.generate_config_file")
     @mock.patch(TEMPEST + ".Tempest.is_configured")
     @mock.patch(TEMPEST + ".Tempest.install")
     @mock.patch(TEMPEST + ".Tempest.is_installed")
-    def test_setup(self, mock_is_install, mock_install, mock_is_cfg, mock_cfg):
+    def test_setup(self, mock_is_install, mock_install, mock_is_cfg, mock_cfg,
+                   mock_mkdir):
         mock_is_install.return_value = True
         mock_is_cfg.return_value = False
 
@@ -48,11 +50,12 @@ class TempestContextTestCase(test.TestCase):
         self.assertEqual(1, mock_cfg.call_count)
         self.assertEqual('/dev/null', benchmark.verifier.log_file)
 
+    @mock.patch(CONTEXT + ".os.mkdir")
     @mock.patch(TEMPEST + ".Tempest.is_configured")
     @mock.patch(TEMPEST + ".Tempest.is_installed")
     @mock.patch(TEMPEST + ".Tempest.install")
     def test_setup_failure_on_tempest_installation(
-            self, mock_install, mock_is_installed, mock_is_cfg):
+            self, mock_install, mock_is_installed, mock_is_cfg, mock_mkdir):
         mock_is_installed.return_value = False
         mock_install.side_effect = exceptions.TempestSetupFailure()
 
@@ -61,10 +64,12 @@ class TempestContextTestCase(test.TestCase):
         self.assertRaises(exceptions.BenchmarkSetupFailure, benchmark.setup)
         self.assertEqual(0, mock_is_cfg.call_count)
 
+    @mock.patch(CONTEXT + ".shutil")
     @mock.patch(CONTEXT + ".subprocess")
-    def test_cleanup(self, mock_sp):
+    def test_cleanup(self, mock_sp, mock_shutil):
         benchmark = tempest.Tempest(self.context)
         benchmark.verifier = mock.MagicMock()
+        benchmark.results_dir = "/tmp/path"
 
         benchmark.cleanup()
 
@@ -73,3 +78,4 @@ class TempestContextTestCase(test.TestCase):
             (benchmark.verifier.tempest_path, benchmark.verifier.venv_wrapper),
             shell=True, cwd=benchmark.verifier.tempest_path,
             env=benchmark.verifier.env)
+        mock_shutil.rmtree.assert_called_once_with("/tmp/path")
tests/benchmark/scenarios/tempest/test_tempest.py
@@ -20,6 +20,7 @@ from rally.verification.verifiers.tempest import tempest as verifier
 from tests import test
 
 VERIFIER = "rally.verification.verifiers.tempest.tempest"
+TS = "rally.benchmark.scenarios.tempest"
 
 
 class TempestScenarioTestCase(test.TestCase):
@@ -28,21 +29,71 @@ class TempestScenarioTestCase(test.TestCase):
         super(TempestScenarioTestCase, self).setUp()
         self.verifier = verifier.Tempest("fake_uuid")
         self.verifier.log_file = "/dev/null"
-        self.context = {"verifier": self.verifier}
+        self.verifier.parse_results = mock.MagicMock()
+        self.verifier.parse_results.return_value = ({"fake": True},
+                                                    {"have_results": True})
+        self.context = {"verifier": self.verifier,
+                        "tmp_results_dir": "/dev"}
         self.scenario = tempest.TempestScenario(self.context)
+        self.scenario._add_atomic_actions = mock.MagicMock()
 
+    def get_tests_launcher_cmd(self, tests):
+        return ("%(venv)s testr run --parallel --subunit %(tests)s "
+                "| %(venv)s subunit2junitxml --forward --output-to=/dev/null "
+                "| %(venv)s subunit-2to1 "
+                "| %(venv)s %(tempest_path)s/tools/colorizer.py" %
+                {
+                    "venv": self.verifier.venv_wrapper,
+                    "tempest_path": self.verifier.tempest_path,
+                    "tests": " ".join(tests)
+                })
+
+    @mock.patch(TS + ".utils.tempfile")
     @mock.patch(VERIFIER + ".subprocess")
-    def test_single_test(self, mock_sp):
-        self.scenario.single_test("tempest.api.fake.test")
-        expected_call = (
-            "%(venv)s testr run --parallel --subunit tempest.api.fake.test "
-            "| %(venv)s subunit2junitxml --forward --output-to=/dev/null "
-            "| %(venv)s subunit-2to1 "
-            "| %(venv)s %(tempest_path)s/tools/colorizer.py" %
-            {
-                "venv": self.verifier.venv_wrapper,
-                "tempest_path": self.verifier.tempest_path
-            })
+    def test_single_test(self, mock_sp, mock_tmp):
+        mock_tmp.NamedTemporaryFile().name = "/dev/null"
+        fake_test = "tempest.api.fake.test"
+
+        self.scenario.single_test(test_name=fake_test)
+
+        expected_call = self.get_tests_launcher_cmd([fake_test])
         mock_sp.check_call.assert_called_once_with(
             expected_call, cwd=self.verifier.tempest_path,
             env=self.verifier.env, shell=True)
 
+    @mock.patch(TS + ".utils.tempfile")
+    @mock.patch(VERIFIER + ".subprocess")
+    def test_all(self, mock_sp, mock_tmp):
+        mock_tmp.NamedTemporaryFile().name = "/dev/null"
+
+        self.scenario.all()
+
+        expected_call = self.get_tests_launcher_cmd([])
+        mock_sp.check_call.assert_called_once_with(
+            expected_call, cwd=self.verifier.tempest_path,
+            env=self.verifier.env, shell=True)
+
+    @mock.patch(TS + ".utils.tempfile")
+    @mock.patch(VERIFIER + ".subprocess")
+    def test_set(self, mock_sp, mock_tmp):
+        mock_tmp.NamedTemporaryFile().name = "/dev/null"
+
+        self.scenario.set("smoke")
+
+        expected_call = self.get_tests_launcher_cmd(["smoke"])
+        mock_sp.check_call.assert_called_once_with(
+            expected_call, cwd=self.verifier.tempest_path,
+            env=self.verifier.env, shell=True)
+
+    @mock.patch(TS + ".utils.tempfile")
+    @mock.patch(VERIFIER + ".subprocess")
+    def test_list_of_tests(self, mock_sp, mock_tmp):
+        mock_tmp.NamedTemporaryFile().name = "/dev/null"
+        fake_tests = ["tempest.fake.test1", "tempest.fake.test2"]
+
+        self.scenario.list_of_tests(fake_tests)
+
+        expected_call = self.get_tests_launcher_cmd(fake_tests)
+        mock_sp.check_call.assert_called_once_with(
+            expected_call, cwd=self.verifier.tempest_path,
+            env=self.verifier.env, shell=True)
tests/benchmark/scenarios/tempest/test_utils.py (new file, 57 lines)
@@ -0,0 +1,57 @@
+# Copyright 2014: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from rally.benchmark.scenarios.tempest import tempest
+from rally.benchmark.scenarios.tempest import utils
+from tests import test
+
+TS = "rally.benchmark.scenarios.tempest"
+
+
+class TempestLogWrappersTestCase(test.TestCase):
+
+    def setUp(self):
+        super(TempestLogWrappersTestCase, self).setUp()
+        verifier = mock.MagicMock()
+        verifier.parse_results.return_value = ({"fake": True},
+                                               {"have_results": True})
+
+        context = {"tmp_results_dir": "/tmp/dir", "verifier": verifier}
+        self.scenario = tempest.TempestScenario(context)
+        self.scenario._add_atomic_actions = mock.MagicMock()
+
+    @mock.patch(TS + ".utils.tempfile")
+    def test_launch_without_specified_log_file(self, mock_tmp):
+        mock_tmp.NamedTemporaryFile().name = "tmp_file"
+        target_func = mock.MagicMock()
+        func = utils.tempest_log_wrapper(target_func)
+
+        func(self.scenario)
+
+        target_func.assert_called_once_with(self.scenario,
+                                            log_file="/tmp/dir/tmp_file")
+
+    @mock.patch(TS + ".utils.tempfile")
+    def test_launch_with_specified_log_file(self, mock_tmp):
+        target_func = mock.MagicMock()
+        func = utils.tempest_log_wrapper(target_func)
+
+        func(self.scenario, log_file='log_file')
+
+        target_func.assert_called_once_with(self.scenario,
+                                            log_file="log_file")
+        self.assertEqual(0, mock_tmp.NamedTemporaryFile.call_count)
tests/benchmark/test_validation.py
@@ -20,11 +20,12 @@ from glanceclient import exc as glance_exc
 from novaclient import exceptions as nova_exc
 
 from rally.benchmark import validation
+from rally.openstack.common.gettextutils import _
 from tests import fakes
 from tests import test
 
 
-TEMPEST = 'rally.verification.verifiers.tempest.tempest'
+TEMPEST = "rally.verification.verifiers.tempest.tempest"
 
 
 class ValidationUtilsTestCase(test.TestCase):
@@ -279,34 +280,100 @@ class ValidationUtilsTestCase(test.TestCase):
         self.assertFalse(result.is_valid)
         self.assertEqual(result.msg, "Flavor with id '101' not found")
 
-    @mock.patch(TEMPEST + '.Tempest.is_configured')
-    @mock.patch(TEMPEST + '.Tempest.is_installed')
-    @mock.patch(TEMPEST + '.subprocess')
+    @mock.patch(TEMPEST + ".Tempest.is_configured")
+    @mock.patch(TEMPEST + ".Tempest.is_installed")
+    @mock.patch(TEMPEST + ".subprocess")
     def test_tempest_test_name_not_valid(self, mock_sp, mock_install,
                                          mock_config):
         mock_sp.Popen().communicate.return_value = (
-            'tempest.api.fake_test1[gate]\ntempest.api.fate_test2\n',)
+            "tempest.api.fake_test1[gate]\ntempest.api.fate_test2\n",)
         mock_install.return_value = True
         mock_config.return_value = True
 
         validator = validation.tempest_tests_exists()
-        result = validator(test_name='no_valid_test_name',
+        result = validator(test_name="no_valid_test_name",
                            task=mock.MagicMock())
         self.assertFalse(result.is_valid)
         self.assertEqual("One or more tests not found: 'no_valid_test_name'",
                          result.msg)
 
-    @mock.patch(TEMPEST + '.Tempest.is_configured')
-    @mock.patch(TEMPEST + '.Tempest.is_installed')
-    @mock.patch(TEMPEST + '.subprocess')
+    @mock.patch(TEMPEST + ".Tempest.is_configured")
+    @mock.patch(TEMPEST + ".Tempest.is_installed")
+    @mock.patch(TEMPEST + ".subprocess")
     def test_tempest_test_name_valid(self, mock_sp, mock_install, mock_config):
         mock_sp.Popen().communicate.return_value = (
-            'tempest.api.compute.fake_test1[gate]\n'
-            'tempest.api.image.fake_test2\n',)
+            "tempest.api.compute.fake_test1[gate]\n"
+            "tempest.api.image.fake_test2\n",)
         mock_install.return_value = True
         mock_config.return_value = True
 
         validator = validation.tempest_tests_exists()
-        result = validator(test_name='image.fake_test2', task=mock.MagicMock())
+        result = validator(test_name="image.fake_test2", task=mock.MagicMock())
+
         self.assertTrue(result.is_valid)
 
+    @mock.patch(TEMPEST + ".Tempest.is_configured")
+    @mock.patch(TEMPEST + ".Tempest.is_installed")
+    @mock.patch(TEMPEST + ".subprocess")
+    def test_tempest_test_names_one_invalid(self, mock_sp, mock_install,
+                                            mock_config):
+        mock_sp.Popen().communicate.return_value = ("\n".join([
+            "tempest.api.fake_test1[gate]",
+            "tempest.api.fake_test2",
+            "tempest.api.fake_test3[gate,smoke]",
+            "tempest.api.fate_test4[fake]"]),)
+        mock_install.return_value = True
+        mock_config.return_value = True
+
+        validator = validation.tempest_tests_exists()
+        result = validator(test_names=["tempest.api.fake_test2",
+                                       "tempest.api.invalid.test"],
+                           task=mock.MagicMock())
+
+        self.assertFalse(result.is_valid)
+        self.assertEqual(_("One or more tests not found: '%s'") %
+                         "tempest.api.invalid.test", result.msg)
+
+    @mock.patch(TEMPEST + ".Tempest.is_configured")
+    @mock.patch(TEMPEST + ".Tempest.is_installed")
+    @mock.patch(TEMPEST + ".subprocess")
+    def test_tempest_test_names_all_invalid(self, mock_sp, mock_install,
+                                            mock_config):
+        mock_sp.Popen().communicate.return_value = ("\n".join([
+            "tempest.api.fake_test1[gate]",
+            "tempest.api.fake_test2",
+            "tempest.api.fake_test3[gate,smoke]",
+            "tempest.api.fate_test4[fake]"]),)
+        mock_install.return_value = True
+        mock_config.return_value = True
+
+        validator = validation.tempest_tests_exists()
+        result = validator(test_names=["tempest.api.invalid.test1",
+                                       "tempest.api.invalid.test2"],
+                           task=mock.MagicMock())
+
+        self.assertFalse(result.is_valid)
+        self.assertEqual(
+            _("One or more tests not found: '%s'") %
+            "tempest.api.invalid.test1', 'tempest.api.invalid.test2",
+            result.msg)
+
+    @mock.patch(TEMPEST + ".Tempest.is_configured")
+    @mock.patch(TEMPEST + ".Tempest.is_installed")
+    @mock.patch(TEMPEST + ".subprocess")
+    def test_tempest_test_names_all_valid(self, mock_sp, mock_install,
+                                          mock_config):
+        mock_sp.Popen().communicate.return_value = ("\n".join([
+            "tempest.api.fake_test1[gate]",
+            "tempest.api.fake_test2",
+            "tempest.api.fake_test3[gate,smoke]",
+            "tempest.api.fate_test4[fake]"]),)
+        mock_install.return_value = True
+        mock_config.return_value = True
+
+        validator = validation.tempest_tests_exists()
+        result = validator(test_names=["tempest.api.fake_test1",
+                                       "tempest.api.fake_test2"],
+                           task=mock.MagicMock())
+
+        self.assertTrue(result.is_valid)