Improve CLI functional tests
We fix a bug connected to wrong task configuration used in CLI tests for tasks. We also refactor the tests a bit to make them more readable. CLI functional tests have been moved to a separate directory, and the tests_ci/ directory has been supplied with a README file. Co-authored-by: Sergey Skripnick <sskripnick@mirantis.com> Change-Id: I927c91f119765f27ee6b8b7c98d4156c02700102 Closes-Bug: 1374407
This commit is contained in:
parent
f171cfe236
commit
543a7900f5
15
tests_ci/README.rst
Normal file
15
tests_ci/README.rst
Normal file
@ -0,0 +1,15 @@
|
||||
===============
|
||||
Rally Gate Jobs
|
||||
===============
|
||||
|
||||
For each patch submitted for review on Gerrit, there is a set of tests called **gate jobs** to be run against it. These tests check whether the Rally code works correctly after applying the patch and provide additional guarantees that it won't break the software when it gets merged. Rally gate jobs contain tests checking the codestyle (via *pep8*), unit tests suites, functional tests and a set of Rally benchmark tasks that are executed against a real *devstack* deployment.
|
||||
|
||||
|
||||
rally-gate.sh
|
||||
-------------
|
||||
This script runs a set of real Rally benchmark tasks and fetches their results in textual / visualized form (available via a special html page by clicking the corresponding job title in Gerrit). It checks that scenarios don't fail while being executed against a devstack deployment and also tests SLA criteria to ensure that benchmark tasks have completed successfully.
|
||||
|
||||
|
||||
rally-integrated.sh
|
||||
-------------------
|
||||
This script runs a functional tests suite for Rally CLI. The tests call a range of Rally CLI commands and check that their output contains the expected data.
|
@ -3,7 +3,7 @@
|
||||
env
|
||||
|
||||
mkdir -p .testrepository
|
||||
python -m subunit.run discover tests_ci > .testrepository/subunit.log
|
||||
python -m subunit.run discover tests_functional > .testrepository/subunit.log
|
||||
EXIT_CODE=$?
|
||||
|
||||
subunit2pyunit < .testrepository/subunit.log
|
||||
|
@ -35,18 +35,26 @@ class DeploymentTestCase(unittest.TestCase):
|
||||
self.rally("deployment endpoint"))
|
||||
|
||||
def test_create_fromfile(self):
|
||||
fake_d_conf = "/tmp/.tmp.deployment"
|
||||
self.rally("deployment create --name t_create_file --filename %s"
|
||||
% fake_d_conf)
|
||||
with mock.patch.dict("os.environ", utils.TEST_ENV):
|
||||
self.rally("deployment create --name t_create_env --fromenv")
|
||||
with open("/tmp/.tmp.deployment", "w") as f:
|
||||
f.write(self.rally("deployment config"))
|
||||
self.rally("deployment create --name t_create_file "
|
||||
"--filename /tmp/.tmp.deployment")
|
||||
self.assertIn("t_create_file", self.rally("deployment list"))
|
||||
|
||||
def test_config(self):
|
||||
fake_d_conf = "/tmp/.tmp.deployment"
|
||||
self.rally("deployment create --name t_create_file --filename %s"
|
||||
% fake_d_conf)
|
||||
with open(fake_d_conf, "r") as conf:
|
||||
self.assertDictEqual(json.loads(conf.read()),
|
||||
json.loads(self.rally("deployment config")))
|
||||
with mock.patch.dict("os.environ", utils.TEST_ENV):
|
||||
self.rally("deployment create --name t_create_env --fromenv")
|
||||
config = json.loads(self.rally("deployment config"))
|
||||
self.assertEqual(utils.TEST_ENV["OS_USERNAME"],
|
||||
config["admin"]["username"])
|
||||
self.assertEqual(utils.TEST_ENV["OS_PASSWORD"],
|
||||
config["admin"]["password"])
|
||||
self.assertEqual(utils.TEST_ENV["OS_TENANT_NAME"],
|
||||
config["admin"]["tenant_name"])
|
||||
self.assertEqual(utils.TEST_ENV["OS_AUTH_URL"],
|
||||
config["auth_url"])
|
||||
|
||||
def test_destroy(self):
|
||||
with mock.patch.dict("os.environ", utils.TEST_ENV):
|
@ -17,6 +17,7 @@
|
||||
import os
|
||||
import unittest
|
||||
|
||||
import mock
|
||||
import test_cli_utils as utils
|
||||
|
||||
|
||||
@ -26,9 +27,6 @@ class TaskTestCase(unittest.TestCase):
|
||||
return {
|
||||
"Dummy.dummy_random_fail_in_atomic": [
|
||||
{
|
||||
"args": {
|
||||
"name_length": 10
|
||||
},
|
||||
"runner": {
|
||||
"type": "constant",
|
||||
"times": 100,
|
||||
@ -125,9 +123,11 @@ class SLATestCase(unittest.TestCase):
|
||||
expected = [
|
||||
{"benchmark": "KeystoneBasic.create_and_list_users",
|
||||
"criterion": "max_seconds_per_iteration",
|
||||
"detail": mock.ANY,
|
||||
"pos": 0, "success": True},
|
||||
{"benchmark": "KeystoneBasic.create_and_list_users",
|
||||
"criterion": "max_failure_percent",
|
||||
"detail": mock.ANY,
|
||||
"pos": 0, "success": True},
|
||||
]
|
||||
data = rally("task sla_check --json", getjson=True)
|
@ -66,7 +66,7 @@ class Rally(object):
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
def __init__(self, fake=False):
|
||||
# NOTE(sskripnick): we should change home dir to avoid races
|
||||
# and do not touch any user files in ~/.rally
|
||||
os.environ["HOME"] = pwd.getpwuid(os.getuid()).pw_dir
|
||||
@ -83,16 +83,6 @@ class Rally(object):
|
||||
subprocess.call(["rally-manage", "--config-file", config_filename,
|
||||
"db", "recreate"])
|
||||
self("deployment create --file /tmp/.rd.json --name MAIN")
|
||||
with open("/tmp/.tmp.deployment", "w") as d_conf:
|
||||
d_conf.write(
|
||||
"""{
|
||||
"type": "ExistingCloud",
|
||||
"auth_url": "http://fake/",
|
||||
"admin": {
|
||||
"username": "admin",
|
||||
"password": "admin",
|
||||
"tenant_name": "admin"
|
||||
}\n}""")
|
||||
|
||||
def __del__(self):
|
||||
shutil.rmtree(self.tmp_dir)
|
Loading…
Reference in New Issue
Block a user