Test engine & Test cases
This patch continues the work on the Test engine started in the previous patch. It introduces several bugfixes and improvements to the test engine implementation and adds a test case for it.
parent 5274270dec
commit a0035ac49d
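For orientation, here is a minimal sketch of how the engine API touched by this patch fits together. It is based on the new test case at the bottom of this patch; the config values are illustrative, not canonical:

    from rally.benchmark import engine

    test_config = {
        'verify': ['sanity', 'smoke'],  # names from tests.verification_tests
        'benchmark': []                 # not validated yet (see TODO below)
    }
    cloud_config = {
        'identity': {'admin_name': 'admin', 'admin_password': 'admin'},
        'compute': {'controller_nodes': 'localhost'}
    }

    test_engine = engine.TestEngine(test_config)  # validates test_config
    with test_engine.bind(cloud_config):          # writes a temporary cloud config file
        passed = test_engine.verify()             # True iff all OSTF tests pass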
rally/benchmark/engine.py

@@ -21,6 +21,7 @@ import string

 from rally.benchmark import tests
 from rally.benchmark import utils
+from rally import exceptions

 import ConfigParser

@@ -47,18 +48,18 @@ class TestEngine(object):

         :param test_config: {
                 'verify': ['sanity', 'snapshot', 'smoke'],
-                'benchmark': {[
+                'benchmark': [
                     {'method1': {'args': [...], 'times': 1,
                                  'concurrency': 1}},
                     {'method2': {'args': [...], 'times': 2,
                                  'concurrency': 4}},
-                ]},
+                ],
         }
         """
         self._verify_test_config(test_config)
         self.test_config = test_config

-    def _verify_test_config(test_config):
+    def _verify_test_config(self, test_config):
         """Verifies and possibly modifies the given test config so that it can
         be used during verification and benchmarking tests.

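For reference, a config that satisfies this schema, taken verbatim from the new test case below:

    valid_test_config = {
        'verify': ['sanity', 'smoke'],
        'benchmark': []
    }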
@@ -68,27 +69,26 @@ class TestEngine(object):
         :raises: Exception if the test config is not valid
         """
         if 'verify' in test_config:
-            for verification_test in test_config['verify']:
-                if verification_test not in tests.verification_tests:
-                    # TODO(msdubov): Raise some special Exception class here?
-                    raise Exception('Unknown verification test: %s' %
-                                    verification_test)
+            for test_name in test_config['verify']:
+                if test_name not in tests.verification_tests:
+                    raise exceptions.NoSuchTestException(test_name=test_name)
         else:
             # NOTE(msdubov): if 'verify' not specified, run all verification
             #                tests by default.
             test_config['verify'] = tests.verification_tests.keys()
         # TODO(msdubov): Also verify the 'benchmark' part of the config here.

-    def _write_temporary_config(config, config_path):
+    def _write_temporary_config(self, config, config_path):
         cp = ConfigParser.RawConfigParser()
         for section in config.iterkeys():
             cp.add_section(section)
             for option in config[section].iterkeys():
                 value = config[section][option]
                 cp.set(section, option, value)
         with open(config_path, 'w') as f:
             cp.write(f)

-    def _delete_temporary_config(config, config_path):
+    def _delete_temporary_config(self, config_path):
         os.remove(config_path)

     def _random_file_path(self):
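As a sketch of what _write_temporary_config produces: RawConfigParser maps each top-level key of the config dict to an INI section and each nested key to an option, so the cloud config from the new test case would serialize roughly as follows (the path here is hypothetical; in practice it comes from _random_file_path):

    test_engine._write_temporary_config(
        {'identity': {'admin_name': 'admin', 'admin_password': 'admin'},
         'compute': {'controller_nodes': 'localhost'}},
        '/tmp/rally_cloud.conf')  # hypothetical path

    # Resulting file (option order within a section may vary):
    #
    # [identity]
    # admin_name = admin
    # admin_password = admin
    #
    # [compute]
    # controller_nodes = localhost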
@@ -129,11 +129,18 @@ class TestEngine(object):
         return self

     def verify(self):
-        """Runs OSTF tests to verify the current cloud deployment."""
+        """Runs OSTF tests to verify the current cloud deployment.
+
+        :returns: True if all tests have passed; False otherwise
+        """
         tester = utils.Tester(self.cloud_config_path)
-        tester.tests = [tests.verification_tests[test_name]
-                        for test_name in self.test_config['verify']]
-        tester.run_all()
+        verification_tests = [tests.verification_tests[test_name]
+                              for test_name in self.test_config['verify']]
+        for test_results in tester.run_all(verification_tests):
+            for result in test_results.itervalues():
+                if result['status'] != 0:
+                    return False
+        return True

     def benchmark(self):
         """Runs the benchmarks according to the test configuration

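The shape of the result dicts iterated over in verify() can be read off the run_success value mocked in the new test case: run_all() yields one dict per test, keyed by process name, with a status of 0 marking success. A sketch:

    # One element yielded by tester.run_all(...), per the run_success mock below:
    test_results = {
        'proc': {'msg': 'msg', 'status': 0, 'proc_name': 'proc'}
    }
    # verify() returns False as soon as any result has a non-zero status:
    passed = all(result['status'] == 0 for result in test_results.itervalues())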
rally/exceptions.py

@@ -84,6 +84,14 @@ class ImmutableException(RallyException):
     msg_fmt = _("This object is immutable.")


+class InvalidConfigException(RallyException):
+    msg_fmt = _("This config is invalid")
+
+
+class NoSuchTestException(InvalidConfigException):
+    msg_fmt = _("No such test: `%(test_name)s`.")
+
+
 class NotFoundException(RallyException):
     msg_fmt = _("Not found.")

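Assuming RallyException follows the usual OpenStack pattern of interpolating msg_fmt with the constructor's keyword arguments (the base class is not shown in this diff), raising the new exception would look like:

    # Assumption: RallyException formats msg_fmt % kwargs.
    raise exceptions.NoSuchTestException(test_name='some_not_existing_test')
    # -> NoSuchTestException: No such test: `some_not_existing_test`.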
tests/benchmark/test_test_engine.py (new file, 77 lines)

@@ -0,0 +1,77 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the Test engine."""
+import mock
+import os
+
+from rally.benchmark import engine
+from rally import exceptions
+from rally import test
+
+
+class TestEngineTestCase(test.NoDBTestCase):
+
+    def setUp(self):
+        super(TestEngineTestCase, self).setUp()
+        self.valid_test_config = {
+            'verify': ['sanity', 'smoke'],
+            'benchmark': []
+        }
+        self.invalid_test_config = {
+            'verify': ['sanity', 'some_not_existing_test'],
+            'benchmark': []
+        }
+        self.valid_cloud_config = {
+            'identity': {
+                'admin_name': 'admin',
+                'admin_password': 'admin'
+            },
+            'compute': {
+                'controller_nodes': 'localhost'
+            }
+        }
+        run_success = {
+            'proc': {'msg': 'msg', 'status': 0, 'proc_name': 'proc'}
+        }
+        self.run_mock = mock.patch('rally.benchmark.utils.Tester.run',
+                                   mock.Mock(return_value=run_success))
+        self.run_mock.start()
+
+    def tearDown(self):
+        self.run_mock.stop()
+        super(TestEngineTestCase, self).tearDown()
+
+    def test_verify_test_config(self):
+        try:
+            engine.TestEngine(self.valid_test_config)
+        except Exception as e:
+            self.fail("Unexpected exception in test config "
+                      "verification: %s" % str(e))
+        self.assertRaises(exceptions.NoSuchTestException,
+                          engine.TestEngine, self.invalid_test_config)
+
+    def test_bind(self):
+        test_engine = engine.TestEngine(self.valid_test_config)
+        with test_engine.bind(self.valid_cloud_config):
+            self.assertTrue(os.path.exists(test_engine.cloud_config_path))
+        self.assertFalse(os.path.exists(test_engine.cloud_config_path))
+
+    def test_verify(self):
+        test_engine = engine.TestEngine(self.valid_test_config)
+        with test_engine.bind(self.valid_cloud_config):
+            self.assertTrue(test_engine.verify())
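Note that setUp patches Tester.run to always report success, so test_verify exercises only the engine's result handling. A hypothetical companion test (not part of this patch) could drive the failure path by re-patching the mock with a non-zero status:

    def test_verify_failure(self):
        # Hypothetical addition: a failing run should make verify() return False.
        run_failure = {
            'proc': {'msg': 'msg', 'status': 1, 'proc_name': 'proc'}
        }
        with mock.patch('rally.benchmark.utils.Tester.run',
                        mock.Mock(return_value=run_failure)):
            test_engine = engine.TestEngine(self.valid_test_config)
            with test_engine.bind(self.valid_cloud_config):
                self.assertFalse(test_engine.verify())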
tests/benchmark/test_benchmark.py → tests/benchmark/test_utils.py (renamed)

@@ -15,7 +15,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-"""Tests for benchmarks."""
+"""Tests for utils."""
 import mock

 from rally.benchmark import utils

@@ -30,19 +30,19 @@ def test_dummy_2():
     pass


-class BenchmarkTestCase(test.NoDBTestCase):
+class UtilsTestCase(test.NoDBTestCase):
     def setUp(self):
-        super(BenchmarkTestCase, self).setUp()
+        super(UtilsTestCase, self).setUp()
         self.fc = mock.patch('fuel_health.cleanup.cleanup')
         self.fc.start()

     def tearDown(self):
         self.fc.stop()
-        super(BenchmarkTestCase, self).tearDown()
+        super(UtilsTestCase, self).tearDown()

     def test_running_test(self):
         tester = utils.Tester('rally/benchmark/test.conf')
-        test = ['./tests/benchmark/test_benchmark.py', '-k', 'test_dummy']
+        test = ['./tests/benchmark/test_utils.py', '-k', 'test_dummy']
         for result in tester.run(test, times=1, concurrent=1).itervalues():
             self.assertEqual(result['status'], 0)
         for result in tester.run(test, times=3, concurrent=2).itervalues():

@@ -52,8 +52,8 @@ class BenchmarkTestCase(test.NoDBTestCase):

     def test_running_multiple_tests(self):
         tester = utils.Tester('rally/benchmark/test.conf')
-        tests = [['./tests/benchmark/test_benchmark.py', '-k', 'test_dummy'],
-                 ['./tests/benchmark/test_benchmark.py', '-k', 'test_dummy_2']]
+        tests = [['./tests/benchmark/test_utils.py', '-k', 'test_dummy'],
+                 ['./tests/benchmark/test_utils.py', '-k', 'test_dummy_2']]
         for test_results in tester.run_all(tests):
             for result in test_results.itervalues():
                 self.assertEqual(result['status'], 0)