Benchmark test framework for Rally
This patch introduces the benchmark test framework for Rally. The framework relies heavily on test configuration files, which let the user specify which tests to run, in what order and how many times, as well as parameterize them (e.g., to configure the number of compute nodes used in load testing). The test configuration also controls whether the benchmark tests are launched concurrently in several threads. Test configuration processing is handled by the further developed TestEngine, which can now also validate the test configuration passed to its constructor. The patch also refactors the Tester class so that it is configurable through the test configuration. This functionality is supported by the TestConfigManager class, another child class of the base ConfigManager defined in one of the previous patches.

Blueprint test-engine

Change-Id: Idd1296537d5a65c0b353ee9812d2e360d3c8ea39
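For orientation, a minimal usage sketch assembled from the docstrings in this patch; the benchmark test name and argument values are illustrative only, and since rally.benchmark.tests.benchmark_tests is still an empty mapping in this patch, a real run would first need that name registered there.

from rally.benchmark import engine

test_config = {
    'verify': {
        'tests_to_run': ['sanity', 'smoke']
    },
    'benchmark': {
        'tests_to_run': {
            # Illustrative name: it must be present in
            # rally.benchmark.tests.benchmark_tests (empty in this patch).
            'nova.server_metadata.test_set_and_delete_meta': [
                {'args': {'amount': 5}, 'times': 1, 'concurrent': 1},
                {'args': {'amount': 10}, 'times': 4, 'concurrent': 2}
            ]
        }
    }
}
cloud_config = {
    'identity': {'admin_name': 'admin', 'admin_password': 'admin'},
    'compute': {'controller_nodes': 'localhost'}
}

test_engine = engine.TestEngine(test_config)
with test_engine.bind(cloud_config):
    test_engine.verify()               # OSTF verification tests
    results = test_engine.benchmark()  # list of per-launch result dicts
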
@@ -17,17 +17,30 @@

import abc
import ConfigParser
import json


class ConfigManager(ConfigParser.RawConfigParser, object):

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def to_dict(self):
        pass

    def __init__(self, config=None):
        super(ConfigManager, self).__init__()
        if config:
            if isinstance(config, basestring):
                self.read(config)
            elif isinstance(config, dict):
                self.read_from_dict(config)

    def read_from_dict(self, dct, transform=str, replace=False):
        for section_name, section in dct.iteritems():
            if not self.has_section(section_name):
                self.add_section(section_name)
            for opt in section:
                if not self.has_option(section_name, opt) or replace:
                    self.set(section_name, opt, transform(section[opt]))

    @abc.abstractmethod
    def read_from_dict(self):
    def to_dict(self):
        pass
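A small usage sketch (not part of the patch): a hypothetical DemoConfigManager subclass only has to implement to_dict(), since read_from_dict() is now concrete in the base class; a two-level dict maps onto ConfigParser sections and options and can be dumped back.

from rally.benchmark import config


class DemoConfigManager(config.ConfigManager):

    def to_dict(self):
        # Reverse of read_from_dict(): sections/options back to a dict.
        res = {}
        for section in self.sections():
            res[section] = dict(self.items(section))
        return res


demo = DemoConfigManager({'identity': {'admin_name': 'admin'}})
assert demo.to_dict() == {'identity': {'admin_name': 'admin'}}
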
@@ -98,18 +111,69 @@ class CloudConfigManager(ConfigManager):
        }
    }

    def __init__(self):
        super(CloudConfigManager, self).__init__()
    def __init__(self, config=None):
        super(CloudConfigManager, self).__init__(config)
        self.read_from_dict(self._DEFAULT_CLOUD_CONFIG)

    def read_from_dict(self, dct):
        for section_name, section in dct.iteritems():
            self.add_section(section_name)
            for option in section:
                self.set(section_name, option, section[option])

    def to_dict(self):
        res = {}
        for section in self.sections():
            res[section] = dict(self.items(section))
        return res


class TestConfigManager(ConfigManager):

    def read_from_dict(self, dct):
        super(TestConfigManager, self).read_from_dict(dct, json.dumps)

    def to_dict(self):
        res = {}
        for section in self.sections():
            # NOTE(msdubov): test configs contain json strings as their values.
            parsed_items = map(lambda (opt, val): (opt, json.loads(val)),
                               self.items(section))
            res[section] = dict(parsed_items)
        return res


test_config_schema = {
    "type": "object",
    "$schema": "http://json-schema.org/draft-03/schema",
    "properties": {
        "verify": {
            "type": "object",
            "properties": {"tests_to_run": {"type": "array"}},
            "additionalProperties": False
        },
        "benchmark": {
            "type": "object",
            "properties": {
                "tests_to_run": {
                    "type": "object",
                    "patternProperties": {
                        ".*": {
                            "type": "array",
                            "items": {
                                "type": "object",
                                "properties": {
                                    "args": {"type": "object"},
                                    "times": {"type": "number"},
                                    "concurrent": {"type": "number"}
                                },
                                "additionalProperties": False
                            }
                        }
                    }
                },
                "tests_setUp": {
                    "type": "object",
                    "patternProperties": {
                        ".*": {"type": "object"},
                    }
                }
            },
            "additionalProperties": False
        }
    },
    "additionalProperties": False
}
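As a quick illustration (not part of the patch), a config shaped like the TestEngine docstring example below validates against this schema, while an unknown top-level key is rejected; this assumes the jsonschema package from the updated requirements is installed.

import jsonschema

from rally.benchmark import config

valid = {
    'benchmark': {
        'tests_to_run': {
            'nova.server_metadata.test_set_and_delete_meta': [
                {'args': {'amount': 5}, 'times': 1, 'concurrent': 1}
            ]
        }
    }
}
jsonschema.validate(valid, config.test_config_schema)    # passes silently

invalid = {'benchmarck': {}}                              # misspelled key
try:
    jsonschema.validate(invalid, config.test_config_schema)
except jsonschema.ValidationError as e:
    print('rejected: %s' % e.message)
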
@@ -15,21 +15,22 @@

# License for the specific language governing permissions and limitations
# under the License.

import copy
import jsonschema
import os
import random
import string

from rally.benchmark import config
from rally.benchmark import tests
from rally.benchmark import utils
from rally import exceptions

import ConfigParser


class TestEngine(object):
    """The test engine class, an instance of which is initialized by the
    Orchestrator with the test configuration and then is used to launch OSTF
    tests, to benchmark the deployment and finally to process the results.
    tests and to benchmark the deployment.

    .. note::
@@ -37,70 +38,102 @@ class TestEngine(object):
        ...
        test = TestEngine(test_config)
        # Deploying the cloud...
        with test.bind(deployment_config):
        with test.bind(cloud_config):
            test.verify()
            test.benchmark()
            test.process_results()
    """

    def __init__(self, test_config):
        """TestEngine constructor.

        :param test_config: {
                'verify': ['sanity', 'snapshot', 'smoke'],
                'benchmark': [
                    {'method1': {'args': [...], 'times': 1,
                                 'concurrency': 1}},
                    {'method2': {'args': [...], 'times': 2,
                                 'concurrency': 4}},
                ],
            }
        :param test_config: Dictionary of form {
                "verify": {
                    "tests_to_run": ["sanity", "snapshot", "smoke"]
                },
                "benchmark": {
                    "tests_setUp": {
                        "nova.server_metadata": {"servers_to_boot": 10}
                    }
                    "tests_to_run": {
                        "nova.server_metadata.test_set_and_delete_meta": [
                            {"args": {"amount": 5}, "times": 1, "concurrent": 1},
                            {"args": {"amount": 10}, "times": 4, "concurrent": 2}
                        ]
                    }
                }
            }
        """
        self._verify_test_config(test_config)
        self.test_config = test_config
        self._validate_test_config(test_config)
        test_config = self._format_test_config(test_config)
        self.test_config = config.TestConfigManager(test_config)

    def _verify_test_config(self, test_config):
        """Verifies and possibly modifies the given test config so that it can
        be used during verification and benchmarking tests.
    def _validate_test_config(self, test_config):
        """Checks whether the given test config is valid and can be used during
        verification and benchmarking tests.

        :param test_config: Dictionary in the same format as for the __init__
                            method.

        :raises: Exception if the test config is not valid
        """
        if 'verify' in test_config:
            for test_name in test_config['verify']:
                if test_name not in tests.verification_tests:
                    raise exceptions.NoSuchTestException(test_name=test_name)
        else:
            # NOTE(msdubov): if 'verify' not specified, run all verification
            #                tests by default.
            test_config['verify'] = tests.verification_tests.keys()
        # TODO(msdubov): Also verify the 'benchmark' part of the config here.
        # Perform schema verification
        try:
            jsonschema.validate(test_config, config.test_config_schema)
        except jsonschema.ValidationError as e:
            raise exceptions.InvalidConfigException(message=e.message)

    def _write_temporary_config(self, config, config_path):
        cp = ConfigParser.RawConfigParser()
        for section in config.iterkeys():
            cp.add_section(section)
            for option in config[section].iterkeys():
                value = config[section][option]
                cp.set(section, option, value)
        with open(config_path, 'w') as f:
            cp.write(f)
        # Check for test names
        for test_type in ['verify', 'benchmark']:
            if (test_type not in test_config or
                    'tests_to_run' not in test_config[test_type]):
                continue
            for test in test_config[test_type]['tests_to_run']:
                if test not in tests.tests[test_type]:
                    raise exceptions.NoSuchTestException(test_name=test)

    def _format_test_config(self, test_config):
        """Returns a formatted copy of the given valid test config so that
        it can be used during verification and benchmarking tests.

        :param test_config: Dictionary in the same format as for the __init__
                            method.

        :returns: Dictionary
        """
        formatted_test_config = copy.deepcopy(test_config)
        # NOTE(msdubov): if 'verify' or 'benchmark' tests are not specified,
        #                run them all by default.
        if ('verify' not in formatted_test_config or
                'tests_to_run' not in formatted_test_config['verify']):
            formatted_test_config['verify'] = {
                'tests_to_run': tests.verification_tests.keys()
            }
        if ('benchmark' not in formatted_test_config or
                'tests_to_run' not in formatted_test_config['benchmark']):
            tests_to_run = dict((test_name, [{}])
                                for test_name in tests.benchmark_tests.keys())
            formatted_test_config['benchmark'] = {
                'tests_to_run': tests_to_run
            }
        return formatted_test_config

    def _delete_temporary_config(self, config_path):
        os.remove(config_path)

    def _random_file_path(self):
    def _generate_temporary_file_path(self):
        file_name = ''.join(random.choice(string.letters) for i in xrange(16))
        file_path = 'rally/benchmark/temp/'
        return os.path.abspath(file_path + file_name)

    def __enter__(self):
        self._write_temporary_config(self.cloud_config, self.cloud_config_path)
        with open(self.cloud_config_path, 'w') as f:
            self.cloud_config.write(f)
        with open(self.test_config_path, 'w') as f:
            self.test_config.write(f)

    def __exit__(self, type, value, traceback):
        self._delete_temporary_config(self.cloud_config_path)
        os.remove(self.cloud_config_path)
        os.remove(self.test_config_path)

    def bind(self, cloud_config):
        """Binds an existing deployment configuration to the test engine.
@@ -109,23 +142,17 @@ class TestEngine(object):
                             passed as a two-level dictionary: the top-level
                             keys should be section names while the keys on
                             the second level should represent option names.
                             E.g., {
                                 'identity': {
                                     'admin_name': 'admin',
                                     'admin_password': 'admin',
                                     ...
                                 },
                                 'compute': {
                                     'controller_nodes': 'localhost',
                                     ...
                                 },
                                 ...
                             }
                             E.g., see the default cloud configuration in the
                             rally.benchmark.config.CloudConfigManager class.

        :returns: self (the method should be called in a 'with' statement)
        """
        self.cloud_config = cloud_config
        self.cloud_config_path = self._random_file_path()
        self.cloud_config = config.CloudConfigManager()
        self.cloud_config.read_from_dict(cloud_config)

        self.cloud_config_path = self._generate_temporary_file_path()
        self.test_config_path = self._generate_temporary_file_path()

        return self

    def verify(self):
@@ -134,21 +161,24 @@ class TestEngine(object):
        :raises: VerificationException if some of the verification tests failed
        """
        tester = utils.Tester(self.cloud_config_path)
        verification_tests = [tests.verification_tests[test_name]
                              for test_name in self.test_config['verify']]
        tests_to_run = self.test_config.to_dict()['verify']['tests_to_run']
        verification_tests = dict((test, tests.verification_tests[test])
                                  for test in tests_to_run)
        for test_results in tester.run_all(verification_tests):
            for result in test_results.itervalues():
                if result['status'] != 0:
                    raise exceptions.VerificationException(
                        test_message=result['msg'])
                    raise exceptions.DeploymentVerificationException(
                        test_message=result['msg'])

    def benchmark(self):
        """Runs the benchmarks according to the test configuration
        the test engine was initialized with.
        """
        raise NotImplementedError()

    def process_results(self):
        """Processes benchmarking results using Zipkin & Tomograph."""
        # TODO(msdubov): process results.
        raise NotImplementedError()

        :returns: List of dicts, each dict containing the results of all the
                  corresponding benchmark test launches
        """
        tester = utils.Tester(self.cloud_config_path, self.test_config_path)
        tests_to_run = self.test_config.to_dict()['benchmark']['tests_to_run']
        benchmark_tests = dict((test, tests.benchmark_tests[test])
                               for test in tests_to_run)
        return tester.run_all(benchmark_tests)
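Tying the engine pieces together, a hedged end-to-end sketch (not from the patch): a config that omits 'verify' passes validation, gets padded with defaults by _format_test_config(), and is stored as a TestConfigManager; the result loop mirrors the 'status'/'msg' dicts documented for Tester.run().

from rally.benchmark import engine

# 'verify' is deliberately omitted; _format_test_config() fills it with every
# test name from rally.benchmark.tests.verification_tests.
test_engine = engine.TestEngine({'benchmark': {'tests_to_run': {}}})
print(test_engine.test_config.to_dict()['verify']['tests_to_run'])
# e.g. ['sanity', 'smoke', 'snapshot'] (order depends on the mapping)

cloud_config = {'compute': {'controller_nodes': 'localhost'}}
with test_engine.bind(cloud_config):
    # benchmark() returns one dict per test launch, keyed by process name.
    for launch_results in test_engine.benchmark():
        for proc_name, run in launch_results.iteritems():
            if run['status'] != 0:
                print('%s failed: %s' % (proc_name, run['msg']))
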
0 rally/benchmark/test_scenarios/__init__.py (new file)
33 rally/benchmark/test_scenarios/fake.py (new file)
@@ -0,0 +1,33 @@

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import unittest2

from rally.benchmark import utils


class FakeTest(unittest2.TestCase):

    # NOTE(msdubov): The class is introduced for testing purposes exclusively;
    #                it's been placed here because the TestEngine looks up the
    #                tests under the 'rally' directory.

    @utils.parameterize_from_test_config('fake')
    def test_parameterize(self, arg=1):
        # NOTE(msdubov): The method is called just from one single test case
        #                with config specifying the arg value changed to 5.
        self.assertEqual(arg, 5)
40 rally/benchmark/test_scenarios/utils.py (new file)
@@ -0,0 +1,40 @@

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

import fuel_health.manager
import fuel_health.test

from rally.benchmark import config


class ParameterizableTestCase(fuel_health.test.TestCase):

    manager_class = fuel_health.manager.Manager

    def setUp(self):
        super(ParameterizableTestCase, self).setUp()
        # NOTE(msdubov): setUp method parametrization from test configuration;
        #                the passed parameters can then be used in subclasses
        #                via the self._get_param() method.
        test_config = config.TestConfigManager(os.environ['PYTEST_CONFIG'])
        tests_setUp = test_config.to_dict()['benchmark'].get('tests_setUp', {})
        self._params = tests_setUp.get(self.benchmark_name, {})

    def _get_param(self, param_name, default_value=None):
        return self._params.get(param_name, default_value)
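A hypothetical benchmark scenario built on ParameterizableTestCase: the class name and the 'servers_to_boot' parameter mirror the 'nova.server_metadata' example from the TestEngine docstring and are not part of this patch; it assumes PYTEST_CONFIG has been exported by the Tester before the test runs.

from rally.benchmark.test_scenarios import utils as scenario_utils


class ServerMetadataScenario(scenario_utils.ParameterizableTestCase):

    benchmark_name = 'nova.server_metadata'

    def test_boot_configured_amount(self):
        # Picks up {"tests_setUp": {"nova.server_metadata":
        #                           {"servers_to_boot": 10}}} when present,
        # otherwise falls back to the default of 2.
        servers_to_boot = self._get_param('servers_to_boot', 2)
        self.assertTrue(servers_to_boot > 0)
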
@@ -15,6 +15,10 @@

# License for the specific language governing permissions and limitations
# under the License.

# NOTE(msdubov): This file contains the pre-defined mappings from test names
#                to pytest arguments passed while launching these tests. The
#                test names listed here should be used in test configuration
#                files.

verification_tests = {
    'sanity': ['--pyargs', 'fuel_health.tests.sanity'],

@@ -23,3 +27,8 @@ verification_tests = {
    'snapshot': ['--pyargs', 'fuel_health.tests.smoke', '-k',
                 '"test_snapshot"']
}

# TODO(msdubov): Implement an automatic benchmark tests collector.
benchmark_tests = {}

tests = {'verify': verification_tests, 'benchmark': benchmark_tests}
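For illustration (not part of the patch), this is roughly how a mapping entry turns into a pytest invocation; calling pytest.main directly is a simplified stand-in for the multiprocess machinery in rally.benchmark.utils shown next, and it assumes fuel-ostf-tests is installed and OSTF_CONFIG points at a cloud config file.

import pytest

from rally.benchmark import tests

# 'sanity' resolves to the pytest arguments defined in the mapping above...
sanity_args = tests.verification_tests['sanity']
# ...which Tester eventually hands to pytest in a worker process.
status = pytest.main(args=sanity_args)
print('sanity tests exit status: %d' % status)
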
@@ -15,28 +15,97 @@

# License for the specific language governing permissions and limitations
# under the License.

import functools
import multiprocessing
import os
import pytest
import time

import fuel_health.cleanup as fuel_cleanup

from rally.benchmark import config
from rally import utils


def parameterize_from_test_config(benchmark_name):
    """Decorator that configures the test function parameters through the
    test configuration stored in the temporary file (created by TestEngine).

    :param benchmark_name: The benchmark name. The test function settings
                           will be searched in the configuration under the key
                           `benchmark_name`.`function_name`
    """
    def decorator(test_function):
        @functools.wraps(test_function)
        def wrapper(*args, **kwargs):
            test_config = config.TestConfigManager()
            test_config.read(os.environ['PYTEST_CONFIG'])
            current_test_run_index = int(os.environ['PYTEST_RUN_INDEX'])
            tests_to_run = test_config.to_dict()['benchmark']['tests_to_run']
            current_test_runs = tests_to_run['%s.%s' % (benchmark_name,
                                                        test_function.__name__)]
            current_test_config = current_test_runs[current_test_run_index]
            kwargs.update(current_test_config.get('args', {}))
            test_function(*args, **kwargs)
        return wrapper
    return decorator
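A standalone sketch (not part of the patch) of the environment contract this decorator relies on: the test config is written to a file and PYTEST_CONFIG / PYTEST_RUN_INDEX are exported, which is exactly what TestEngine and Tester do; the 'demo.show_args' entry is made up, and importing rally.benchmark.utils assumes its pytest and fuel-ostf-tests dependencies are installed.

import os
import tempfile

from rally.benchmark import config
from rally.benchmark import utils

# Write a throw-away test config the way TestEngine would.
test_config = config.TestConfigManager(
    {'benchmark': {'tests_to_run':
                   {'demo.show_args': [{'args': {'amount': 3}}]}}})
fd, path = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
    test_config.write(f)
os.environ['PYTEST_CONFIG'] = path
os.environ['PYTEST_RUN_INDEX'] = '0'


@utils.parameterize_from_test_config('demo')
def show_args(amount=1):
    print(amount)          # prints 3, taken from the config written above

show_args()
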
class Tester(object):

    def __init__(self, config_path):
        self._config_path = os.path.abspath(config_path)
    def __init__(self, cloud_config_path, test_config_path=None):
        self._cloud_config_path = os.path.abspath(cloud_config_path)
        if test_config_path:
            self._test_config_manager = config.TestConfigManager(
                test_config_path)
            os.environ['PYTEST_CONFIG'] = os.path.abspath(test_config_path)
        else:
            self._test_config_manager = None
        self._q = multiprocessing.Queue()

    def run_all(self, tests):
        """Launches all the given tests, trying to parameterize the tests
        using the test configuration.

        :param tests: Dictionary of form {'test_name': [test_args]}

        :returns: List of dicts, each dict containing the results of all
                  the run() method calls for the corresponding test
        """
        # NOTE(msdubov): Benchmark tests can be configured to be run several
        #                times and/or concurrently (using test configuration).
        if self._test_config_manager:
            test_config = self._test_config_manager.to_dict()
            tests_to_run = test_config['benchmark']['tests_to_run']
        else:
            tests_to_run = {}

        res = []
        for test in tests:
            res.append(self.run(test))
        for test_name in tests:
            test_runs = tests_to_run.get(test_name, [{}])
            for i, test_run in enumerate(test_runs):
                times = test_run.get('times', 1)
                concurrent = test_run.get('concurrent', 1)
                os.environ['PYTEST_RUN_INDEX'] = str(i)
                res.append(self.run(tests[test_name],
                                    times=times, concurrent=concurrent))
        return res

    def run(self, test_args, times=1, concurrent=1):
        """Launches a test (specified by pytest args) several times and/or
        concurrently (optional).

        :param test_args: Arguments to be passed to pytest, e.g.
                          ['--pyargs', 'fuel_health.tests.sanity']
        :param times: The number of times the test should be launched
        :param concurrent: The number of concurrent processes to be used while
                           launching the test

        :returns: Dict of dicts (each containing 'status', 'msg' and
                  'proc_name' fields), one dict for a single test run.
                  The keys in the top-level dictionary are the corresponding
                  process names
        """
        res = {}
        processes = {}
        proc_id = 0

@@ -59,7 +128,7 @@ class Tester(object):
                break
            time.sleep(0.5)

        self._cleanup(self._config_path)
        self._cleanup(self._cloud_config_path)
        return res

    def _start_test_process(self, id, test_args):

@@ -71,13 +140,13 @@ class Tester(object):
        return {proc_name: test}

    def _run_test(self, test_args, proc_name):
        os.environ['OSTF_CONFIG'] = self._config_path
        os.environ['OSTF_CONFIG'] = self._cloud_config_path
        with utils.StdOutCapture() as out:
            status = pytest.main(args=test_args)
            msg = filter(lambda line: line and '===' not in line,
                         out.getvalue().split('\n'))
            self._q.put({'msg': msg, 'status': status, 'proc_name': proc_name})

    def _cleanup(self, path):
        os.environ['OSTF_CONFIG'] = path
    def _cleanup(self, cloud_config_path):
        os.environ['OSTF_CONFIG'] = cloud_config_path
        fuel_cleanup.cleanup()
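A direct Tester usage sketch with a hypothetical cloud config path (normally written by TestEngine.bind): run() fans a single pytest target out over several processes and returns one result dict per process; the cleanup step assumes a reachable OpenStack cloud via fuel_health.

from rally.benchmark import utils

tester = utils.Tester('/tmp/cloud_config.ini')
results = tester.run(['--pyargs', 'fuel_health.tests.sanity'],
                     times=4, concurrent=2)
# One entry per spawned process, as described in the run() docstring.
for proc_name, run in results.iteritems():
    print('%s -> status %d' % (proc_name, run['status']))
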
@@ -85,7 +85,7 @@ class ImmutableException(RallyException):


class InvalidConfigException(RallyException):
    msg_fmt = _("This config is invalid")
    msg_fmt = _("This config is invalid: `%(message)s`")


class NoSuchTestException(InvalidConfigException):

@@ -96,7 +96,7 @@ class TestException(RallyException):
    msg_fmt = _("Test failed: %(test_message)s")


class VerificationException(TestException):
class DeploymentVerificationException(TestException):
    msg_fmt = _("Verification test failed: %(test_message)s")
@@ -1,14 +1,15 @@

Babel>=0.9.6
eventlet>=0.9.17
netaddr>=0.7.6
iso8601>=0.1.4
jsonschema>=2.0.0
netaddr>=0.7.6
paramiko>=1.8.0
pbr>=0.5.21,<1.0
psutil
pytest
SQLAlchemy>=0.7.8,<0.7.99
sh
six
pytest

-e git+https://github.com/simpleranchero/fuel-ostf-tests#egg=fuel-ostf-tests
@@ -7,7 +7,6 @@ hacking>=0.6,<0.8

coverage
discover
fixtures>=0.3.12
jsonschema
mock>=1.0
mox>=0.5.3
python-subunit<=0.0.13
@@ -28,13 +28,26 @@ class TestEngineTestCase(test.NoDBTestCase):

    def setUp(self):
        super(TestEngineTestCase, self).setUp()

        self.valid_test_config = {
            'verify': ['sanity', 'smoke'],
            'benchmark': []
            'verify': {
                'tests_to_run': ['sanity', 'smoke']
            },
            'benchmark': {
                'tests_to_run': {}
            }
        }
        self.invalid_test_config = {
            'verify': ['sanity', 'some_not_existing_test'],
            'benchmark': []
        self.invalid_test_config_bad_test_name = {
            'verify': {
                'tests_to_run': ['sanity', 'some_not_existing_test']
            },
            'benchmark': {}
        }
        self.invalid_test_config_bad_key = {
            'verify': {
                'tests_to_run': ['sanity', 'smoke']
            },
            'benchmarck': {}
        }
        self.valid_cloud_config = {
            'identity': {

@@ -45,8 +58,9 @@ class TestEngineTestCase(test.NoDBTestCase):
                'controller_nodes': 'localhost'
            }
        }

        run_success = {
            'proc': {'msg': 'msg', 'status': 0, 'proc_name': 'proc'}
            'proc': {'msg': ['msg'], 'status': 0, 'proc_name': 'proc'}
        }
        self.run_mock = mock.patch('rally.benchmark.utils.Tester.run',
                                   mock.Mock(return_value=run_success))

@@ -63,13 +77,19 @@ class TestEngineTestCase(test.NoDBTestCase):
                self.fail("Unexpected exception in test config" +
                          "verification: %s" % str(e))
        self.assertRaises(exceptions.NoSuchTestException,
                          engine.TestEngine, self.invalid_test_config)
                          engine.TestEngine,
                          self.invalid_test_config_bad_test_name)
        self.assertRaises(exceptions.InvalidConfigException,
                          engine.TestEngine,
                          self.invalid_test_config_bad_key)

    def test_bind(self):
        test_engine = engine.TestEngine(self.valid_test_config)
        with test_engine.bind(self.valid_cloud_config):
            self.assertTrue(os.path.exists(test_engine.cloud_config_path))
            self.assertTrue(os.path.exists(test_engine.test_config_path))
        self.assertFalse(os.path.exists(test_engine.cloud_config_path))
        self.assertFalse(os.path.exists(test_engine.test_config_path))

    def test_verify(self):
        test_engine = engine.TestEngine(self.valid_test_config)

@@ -79,3 +99,8 @@ class TestEngineTestCase(test.NoDBTestCase):
        except Exception as e:
            self.fail("Unexpected exception in TestEngine.verify: %s" %
                      str(e))

    def test_benchmark(self):
        test_engine = engine.TestEngine(self.valid_test_config)
        with test_engine.bind(self.valid_cloud_config):
            test_engine.benchmark()
@@ -20,6 +20,8 @@ import mock

import os

from rally.benchmark import config
from rally.benchmark import engine
from rally.benchmark import tests
from rally.benchmark import utils
from rally import test

@@ -59,8 +61,31 @@ class UtilsTestCase(test.NoDBTestCase):

    def test_running_multiple_tests(self):
        tester = utils.Tester(self.cloud_config_path)
        tests = [['./tests/benchmark/test_utils.py', '-k', 'test_dummy'],
                 ['./tests/benchmark/test_utils.py', '-k', 'test_dummy_2']]
        for test_results in tester.run_all(tests):
        tests_dict = {
            'test1': ['./tests/benchmark/test_utils.py', '-k', 'test_dummy'],
            'test2': ['./tests/benchmark/test_utils.py', '-k', 'test_dummy_2']
        }
        for test_results in tester.run_all(tests_dict):
            for result in test_results.itervalues():
                self.assertEqual(result['status'], 0)

    def test_parameterize_inside_class_from_test_config(self):
        old_benchmark_tests = tests.benchmark_tests.copy()
        tests.benchmark_tests.update({
            'fake.test_parameterize': ['--pyargs',
                                       'rally.benchmark.test_scenarios.fake',
                                       '-k', 'test_parameterize']
        })
        cloud_config = {}
        test_config = {
            'benchmark': {
                'tests_to_run': {
                    'fake.test_parameterize': [{'args': {'arg': 5}}]
                }
            }
        }
        test_engine = engine.TestEngine(test_config)
        with test_engine.bind(cloud_config):
            res = test_engine.benchmark()
        self.assertEqual(res[0].values()[0]['status'], 0)
        tests.benchmark_tests = old_benchmark_tests