Get rid of fuel-ostf-tests

This verification does not work, and I never liked the approach in the first
place, so I am really happy to remove it from the Rally code. Soon we will
use Tempest to verify that the cloud works.

bp remove-fuel-ostf-tests

Change-Id: If941d37fd128a4e86032cf2f973b3b67a29dd638
@@ -1,12 +1,11 @@
 {
-    "verify": [],
-    "benchmark": {
-        "NovaServers.boot_and_delete_server": [
-            {"args": {"flavor_id": 1,
-                      "image_id": "73257560-c59b-4275-a1ec-ab140e5b9979"},
-             "execution": "periodic",
-             "config": {"times": 10, "period": 2,
-                        "tenants": 3, "users_per_tenant": 2}}
-        ]
-    }
+    "NovaServers.boot_and_delete_server": [
+        {
+            "args": {"flavor_id": 1,
+                     "image_id": "73257560-c59b-4275-a1ec-ab140e5b9979"},
+            "execution": "periodic",
+            "config": {"times": 10, "period": 2, "tenants": 3,
+                       "users_per_tenant": 2}
+        }
+    ]
 }
@@ -1,12 +1,11 @@
 {
-    "verify": [],
-    "benchmark": {
-        "NovaServers.boot_and_delete_server": [
-            {"args": {"flavor_id": 1,
-                      "image_id": "73257560-c59b-4275-a1ec-ab140e5b9979"},
-             "execution": "continuous",
-             "config": {"times": 10, "active_users": 2,
-                        "tenants": 3, "users_per_tenant": 2}}
-        ]
-    }
+    "NovaServers.boot_and_delete_server": [
+        {
+            "args": {"flavor_id": 1,
+                     "image_id": "73257560-c59b-4275-a1ec-ab140e5b9979"},
+            "execution": "continuous",
+            "config": {"times": 10, "active_users": 2, "tenants": 3,
+                       "users_per_tenant": 2}
+        }
+    ]
 }
@@ -1,7 +0,0 @@ (file removed)
-{
-    "verify": [
-        "sanity",
-        "smoke"
-    ],
-    "benchmark": {}
-}
@@ -1,11 +1,10 @@
 {
-    "verify": [],
-    "benchmark": {
-        "NovaServers.boot_and_bounce_server": [
-            {"args": {"flavor_id": 1,
-                      "image_id": "3fa4482f-677a-4488-adaf-c48befac5e5a",
-                      "actions": [{"rescue_unrescue": 1}] },
-             "config": {"times": 1, "active_users": 1}}
-        ]
-    }
+    "NovaServers.boot_and_bounce_server": [
+        {
+            "args": {"flavor_id": 1,
+                     "image_id": "3fa4482f-677a-4488-adaf-c48befac5e5a",
+                     "actions": [{"rescue_unrescue": 1}]},
+            "config": {"times": 1, "active_users": 1}
+        }
+    ]
 }
@@ -1,11 +1,11 @@
 {
-    "verify": [],
-    "benchmark": {
-        "NovaServers.boot_and_bounce_server": [
-            {"args": {"flavor_id": 2, "image_id": "539ccae5-5982-4868-b176-23c41ff1195e",
-                      "actions": [{"soft_reboot": 4}]},
-             "execution": "continuous",
-             "config": {"times": 3, "active_users": 2}}
-        ]
-    }
+    "NovaServers.boot_and_bounce_server": [
+        {
+            "args": {"flavor_id": 2,
+                     "image_id": "539ccae5-5982-4868-b176-23c41ff1195e",
+                     "actions": [{"soft_reboot": 4}]},
+            "execution": "continuous",
+            "config": {"times": 3, "active_users": 2}
+        }
+    ]
 }
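Note: with the "verify" and "benchmark" wrappers gone, a task file is nothing
more than a mapping from benchmark scenario name to a list of runs. A minimal
sketch (not part of the commit; the file name is a stand-in) of loading and
walking such a file:

    import json

    # Load one of the sample task files shown above.
    with open("boot-and-delete.json") as f:
        task = json.load(f)

    # Top-level keys are scenario names; values are lists of run definitions.
    for scenario_name, runs in task.items():
        for position, run in enumerate(runs):
            print(scenario_name, position, run.get("execution"), run["config"])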
@@ -1,165 +0,0 @@ (file removed)
-# Copyright 2013: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import ConfigParser
-import os
-
-
-class CloudConfigManager(ConfigParser.RawConfigParser, object):
-
-    _DEFAULT_CLOUD_CONFIG = {
-        'identity': {
-            'url': 'http://localhost/',
-            'uri': 'http://localhost:5000/v2.0/',
-            'admin_username': 'admin',
-            'admin_password': 'admin',
-            'admin_tenant_name': 'service',
-            'region': 'RegionOne',
-            'strategy': 'keystone',
-            'catalog_type': 'identity',
-            'disable_ssl_certificate_validation': False
-        },
-        'compute': {
-            'controller_nodes': 'localhost',
-            'controller_nodes_name': 'localhost',
-            'controller_node_ssh_user': 'root',
-            'controller_node_ssh_password': 'r00tme',
-            'compute_nodes': 'localhost',
-            'path_to_private_key': os.path.expanduser('~/.ssh/id_rsa'),
-            'image_name': 'cirros-0.3.1-x86_64-uec',
-            'image_ssh_user': 'cirros',
-            'image_alt_ssh_user': 'cirros',
-            'flavor_ref': 1,
-            'allow_tenant_isolation': True,
-            'ssh_timeout': 300,
-            'ssh_channel_timeout': 60,
-            'build_interval': 3,
-            'build_timeout': 300,
-            'enabled_services': 'nova-cert, nova-consoleauth, ' +
-                                'nova-scheduler, nova-conductor, ' +
-                                'nova-compute, nova-network, ' +
-                                'nova-compute, nova-network',
-            'run_ssh': False,
-            'catalog_type': 'compute',
-            'allow_tenant_reuse': True,
-            'create_image_enabled': True
-        },
-        'network': {
-            'api_version': '2.0',
-            'tenant_network_mask_bits': 28,
-            'tenant_network_cidr': '10.0.0.0/24',
-            'tenant_networks_reachable': True,
-            'neutron_available': False,
-            'catalog_type': 'network'
-        },
-        'image': {
-            'api_version': '1',
-            'http_image': 'http://download.cirros-cloud.net/0.3.1/' +
-                          'cirros-0.3.1-x86_64-uec.tar.gz',
-            'catalog_type': 'image'
-        },
-        'volume': {
-            'multi_backend_enabled': 'false',
-            'backend1_name': 'BACKEND_1',
-            'backend2_name': 'BACKEND_2',
-            'build_timeout': 300,
-            'build_interval': 3,
-            'catalog_type': 'volume'
-        },
-        'object-storage': {
-            'container_sync_interval': 5,
-            'container_sync_timeout': 120,
-            'catalog_type': 'object-store'
-        }
-    }
-
-    def __init__(self, config=None):
-        """Initializes the cloud config manager with the default values and
-        (if given) with a config.
-
-        :param config: Path to the config file or a two-level dictionary
-                       containing the config contents
-        """
-        super(CloudConfigManager, self).__init__()
-        self.read_from_dict(self._DEFAULT_CLOUD_CONFIG)
-        if config:
-            if isinstance(config, basestring):
-                self.read(config)
-            elif isinstance(config, dict):
-                self.read_from_dict(config)
-
-    def read_from_dict(self, dct, replace=True):
-        """Reads the config from a dictionary.
-
-        :param dct: The config represented as a two-level dictionary: the
-                    top-level keys should be section names while the keys on
-                    the second level should represent option names
-        :param replace: True to replace already existing options while reading
-                        the config; False to keep old values
-        """
-        for section_name, section in dct.iteritems():
-            if not self.has_section(section_name):
-                self.add_section(section_name)
-            for opt in section:
-                if not self.has_option(section_name, opt) or replace:
-                    self.set(section_name, opt, section[opt])
-
-    def to_dict(self):
-        res = {}
-        for section in self.sections():
-            res[section] = dict(self.items(section))
-        return res
-
-
-test_config_schema = {
-    "type": "object",
-    "$schema": "http://json-schema.org/draft-03/schema",
-    "properties": {
-        "verify": {
-            "type": "array"
-        },
-        "benchmark": {
-            "type": "object",
-            "patternProperties": {
-                ".*": {
-                    "type": "array",
-                    "items": {
-                        "type": "object",
-                        "properties": {
-                            "args": {"type": "object"},
-                            "init": {"type": "object"},
-                            "execution": {"enum": ["continuous", "periodic"]},
-                            "config": {
-                                "type": "object",
-                                "properties": {
-                                    "times": {"type": "number"},
-                                    "duration": {"type": "number"},
-                                    "active_users": {"type": "number"},
-                                    "period": {"type": "number"},
-                                    "tenants": {"type": "number"},
-                                    "users_per_tenant": {"type": "number"},
-                                    "timeout": {"type": "number"}
-                                },
-                                "additionalProperties": False
-                            }
-                        },
-                        "additionalProperties": False
-                    }
-                }
-            }
-        }
-    },
-    "additionalProperties": False
-}
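Note: the deleted CloudConfigManager was, per its own docstrings, a thin
wrapper that seeded a RawConfigParser from a two-level dict
(section -> option -> value) and could dump itself back to a dict. A generic
Python 2-era illustration of that pattern (not Rally code):

    import ConfigParser  # Python 2 module, as in the deleted file

    defaults = {'identity': {'admin_username': 'admin'}}
    parser = ConfigParser.RawConfigParser()
    for section, options in defaults.items():
        parser.add_section(section)
        for opt, value in options.items():
            parser.set(section, opt, value)

    print(parser.get('identity', 'admin_username'))  # -> admin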
@@ -13,14 +13,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import copy
 import json
 import jsonschema
-import os
-import tempfile

 from rally.benchmark import base
-from rally.benchmark import config
 from rally.benchmark import utils
 from rally import consts
 from rally import exceptions
@@ -32,90 +28,83 @@ from rally import utils as rutils
 LOG = logging.getLogger(__name__)


+CONFIG_SCHEMA = {
+    "type": "object",
+    "$schema": "http://json-schema.org/draft-03/schema",
+    "patternProperties": {
+        ".*": {
+            "type": "array",
+            "items": {
+                "type": "object",
+                "properties": {
+                    "args": {"type": "object"},
+                    "init": {"type": "object"},
+                    "execution": {"enum": ["continuous", "periodic"]},
+                    "config": {
+                        "type": "object",
+                        "properties": {
+                            "times": {"type": "integer"},
+                            "duration": {"type": "number"},
+                            "active_users": {"type": "integer"},
+                            "period": {"type": "number"},
+                            "tenants": {"type": "integer"},
+                            "users_per_tenant": {"type": "integer"},
+                            "timeout": {"type": "number"}
+                        },
+                        "additionalProperties": False
+                    }
+                },
+                "additionalProperties": False
+            }
+        }
+    }
+}
+
+
 class TestEngine(object):
     """The test engine class, an instance of which is initialized by the
-    Orchestrator with the test configuration and then is used to launch OSTF
-    tests and to benchmark the deployment.
+    Orchestrator with the benchmarks configuration and then is used to execute
+    all specified benchmark scenarios.

     .. note::

         Typical usage:
             ...
-            test = TestEngine(test_config)
+            tester = TestEngine(config, task)
             # Deploying the cloud...
-            with test.bind(cloud_config):
-                test.verify()
-                test.benchmark()
+            # cloud_endpoints - contains endpoints of deployed cloud
+            with tester.bind(cloud_endpoints):
+                tester.run()
     """

-    def __init__(self, test_config, task):
+    def __init__(self, config, task):
         """TestEngine constructor.
-
-        :param test_config: Dictionary of form {
-                        "verify": ["sanity", "smoke"]
-                        "benchmark": {
-                            "NovaServers.boot_and_delete_server": [
-                                {"args": {"flavor_id": <flavor_id>,
-                                          "image_id": "<image_id>"},
-                                 "execution": "continuous",
-                                 "config": {"times": 1, "active_users": 1}},
-                                {"args": {"flavor_id": <flavor_id>,
-                                          "image_id": "<image_id>"},
-                                 "execution": "continuous",
-                                 "config": {"times": 4, "active_users": 2}}
-                            ]
-                        }
-                    }
+        :param config: The configuration with specified benchmark scenarios
         :param task: The current task which is being performed
         """
+        self.config = config
         self.task = task
+        self._validate_config()

-        # NOTE(msdubov): self.verification_tests is a dict since it has
-        #                to contain pytest running args, while
-        #                self.benchmark_scenarios is just a list of names.
-        self.verification_tests = utils.Verifier.list_verification_tests()
-        self.benchmark_scenarios = base.Scenario.list_benchmark_scenarios()
-
-        self._validate_test_config(test_config)
-        test_config = self._format_test_config(test_config)
-        self.test_config = test_config
-
-    @rutils.log_task_wrapper(LOG.info,
-                             _("Benchmark & Verification configs validation."))
-    def _validate_test_config(self, test_config):
-        """Checks whether the given test config is valid and can be used during
-        verification and benchmarking tests.
-
-        :param test_config: Dictionary in the same format as for the __init__
-                            method.
-
-        :raises: Exception if the test config is not valid
-        """
+    @rutils.log_task_wrapper(LOG.info, _("Benchmark configs validation."))
+    def _validate_config(self):
         task_uuid = self.task['uuid']
         # Perform schema validation
         try:
-            jsonschema.validate(test_config, config.test_config_schema)
+            jsonschema.validate(self.config, CONFIG_SCHEMA)
         except jsonschema.ValidationError as e:
             LOG.exception(_('Task %s: Error: %s') % (task_uuid, e.message))
             raise exceptions.InvalidConfigException(message=e.message)

-        # Check for verification test names
-        for test in test_config['verify']:
-            if test not in self.verification_tests:
-                LOG.exception(_('Task %s: Error: the specified '
-                                'verification test does not exist: %s') %
-                              (task_uuid, test))
-                raise exceptions.NoSuchVerificationTest(test_name=test)
         # Check for benchmark scenario names
-        benchmark_scenarios_set = set(self.benchmark_scenarios)
-        for scenario in test_config['benchmark']:
-            if scenario not in benchmark_scenarios_set:
+        available_scenarios = set(base.Scenario.list_benchmark_scenarios())
+        for scenario in self.config:
+            if scenario not in available_scenarios:
                 LOG.exception(_('Task %s: Error: the specified '
                                 'benchmark scenario does not exist: %s') %
                               (task_uuid, scenario))
                 raise exceptions.NoSuchScenario(name=scenario)
             # Check for conflicting config parameters
-            for run in test_config['benchmark'][scenario]:
+            for run in self.config[scenario]:
                 if 'times' in run['config'] and 'duration' in run['config']:
                     message = _("'times' and 'duration' cannot be set "
                                 "simultaneously for one continuous "
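Note: validation of the new flat config is plain jsonschema. A self-contained
sketch (the schema here is trimmed to a few properties and is not the exact
CONFIG_SCHEMA above):

    import jsonschema

    SCHEMA = {
        "type": "object",
        "patternProperties": {
            ".*": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "args": {"type": "object"},
                        "execution": {"enum": ["continuous", "periodic"]},
                        "config": {"type": "object"}
                    },
                    "additionalProperties": False
                }
            }
        }
    }

    task = {
        "NovaServers.boot_and_delete_server": [
            {"args": {"flavor_id": 1, "image_id": "img"},
             "execution": "continuous",
             "config": {"times": 10, "active_users": 2}}
        ]
    }

    jsonschema.validate(task, SCHEMA)  # raises ValidationError if malformed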
@@ -131,80 +120,7 @@ class TestEngine(object):
                                 message))
                     raise exceptions.InvalidConfigException(message=message)

-    @rutils.log_task_wrapper(LOG.debug, _("Test config formatting."))
-    def _format_test_config(self, test_config):
-        """Returns a formatted copy of the given valid test config so that
-        it can be used during verification and benchmarking tests.
-
-        :param test_config: Dictionary in the same format as for the __init__
-                            method.
-
-        :returns: Dictionary
-        """
-        formatted_test_config = copy.deepcopy(test_config)
-        # NOTE(msdubov): if 'verify' is not specified, just run all
-        #                verification tests.
-        if 'verify' not in formatted_test_config:
-            formatted_test_config['verify'] = self.verification_tests.keys()
-        return formatted_test_config
-
-    @rutils.log_task_wrapper(LOG.debug,
-                             _("Verification configs writing into temp file."))
-    def __enter__(self):
-        with os.fdopen(self.cloud_config_fd, 'w') as f:
-            self.cloud_config.write(f)
-
-    @rutils.log_task_wrapper(LOG.debug, _("Deleting the temp verification "
-                                          "config file & Finishing the task."))
-    def __exit__(self, exc_type, exc_value, exc_traceback):
-        os.remove(self.cloud_config_path)
-        if exc_type is not None:
-            self.task.update_status(consts.TaskStatus.FAILED)
-        else:
-            self.task.update_status(consts.TaskStatus.FINISHED)
-
-    @rutils.log_task_wrapper(LOG.info, _('OS cloud binding to Rally.'))
-    def bind(self, cloud_config):
-        """Binds an existing deployment configuration to the test engine.
-
-        :param cloud_config: The deployment configuration, which sould be
-                             passed as a two-level dictionary: the top-level
-                             keys should be section names while the keys on
-                             the second level should represent option names.
-                             E.g., see the default cloud configuration in the
-                             rally.benchmark.config.CloudConfigManager class.
-
-        :returns: self (the method should be called in a 'with' statement)
-        """
-        self.cloud_config = config.CloudConfigManager()
-        self.cloud_config.read_from_dict(cloud_config)
-
-        self.cloud_config_fd, self.cloud_config_path = tempfile.mkstemp(
-            suffix='rallycfg', text=True)
-        return self
-
-    @rutils.log_task_wrapper(LOG.info, _('OpenStack cloud verification.'))
-    def verify(self):
-        """Runs OSTF tests to verify the current cloud deployment.
-
-        :raises: VerificationException if some of the verification tests failed
-        """
-        self.task.update_status(consts.TaskStatus.TEST_TOOL_VERIFY_OPENSTACK)
-        verifier = utils.Verifier(self.task, self.cloud_config_path)
-        tests_to_run = self.test_config['verify']
-        verification_tests = dict((test, self.verification_tests[test])
-                                  for test in tests_to_run)
-        test_run_results = verifier.run_all(verification_tests)
-        self.task.update_verification_log(json.dumps(test_run_results))
-        for result in test_run_results:
-            if result['status'] != 0:
-                params = {'task': self.task['uuid'], 'err': result['msg']}
-                LOG.exception(_('Task %(task)s: One of verification tests '
-                                'failed: %(err)s') % params)
-                raise exceptions.DeploymentVerificationException(params['err'])
-
-    @rutils.log_task_wrapper(LOG.info, _("Benchmarking."))
-    def benchmark(self):
+    def run(self):
         """Runs the benchmarks according to the test configuration
         the test engine was initialized with.

@@ -212,15 +128,29 @@ class TestEngine(object):
                   corresponding benchmark test launches
         """
         self.task.update_status(consts.TaskStatus.TEST_TOOL_BENCHMARKING)
-        runer = utils.ScenarioRunner(self.task,
-                                     self.cloud_config.to_dict()["identity"])
+        runer = utils.ScenarioRunner(self.task, self.endpoints)

         results = {}
-        scenarios = self.test_config['benchmark']
-        for name in scenarios:
-            for n, kwargs in enumerate(scenarios[name]):
+        for name in self.config:
+            for n, kwargs in enumerate(self.config[name]):
                 key = {'name': name, 'pos': n, 'kw': kwargs}
                 result = runer.run(name, kwargs)
                 self.task.append_results(key, {"raw": result})
                 results[json.dumps(key)] = result
         return results
+
+    def bind(self, endpoints):
+        self.endpoints = endpoints["identity"]
+        # TODO(boris-42): Check cloud endpoints:
+        #                 1) Try to access cloud via keystone client
+        #                 2) Ensure that you are admin
+        return self
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, exc_type, exc_value, exc_traceback):
+        if exc_type is not None:
+            self.task.update_status(consts.TaskStatus.FAILED)
+        else:
+            self.task.update_status(consts.TaskStatus.FINISHED)
@@ -16,15 +16,11 @@
 import collections
 import multiprocessing
 from multiprocessing import pool as multiprocessing_pool
-import os
-import pytest
 import random
 import time
 import traceback
 import uuid

-import fuel_health.cleanup as fuel_cleanup
-
 from rally.benchmark import base
 from rally.benchmark import cleanup_utils
 from rally import exceptions as rally_exceptions
@@ -383,92 +379,3 @@ class ScenarioRunner(object):
             self._delete_temp_tenants_and_users()

         return results
-
-
-def _run_test(test_args, ostf_config, queue):
-
-    os.environ['CUSTOM_FUEL_CONFIG'] = ostf_config
-
-    with utils.StdOutCapture() as out:
-        status = pytest.main(test_args)
-
-    queue.put({'msg': out.getvalue(), 'status': status,
-               'proc_name': test_args[1]})
-
-
-def _run_cleanup(config):
-
-    os.environ['CUSTOM_FUEL_CONFIG'] = config
-    fuel_cleanup.cleanup()
-
-
-class Verifier(object):
-
-    def __init__(self, task, cloud_config_path):
-        self._cloud_config_path = os.path.abspath(cloud_config_path)
-        self.task = task
-        self._q = multiprocessing.Queue()
-
-    @staticmethod
-    def list_verification_tests():
-        verification_tests_dict = {
-            'sanity': ['--pyargs', 'fuel_health.tests.sanity'],
-            'smoke': ['--pyargs', 'fuel_health.tests.smoke', '-k',
-                      'not (test_007 or test_008 or test_009)'],
-            'no_compute_sanity': ['--pyargs', 'fuel_health.tests.sanity',
-                                  '-k', 'not infrastructure'],
-            'no_compute_smoke': ['--pyargs', 'fuel_health.tests.smoke',
-                                 '-k', 'user or flavor']
-        }
-        return verification_tests_dict
-
-    def run_all(self, tests):
-        """Launches all the given tests, trying to parameterize the tests
-        using the test configuration.
-
-        :param tests: Dictionary of form {'test_name': [test_args]}
-
-        :returns: List of dicts, each dict containing the results of all
-                  the run() method calls for the corresponding test
-        """
-        task_uuid = self.task['uuid']
-        res = []
-        for test_name in tests:
-            res.append(self.run(tests[test_name]))
-            LOG.debug(_('Task %s: Completed test `%s`.') %
-                      (task_uuid, test_name))
-        return res
-
-    def run(self, test_args):
-        """Launches a test (specified by pytest args).
-
-        :param test_args: Arguments to be passed to pytest, e.g.
-                          ['--pyargs', 'fuel_health.tests.sanity']
-
-        :returns: Dict containing 'status', 'msg' and 'proc_name' fields
-        """
-        task_uuid = self.task['uuid']
-        LOG.debug(_('Task %s: Running test: creating multiprocessing queue') %
-                  task_uuid)
-
-        test = multiprocessing.Process(target=_run_test,
-                                       args=(test_args,
-                                             self._cloud_config_path, self._q))
-        test.start()
-        test.join()
-        result = self._q.get()
-        if result['status'] and 'Timeout' in result['msg']:
-            LOG.debug(_('Task %s: Test %s timed out.') %
-                      (task_uuid, result['proc_name']))
-        else:
-            LOG.debug(_('Task %s: Process %s returned.') %
-                      (task_uuid, result['proc_name']))
-        self._cleanup()
-        return result
-
-    def _cleanup(self):
-        cleanup = multiprocessing.Process(target=_run_cleanup,
-                                          args=(self._cloud_config_path,))
-        cleanup.start()
-        cleanup.join()
-        return
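Note: the removed Verifier ran each fuel_health suite through pytest.main()
in a child process and collected the outcome over a multiprocessing.Queue. A
generic illustration of that pattern (not Rally code; the worker is a
stand-in for the pytest call):

    import multiprocessing

    def _work(args, queue):
        # Stand-in for pytest.main(args) running in the child process.
        queue.put({'status': 0, 'msg': 'ran %s' % args, 'proc_name': args[1]})

    if __name__ == '__main__':
        q = multiprocessing.Queue()
        p = multiprocessing.Process(target=_work,
                                    args=(['--pyargs', 'pkg.tests'], q))
        p.start()
        p.join()
        print(q.get())  # {'status': 0, 'msg': ..., 'proc_name': 'pkg.tests'}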
@@ -94,10 +94,6 @@ class TestException(RallyException):
     msg_fmt = _("Test failed: %(test_message)s")


-class DeploymentVerificationException(TestException):
-    msg_fmt = _("Verification test failed: %(test_message)s")
-
-
 class NotFoundException(RallyException):
     msg_fmt = _("Not found.")

|
|||||||
msg_fmt = _("There is no vm provider with name `%(vm_provider_name)s`.")
|
msg_fmt = _("There is no vm provider with name `%(vm_provider_name)s`.")
|
||||||
|
|
||||||
|
|
||||||
class NoSuchVerificationTest(NotFoundException):
|
|
||||||
msg_fmt = _("No such verification test: `%(test_name)s`.")
|
|
||||||
|
|
||||||
|
|
||||||
class NoSuchScenario(NotFoundException):
|
class NoSuchScenario(NotFoundException):
|
||||||
msg_fmt = _("There is no benchmark scenario with name `%(name)s`.")
|
msg_fmt = _("There is no benchmark scenario with name `%(name)s`.")
|
||||||
|
|
||||||
|
@@ -68,8 +68,8 @@ def recreate_deploy(deploy_uuid):
 def start_task(deploy_uuid, config):
     """Start a task.

-    A task is performed in two stages: a verification of a deployment
-    and a benchmark.
+    Task is a list of benchmarks that will be called one by one, results
+    of execution will be stored in DB.

     :param deploy_uuid: UUID of the deployment
     :param config: a dict with a task configuration
@@ -83,10 +83,7 @@ def start_task(deploy_uuid, config):
     endpoint = deployment['endpoint']
     with deployer:
         with tester.bind(endpoint):
-            # TODO(akscram): The verifications should be a part of
-            #                deployment.
-            tester.verify()
-            tester.benchmark()
+            tester.run()


 def abort_task(task_uuid):
@@ -8,13 +8,11 @@ oslo.config>=1.2.0
 paramiko>=1.8.0
 pbr>=0.5.21,<1.0
 PrettyTable>=0.6,<0.8
-psutil
-pytest
-pytest-timeout
+python-glanceclient>=0.9.0
+python-keystoneclient>=0.4.1
+python-novaclient>=2.15.0
+python-neutronclient>=2.3.0,<3
+python-cinderclient>=1.0.6
 SQLAlchemy>=0.7.8,<0.7.99
 sh
 six
-
--e git+https://github.com/simpleranchero/fuel-ostf-tests#egg=fuel-ostf-tests
@@ -1,45 +0,0 @@ (file removed)
-# Copyright 2013: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Tests for config managers."""
-from rally.benchmark import config
-from rally import test
-
-
-class CloudConfigManagerTestCase(test.TestCase):
-
-    def setUp(self):
-        super(CloudConfigManagerTestCase, self).setUp()
-        self.manager = config.CloudConfigManager()
-
-    def test_defaults(self):
-        self.assertTrue(self.manager.has_section('identity'))
-        self.assertTrue(self.manager.has_section('compute'))
-        # TODO(msdubov): Don't know exactly which sections
-        #                should always be there
-
-    def test_to_dict(self):
-        self.manager.add_section('dummy_section')
-        self.manager.set('dummy_section', 'dummy_option', 'dummy_value')
-        dct = self.manager.to_dict()
-        self.assertTrue('dummy_section' in dct)
-        self.assertEquals(dct['dummy_section']['dummy_option'], 'dummy_value')
-
-    def test_read_from_dict(self):
-        dct = {'dummy_section': {'dummy_option': 'dummy_value'}}
-        self.manager.read_from_dict(dct)
-        self.assertTrue(self.manager.has_section('dummy_section'))
-        self.assertEquals(self.manager.get('dummy_section', 'dummy_option'),
-                          'dummy_value')
@@ -14,9 +14,8 @@
 # under the License.

 """Tests for the Test engine."""
-import json
+
 import mock
-import os

 from rally.benchmark import engine
 from rally import consts
@@ -31,83 +30,59 @@ class TestEngineTestCase(test.TestCase):
         super(TestEngineTestCase, self).setUp()

         self.valid_test_config_continuous_times = {
-            'verify': ['sanity', 'smoke'],
-            'benchmark': {
-                'NovaServers.boot_and_delete_server': [
-                    {'args': {'flavor_id': 1, 'image_id': 'img'},
-                     'execution': 'continuous',
-                     'config': {'times': 10, 'active_users': 2,
-                                'tenants': 3, 'users_per_tenant': 2}}
-                ]
-            }
+            'NovaServers.boot_and_delete_server': [
+                {'args': {'flavor_id': 1, 'image_id': 'img'},
+                 'execution': 'continuous',
+                 'config': {'times': 10, 'active_users': 2,
+                            'tenants': 3, 'users_per_tenant': 2}}
+            ]
         }
         self.valid_test_config_continuous_duration = {
-            'verify': ['sanity', 'smoke'],
-            'benchmark': {
-                'NovaServers.boot_and_delete_server': [
-                    {'args': {'flavor_id': 1, 'image_id': 'img'},
-                     'execution': 'continuous',
-                     'config': {'duration': 4, 'active_users': 2,
-                                'tenants': 3, 'users_per_tenant': 2}}
-                ]
-            }
+            'NovaServers.boot_and_delete_server': [
+                {'args': {'flavor_id': 1, 'image_id': 'img'},
+                 'execution': 'continuous',
+                 'config': {'duration': 4, 'active_users': 2,
+                            'tenants': 3, 'users_per_tenant': 2}}
+            ]
         }
-        self.invalid_test_config_bad_test_name = {
-            'verify': ['sanity', 'some_not_existing_test'],
-            'benchmark': {}
-        }
-        self.invalid_test_config_bad_key = {
-            'verify': ['sanity', 'smoke'],
-            'benchmarck': {}
-        }
         self.invalid_test_config_bad_execution_type = {
-            'verify': ['sanity', 'smoke'],
-            'benchmark': {
-                'NovaServers.boot_and_delete_server': [
-                    {'args': {'flavor_id': 1, 'image_id': 'img'},
-                     'execution': 'contitnuous',
-                     'config': {'times': 10, 'active_users': 2,
-                                'tenants': 3, 'users_per_tenant': 2}}
-                ]
-            }
+            'NovaServers.boot_and_delete_server': [
+                {'args': {'flavor_id': 1, 'image_id': 'img'},
+                 'execution': 'contitnuous',
+                 'config': {'times': 10, 'active_users': 2,
+                            'tenants': 3, 'users_per_tenant': 2}}
+            ]
         }
         self.invalid_test_config_bad_config_parameter = {
-            'verify': ['sanity', 'smoke'],
-            'benchmark': {
-                'NovaServers.boot_and_delete_server': [
-                    {'args': {'flavor_id': 1, 'image_id': 'img'},
-                     'execution': 'continuous',
-                     'config': {'times': 10, 'activeusers': 2,
-                                'tenants': 3, 'users_per_tenant': 2}}
-                ]
-            }
+            'NovaServers.boot_and_delete_server': [
+                {'args': {'flavor_id': 1, 'image_id': 'img'},
+                 'execution': 'continuous',
+                 'config': {'times': 10, 'activeusers': 2,
+                            'tenants': 3, 'users_per_tenant': 2}}
+            ]
         }
         self.invalid_test_config_parameters_conflict = {
-            'verify': ['sanity', 'smoke'],
-            'benchmark': {
-                'NovaServers.boot_and_delete_server': [
-                    {'args': {'flavor_id': 1, 'image_id': 'img'},
-                     'execution': 'continuous',
-                     'config': {'times': 10, 'duration': 100,
-                                'tenants': 3, 'users_per_tenant': 2}}
-                ]
-            }
+            'NovaServers.boot_and_delete_server': [
+                {'args': {'flavor_id': 1, 'image_id': 'img'},
+                 'execution': 'continuous',
+                 'config': {'times': 10, 'duration': 100,
+                            'tenants': 3, 'users_per_tenant': 2}}
+            ]
         }
         self.invalid_test_config_bad_param_for_periodic = {
-            'verify': ['sanity', 'smoke'],
-            'benchmark': {
-                'NovaServers.boot_and_delete_server': [
-                    {'args': {'flavor_id': 1, 'image_id': 'img'},
-                     'execution': 'periodic',
-                     'config': {'times': 10, 'active_users': 3,
-                                'tenants': 3, 'users_per_tenant': 2}}
-                ]
-            }
+            'NovaServers.boot_and_delete_server': [
+                {'args': {'flavor_id': 1, 'image_id': 'img'},
+                 'execution': 'periodic',
+                 'config': {'times': 10, 'active_users': 3,
+                            'tenants': 3, 'users_per_tenant': 2}}
+            ]
         }
         self.valid_cloud_config = {
             'identity': {
-                'admin_name': 'admin',
-                'admin_password': 'admin'
+                'admin_username': 'admin',
+                'admin_password': 'admin',
+                "admin_tenant_name": 'admin',
+                "uri": 'http://127.0.0.1:5000/v2.0'
             },
             'compute': {
                 'controller_nodes': 'localhost'
@@ -125,14 +100,6 @@ class TestEngineTestCase(test.TestCase):
         except Exception as e:
             self.fail("Unexpected exception in test config" +
                       "verification: %s" % str(e))
-        self.assertRaises(exceptions.NoSuchVerificationTest,
-                          engine.TestEngine,
-                          self.invalid_test_config_bad_test_name,
-                          mock.MagicMock())
-        self.assertRaises(exceptions.InvalidConfigException,
-                          engine.TestEngine,
-                          self.invalid_test_config_bad_key,
-                          mock.MagicMock())
         self.assertRaises(exceptions.InvalidConfigException,
                           engine.TestEngine,
                           self.invalid_test_config_bad_execution_type,
@@ -154,56 +121,37 @@ class TestEngineTestCase(test.TestCase):
         tester = engine.TestEngine(self.valid_test_config_continuous_times,
                                    mock.MagicMock())
         with tester.bind(self.valid_cloud_config):
-            self.assertTrue(os.path.exists(tester.cloud_config_path))
-        self.assertFalse(os.path.exists(tester.cloud_config_path))
-
-    @mock.patch('rally.benchmark.utils.Verifier.run')
-    def test_verify(self, mock_run):
-        tester = engine.TestEngine(self.valid_test_config_continuous_times,
-                                   mock.MagicMock())
-        mock_run.return_value = self.run_success
-        with tester.bind(self.valid_cloud_config):
-            try:
-                tester.verify()
-            except Exception as e:
-                self.fail("Exception in TestEngine.verify: %s" % str(e))
+            self.assertEqual(tester.endpoints,
+                             self.valid_cloud_config['identity'])

     @mock.patch("rally.benchmark.utils.ScenarioRunner.run")
     @mock.patch("rally.benchmark.utils.osclients")
-    def test_benchmark(self, mock_osclients, mock_run):
+    def test_run(self, mock_osclients, mock_run):
         mock_osclients.Clients.return_value = fakes.FakeClients()
         tester = engine.TestEngine(self.valid_test_config_continuous_times,
                                    mock.MagicMock())
         with tester.bind(self.valid_cloud_config):
-            tester.benchmark()
+            tester.run()

     @mock.patch("rally.benchmark.utils.ScenarioRunner.run")
-    @mock.patch("rally.benchmark.utils.Verifier.run")
     @mock.patch("rally.benchmark.utils.osclients")
-    def test_task_status_basic_chain(self, mock_osclients, mock_run,
-                                     mock_scenario_run):
+    def test_task_status_basic_chain(self, mock_osclients, mock_scenario_run):
         fake_task = mock.MagicMock()
         tester = engine.TestEngine(self.valid_test_config_continuous_times,
                                    fake_task)
         mock_osclients.Clients.return_value = fakes.FakeClients()
-        mock_run.return_value = self.run_success
         mock_scenario_run.return_value = {}
         with tester.bind(self.valid_cloud_config):
-            tester.verify()
-            tester.benchmark()
+            tester.run()

         benchmark_name = 'NovaServers.boot_and_delete_server'
         benchmark_results = {
             'name': benchmark_name, 'pos': 0,
-            'kw': self.valid_test_config_continuous_times['benchmark']
-                  [benchmark_name][0],
+            'kw': self.valid_test_config_continuous_times[benchmark_name][0],
         }

         s = consts.TaskStatus
         expected = [
-            mock.call.update_status(s.TEST_TOOL_VERIFY_OPENSTACK),
-            mock.call.update_verification_log(json.dumps(
-                [self.run_success, self.run_success])),
             mock.call.update_status(s.TEST_TOOL_BENCHMARKING),
             mock.call.append_results(benchmark_results, {'raw': {}}),
             mock.call.update_status(s.FINISHED)
@@ -214,28 +162,21 @@ class TestEngineTestCase(test.TestCase):
         self.assertEqual(mock_calls, expected)

     @mock.patch("rally.benchmark.utils.ScenarioRunner.run")
-    @mock.patch("rally.benchmark.utils.Verifier.run")
     @mock.patch("rally.benchmark.utils.osclients")
-    def test_task_status_failed(self, mock_osclients, mock_run,
-                                mock_scenario_run):
+    def test_task_status_failed(self, mock_osclients, mock_scenario_run):
         fake_task = mock.MagicMock()
         tester = engine.TestEngine(self.valid_test_config_continuous_times,
                                    fake_task)
         mock_osclients.Clients.return_value = fakes.FakeClients()
-        mock_run.return_value = self.run_success
         mock_scenario_run.side_effect = exceptions.TestException()
         try:
             with tester.bind(self.valid_cloud_config):
-                tester.verify()
-                tester.benchmark()
+                tester.run()
         except exceptions.TestException:
             pass

         s = consts.TaskStatus
         expected = [
-            mock.call.update_status(s.TEST_TOOL_VERIFY_OPENSTACK),
-            mock.call.update_verification_log(json.dumps(
-                [self.run_success, self.run_success])),
             mock.call.update_status(s.TEST_TOOL_BENCHMARKING),
             mock.call.update_status(s.FAILED)
         ]
@@ -243,15 +184,3 @@ class TestEngineTestCase(test.TestCase):
         mock_calls = filter(lambda call: '__getitem__' not in call[0],
                             fake_task.mock_calls)
         self.assertEqual(mock_calls, expected)
-
-    def test_task_status_invalid_config(self):
-        fake_task = mock.MagicMock()
-        try:
-            engine.TestEngine(self.invalid_test_config_bad_key, fake_task)
-        except exceptions.InvalidConfigException:
-            pass
-        expected = []
-        # NOTE(msdubov): Ignore task['uuid'] calls which are used for logging
-        mock_calls = filter(lambda call: '__getitem__' not in call[0],
-                            fake_task.mock_calls)
-        self.assertEqual(mock_calls, expected)
@@ -16,11 +16,7 @@
 """Tests for utils."""
 import mock
 import multiprocessing
-import os
-import tempfile
-import time

-from rally.benchmark import config
 from rally.benchmark import utils
 from rally import test
 from tests import fakes
@@ -435,66 +431,3 @@ class ScenarioTestCase(test.TestCase):
         for image in nova.images.list():
             self.assertEqual("DELETED", image.status,
                              "image not purged: %s" % (image))
-
-
-def test_dummy_1():
-    pass
-
-
-def test_dummy_2():
-    pass
-
-
-def test_dummy_timeout():
-    time.sleep(1.1)
-
-
-class VerifierTestCase(test.TestCase):
-
-    def setUp(self):
-        super(VerifierTestCase, self).setUp()
-        self.cloud_config_manager = config.CloudConfigManager()
-        self.cloud_config_fd, self.cloud_config_path = tempfile.mkstemp(
-            suffix='rallycfg', text=True)
-        with os.fdopen(self.cloud_config_fd, 'w') as f:
-            self.cloud_config_manager.write(f)
-
-    def tearDown(self):
-        if os.path.exists(self.cloud_config_path):
-            os.remove(self.cloud_config_path)
-        super(VerifierTestCase, self).tearDown()
-
-    def test_running_test(self):
-        verifier = utils.Verifier(mock.MagicMock(), self.cloud_config_path)
-        with mock.patch('rally.benchmark.utils.fuel_cleanup.cleanup'):
-            test = ['./tests/benchmark/test_utils.py', '-k', 'test_dummy_1']
-            result = verifier.run(test)
-            self.assertEqual(result['status'], 0)
-
-    def test_running_multiple_tests(self):
-        verifier = utils.Verifier(mock.MagicMock(), self.cloud_config_path)
-        tests_dict = {
-            'test1': ['./tests/benchmark/test_utils.py', '-k', 'test_dummy_1'],
-            'test2': ['./tests/benchmark/test_utils.py', '-k', 'test_dummy_2']
-        }
-        with mock.patch('rally.benchmark.utils.fuel_cleanup.cleanup'):
-            for result in verifier.run_all(tests_dict):
-                self.assertEqual(result['status'], 0)
-
-    def test_verifier_timeout(self):
-        verifier = utils.Verifier(mock.MagicMock(), self.cloud_config_path)
-        test = ['./tests/benchmark/test_utils.py', '-k',
-                'test_dummy_timeout', '--timeout', '1']
-        with mock.patch('rally.benchmark.utils.fuel_cleanup.cleanup'):
-            result = verifier.run(test)
-            self.assertTrue('Timeout' in result['msg'])
-            self.assertTrue(result['status'] != 0)
-
-    def test_verifier_no_timeout(self):
-        verifier = utils.Verifier(mock.MagicMock(), self.cloud_config_path)
-        test = ['./tests/benchmark/test_utils.py', '-k',
-                'test_dummy_timeout', '--timeout', '2']
-        with mock.patch('rally.benchmark.utils.fuel_cleanup.cleanup'):
-            result = verifier.run(test)
-            self.assertTrue('Timeout' not in result['msg'])
-            self.assertTrue(result['status'] == 0)
@@ -40,22 +40,19 @@ FAKE_DEPLOY_CONFIG = {


 FAKE_TASK_CONFIG = {
-    'verify': ['fake_test'],
-    'benchmark': {
-        'FakeScenario.fake': [
-            {
-                'args': {},
-                'execution': 'continuous',
-                'config': {
-                    'timeout': 10000,
-                    'times': 1,
-                    'active_users': 1,
-                    'tenants': 1,
-                    'users_per_tenant': 1,
-                }
-            },
-        ],
-    },
+    'FakeScenario.fake': [
+        {
+            'args': {},
+            'execution': 'continuous',
+            'config': {
+                'timeout': 10000,
+                'times': 1,
+                'active_users': 1,
+                'tenants': 1,
+                'users_per_tenant': 1,
+            }
+        }
+    ]
 }
@@ -87,26 +84,17 @@ class APITestCase(test.TestCase):
     }

     @mock.patch('rally.benchmark.engine.utils.ScenarioRunner')
-    @mock.patch('rally.benchmark.engine.utils.Verifier')
     @mock.patch('rally.objects.deploy.db.deployment_get')
     @mock.patch('rally.objects.task.db.task_result_create')
     @mock.patch('rally.objects.task.db.task_update')
     @mock.patch('rally.objects.task.db.task_create')
     def test_start_task(self, mock_task_create, mock_task_update,
                         mock_task_result_create, mock_deploy_get,
-                        mock_utils_verifier, mock_utils_runner):
+                        mock_utils_runner):
         mock_task_create.return_value = self.task
         mock_task_update.return_value = self.task
         mock_deploy_get.return_value = self.deployment

-        mock_utils_verifier.return_value = mock_verifier = mock.Mock()
-        mock_utils_verifier.list_verification_tests.return_value = {
-            'fake_test': mock.Mock(),
-        }
-        mock_verifier.run_all.return_value = [{
-            'status': 0,
-        }]
-
         mock_utils_runner.return_value = mock_runner = mock.Mock()
         mock_runner.run.return_value = ['fake_result']

@@ -117,10 +105,6 @@ class APITestCase(test.TestCase):
             'deployment_uuid': self.deploy_uuid,
         })
         mock_task_update.assert_has_calls([
-            mock.call(self.task_uuid,
-                      {'status': 'test_tool->verify_openstack'}),
-            mock.call(self.task_uuid,
-                      {'verification_log': '[{"status": 0}]'}),
             mock.call(self.task_uuid,
                       {'status': 'test_tool->benchmarking'})
         ])