Files
rally/rally/benchmark/engine.py
Boris Pavlovic d7635776d7 New task config and verification refactoring
*) Change task config format
   . Split "context" & "runner" sections (sample below)
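
   For example, a config in the new format might look like this (the
   scenario name and values are illustrative; only the "args"/"runner"/
   "context" split and the required "runner.type" field come from the
   config schema):

       {
           "NovaServers.boot_and_delete_server": [
               {
                   "args": {"flavor_id": 1, "image_id": "some-image-uuid"},
                   "runner": {"type": "continuous", "times": 10},
                   "context": {"users": {"tenants": 2}}
               }
           ]
       }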

*) Refactor Verification
   . Move validation to context.base, runner.base and scenario.base
   . Validate whole config fully before starting any of tasks
   . Optimize scenario args validation (create clients only once)
   . Optimize order of validation:
     1) Validate names of benchmarks
     2) Validate all static parameters, e.g. configuration of runner
        and context
     3) If everything is ok in all benchmarks, then start validation
        of scenario args.
   . Store validation result (exception) in task["verification_log"]
   . Remove verification logic from BenchmarkEngine.__exit__
   . Remove scenario args verification results from task["results"]

*) Fix & switch doc/samples/tasks to the new format
   . Switch to new format
   . Add missing task configuration
   . Better formatting
   . json & yaml samples
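
   The YAML samples mirror the JSON ones; e.g. the sample above would
   become (values again illustrative):

       NovaServers.boot_and_delete_server:
         -
           args:
             flavor_id: 1
             image_id: "some-image-uuid"
           runner:
             type: "continuous"
             times: 10
           context:
             users:
               tenants: 2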

*) Refactored unit tests
   . tests.rally.benchmark.test_engine
   . tests.rally.benchmark.context.base
   . tests.orchestrator.test_api.start_task
       covers the validation step as well as the new config format

*) Refactor orchestrator api start task
   . Remove benchmark engine context
   . Call verify explicitly
   . Do not raise any exception in case of a validation error
   . Catch any unexpected exception in start_task and set the
     deployment to an inconsistent state (sketched below)
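
   A rough sketch of the resulting flow (names follow the commit message;
   this is an approximation, not the exact code of the orchestrator api
   module):

       def start_task(deploy_uuid, config):
           deployment = objects.Deployment.get(deploy_uuid)
           task = objects.Task(deployment_uuid=deploy_uuid)
           benchmark_engine = engine.BenchmarkEngine(config, task)

           try:
               benchmark_engine.bind(deployment["endpoints"])
               benchmark_engine.validate()  # explicit verification step
               benchmark_engine.run()
           except exceptions.InvalidTaskException:
               # An invalid user config is a normal case: the failure is
               # already recorded in the task, so nothing is re-raised.
               pass
           except Exception:
               deployment.update_status(
                   consts.DeployStatus.DEPLOY_INCONSISTENT)
               raise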

*) Refactor CLI
   . Properly handle new behaviour of verification
   . Replace the table on task start with a simple message
   . Add HINTs to the task detailed command

*) Add unit test for checking doc samples
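
   A minimal sketch of such a test (class name and layout are
   hypothetical; it only checks sample syntax against CONFIG_SCHEMA,
   since semantic validation needs a live cloud):

       import json
       import os
       import unittest

       import jsonschema
       import yaml

       from rally.benchmark import engine


       class TaskSampleTestCase(unittest.TestCase):
           samples_path = "doc/samples/tasks"

           def test_task_samples_are_valid(self):
               for root, _dirs, files in os.walk(self.samples_path):
                   for name in files:
                       with open(os.path.join(root, name)) as task_file:
                           if name.endswith(".json"):
                               config = json.load(task_file)
                           elif name.endswith(".yaml"):
                               config = yaml.safe_load(task_file)
                           else:
                               continue
                           jsonschema.validate(config, engine.CONFIG_SCHEMA)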

*) Improve benchmark engine logging

blueprint benchmark-new-task-config

Change-Id: I23d3f6b3439fdb44946a7c2491d5a9b3559dc671
2014-03-17 17:19:53 +04:00

# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json
import jsonschema
import traceback

from rally.benchmark.context import base as base_ctx
from rally.benchmark.context import users as users_ctx
from rally.benchmark.runners import base as base_runner
from rally.benchmark.scenarios import base as base_scenario
from rally import consts
from rally import exceptions
from rally.objects import endpoint
from rally.openstack.common.gettextutils import _
from rally.openstack.common import log as logging
from rally import osclients
from rally import utils as rutils


LOG = logging.getLogger(__name__)


CONFIG_SCHEMA = {
    "type": "object",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "patternProperties": {
        ".*": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "args": {
                        "type": "object"
                    },
                    "runner": {
                        "type": "object",
                        "properties": {
                            "type": {"type": "string"}
                        },
                        "required": ["type"]
                    },
                    "context": {
                        "type": "object"
                    }
                },
                "additionalProperties": False
            }
        }
    }
}


class BenchmarkEngine(object):
    """The Benchmark engine class, an instance of which is initialized by the
    Orchestrator with the benchmarks configuration and then is used to execute
    all specified benchmark scenarios.

    .. note::

        Typical usage:
            ...
            benchmark_engine = BenchmarkEngine(config, task)
            # Deploying the cloud...
            # endpoint - is a dict with data on endpoint of deployed cloud
            benchmark_engine.bind(endpoints)
            benchmark_engine.validate()
            benchmark_engine.run()
    """

    def __init__(self, config, task):
        """BenchmarkEngine constructor.

        :param config: The configuration with specified benchmark scenarios
        :param task: The current task which is being performed
        """
        self.config = config
        self.task = task

    @rutils.log_task_wrapper(LOG.info,
                             _("Task validation of scenarios names."))
    def _validate_config_scenarios_name(self, config):
        available = set(base_scenario.Scenario.list_benchmark_scenarios())
        specified = set(config.iterkeys())

        if not specified.issubset(available):
            names = ", ".join(specified - available)
            raise exceptions.NotFoundScenarios(names=names)

    @rutils.log_task_wrapper(LOG.info, _("Task validation of syntax."))
    def _validate_config_syntax(self, config):
        for scenario, values in config.iteritems():
            for pos, kw in enumerate(values):
                try:
                    base_runner.ScenarioRunner.validate(kw.get("runner", {}))
                    base_ctx.Context.validate(kw.get("context", {}))
                except (exceptions.RallyException,
                        jsonschema.ValidationError) as e:
                    raise exceptions.InvalidBenchmarkConfig(name=scenario,
                                                            pos=pos, args=kw,
                                                            reason=e.message)

    def _validate_config_sematic_helper(self, admin, user, name, pos,
                                        kwargs):
        args = {} if not kwargs else kwargs.get("args", {})
        try:
            base_scenario.Scenario.validate(name, args, admin=admin,
                                            users=[user])
        except exceptions.InvalidScenarioArgument as e:
            kw = {"name": name, "pos": pos, "args": args,
                  "reason": e.message}
            raise exceptions.InvalidBenchmarkConfig(**kw)

    @rutils.log_task_wrapper(LOG.info, _("Task validation of semantic."))
    def _validate_config_semantic(self, config):
        # NOTE(boris-42): In future we will have more complex context, because
        #                 we will have pre-created users mode as well.
        context = {
            "task": self.task,
            "admin": {"endpoint": self.admin_endpoint}
        }
        with users_ctx.UserGenerator(context) as ctx:
            ctx.setup()
            admin = osclients.Clients(self.admin_endpoint)
            user = osclients.Clients(context["users"][0]["endpoint"])

            for name, values in config.iteritems():
                for pos, kwargs in enumerate(values):
                    self._validate_config_sematic_helper(admin, user, name,
                                                         pos, kwargs)

    @rutils.log_task_wrapper(LOG.info, _("Task validation."))
    def validate(self):
        """Perform full task configuration validation."""
        self.task.update_status(consts.TaskStatus.VERIFYING)
        try:
            jsonschema.validate(self.config, CONFIG_SCHEMA)
            self._validate_config_scenarios_name(self.config)
            self._validate_config_syntax(self.config)
            self._validate_config_semantic(self.config)
        except Exception as e:
            log = [str(type(e)), str(e), json.dumps(traceback.format_exc())]
            self.task.set_failed(log=log)
            raise exceptions.InvalidTaskException(message=str(e))

    @rutils.log_task_wrapper(LOG.info, _("Benchmarking."))
    def run(self):
        """Runs the benchmarks according to the test configuration
        the benchmark engine was initialized with.

        :returns: List of dicts, each dict containing the results of all the
                  corresponding benchmark test launches
        """
        self.task.update_status(consts.TaskStatus.RUNNING)
        results = {}
        for name in self.config:
            for n, kwargs in enumerate(self.config[name]):
                key = {'name': name, 'pos': n, 'kw': kwargs}
                runner = kwargs.get("runner", {}).get("type", "continuous")
                scenario_runner = base_runner.ScenarioRunner.get_runner(
                    self.task, self.endpoints, runner)
                result = scenario_runner.run(name, kwargs)
                self.task.append_results(key, {"raw": result})
                results[json.dumps(key)] = result
        self.task.update_status(consts.TaskStatus.FINISHED)
        return results

    @rutils.log_task_wrapper(LOG.info, _("Check cloud."))
    def bind(self, endpoints):
        self.endpoints = [endpoint.Endpoint(**endpoint_dict)
                          for endpoint_dict in endpoints]
        # NOTE(msdubov): Passing predefined user endpoints hasn't been
        #                implemented yet, so the scenario runner always gets
        #                a single admin endpoint here.
        self.admin_endpoint = self.endpoints[0]
        self.admin_endpoint.permission = consts.EndpointPermission.ADMIN
        # Try to access cloud via keystone client
        clients = osclients.Clients(self.admin_endpoint)
        clients.verified_keystone()
        return self