Merge "Improve plugins and apply loading for runners and contexts plugins"

This commit is contained in:
Jenkins 2014-10-24 13:09:02 +00:00 committed by Gerrit Code Review
commit f37164082b
17 changed files with 294 additions and 110 deletions

View File

@ -0,0 +1,78 @@
from oslo.config import cfg

from rally.benchmark.context import base
from rally.openstack.common import log as logging
from rally import osclients
from rally import utils

LOG = logging.getLogger(__name__)

CONF = cfg.CONF


class CreateFlavorContext(base.Context):
    """This sample creates a flavor with specified options before the task
    starts and deletes it after the task completes.

    To create your own context plugin, inherit it from
    rally.benchmark.context.base.Context
    """
    __ctx_name__ = "create_flavor"
    __ctx_order__ = 1000
    __ctx_hidden__ = False

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": utils.JSON_SCHEMA,
        "additionalProperties": False,
        "properties": {
            "flavor_name": {
                "type": "string",
            },
            "ram": {
                "type": "integer",
                "minimum": 1
            },
            "vcpus": {
                "type": "integer",
                "minimum": 1
            },
            "disk": {
                "type": "integer",
                "minimum": 1
            }
        }
    }

    def setup(self):
        """This method is called before the task starts."""
        try:
            # use rally.osclients to get the necessary client instance
            nova = osclients.Clients(self.context["admin"]["endpoint"]).nova()
            # and then do what you need with this client
            self.context["flavor"] = nova.flavors.create(
                # context settings are stored in self.config
                name=self.config.get("flavor_name", "rally_test_flavor"),
                ram=self.config.get("ram", 1),
                vcpus=self.config.get("vcpus", 1),
                disk=self.config.get("disk", 1)).to_dict()
            LOG.debug("Flavor with id '%s' created"
                      % self.context["flavor"]["id"])
        except Exception as e:
            msg = "Can't create flavor: %s" % e.message
            if CONF.debug:
                LOG.exception(msg)
            else:
                LOG.warning(msg)

    def cleanup(self):
        """This method is called after the task finishes."""
        try:
            nova = osclients.Clients(self.context["admin"]["endpoint"]).nova()
            nova.flavors.delete(self.context["flavor"]["id"])
            LOG.debug("Flavor '%s' deleted" % self.context["flavor"]["id"])
        except Exception as e:
            msg = "Can't delete flavor: %s" % e.message
            if CONF.debug:
                LOG.exception(msg)
            else:
                LOG.warning(msg)

View File

@ -0,0 +1,23 @@
{
    "Dummy.dummy": [
        {
            "args": {
                "sleep": 0.01
            },
            "runner": {
                "type": "constant",
                "times": 5,
                "concurrency": 1
            },
            "context": {
                "users": {
                    "tenants": 1,
                    "users_per_tenant": 1
                },
                "create_flavor": {
                    "ram": 1024
                }
            }
        }
    ]
}

View File

@ -0,0 +1,12 @@
---
  Dummy.dummy:
    -
      args:
        sleep: 0.01
      runner:
        type: "constant"
        times: 5
        concurrency: 1
      context:
        create_flavor:
          ram: 512

View File

@ -0,0 +1,45 @@
import random

from rally.benchmark.runners import base
from rally import utils


class RandomTimesScenarioRunner(base.ScenarioRunner):
    """Sample of scenario runner plugin.

    Runs a scenario a random number of times, chosen between min_times and
    max_times (inclusive).
    """
    __execution_type__ = "random_times"

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": utils.JSON_SCHEMA,
        "properties": {
            "type": {
                "type": "string"
            },
            "min_times": {
                "type": "integer",
                "minimum": 1
            },
            "max_times": {
                "type": "integer",
                "minimum": 1
            }
        },
        "additionalProperties": True
    }

    def _run_scenario(self, cls, method_name, context, args):
        # runner settings are stored in self.config
        min_times = self.config.get('min_times', 1)
        max_times = self.config.get('max_times', 1)

        # randrange excludes its upper bound, so add 1 to make
        # max_times reachable
        for i in range(random.randrange(min_times, max_times + 1)):
            run_args = (i, cls, method_name,
                        base._get_scenario_context(context), args)
            result = base._run_scenario_once(run_args)
            # use self._send_result for the result of each iteration
            self._send_result(result)

View File

@ -0,0 +1,17 @@
{
    "Dummy.dummy": [
        {
            "runner": {
                "type": "random_times",
                "min_times": 10,
                "max_times": 20
            },
            "context": {
                "users": {
                    "tenants": 1,
                    "users_per_tenant": 1
                }
            }
        }
    ]
}

View File

@ -0,0 +1,13 @@
---
  Dummy.dummy:
    -
      args:
        sleep: 2
      runner:
        type: "random_times"
        min_times: 10
        max_times: 20
      context:
        users:
          tenants: 1
          users_per_tenant: 1

View File

@ -0,0 +1,23 @@
from rally.benchmark.scenarios import base


class ScenarioPlugin(base.Scenario):
    """Sample plugin which lists flavors."""

    @base.atomic_action_timer("list_flavors")
    def _list_flavors(self):
        """Sample of client usage: list flavors.

        You can use self.context, self.admin_clients and self.clients,
        which are initialized on scenario instance creation.
        """
        self.clients("nova").flavors.list()

    @base.atomic_action_timer("list_flavors_as_admin")
    def _list_flavors_as_admin(self):
        """The same using admin clients."""
        self.admin_clients("nova").flavors.list()

    @base.scenario()
    def list_flavors(self):
        """List flavors as both a regular user and an admin."""
        self._list_flavors()
        self._list_flavors_as_admin()

View File

@ -0,0 +1,15 @@
{
    "ScenarioPlugin.list_flavors": [
        {
            "runner": {
                "type": "serial",
                "times": 5
            },
            "context": {
                "create_flavor": {
                    "ram": 512
                }
            }
        }
    ]
}

View File

@ -0,0 +1,10 @@
---
  ScenarioPlugin.list_flavors:
    -
      runner:
        type: "serial"
        times: 5
      context:
        users:
          tenants: 1
          users_per_tenant: 1

View File

@ -0,0 +1,11 @@
#!/bin/bash

samples_unpacked_dir="$(dirname "${BASH_SOURCE[0]}")"
dirs=( $(find "$samples_unpacked_dir" -maxdepth 1 -type d -printf '%P\n') )
samples=~/.rally/plugins/samples
mkdir -p "$samples"

for dir in "${dirs[@]}"; do
    cp -r "$samples_unpacked_dir/$dir" "$samples"
    printf "\nTo test the $dir plugin, run the following command:\n"
    printf "rally task start --task $samples/$dir/test_$dir.yaml\n"
    printf "or\nrally task start --task $samples/$dir/test_$dir.json\n"
done

View File

@ -30,7 +30,7 @@ The concept of **benchmark scenarios** is a central one in Rally. Benchmark scen
User's view
^^^^^^^^^^^

From the user's point of view, Rally launches different benchmark scenarios while performing some benchmark task. **Benchmark task** is essentially a set of benchmark scenarios run against some OpenStack deployment in a specific (and customizable) manner by the CLI command:

**rally task start --task=<task_config.json>**
@ -74,10 +74,11 @@ In this example, the task configuration file specifies two benchmarks to be run,
Note that inside each scenario configuration, the benchmark scenario is actually launched **3 times** (as specified in the **"runner"** field). The **"runner"** field can also specify in more detail how exactly the benchmark scenario should be launched; we elaborate on that in the *"Scenario Runners"* section below.
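
For instance, a minimal **"runner"** section matching this description could look like the snippet below (a sketch only; it mirrors the constant-runner samples elsewhere in this commit):

    "runner": {
        "type": "constant",
        "times": 3,
        "concurrency": 1
    }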
.. _ScenariosDevelopment:
Developer's view
^^^^^^^^^^^^^^^^
From the developer's perspective, a benchmark scenario is a method marked by a **@scenario** decorator and placed in a class that inherits from the base `Scenario <https://github.com/stackforge/rally/blob/master/rally/benchmark/scenarios/base.py#L40>`_ class and located in some subpackage of `rally.benchmark.scenarios <https://github.com/stackforge/rally/tree/master/rally/benchmark/scenarios>`_. There may be arbitrarily many benchmark scenarios in a scenario class; each of them should be referenced (in the task configuration file) as *ScenarioClassName.method_name*.
In the toy example below, we define a scenario class *MyScenario* with one benchmark scenario *MyScenario.scenario*. This benchmark scenario tests the performance of a sequence of 2 actions, implemented via private methods in the same class. Both methods are marked with the **@atomic_action_timer** decorator. This allows Rally to handle those actions in a special way and, after benchmarks complete, show runtime statistics not only for the whole scenario, but for the separate actions as well.
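
The toy example itself falls outside this hunk; the following is a minimal sketch consistent with the description above (the action names are illustrative assumptions, not the verbatim doc text):

    from rally.benchmark.scenarios import base


    class MyScenario(base.Scenario):
        """Toy scenario class with one benchmark scenario."""

        @base.atomic_action_timer("action_1")
        def _action_1(self):
            pass  # first action, timed separately in the results

        @base.atomic_action_timer("action_2")
        def _action_2(self):
            pass  # second action, timed separately in the results

        @base.scenario()
        def scenario(self):
            self._action_1()
            self._action_2()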
@ -160,6 +161,7 @@ The scenario running strategy is specified by its **type** and also by some type
Also, all scenario runners can be provided (again, through the **"runner"** section in the config file) with an optional *"timeout"* parameter, which specifies the timeout for each single benchmark scenario run (in seconds).
.. _RunnersDevelopment:
Developer's view
^^^^^^^^^^^^^^^^
@ -220,7 +222,7 @@ The notion of **contexts** in Rally is essentially used to define different type
User's view
^^^^^^^^^^^
From the user's perspective, contexts in Rally are manageable via the **task configuration files**. In a typical configuration file, each benchmark scenario to be run is supplied not only with the information about its arguments and how many times it should be launched, but also with a special **"context"** section. In this section, the user may configure a number of contexts the scenarios need to be run within.
In the example below, the **"users" context** specifies that the *"NovaServers.boot_server"* scenario should be run from **1 tenant** having **3 users** in it. Bearing in mind that the default quota is 10 instances per tenant, it is also reasonable to extend it to, say, **20 instances** in the **"quotas" context**. Otherwise the scenario would eventually fail, since it tries to boot a server 15 times from a single tenant.
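
A sketch of such a **"context"** section follows (illustrative only; the exact quotas syntax is an assumption based on the numbers given in the prose):

    "context": {
        "users": {
            "tenants": 1,
            "users_per_tenant": 3
        },
        "quotas": {
            "nova": {
                "instances": 20
            }
        }
    }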
@ -254,10 +256,11 @@ In the example below, the **"users" context** specifies that the *"NovaServers.b
}
.. _ContextDevelopment:
Developer's view
^^^^^^^^^^^^^^^^
From the developer's view, context management is implemented via **Context classes**. Each context type that can be specified in the task configuration file corresponds to a certain subclass of the base `Context <https://github.com/stackforge/rally/blob/master/rally/benchmark/context/base.py>`_ class, located in the `rally.benchmark.context <https://github.com/stackforge/rally/tree/master/rally/benchmark/context>`_ module. Every context class should implement a fairly simple **interface**:
.. parsed-literal::
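
The interface listing itself is cut off by the hunk boundary; based on the sample context plugin at the top of this commit, it plausibly looks like the sketch below (not the verbatim doc text):

    class YourContext(base.Context):
        """Description of your context."""

        __ctx_name__ = "your_context"   # name used in task config files
        __ctx_order__ = 100500          # position in the context setup chain
        __ctx_hidden__ = False          # whether to hide it from users

        CONFIG_SCHEMA = {...}           # JSON schema of the context config

        def setup(self):
            # prepare the environment before the benchmark scenarios run
            ...

        def cleanup(self):
            # release the resources after the benchmark scenarios complete
            ...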
@ -316,92 +319,25 @@ The *__ctx_hidden__* attribute defines whether the context should be a *hidden*
If you want to dive deeper, also see the context manager (:mod:`rally.benchmark.context.base`) class that actually implements the algorithm described above.
Plugins
-------

Rally provides an opportunity to create and use a custom benchmark scenario, runner or context as a plugin. The plugins mechanism can be used to simplify experiments with new scenarios and to facilitate their creation by users who don't want to edit the actual Rally code.
Placement
^^^^^^^^^

Put the plugin into the **/opt/rally/plugins** or **~/.rally/plugins** directory (or their subdirectories) and it will be autoloaded. The corresponding module should have a ".py" extension. These directories are not created automatically; create them by hand, or use the **unpack_plugins_samples.sh** script from **doc/samples/plugins**, which creates **~/.rally/plugins** internally (see more about this script in the **Samples** section).
Creation
^^^^^^^^

Inherit a class for your plugin from the base class for a scenario, runner or context, depending on what type of plugin you want to create.

See more information about creating `scenarios <ScenariosDevelopment>`_, `runners <RunnersDevelopment>`_ and `contexts <ContextDevelopment>`_.
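
As a minimal sketch, a scenario plugin reduces to the following (class and method names here are illustrative; the full samples shipped in this commit show runner and context plugins as well):

    from rally.benchmark.scenarios import base


    class MyPlugin(base.Scenario):

        @base.scenario()
        def my_benchmark(self):
            """Your benchmark logic goes here."""
            pass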
Usage
^^^^^

Specify your plugin's information in a task configuration file. See `how to work with task configuration files <https://github.com/stackforge/rally/blob/master/doc/samples/tasks/README.rst>`_. You can find sample configuration files for different types of plugins in the corresponding folders `here <https://github.com/stackforge/rally/tree/master/doc/samples/plugins>`_. If you need to pass arguments to the benchmark method, place them in the **args** section of the task configuration file.
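
For example, a task configuration entry for the flavor-listing plugin above could look like this (it mirrors the sample shipped in this commit):

    {
        "ScenarioPlugin.list_flavors": [
            {
                "runner": {
                    "type": "serial",
                    "times": 5
                },
                "context": {
                    "create_flavor": {
                        "ram": 512
                    }
                }
            }
        ]
    }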

View File

@ -22,5 +22,5 @@ rutils.import_modules_from_package("rally.benchmark.context")
rutils.import_modules_from_package("rally.benchmark.runners")
rutils.import_modules_from_package("rally.benchmark.scenarios")

rutils.load_plugins("/opt/rally/plugins/")
rutils.load_plugins(os.path.expanduser("~/.rally/plugins/"))

View File

@ -264,6 +264,8 @@ def run(argv, categories):
                break
        return(1)

    try:
        utils.load_plugins("/opt/rally/plugins/")
        utils.load_plugins(os.path.expanduser("~/.rally/plugins/"))
        ret = fn(*fn_args, **fn_kwargs)
        return(ret)
    except IOError as e:

View File

@ -183,10 +183,11 @@ def log_verification_wrapper(log, msg, **kw):
def load_plugins(directory):
    if os.path.exists(directory):
        to_load = []
        for root, dirs, files in os.walk(directory):
            to_load.extend((plugin[:-3], root)
                           for plugin in files if plugin.endswith(".py"))
        for plugin, directory in to_load:
            fullpath = os.path.join(directory, plugin)
            try:
                fp, pathname, descr = imp.find_module(plugin, [directory])
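                # The hunk ends inside the try block; the lines below are a
                # sketch of the plausible continuation, inferred from the
                # tests further down (imp.load_module is patched there, and a
                # broken module must not raise). The exact wording of the
                # warning is an assumption, not the verbatim source.
                imp.load_module(plugin, fp, pathname, descr)
                fp.close()
            except Exception as e:
                LOG.warning("Couldn't load plugin from file %s: %s"
                            % (fullpath, e))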

View File

@ -19,7 +19,7 @@ SCENARIO=$BASE/new/$PROJECT/rally-scenarios/${RALLY_SCENARIO}.yaml
PLUGINS_DIR=$BASE/new/$PROJECT/rally-scenarios/plugins
EXTRA_DIR=$BASE/new/$PROJECT/rally-scenarios/extra
RALLY_PLUGINS_DIR=~/.rally/plugins

mkdir -p $RALLY_PLUGINS_DIR
if [ -d $PLUGINS_DIR ]; then

View File

@ -62,7 +62,11 @@ class TaskSampleTestCase(test.TestCase):
        # TODO(boris-42): We should refactor scenarios framework add "_" to
        #                 all non-benchmark methods.. Then this test will pass.
        missing = set(base.Scenario.list_benchmark_scenarios()) - scenarios
        # check that the missing scenarios are not from plugins
        missing = [scenario for scenario in list(missing) if
                   base.Scenario.get_by_name(scenario.split(".")[0]).
                   __module__.startswith("rally")]
        self.assertEqual(missing, [],
                         "These scenarios don't have samples: %s" % missing)

    def test_json_correct_syntax(self):

View File

@ -172,31 +172,26 @@ class LogTestCase(test.TestCase):
class LoadExtraModulesTestCase(test.TestCase):

    @mock.patch("rally.utils.imp.load_module")
    @mock.patch("rally.utils.imp.find_module", return_value=(mock.MagicMock(),
                                                             None, None))
    @mock.patch("rally.utils.os.walk", return_value=[
        ('/somewhere', ('/subdir', ), ('plugin1.py', )),
        ('/somewhere/subdir', ('/subsubdir', ), ('plugin2.py',
                                                 'withoutextension')),
        ('/somewhere/subdir/subsubdir', [], ('plugin3.py', ))])
    @mock.patch("rally.utils.os.path.exists", return_value=True)
    def test_load_plugins_successfull(self, mock_exists,
                                      mock_oswalk, mock_find_module,
                                      mock_load_module):
        test_path = "/somewhere"
        utils.load_plugins(test_path)
        expected = [
            mock.call("plugin1", ["/somewhere"]),
            mock.call("plugin2", ["/somewhere/subdir"]),
            mock.call("plugin3", ["/somewhere/subdir/subsubdir"])
        ]
        self.assertEqual(mock_find_module.mock_calls, expected)
        self.assertEqual(len(mock_load_module.mock_calls), 3)
@mock.patch("rally.utils.os") @mock.patch("rally.utils.os")
def test_load_plugins_from_nonexisting_and_empty_dir(self, mock_os): def test_load_plugins_from_nonexisting_and_empty_dir(self, mock_os):
@ -205,17 +200,16 @@ class LoadExtraModulesTestCase(test.TestCase):
        utils.load_plugins("/somewhere")
        # test no fails for empty directory
        mock_os.path.exists.return_value = True
        mock_os.walk.return_value = []
        utils.load_plugins("/somewhere")
@mock.patch("rally.utils.imp.load_module") @mock.patch("rally.utils.imp.load_module", side_effect=Exception())
@mock.patch("rally.utils.imp.find_module") @mock.patch("rally.utils.imp.find_module")
@mock.patch("rally.utils.os.path.exists", return_value=True) @mock.patch("rally.utils.os.path", return_value=True)
@mock.patch("rally.utils.os.listdir") @mock.patch("rally.utils.os.walk", return_value=[('/etc/.rally/plugins',
def test_load_plugins_fails(self, mock_oslistdir, mock_ospath, [], ('load_it.py', ))])
def test_load_plugins_fails(self, mock_oswalk, mock_ospath,
mock_load_module, mock_find_module): mock_load_module, mock_find_module):
mock_oslistdir.return_value = ["somebrokenplugin.py", ]
mock_load_module.side_effect = Exception()
# test no fails if module is broken # test no fails if module is broken
# TODO(olkonami): check exception is handled correct # TODO(olkonami): check exception is handled correct
utils.load_plugins("/somwhere") utils.load_plugins("/somwhere")