Switch scenario plugins to the plugin base

This patch finishes the work on switching all plugin types
to a single code base. This reduces the amount of code and unifies
all plugins, which makes it simpler to work on any kind of common
plugin functionality: deprecation, info, validation and so on.
In this patch:
* Scenario.meta was removed and now MetaMixin is used
* ScenarioGroups are removed
  This reduces framework complexity: one less thing that you
  have to know and understand to get started.
* Scenario plugin names and namespaces can be set explicitly
  through configure() (see the sketch below)
  ** Unified with other kinds of plugins
  ** More flexibility: it is simpler to move and rename plugins
     with deprecation
* Remove all unused Scenario methods

* Improve rally.info substitution
  It is okay to specify just a part of the plugin name
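
A rough sketch (not part of the patch) of how a scenario plugin is
declared and looked up after this change; the MyScenarios class, its
do_something method and the context dict are illustrative only, the
decorator and get()/meta helpers come from this patch:

    from rally.task import scenario

    class MyScenarios(scenario.Scenario):

        # The name defaults to "MyScenarios.do_something" via the new
        # metaclass; an explicit name/namespace may be passed to
        # configure() instead.
        @scenario.configure(context={"users": {"tenants": 1}})
        def do_something(self):
            pass

    # Unified lookup and meta access, the same way as for SLA, deploy
    # engine and server provider plugins:
    scenario_plugin = scenario.Scenario.get("MyScenarios.do_something")
    default_ctx = scenario_plugin._meta_get("default_context")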

Future steps (not in this patch) are:
* Refactor plugin info and reduce copy-paste inside that module
* Refactor atomic actions (make them work for contexts too)
* Refactor validation (make scenario validation generic enough
to work with all types of plugins)

Change-Id: Ibecb8006ebb5d670bcf4519e9f0d6a505e385a1a
Boris Pavlovic 2015-08-18 16:29:08 -07:00
parent a065516bb6
commit 8f69f06fa0
20 changed files with 346 additions and 484 deletions

View File

@ -17,7 +17,7 @@
Samples:
$ rally info find create_meter_and_get_stats
$ rally info find CeilometerStats.create_meter_and_get_stats
CeilometerStats.create_meter_and_get_stats (benchmark scenario).
Test creating a meter and fetching its statistics.
@ -28,20 +28,6 @@ Samples:
- name_length: length of generated (random) part of meter name
- kwargs: contains optional arguments to create a meter
$ rally info find Authenticate
Authenticate (benchmark scenario group).
This class should contain authentication mechanism.
Benchmark scenarios:
---------------------------------------------------------
Name Description
---------------------------------------------------------
Authenticate.keystone
Authenticate.validate_cinder Check Cinder Client ...
Authenticate.validate_glance Check Glance Client ...
Authenticate.validate_heat Check Heat Client ...
$ rally info find some_non_existing_benchmark
Failed to find any docs for query: 'some_non_existing_benchmark'
@ -52,7 +38,6 @@ from __future__ import print_function
import inspect
from rally.cli import cliutils
from rally.common.plugin import discover
from rally.common import utils
from rally.deployment import engine
from rally.deployment.serverprovider import provider
@ -106,7 +91,7 @@ class InfoCommands(object):
def list(self):
"""List main entities in Rally for which rally info find works.
Lists benchmark scenario groups, deploy engines and server providers.
Lists task scenario groups, deploy engines and server providers.
"""
self.BenchmarkScenarios()
self.SLA()
@ -116,44 +101,29 @@ class InfoCommands(object):
@plugins.ensure_plugins_are_loaded
def BenchmarkScenarios(self):
"""Get information about benchmark scenarios available in Rally."""
def scenarios_filter(scenario_cls):
return any(scenario.Scenario.is_scenario(scenario_cls, m)
for m in dir(scenario_cls))
scenarios = self._get_descriptions(scenario.Scenario, scenarios_filter)
info = (self._make_header("Rally - Benchmark scenarios") +
scenarios = self._get_descriptions(scenario.Scenario)
info = (self._make_header("Rally - Task scenarios") +
"\n\n"
"Benchmark scenarios are what Rally actually uses to test "
"the performance of an OpenStack deployment.\nEach Benchmark "
"Task scenarios are what Rally actually uses to test "
"the performance of an OpenStack deployment.\nEach Task "
"scenario implements a sequence of atomic operations "
"(server calls) to simulate\ninteresing user/operator/"
"client activity in some typical use case, usually that of "
"a specific OpenStack\nproject. Iterative execution of this "
"sequence produces some kind of load on the target cloud.\n"
"Benchmark scenarios play the role of building blocks in "
"benchmark task configuration files."
"\n\n"
"Scenarios in Rally are put together in groups. Each "
"scenario group is concentrated on some specific \nOpenStack "
"functionality. For example, the 'NovaServers' scenario "
"group contains scenarios that employ\nseveral basic "
"operations available in Nova."
"Task scenarios play the role of building blocks in "
"task configuration files."
"\n\n" +
self._compose_table("List of Benchmark scenario groups",
scenarios) +
"To get information about benchmark scenarios inside "
"each scenario group, run:\n"
" $ rally info find <ScenarioGroupName>\n\n")
self._compose_table("List of Task scenarios", scenarios) +
"To get information about benchmark scenarios: "
" $ rally info find <scenario_name>\n\n")
print(info)
@plugins.ensure_plugins_are_loaded
def SLA(self):
"""Get information about SLA available in Rally."""
sla_descrs = self._get_descriptions(sla.SLA)
slas = self._get_descriptions(sla.SLA)
# NOTE(msdubov): Add config option names to the "Name" column
for i in range(len(sla_descrs)):
description = sla_descrs[i]
sla_cls = sla.SLA.get(description[0])
sla_descrs[i] = (sla_cls.get_name(), description[1])
info = (self._make_header("Rally - SLA checks "
"(Service-Level Agreements)") +
"\n\n"
@ -172,7 +142,7 @@ class InfoCommands(object):
" failure_rate:\n"
" max: 1"
"\n\n" +
self._compose_table("List of SLA checks", sla_descrs) +
self._compose_table("List of SLA checks", slas) +
"To get information about specific SLA checks, run:\n"
" $ rally info find <sla_check_name>\n")
print(info)
@ -245,12 +215,9 @@ class InfoCommands(object):
" $ rally info find <ServerProviderName>\n")
print(info)
def _get_descriptions(self, base_cls, subclass_filter=None):
def _get_descriptions(self, base_cls):
descriptions = []
subclasses = discover.itersubclasses(base_cls)
if subclass_filter:
subclasses = filter(subclass_filter, subclasses)
for entity in subclasses:
for entity in base_cls.get_all():
name = entity.get_name()
doc = utils.parse_docstring(entity.__doc__)
description = doc["short_description"] or ""
@ -259,65 +226,33 @@ class InfoCommands(object):
return descriptions
def _find_info(self, query):
return (self._get_scenario_group_info(query) or
self._get_scenario_info(query) or
return (self._get_scenario_info(query) or
self._get_sla_info(query) or
self._get_deploy_engine_info(query) or
self._get_server_provider_info(query))
def _find_substitution(self, query):
max_distance = min(3, len(query) / 4)
scenarios = scenario.Scenario.list_benchmark_scenarios()
scenario_groups = list(set(s.split(".")[0] for s in scenarios))
scenario_methods = list(set(s.split(".")[1] for s in scenarios))
scenarios = [s.get_name() for s in scenario.Scenario.get_all()]
sla_info = [cls.get_name() for cls in sla.SLA.get_all()]
deploy_engines = [cls.get_name() for cls in
engine.Engine.get_all()]
server_providers = [cls.get_name() for cls in
provider.ProviderFactory.get_all()]
candidates = (scenarios + scenario_groups + scenario_methods +
sla_info + deploy_engines + server_providers)
candidates = (scenarios + sla_info + deploy_engines + server_providers)
suggestions = []
# NOTE(msdubov): Incorrect query may either have typos or be truncated.
for candidate in candidates:
if ((utils.distance(query, candidate) <= max_distance or
candidate.startswith(query))):
query.lower() in candidate.lower())):
suggestions.append(candidate)
return suggestions
def _get_scenario_group_info(self, query):
try:
scenario_group = scenario.Scenario.get_by_name(query)
if not any(scenario.Scenario.is_scenario(scenario_group, m)
for m in dir(scenario_group)):
return None
info = self._make_header("%s (benchmark scenario group)" %
scenario_group.get_name())
info += "\n\n"
info += utils.format_docstring(scenario_group.__doc__)
scenarios = scenario_group.list_benchmark_scenarios()
descriptions = []
for scenario_name in scenarios:
cls, method_name = scenario_name.split(".")
if hasattr(scenario_group, method_name):
scenario_inst = getattr(scenario_group, method_name)
doc = utils.parse_docstring(scenario_inst.__doc__)
descr = doc["short_description"] or ""
descriptions.append((scenario_name, descr))
info += self._compose_table("Benchmark scenarios", descriptions)
return info
except exceptions.NoSuchScenario:
return None
def _get_scenario_info(self, query):
try:
scenario_inst = scenario.Scenario.get_scenario_by_name(query)
scenario_gr_name = utils.get_method_class(scenario_inst).get_name()
header = ("%(scenario_group)s.%(scenario_name)s "
"(benchmark scenario)" %
{"scenario_group": scenario_gr_name,
"scenario_name": scenario_inst.__name__})
scenario_inst = scenario.Scenario.get(query)
header = "%s (task scenario)" % scenario_inst.get_name()
info = self._make_header(header)
info += "\n\n"
doc = utils.parse_docstring(scenario_inst.__doc__)
@ -347,7 +282,7 @@ class InfoCommands(object):
if doc["returns"]:
info += "Returns: %s" % doc["returns"]
return info
except exceptions.NoSuchScenario:
except exceptions.PluginNotFound:
return None
def _get_sla_info(self, query):

View File

@ -61,6 +61,11 @@ class MetaMixin(object):
"""Initialize meta for this class."""
cls._meta = {}
@classmethod
def _meta_clear(cls):
cls._meta.clear() # NOTE(boris-42): make sure that meta is deleted
delattr(cls, "_meta")
@classmethod
def _meta_is_inited(cls, raise_exc=True):
"""Check if meta is initialized.
@ -93,3 +98,9 @@ class MetaMixin(object):
"""Set value for key in meta."""
cls._meta_is_inited()
cls._meta[key] = value
@classmethod
def _meta_setdefault(cls, key, value):
"""Set default value for key in meta."""
cls._meta_is_inited()
cls._meta.setdefault(key, value)

View File

@ -130,7 +130,16 @@ class Plugin(meta.MetaMixin):
are in various namespaces.
"""
cls._meta_init()
cls._set_name_and_namespace(name, namespace)
return cls
@classmethod
def unregister(cls):
"""Removes all pluign meta information and makes it indiscoverable."""
cls._meta_clear()
@classmethod
def _set_name_and_namespace(cls, name, namespace):
try:
Plugin.get(name, namespace=namespace)
except exceptions.PluginNotFound:
@ -139,7 +148,6 @@ class Plugin(meta.MetaMixin):
else:
raise exceptions.PluginWithSuchNameExists(name=name,
namespace=namespace)
return cls
@classmethod
def _set_deprecated(cls, reason, rally_version):
@ -172,7 +180,8 @@ class Plugin(meta.MetaMixin):
if p.get_name() == name:
return p
raise exceptions.PluginNotFound(name=name, namespace=namespace)
raise exceptions.PluginNotFound(
name=name, namespace=namespace or "any of")
@classmethod
def get_all(cls, namespace=None):

View File

@ -117,17 +117,13 @@ class NotFoundException(RallyException):
class PluginNotFound(NotFoundException):
msg_fmt = _("There is no plugin with name: `%(name)s` in "
"`%(namespace)s` namespace.")
msg_fmt = _("There is no plugin with name: %(name)s in "
"%(namespace)s namespace.")
class PluginWithSuchNameExists(RallyException):
msg_fmt = _("Plugin with such name: `%(name)s` already exists in "
"`%(namespace)s` namespace")
class NoSuchScenario(NotFoundException):
msg_fmt = _("There is no benchmark scenario with name `%(name)s`.")
msg_fmt = _("Plugin with such name: %(name)s already exists in "
"%(namespace)s namespace")
class NoSuchConfigField(NotFoundException):

View File

@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import threading
import time
@ -179,7 +180,7 @@ class BenchmarkEngine(object):
@rutils.log_task_wrapper(LOG.info,
_("Task validation of scenarios names."))
def _validate_config_scenarios_name(self, config):
available = set(scenario.Scenario.list_benchmark_scenarios())
available = set(s.get_name() for s in scenario.Scenario.get_all())
specified = set(six.iterkeys(config))
if not specified.issubset(available):
@ -206,8 +207,8 @@ class BenchmarkEngine(object):
def _validate_config_semantic_helper(self, admin, user, name, pos,
deployment, kwargs):
try:
scenario.Scenario.validate(name, kwargs, admin=admin,
users=[user], deployment=deployment)
scenario.Scenario.validate(
name, kwargs, admin=admin, users=[user], deployment=deployment)
except exceptions.InvalidScenarioArgument as e:
kw = {"name": name, "pos": pos,
"config": kwargs, "reason": six.text_type(e)}
@ -265,7 +266,8 @@ class BenchmarkEngine(object):
return runner.ScenarioRunner.get(cfg["type"])(self.task, cfg)
def _prepare_context(self, ctx, name, endpoint):
scenario_context = scenario.Scenario.meta(name, "context")
scenario_context = copy.deepcopy(
scenario.Scenario.get(name)._meta_get("default_context"))
if self.existing_users and "users" not in ctx:
scenario_context.setdefault("existing_users", self.existing_users)
elif "users" not in ctx:

View File

@ -200,12 +200,12 @@ class ScenarioRunner(plugin.Plugin):
def run(self, name, context, args):
cls_name, method_name = name.split(".", 1)
cls = scenario.Scenario.get_by_name(cls_name)
cls = scenario.Scenario.get(name)._meta_get("cls_ref")
self.aborted.clear()
# NOTE(boris-42): processing @types decorators
args = types.preprocess(cls, method_name, context, args)
args = types.preprocess(name, context, args)
with rutils.Timer() as timer:
self._run_scenario(cls, method_name, context, args)

View File

@ -13,15 +13,15 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
import functools
import itertools
import random
import time
import six
from rally.common import costilius
from rally.common import log as logging
from rally.common.plugin import discover
from rally.common.plugin import plugin
from rally.common import utils
from rally import consts
from rally import exceptions
@ -31,23 +31,67 @@ from rally.task import functional
LOG = logging.getLogger(__name__)
def configure(context=None):
"""Make from plain python method benchmark.
def configure(name=None, namespace="default", context=None):
"""Make from plain python method task scenario plugin.
It sets 2 attributes to function:
is_scenario = True # that is used during discovering
func.context = context # default context for benchmark
:param context: Default benchmark context
:param name: Plugin name
:param namespace: Plugin namespace
:param context: Default task context that is created for this scenario.
If there are custom user specified contexts this one
will be updated by provided contexts.
"""
def wrapper(func):
func.is_scenario = True
func.context = context or {}
plugin.from_func(Scenario)(func)
func._meta_init()
if name:
func._set_name_and_namespace(name, namespace)
else:
func._meta_set("namespace", namespace)
func._meta_set("default_context", context or {})
return func
return wrapper
class Scenario(functional.FunctionalMixin):
class ConfigurePluginMeta(type):
"""Finish Scenario plugin configuration.
After @scenario.configure() is applied to cls.method, method.im_class
points to the FuncPlugin class instead of the original cls. There is no
way to fix this, mostly because im_class is added to a method only when
it is accessed via the class, e.g. cls.method; a decorator is a
different case, so there is no information about cls. method._plugin
points to the FuncPlugin, which in turn keeps a pointer to the method.
What should be done is to set FuncPlugin.func_ref properly to
cls.method.
This metaclass iterates over all cls methods and fixes func_ref of the
FuncPlugin class, so that func_ref is cls.method instead of
FuncPlugin.method.
Additionally, this metaclass sets plugin names if they were not set
explicitly via configure(). The default name is <cls_name>.<method_name>.
As well, we need to keep cls_ref inside _meta because Python 3 loves us.
Viva black magic and dirty hacks.
"""
def __init__(cls, name, bases, namespaces):
super(ConfigurePluginMeta, cls).__init__(name, bases, namespaces)
for name, field in six.iteritems(namespaces):
if callable(field) and hasattr(field, "_plugin"):
field._plugin._meta_set("cls_ref", cls)
if not field._meta_get("name", None):
field._set_name_and_namespace(
"%s.%s" % (cls.__name__, field.__name__),
field.get_namespace())
field._plugin.func_ref = getattr(
cls, field._plugin.func_ref.__name__)
@six.add_metaclass(ConfigurePluginMeta)
class Scenario(plugin.Plugin, functional.FunctionalMixin):
"""This is base class for any benchmark scenario.
You should create subclass of this class. And your test scenarios will
@ -68,60 +112,6 @@ class Scenario(functional.FunctionalMixin):
length = length or cls.RESOURCE_NAME_LENGTH
return utils.generate_random_name(prefix, length)
@staticmethod
def get_by_name(name):
"""Returns Scenario class by name."""
for scenario in discover.itersubclasses(Scenario):
if name == scenario.__name__:
return scenario
raise exceptions.NoSuchScenario(name=name)
# TODO(boris-42): Remove after switching to plugin base.
@classmethod
def get_name(cls):
return cls.__name__
@staticmethod
def get_scenario_by_name(name):
"""Return benchmark scenario method by name.
:param name: name of the benchmark scenario being searched for (either
a full name (e.g, 'NovaServers.boot_server') or just
a method name (e.g., 'boot_server')
:returns: function object
"""
if "." in name:
scenario_group, scenario_name = name.split(".", 1)
scenario_cls = Scenario.get_by_name(scenario_group)
if Scenario.is_scenario(scenario_cls, scenario_name):
return getattr(scenario_cls, scenario_name)
else:
for scenario_cls in discover.itersubclasses(Scenario):
if Scenario.is_scenario(scenario_cls, name):
return getattr(scenario_cls, name)
raise exceptions.NoSuchScenario(name=name)
@classmethod
def list_benchmark_scenarios(scenario_cls):
"""List all scenarios in the benchmark scenario class & its subclasses.
Returns the method names in format <Class name>.<Method name>, which
is used in the test config.
:param scenario_cls: the base class for searching scenarios in
:returns: List of strings
"""
scenario_classes = (list(discover.itersubclasses(scenario_cls)) +
[scenario_cls])
benchmark_scenarios = [
["%s.%s" % (scenario.__name__, func)
for func in dir(scenario) if Scenario.is_scenario(scenario, func)]
for scenario in scenario_classes
]
benchmark_scenarios_flattened = list(
itertools.chain.from_iterable(benchmark_scenarios))
return benchmark_scenarios_flattened
@staticmethod
def _validate_helper(validators, clients, config, deployment):
for validator in validators:
@ -138,7 +128,7 @@ class Scenario(functional.FunctionalMixin):
@classmethod
def validate(cls, name, config, admin=None, users=None, deployment=None):
"""Semantic check of benchmark arguments."""
validators = cls.meta(name, "validators", default=[])
validators = Scenario.get(name)._meta_get("validators", default=[])
if not validators:
return
@ -156,37 +146,6 @@ class Scenario(functional.FunctionalMixin):
for user in users:
cls._validate_helper(user_validators, user, config, deployment)
@staticmethod
def meta(cls, attr_name, method_name=None, default=None):
"""Extract the named meta information out of the scenario name.
:param cls: Scenario (sub)class or string of form 'class.method'
:param attr_name: Name of method attribute holding meta information.
:param method_name: Name of method queried for meta information.
:param default: Value returned if no meta information is attached.
:returns: Meta value bound to method attribute or default.
"""
if isinstance(cls, str):
cls_name, method_name = cls.split(".", 1)
cls = Scenario.get_by_name(cls_name)
method = getattr(cls, method_name)
return copy.deepcopy(getattr(method, attr_name, default))
@staticmethod
def is_scenario(cls, method_name):
"""Check whether a given method in scenario class is a scenario.
:param cls: scenario class
:param method_name: method name
:returns: True if the method is a benchmark scenario, False otherwise
"""
try:
getattr(cls, method_name)
except Exception:
return False
return Scenario.meta(cls, "is_scenario", method_name, default=False)
def sleep_between(self, min_sleep, max_sleep):
"""Performs a time.sleep() call for a random amount of seconds.
@ -276,8 +235,8 @@ class AtomicAction(utils.Timer):
atomic_action_iteration += 1
return name_template % atomic_action_iteration
def __exit__(self, type, value, tb):
super(AtomicAction, self).__exit__(type, value, tb)
if type is None:
def __exit__(self, type_, value, tb):
super(AtomicAction, self).__exit__(type_, value, tb)
if type_ is None:
self.scenario_instance._add_atomic_actions(self.name,
self.duration())

View File

@ -34,17 +34,16 @@ def set(**kwargs):
used to perform a transformation on the value of that key.
"""
def wrapper(func):
func.preprocessors = getattr(func, "preprocessors", {})
func.preprocessors.update(kwargs)
func._meta_setdefault("preprocessors", {})
func._meta_get("preprocessors").update(kwargs)
return func
return wrapper
def preprocess(cls, method_name, context, args):
def preprocess(name, context, args):
"""Run preprocessor on scenario arguments.
:param cls: class name of benchmark scenario
:param method_name: name of benchmark scenario method
:param name: Plugin name
:param context: dictionary object that must have admin and endpoint entries
:param args: args section of benchmark specification in rally task file
@ -52,9 +51,8 @@ def preprocess(cls, method_name, context, args):
and resource configuration
"""
preprocessors = scenario.Scenario.meta(cls, method_name=method_name,
attr_name="preprocessors",
default={})
preprocessors = scenario.Scenario.get(name)._meta_get("preprocessors",
default={})
clients = osclients.Clients(context["admin"]["endpoint"])
processed_args = copy.deepcopy(args)

View File

@ -58,7 +58,6 @@ def validator(fn):
:param kwargs: the keyword arguments of the decorator of the scenario
ex. @my_decorator(kwarg1="kwarg1"), then kwargs = {"kwarg1": "kwarg1"}
"""
@functools.wraps(fn)
def wrap_validator(config, clients, deployment):
# NOTE(amaretskiy): validator is successful by default
@ -69,9 +68,9 @@ def validator(fn):
# TODO(boris-42): remove this in future.
wrap_validator.permission = getattr(fn, "permission",
consts.EndpointPermission.USER)
if not hasattr(scenario, "validators"):
scenario.validators = []
scenario.validators.append(wrap_validator)
scenario._meta_setdefault("validators", [])
scenario._meta_get("validators").append(wrap_validator)
return scenario
return wrap_scenario

View File

@ -25,23 +25,13 @@ class InfoTestCase(unittest.TestCase):
super(InfoTestCase, self).setUp()
self.rally = utils.Rally()
def test_find_scenario_group(self):
output = self.rally("info find Dummy")
self.assertIn("(benchmark scenario group)", output)
self.assertIn("Dummy.dummy_exception", output)
self.assertIn("Dummy.dummy_random_fail_in_atomic", output)
def test_find_scenario_group_base_class(self):
# NOTE(msdubov): We shouldn't display info about base scenario classes
# containing no end-user scenarios
self.assertRaises(utils.RallyCliError, self.rally,
("info find CeilometerScenario"))
def test_find_scenario(self):
self.assertIn("(benchmark scenario)", self.rally("info find dummy"))
self.assertIn("(task scenario)",
self.rally("info find Dummy.dummy"))
def test_find_scenario_misspelling_typos(self):
self.assertIn("(benchmark scenario)", self.rally("info find dummi"))
self.assertIn("(task scenario)",
self.rally("info find Dummy.dummi"))
def test_find_sla(self):
expected = "failure_rate (SLA)"
@ -73,7 +63,7 @@ class InfoTestCase(unittest.TestCase):
def test_find_misspelling_truncated(self):
marker_string = ("NovaServers.boot_and_list_server "
"(benchmark scenario)")
"(task scenario)")
self.assertIn(marker_string,
self.rally("info find boot_and_list"))
@ -92,7 +82,6 @@ class InfoTestCase(unittest.TestCase):
def test_list(self):
output = self.rally("info list")
self.assertIn("Benchmark scenario groups:", output)
self.assertIn("NovaServers", output)
self.assertIn("SLA checks:", output)
self.assertIn("failure_rate", output)
@ -103,7 +92,6 @@ class InfoTestCase(unittest.TestCase):
def test_BenchmarkScenarios(self):
output = self.rally("info BenchmarkScenarios")
self.assertIn("Benchmark scenario groups:", output)
self.assertIn("NovaServers", output)
self.assertNotIn("NovaScenario", output)

View File

@ -16,16 +16,12 @@
import mock
from rally.cli.commands import info
from rally.deployment import engine
from rally.deployment.engines import existing as existing_cloud
from rally.deployment.serverprovider import provider
from rally.deployment.serverprovider.providers import (
existing as existing_servers)
from rally import exceptions
from rally.plugins.common.scenarios.dummy import dummy
from rally.plugins.common.sla import failure_rate
from rally.task import scenario
from rally.task import sla
from tests.unit import test
@ -43,28 +39,18 @@ class InfoCommandsTestCase(test.TestCase):
super(InfoCommandsTestCase, self).setUp()
self.info = info.InfoCommands()
@mock.patch(SCENARIO + ".get_by_name",
return_value=dummy.Dummy)
def test_find_dummy_scenario_group(self, mock_scenario_get_by_name):
query = "Dummy"
status = self.info.find(query)
mock_scenario_get_by_name.assert_called_once_with(query)
self.assertIsNone(status)
@mock.patch(SCENARIO + ".get_scenario_by_name",
return_value=dummy.Dummy.dummy)
def test_find_dummy_scenario(self, mock_scenario_get_scenario_by_name):
@mock.patch(SCENARIO + ".get", return_value=dummy.Dummy.dummy)
def test_find_dummy_scenario(self, mock_scenario_get):
query = "Dummy.dummy"
status = self.info.find(query)
mock_scenario_get_scenario_by_name.assert_called_once_with(query)
mock_scenario_get.assert_called_once_with(query)
self.assertIsNone(status)
@mock.patch(SCENARIO + ".get_scenario_by_name",
side_effect=exceptions.NoSuchScenario)
def test_find_failure_status(self, mock_scenario_get_scenario_by_name):
@mock.patch(SCENARIO + ".get", side_effect=exceptions.PluginNotFound)
def test_find_failure_status(self, mock_scenario_get):
query = "Dummy.non_existing"
status = self.info.find(query)
mock_scenario_get_scenario_by_name.assert_called_once_with(query)
mock_scenario_get.assert_called_once_with(query)
self.assertEqual(1, status)
@mock.patch(SLA + ".get", return_value=failure_rate.FailureRate)
@ -103,29 +89,28 @@ class InfoCommandsTestCase(test.TestCase):
mock_server_providers.assert_called_once_with()
self.assertIsNone(status)
@mock.patch(DISCOVER + ".itersubclasses", return_value=[dummy.Dummy])
def test_BenchmarkScenarios(self, mock_itersubclasses):
@mock.patch(SCENARIO + ".get_all", return_value=[dummy.Dummy.dummy])
def test_BenchmarkScenarios(self, mock_scenario_get_all):
status = self.info.BenchmarkScenarios()
mock_itersubclasses.assert_called_with(scenario.Scenario)
mock_scenario_get_all.assert_called_with()
self.assertIsNone(status)
@mock.patch(DISCOVER + ".itersubclasses",
return_value=[failure_rate.FailureRate])
def test_SLA(self, mock_itersubclasses):
@mock.patch(SLA + ".get_all", return_value=[failure_rate.FailureRate])
def test_SLA(self, mock_sla_get_all):
status = self.info.SLA()
mock_itersubclasses.assert_called_with(sla.SLA)
mock_sla_get_all.assert_called_with()
self.assertIsNone(status)
@mock.patch(DISCOVER + ".itersubclasses",
@mock.patch(ENGINE + ".get_all",
return_value=[existing_cloud.ExistingCloud])
def test_DeploymentEngines(self, mock_itersubclasses):
def test_DeploymentEngines(self, mock_engine_get_all):
status = self.info.DeploymentEngines()
mock_itersubclasses.assert_called_with(engine.Engine)
mock_engine_get_all.assert_called_with()
self.assertIsNone(status)
@mock.patch(DISCOVER + ".itersubclasses",
@mock.patch(PROVIDER + ".get_all",
return_value=[existing_servers.ExistingServers])
def test_ServerProviders(self, mock_itersubclasses):
def test_ServerProviders(self, mock_provider_factory_get_all):
status = self.info.ServerProviders()
mock_itersubclasses.assert_called_with(provider.ProviderFactory)
mock_provider_factory_get_all.assert_called_with()
self.assertIsNone(status)

View File

@ -32,6 +32,19 @@ class TestMetaMixinTestCase(test.TestCase):
self.assertTrue(Meta._meta_is_inited())
self.assertTrue(Meta._meta_is_inited(raise_exc=False))
def test_meta_clear(self):
class Meta(meta.MetaMixin):
pass
Meta._meta_init()
Meta._meta_set("aaa", 42)
meta_ref = Meta._meta
Meta._meta_clear()
self.assertRaises(AttributeError, getattr, Meta, "_meta")
self.assertEqual({}, meta_ref)
def test_meta_set_and_get(self):
class Meta(meta.MetaMixin):
@ -62,3 +75,16 @@ class TestMetaMixinTestCase(test.TestCase):
pass
self.assertRaises(ReferenceError, Meta._meta_set, "a", 1)
def test_meta_setdefault(self):
class Meta(meta.MetaMixin):
pass
self.assertRaises(ReferenceError, Meta._meta_setdefault, "any", 42)
Meta._meta_init()
Meta._meta_setdefault("any", 42)
self.assertEqual(42, Meta._meta_get("any"))
Meta._meta_setdefault("any", 2)
self.assertEqual(42, Meta._meta_get("any"))

View File

@ -116,6 +116,16 @@ class NotInitedPlugin(BasePlugin):
class PluginTestCase(test.TestCase):
def test_unregister(self):
@plugin.configure(name="test_some_temp_plugin")
class SomeTempPlugin(BasePlugin):
pass
SomeTempPlugin.unregister()
self.assertRaises(exceptions.PluginNotFound,
BasePlugin.get, "test_some_temp_plugin")
def test_get(self):
self.assertEqual(SomePlugin,
BasePlugin.get("test_some_plugin"))

View File

@ -59,13 +59,11 @@ class TaskSampleTestCase(test.TestCase):
else:
scenarios.update(task_config.keys())
# TODO(boris-42): We should refactor scenarios framework add "_" to
# all non-benchmark methods.. Then this test will pass.
missing = set(scenario.Scenario.list_benchmark_scenarios()) - scenarios
missing = set(s.get_name() for s in scenario.Scenario.get_all())
missing -= scenarios
# check missing scenario is not from plugin
missing = [s for s in list(missing)
if scenario.Scenario.get_by_name(s.split(".")[0]).
__module__.startswith("rally")]
if scenario.Scenario.get(s).__module__.startswith("rally")]
self.assertEqual(missing, [],
"These scenarios don't have samples: %s" % missing)

View File

@ -103,13 +103,18 @@ class BenchmarkEngineTestCase(test.TestCase):
self.assertRaises(exceptions.InvalidTaskException, eng.validate)
self.assertTrue(task.set_failed.called)
@mock.patch("rally.task.engine.scenario.Scenario")
def test__validate_config_scenarios_name(self, mock_scenario):
@mock.patch("rally.task.engine.scenario.Scenario.get_all")
def test__validate_config_scenarios_name(self, mock_scenario_get_all):
config = {
"a": [],
"b": []
}
mock_scenario.list_benchmark_scenarios.return_value = ["e", "b", "a"]
mock_scenario_get_all.return_value = [
mock.MagicMock(get_name=lambda: "e"),
mock.MagicMock(get_name=lambda: "b"),
mock.MagicMock(get_name=lambda: "a")
]
eng = engine.BenchmarkEngine(config, mock.MagicMock())
eng._validate_config_scenarios_name(config)
@ -333,10 +338,10 @@ class BenchmarkEngineTestCase(test.TestCase):
self.assertEqual(2, mock_log.exception.call_count)
@mock.patch("rally.task.engine.scenario.Scenario.meta")
def test__prepare_context(self, mock_scenario_meta):
@mock.patch("rally.task.engine.scenario.Scenario.get")
def test__prepare_context(self, mock_scenario_get):
default_context = {"a": 1, "b": 2}
mock_scenario_meta.return_value = default_context
mock_scenario_get.return_value._meta_get.return_value = default_context
task = mock.MagicMock()
name = "a.benchmark"
context = {"b": 3, "c": 4}
@ -356,11 +361,14 @@ class BenchmarkEngineTestCase(test.TestCase):
"config": expected_context
}
self.assertEqual(result, expected_result)
mock_scenario_meta.assert_called_once_with(name, "context")
mock_scenario_get.assert_called_once_with(name)
mock_scenario_get.return_value._meta_get.assert_called_once_with(
"default_context"
)
@mock.patch("rally.task.engine.scenario.Scenario.meta")
def test__prepare_context_with_existing_users(self, mock_scenario_meta):
mock_scenario_meta.return_value = {}
@mock.patch("rally.task.engine.scenario.Scenario.get")
def test__prepare_context_with_existing_users(self, mock_scenario_get):
mock_scenario_get.return_value._meta_get.return_value = {}
task = mock.MagicMock()
name = "a.benchmark"
context = {"b": 3, "c": 4}
@ -380,7 +388,9 @@ class BenchmarkEngineTestCase(test.TestCase):
"config": expected_context
}
self.assertEqual(result, expected_result)
mock_scenario_meta.assert_called_once_with(name, "context")
mock_scenario_get.assert_called_once_with(name)
mock_scenario_get.return_value._meta_get.assert_called_once_with(
"default_context")
class ResultConsumerTestCase(test.TestCase):

View File

@ -186,7 +186,7 @@ class ScenarioRunnerTestCase(test.TestCase):
self.assertEqual(list(runner_obj.result_queue), [])
cls_name, method_name = scenario_name.split(".", 1)
cls = scenario.Scenario.get_by_name(cls_name)
cls = scenario.Scenario.get(scenario_name)._meta_get("cls_ref")
expected_config_kwargs = {"image": 1, "flavor": 1}
runner_obj._run_scenario.assert_called_once_with(

View File

@ -20,7 +20,6 @@ import six
from rally import consts
from rally import exceptions
from rally.plugins.common.scenarios.dummy import dummy
from rally.task import context
from rally.task import scenario
from rally.task import validation
@ -28,39 +27,47 @@ from tests.unit import fakes
from tests.unit import test
class ScenarioConfigureTestCase(test.TestCase):
def test_configure(self):
@scenario.configure("test_configure", "testing")
def some_func():
pass
self.assertEqual("test_configure", some_func.get_name())
self.assertEqual("testing", some_func.get_namespace())
some_func.unregister()
def test_configure_default_name(self):
@scenario.configure(namespace="testing", context={"any": 42})
def some_func():
pass
self.assertIsNone(some_func._meta_get("name"))
self.assertEqual("testing", some_func.get_namespace())
self.assertEqual({"any": 42}, some_func._meta_get("default_context"))
some_func.unregister()
def test_configure_cls(self):
class ScenarioPluginCls(scenario.Scenario):
@scenario.configure(namespace="any", context={"any": 43})
def some(self):
pass
self.assertEqual("ScenarioPluginCls.some",
ScenarioPluginCls.some.get_name())
self.assertEqual("any", ScenarioPluginCls.some.get_namespace())
self.assertEqual({"any": 43},
ScenarioPluginCls.some._meta_get("default_context"))
ScenarioPluginCls.some.unregister()
class ScenarioTestCase(test.TestCase):
def test_get_by_name(self):
self.assertEqual(dummy.Dummy, scenario.Scenario.get_by_name("Dummy"))
def test_get_by_name_not_found(self):
self.assertRaises(exceptions.NoSuchScenario,
scenario.Scenario.get_by_name,
"non existing scenario")
def test_get_scenario_by_name(self):
scenario_method = scenario.Scenario.get_scenario_by_name("Dummy.dummy")
self.assertEqual(dummy.Dummy.dummy, scenario_method)
def test_get_scenario_by_name_shortened(self):
scenario_method = scenario.Scenario.get_scenario_by_name("dummy")
self.assertEqual(dummy.Dummy.dummy, scenario_method)
def test_get_scenario_by_name_shortened_not_found(self):
self.assertRaises(exceptions.NoSuchScenario,
scenario.Scenario.get_scenario_by_name,
"dumy")
def test_get_scenario_by_name_bad_group_name(self):
self.assertRaises(exceptions.NoSuchScenario,
scenario.Scenario.get_scenario_by_name,
"Dumy.dummy")
def test_get_scenario_by_name_bad_scenario_name(self):
self.assertRaises(exceptions.NoSuchScenario,
scenario.Scenario.get_scenario_by_name,
"Dummy.dumy")
def test__validate_helper(self):
validators = [
mock.MagicMock(return_value=validation.ValidationResult(True)),
@ -98,120 +105,78 @@ class ScenarioTestCase(test.TestCase):
scenario.Scenario._validate_helper,
validators, clients, args, "fake_uuid")
@mock.patch("rally.task.scenario.Scenario.get_by_name")
def test_validate__no_validators(self, mock_scenario_get_by_name):
@mock.patch("rally.task.scenario.Scenario.get")
def test_validate__no_validators(self, mock_scenario_get):
class FakeScenario(fakes.FakeScenario):
pass
class Testing(fakes.FakeScenario):
FakeScenario.do_it = mock.MagicMock()
FakeScenario.do_it.validators = []
mock_scenario_get_by_name.return_value = FakeScenario
@scenario.configure()
def validate__no_validators(self):
pass
scenario.Scenario.validate("FakeScenario.do_it", {"a": 1, "b": 2})
mock_scenario_get_by_name.assert_called_once_with("FakeScenario")
mock_scenario_get.return_value = Testing.validate__no_validators
scenario.Scenario.validate("Testing.validate__no_validators",
{"a": 1, "b": 2})
mock_scenario_get.assert_called_once_with(
"Testing.validate__no_validators")
Testing.validate__no_validators.unregister()
@mock.patch("rally.task.scenario.Scenario._validate_helper")
@mock.patch("rally.task.scenario.Scenario.get_by_name")
def test_validate__admin_validators(self, mock_scenario_get_by_name,
@mock.patch("rally.task.scenario.Scenario.get")
def test_validate__admin_validators(self, mock_scenario_get,
mock_scenario__validate_helper):
class FakeScenario(fakes.FakeScenario):
pass
class Testing(fakes.FakeScenario):
FakeScenario.do_it = mock.MagicMock()
mock_scenario_get_by_name.return_value = FakeScenario
@scenario.configure(namespace="testing")
def validate_admin_validators(self):
pass
mock_scenario_get.return_value = Testing.validate_admin_validators
validators = [mock.MagicMock(), mock.MagicMock()]
for validator in validators:
validator.permission = consts.EndpointPermission.ADMIN
FakeScenario.do_it.validators = validators
Testing.validate_admin_validators._meta_set(
"validators", validators)
deployment = mock.MagicMock()
args = {"a": 1, "b": 2}
scenario.Scenario.validate(
"FakeScenario.do_it", args, admin="admin", deployment=deployment)
scenario.Scenario.validate("Testing.validate_admin_validators",
args, admin="admin", deployment=deployment)
mock_scenario__validate_helper.assert_called_once_with(
validators, "admin", args, deployment)
Testing.validate_admin_validators.unregister()
@mock.patch("rally.task.scenario.Scenario._validate_helper")
@mock.patch("rally.task.scenario.Scenario.get_by_name")
def test_validate_user_validators(self, mock_scenario_get_by_name,
@mock.patch("rally.task.scenario.Scenario.get")
def test_validate_user_validators(self, mock_scenario_get,
mock_scenario__validate_helper):
class FakeScenario(fakes.FakeScenario):
pass
class Testing(fakes.FakeScenario):
FakeScenario.do_it = mock.MagicMock()
mock_scenario_get_by_name.return_value = FakeScenario
@scenario.configure()
def validate_user_validators(self):
pass
mock_scenario_get.return_value = Testing.validate_user_validators
validators = [mock.MagicMock(), mock.MagicMock()]
for validator in validators:
validator.permission = consts.EndpointPermission.USER
FakeScenario.do_it.validators = validators
Testing.validate_user_validators._meta_set("validators", validators)
args = {"a": 1, "b": 2}
scenario.Scenario.validate(
"FakeScenario.do_it", args, users=["u1", "u2"])
"Testing.validate_user_validators", args, users=["u1", "u2"])
mock_scenario__validate_helper.assert_has_calls([
mock.call(validators, "u1", args, None),
mock.call(validators, "u2", args, None)
])
def test_meta_string_returns_non_empty_list(self):
class MyFakeScenario(fakes.FakeScenario):
pass
attr_name = "preprocessors"
preprocessors = [mock.MagicMock(), mock.MagicMock()]
MyFakeScenario.do_it.__dict__[attr_name] = preprocessors
inst = MyFakeScenario()
self.assertEqual(inst.meta(cls="MyFakeScenario.do_it",
attr_name=attr_name), preprocessors)
def test_meta_class_returns_non_empty_list(self):
class MyFakeScenario(fakes.FakeScenario):
pass
attr_name = "preprocessors"
preprocessors = [mock.MagicMock(), mock.MagicMock()]
MyFakeScenario.do_it.__dict__[attr_name] = preprocessors
inst = MyFakeScenario()
self.assertEqual(inst.meta(cls=fakes.FakeScenario,
method_name="do_it",
attr_name=attr_name), preprocessors)
def test_meta_string_returns_empty_list(self):
empty_list = []
inst = fakes.FakeScenario()
self.assertEqual(inst.meta(cls="FakeScenario.do_it",
attr_name="foo", default=empty_list),
empty_list)
def test_meta_class_returns_empty_list(self):
empty_list = []
inst = fakes.FakeScenario()
self.assertEqual(inst.meta(cls=fakes.FakeScenario,
method_name="do_it", attr_name="foo",
default=empty_list),
empty_list)
def test_is_scenario_success(self):
self.assertTrue(scenario.Scenario.is_scenario(dummy.Dummy(), "dummy"))
def test_is_scenario_not_scenario(self):
self.assertFalse(scenario.Scenario.is_scenario(dummy.Dummy(),
"_random_fail_emitter"))
def test_is_scenario_non_existing(self):
self.assertFalse(scenario.Scenario.is_scenario(dummy.Dummy(),
"non_existing"))
Testing.validate_user_validators.unregister()
def test_sleep_between_invalid_args(self):
self.assertRaises(exceptions.InvalidArgumentsException,
@ -246,19 +211,10 @@ class ScenarioTestCase(test.TestCase):
self.assertEqual(scenario_inst.idle_duration(),
mock_uniform.return_value)
def test_context(self):
ctx = mock.MagicMock()
self.assertEqual(ctx, scenario.Scenario(context=ctx).context)
def test_scenario_context_are_valid(self):
scenarios = scenario.Scenario.list_benchmark_scenarios()
for name in scenarios:
cls_name, method_name = name.split(".", 1)
cls = scenario.Scenario.get_by_name(cls_name)
ctx = getattr(cls, method_name).context
for s in scenario.Scenario.get_all():
try:
context.ContextManager.validate(ctx)
context.ContextManager.validate(s._meta_get("default_context"))
except Exception:
print(traceback.format_exc())
self.assertTrue(False,

View File

@ -334,11 +334,12 @@ class NeutronNetworkResourceTypeTestCase(test.TestCase):
class PreprocessTestCase(test.TestCase):
@mock.patch("rally.task.types.scenario.Scenario.meta")
@mock.patch("rally.task.types.scenario.Scenario.get")
@mock.patch("rally.task.types.osclients")
def test_preprocess(self, mock_osclients, mock_scenario_meta):
cls = "some_class"
method_name = "method_name"
def test_preprocess(self, mock_osclients, mock_scenario_get):
name = "some_plugin"
context = {
"a": 1,
"b": 2,
@ -352,11 +353,14 @@ class PreprocessTestCase(test.TestCase):
def transform(cls, clients, resource_config):
return resource_config * 2
mock_scenario_meta.return_value = {"a": Preprocessor}
result = types.preprocess(cls, method_name, context, args)
mock_scenario_meta.assert_called_once_with(
cls, default={}, method_name=method_name,
attr_name="preprocessors")
mock_scenario_get.return_value._meta_get.return_value = {
"a": Preprocessor
}
result = types.preprocess(name, context, args)
mock_scenario_get.assert_called_once_with(name)
mock_scenario_get.return_value._meta_get.assert_called_once_with(
"preprocessors", default={})
mock_osclients.Clients.assert_called_once_with(
context["admin"]["endpoint"])
self.assertEqual({"a": 20, "b": 20}, result)

View File

@ -21,6 +21,7 @@ import mock
from novaclient import exceptions as nova_exc
import six
from rally.common.plugin import plugin
from rally import consts
from rally import exceptions
import rally.osclients
@ -34,41 +35,25 @@ MODULE = "rally.task.validation."
class ValidationUtilsTestCase(test.TestCase):
def _get_scenario_validators(self, func_, scenario_, reset=True):
"""Unwrap scenario validators created by validation.validator()."""
if reset:
if hasattr(scenario_, "validators"):
del scenario_.validators
scenario = validation.validator(func_)()(scenario_)
return scenario.validators
def test_validator(self):
failure = validation.ValidationResult(False)
func = lambda *args, **kv: kv
scenario = lambda: None
@plugin.from_func()
def scenario():
pass
# Check arguments passed to validator
wrap = validation.validator(func)
wrap_args = ["foo", "bar"]
wrap_kwargs = {"foo": "spam"}
wrap_scenario = wrap(*wrap_args, **wrap_kwargs)
wrap_validator = wrap_scenario(scenario)
validators = wrap_validator.validators
self.assertEqual(1, len(validators))
validator, = validators
self.assertEqual(wrap_kwargs, validator(None, None, None))
self.assertEqual(wrap_validator, scenario)
scenario._meta_init()
# Default result
func_success = lambda *a, **kv: None
validator, = self._get_scenario_validators(func_success, scenario)
self.assertTrue(validator(None, None, None).is_valid)
def validator_func(config, clients, deployment, a, b, c, d):
return (config, clients, deployment, a, b, c, d)
# Failure result
func_failure = lambda *a, **kv: failure
validator, = self._get_scenario_validators(func_failure, scenario)
self.assertFalse(validator(None, None, None).is_valid)
validator = validation.validator(validator_func)
self.assertEqual(scenario, validator("a", "b", "c", d=1)(scenario))
self.assertEqual(1, len(scenario._meta_get("validators")))
self.assertEqual(
("conf", "client", "deploy", "a", "b", "c", 1),
scenario._meta_get("validators")[0]("conf", "client", "deploy"))
@ddt.ddt
@ -76,11 +61,14 @@ class ValidatorsTestCase(test.TestCase):
def _unwrap_validator(self, validator, *args, **kwargs):
@validator(*args, **kwargs)
@plugin.from_func()
def func():
pass
return func.validators[0]
func._meta_init()
validator(*args, **kwargs)(func)
return func._meta_get("validators")[0]
def test_number_not_nullable(self):
validator = self._unwrap_validator(validation.number, param_name="n")

View File

@ -42,43 +42,31 @@ class DocstringsTestCase(test.TestCase):
def test_all_scenarios_have_docstrings(self):
ignored_params = ["self", "scenario_obj"]
for scenario_group in discover.itersubclasses(scenario.Scenario):
if scenario_group.__module__.startswith("tests."):
continue
for method in dir(scenario_group):
if scenario.Scenario.is_scenario(scenario_group, method):
scenario_inst = getattr(scenario_group, method)
scenario_name = scenario_group.__name__ + "." + method
self.assertIsNotNone(scenario_inst.__doc__,
"%s doensn't have a docstring." %
scenario_name)
doc = utils.parse_docstring(scenario_inst.__doc__)
short_description = doc["short_description"]
self.assertIsNotNone(short_description,
"Docstring for %s should have "
"at least a one-line description." %
scenario_name)
self.assertFalse(short_description.startswith("Test"),
"One-line description for %s "
"should be declarative and not start "
"with 'Test(s) ...'" % scenario_name)
params_count = scenario_inst.__code__.co_argcount
params = scenario_inst.__code__.co_varnames[:params_count]
documented_params = [p["name"] for p in doc["params"]]
for param in params:
if param not in ignored_params:
self.assertIn(param, documented_params,
"Docstring for %(scenario)s should "
"describe the '%(param)s' parameter "
"in the :param <name>: clause." %
{"scenario": scenario_name,
"param": param})
def test_all_scenario_groups_have_docstrings(self):
for scenario_group in discover.itersubclasses(scenario.Scenario):
self._assert_class_has_docstrings(scenario_group,
long_description=False)
for scenario_inst in scenario.Scenario.get_all():
self.assertIsNotNone(scenario_inst.__doc__,
"%s doensn't have a docstring." %
scenario_inst.get_name())
doc = utils.parse_docstring(scenario_inst.__doc__)
short_description = doc["short_description"]
self.assertIsNotNone(short_description,
"Docstring for %s should have "
"at least a one-line description." %
scenario_inst.get_name())
self.assertFalse(short_description.startswith("Test"),
"One-line description for %s "
"should be declarative and not start "
"with 'Test(s) ...'" % scenario_inst.get_name())
params_count = scenario_inst.__code__.co_argcount
params = scenario_inst.__code__.co_varnames[:params_count]
documented_params = [p["name"] for p in doc["params"]]
for param in params:
if param not in ignored_params:
self.assertIn(param, documented_params,
"Docstring for %(scenario)s should "
"describe the '%(param)s' parameter "
"in the :param <name>: clause." %
{"scenario": scenario_inst.get_name(),
"param": param})
def test_all_deploy_engines_have_docstrings(self):
for deploy_engine in engine.Engine.get_all():