diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index bb7122f1a1..e6d0548a6e 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -37,7 +37,7 @@ Changed
* *path_or_url* plugin follows redirects while validating urls now.
-* *rally task sla-check` fails if there is no data.
+* *rally task sla-check* fails if there is no data.
Deprecated
~~~~~~~~~~
@@ -45,6 +45,30 @@ Deprecated
* Module *rally.common.sshutils* is deprecated. Use *rally.utils.sshutils*
instead.
+* All modules from *rally.plugins.common.contexts* are deprecated. Use
+ *rally.plugins.task.contexts* instead.
+
+* All modules from *rally.plugins.common.exporters* are deprecated. Use
+ *rally.plugins.task.exporters* instead.
+
+* Module *rally.plugins.common.hook.sys_call* is deprecated. Use
+ *rally.plugins.task.hooks.sys_call* instead.
+
+* All modules from *rally.plugins.common.hook.triggers* are deprecated. Use
+ *rally.plugins.task.hook_triggers* instead.
+
+* All modules from *rally.plugins.common.runners* are deprecated. Use
+ *rally.plugins.task.runners* instead.
+
+* All modules from *rally.plugins.common.scenarios* are deprecated. Use
+ *rally.plugins.task.scenarios* instead.
+
+* All modules from *rally.plugins.common.sla* are deprecated. Use
+ *rally.plugins.task.sla* instead.
+
+* All modules from *rally.plugins.common.verification* are deprecated. Use
+ *rally.plugins.verification* instead.
+
Removed
~~~~~~~
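
For downstream consumers, a minimal sketch of what these moves mean in practice (assuming a fresh interpreter and the post-move layout from this patch; ConstantScenarioRunner is one of the relocated classes visible later in this diff):

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        # old location: still importable, but emits a deprecation warning
        from rally.plugins.common.runners import constant as old_constant

    # new location: the canonical home of the same objects
    from rally.plugins.task.runners import constant as new_constant

    assert old_constant.ConstantScenarioRunner is new_constant.ConstantScenarioRunner
    assert any("moved to" in str(w.message) for w in caught)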
diff --git a/rally/common/logging.py b/rally/common/logging.py
index dc4b8df854..f978fb8344 100644
--- a/rally/common/logging.py
+++ b/rally/common/logging.py
@@ -15,6 +15,7 @@
import functools
import traceback
+import warnings
from oslo_log import handlers
from oslo_log import log as oslogging
@@ -331,5 +332,13 @@ def log_deprecated_args(message, rally_version, deprecated_args,
return decorator
+def log_deprecated_module(target, new_module, release):
+ warnings.warn(
+ f"Module `{target}` moved to `{new_module}` since Rally v{release}. "
+        "The import from the old location is deprecated and may be removed "
+        "in future releases."
+ )
+
+
def is_debug():
return CONF.debug or CONF.rally_debug
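
The helper relies on the stdlib warnings machinery; since no category is passed, warnings.warn emits a UserWarning. A hypothetical call using the message format added above:

    from rally.common import logging

    logging.log_deprecated_module(
        target="rally.common.sshutils",
        new_module="rally.utils.sshutils",
        release="3.0.0",
    )
    # -> UserWarning: Module `rally.common.sshutils` moved to
    #    `rally.utils.sshutils` since Rally v3.0.0. The import from the
    #    old location is deprecated and may be removed in future releases.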
diff --git a/rally/common/sshutils.py b/rally/common/sshutils.py
index 605e4f475e..3951a89385 100644
--- a/rally/common/sshutils.py
+++ b/rally/common/sshutils.py
@@ -13,11 +13,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-from rally.utils.sshutils import * # noqa
+from rally.utils.sshutils import * # noqa: F401,F403
+from rally.utils import sshutils as _new
+# import it last to make sure we use the right module
from rally.common import logging
-logging.getLogger(__name__).warning(
- f"Module {__name__} moved to rally.utils.sshutils. "
- f"Please correct your import."
+
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
)
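
Code that must keep the old import path for now can silence the warning explicitly; a sketch using only stdlib calls (the regex matches the message format introduced in this patch):

    import warnings

    warnings.filterwarnings(
        "ignore",
        message=r"Module `rally\.common\.sshutils` moved to .*",
    )

    from rally.common import sshutils  # noqa: E402  -- no warning now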
diff --git a/rally/plugins/__init__.py b/rally/plugins/__init__.py
index 4e765a105d..5ea7975199 100644
--- a/rally/plugins/__init__.py
+++ b/rally/plugins/__init__.py
@@ -31,7 +31,15 @@ def load():
opts.register()
- discover.import_modules_from_package("rally.plugins.common")
+ # NOTE(andreykurilin): `rally.plugins.common` includes deprecated
+    #   modules. Once they are removed, the direct import of
+    #   validators should be replaced by
+ #
+ # discover.import_modules_from_package("rally.plugins.common")
+ from rally.plugins.common import validators # noqa: F401
+
+ discover.import_modules_from_package("rally.plugins.task")
+ discover.import_modules_from_package("rally.plugins.verification")
packages = discover.find_packages_by_entry_point()
for package in packages:
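
discover.import_modules_from_package is Rally's own helper and its body is not part of this patch; conceptually it does something like the following (an assumption-laden sketch, not Rally's actual code):

    import importlib
    import pkgutil

    def import_modules_from_package(package_name):
        """Import every module under a package so that plugin
        decorators run and register their plugins as a side effect."""
        package = importlib.import_module(package_name)
        for _, name, _ in pkgutil.walk_packages(
                package.__path__, prefix=package_name + "."):
            importlib.import_module(name)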
diff --git a/rally/plugins/common/exporters/elastic/client.py b/rally/plugins/common/exporters/elastic/client.py
index 36ecc676ac..0c798a7673 100755
--- a/rally/plugins/common/exporters/elastic/client.py
+++ b/rally/plugins/common/exporters/elastic/client.py
@@ -12,148 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import copy
-
-import requests
+from rally.plugins.task.exporters.elastic.client import * # noqa: F401,F403
+from rally.plugins.task.exporters.elastic import client as _new
+# import it last to make sure we use the right module
from rally.common import logging
-from rally import exceptions
-
-LOG = logging.getLogger(__name__)
-class ElasticSearchClient(object):
- """The helper class for communication with ElasticSearch 2.*, 5.*, 6.*"""
-
- # a number of documents to push to the cluster at once.
- CHUNK_LENGTH = 10000
-
- def __init__(self, url):
- self._url = url.rstrip("/") if url else "http://localhost:9200"
- self._version = None
-
- @staticmethod
- def _check_response(resp, action=None):
- if resp.status_code in (200, 201):
- return
- # it is an error. let's try to find the reason
- reason = None
- try:
- data = resp.json()
- except ValueError:
- # it is ok
- pass
- else:
- if "error" in data:
- if isinstance(data["error"], dict):
- reason = data["error"].get("reason", "")
- else:
- reason = data["error"]
- reason = reason or resp.text or "n/a"
- action = action or "connect to"
- raise exceptions.RallyException(
- "[HTTP %s] Failed to %s ElasticSearch cluster: %s" %
- (resp.status_code, action, reason))
-
- def version(self):
- """Get version of the ElasticSearch cluster."""
- if self._version is None:
- self.info()
- return self._version
-
- def info(self):
- """Retrieve info about the ElasticSearch cluster."""
- resp = requests.get(self._url)
- self._check_response(resp)
- err_msg = "Failed to retrieve info about the ElasticSearch cluster: %s"
- try:
- data = resp.json()
- except ValueError:
- LOG.debug("Return data from %s: %s" % (self._url, resp.text))
- raise exceptions.RallyException(
- err_msg % "The return data doesn't look like a json.")
- version = data.get("version", {}).get("number")
- if not version:
- LOG.debug("Return data from %s: %s" % (self._url, resp.text))
- raise exceptions.RallyException(
- err_msg % "Failed to parse the received data.")
- self._version = version
- if self._version.startswith("2"):
- data["version"]["build_date"] = data["version"].pop(
- "build_timestamp")
- return data
-
- def push_documents(self, documents):
- """Push documents to the ElasticSearch cluster using bulk API.
-
- :param documents: a list of documents to push
- """
- LOG.debug("Pushing %s documents by chunks (up to %s documents at once)"
- " to ElasticSearch." %
- # dividing numbers by two, since each documents has 2 lines
- # in `documents` (action and document itself).
- (len(documents) / 2, self.CHUNK_LENGTH / 2))
-
- for pos in range(0, len(documents), self.CHUNK_LENGTH):
- data = "\n".join(documents[pos:pos + self.CHUNK_LENGTH]) + "\n"
-
- raw_resp = requests.post(
- self._url + "/_bulk", data=data,
- headers={"Content-Type": "application/x-ndjson"}
- )
- self._check_response(raw_resp, action="push documents to")
-
- LOG.debug("Successfully pushed %s documents." %
- len(raw_resp.json()["items"]))
-
- def list_indices(self):
- """List all indices."""
- resp = requests.get(self._url + "/_cat/indices?v")
- self._check_response(resp, "list the indices at")
-
- return resp.text.rstrip().split(" ")
-
- def create_index(self, name, doc_type, properties):
- """Create an index.
-
- There are two very different ways to search strings. You can either
- search whole values, that we often refer to as keyword search, or
- individual tokens, that we usually refer to as full-text search.
- In ElasticSearch 2.x `string` data type is used for these cases whereas
- ElasticSearch 5.0 the `string` data type was replaced by two new types:
- `keyword` and `text`. Since it is hard to predict the destiny of
- `string` data type and support of 2 formats of input data, the
- properties should be transmitted in ElasticSearch 5.x format.
- """
- if self.version().startswith("2."):
- properties = copy.deepcopy(properties)
- for spec in properties.values():
- if spec.get("type", None) == "text":
- spec["type"] = "string"
- elif spec.get("type", None) == "keyword":
- spec["type"] = "string"
- spec["index"] = "not_analyzed"
-
- resp = requests.put(
- self._url + "/%s" % name,
- json={"mappings": {doc_type: {"properties": properties}}})
- self._check_response(resp, "create index at")
-
- def check_document(self, index, doc_id, doc_type="data"):
- """Check for the existence of a document.
-
- :param index: The index of a document
- :param doc_id: The ID of a document
- :param doc_type: The type of a document (Defaults to data)
- """
- resp = requests.head("%(url)s/%(index)s/%(type)s/%(id)s" %
- {"url": self._url,
- "index": index,
- "type": doc_type,
- "id": doc_id})
- if resp.status_code == 200:
- return True
- elif resp.status_code == 404:
- return False
- else:
- self._check_response(resp, "check the index at")
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
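
For context on the relocated client: as the chunking comment in the removed code notes, each document occupies two NDJSON lines in the bulk payload (an action line plus the document itself), which is why the log message halves the counts. An illustrative payload:

    import json

    documents = [
        json.dumps({"index": {"_index": "rally_task_data_v1",
                              "_type": "data", "_id": "task-uuid"}}),
        json.dumps({"task_uuid": "task-uuid", "status": "finished"}),
    ]
    # POSTed to <url>/_bulk with Content-Type: application/x-ndjson;
    # the trailing newline is required by the bulk endpoint.
    body = "\n".join(documents) + "\n"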
diff --git a/rally/plugins/common/exporters/elastic/exporter.py b/rally/plugins/common/exporters/elastic/exporter.py
index 6acdcf7934..dad7663681 100755
--- a/rally/plugins/common/exporters/elastic/exporter.py
+++ b/rally/plugins/common/exporters/elastic/exporter.py
@@ -12,375 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import collections
-import datetime as dt
-import itertools
-import json
-import os
+from rally.plugins.task.exporters.elastic.exporter import * # noqa: F401,F403
+from rally.plugins.task.exporters.elastic import exporter as _new
+# import it last to make sure we use the right module
from rally.common import logging
-from rally.common import validation
-from rally import consts
-from rally import exceptions
-from rally.plugins.common.exporters.elastic import client
-from rally.plugins.common.exporters.elastic import flatten
-from rally.task import exporter
-
-LOG = logging.getLogger(__name__)
-@validation.configure("es_exporter_destination")
-class Validator(validation.Validator):
- """Validates the destination for ElasticSearch exporter.
-
- In case when the destination is ElasticSearch cluster, the version of it
- should be 2.* or 5.*
- """
- def validate(self, context, config, plugin_cls, plugin_cfg):
- destination = plugin_cfg["destination"]
- if destination and (not destination.startswith("http://")
- and not destination.startswith("https://")):
- # it is a path to a local file
- return
- es = client.ElasticSearchClient(destination)
- try:
- version = es.version()
- except exceptions.RallyException as e:
- # re-raise a proper exception to hide redundant traceback
- self.fail(e.format_message())
- if not (version.startswith("2.")
- or version.startswith("5.")
- or version.startswith("6.")):
- self.fail("The unsupported version detected %s." % version)
-
-
-@validation.add("es_exporter_destination")
-@exporter.configure("elastic")
-class ElasticSearchExporter(exporter.TaskExporter):
- """Exports task results to the ElasticSearch 2.x, 5.x or 6.x clusters.
-
- The exported data includes:
-
- * Task basic information such as title, description, status,
- deployment uuid, etc.
- See rally_task_v1_data index.
-
- * Workload information such as scenario name and configuration, runner
- type and configuration, time of the start load, success rate, sla
- details in case of errors, etc.
- See rally_workload_v1_data index.
-
- * Separate documents for all atomic actions.
- See rally_atomic_action_data_v1 index.
-
- The destination can be a remote server. In this case specify it like:
-
- https://elastic:changeme@example.com
-
- Or we can dump documents to the file. The destination should look like:
-
- /home/foo/bar.txt
-
- In case of an empty destination, the http://localhost:9200 destination
- will be used.
- """
-
- TASK_INDEX = "rally_task_data_v1"
- WORKLOAD_INDEX = "rally_workload_data_v1"
- AA_INDEX = "rally_atomic_action_data_v1"
- INDEX_SCHEMAS = {
- TASK_INDEX: {
- "task_uuid": {"type": "keyword"},
- "deployment_uuid": {"type": "keyword"},
- "deployment_name": {"type": "keyword"},
- "title": {"type": "text"},
- "description": {"type": "text"},
- "status": {"type": "keyword"},
- "pass_sla": {"type": "boolean"},
- "tags": {"type": "keyword"}
- },
- WORKLOAD_INDEX: {
- "deployment_uuid": {"type": "keyword"},
- "deployment_name": {"type": "keyword"},
- "scenario_name": {"type": "keyword"},
- "scenario_cfg": {"type": "keyword"},
- "description": {"type": "text"},
- "runner_name": {"type": "keyword"},
- "runner_cfg": {"type": "keyword"},
- "contexts": {"type": "keyword"},
- "task_uuid": {"type": "keyword"},
- "subtask_uuid": {"type": "keyword"},
- "started_at": {"type": "date"},
- "load_duration": {"type": "long"},
- "full_duration": {"type": "long"},
- "pass_sla": {"type": "boolean"},
- "success_rate": {"type": "float"},
- "sla_details": {"type": "text"}
- },
- AA_INDEX: {
- "deployment_uuid": {"type": "keyword"},
- "deployment_name": {"type": "keyword"},
- "action_name": {"type": "keyword"},
- "workload_uuid": {"type": "keyword"},
- "scenario_cfg": {"type": "keyword"},
- "contexts": {"type": "keyword"},
- "runner_name": {"type": "keyword"},
- "runner_cfg": {"type": "keyword"},
- "success": {"type": "boolean"},
- "duration": {"type": "float"},
- "started_at": {"type": "date"},
- "finished_at": {"type": "date"},
- "parent": {"type": "keyword"},
- "error": {"type": "keyword"}
- }
- }
-
- def __init__(self, tasks_results, output_destination, api=None):
- super(ElasticSearchExporter, self).__init__(tasks_results,
- output_destination,
- api=api)
- self._report = []
- self._remote = (
- output_destination is None or (
- output_destination.startswith("http://")
- or self.output_destination.startswith("https://")))
- if self._remote:
- self._client = client.ElasticSearchClient(self.output_destination)
-
- def _add_index(self, index, body, doc_id=None, doc_type="data"):
- """Create a document for the specified index with specified id.
-
- :param index: The name of the index
- :param body: The document. Here is the report of (sla,
- scenario, iteration and atomic action)
- :param doc_id: Document ID. Here we use task/subtask/workload uuid
- :param doc_type: The type of document
-
- """
- self._report.append(
- json.dumps(
- # use OrderedDict to make the report more unified
- {"index": collections.OrderedDict([
- ("_index", index),
- ("_type", doc_type),
- ("_id", doc_id)])},
- sort_keys=False))
- self._report.append(json.dumps(body))
-
- def _ensure_indices(self):
- """Check available indices and create require ones if they missed."""
- available_index = set(self._client.list_indices())
- missed_index = {self.TASK_INDEX, self.WORKLOAD_INDEX,
- self.AA_INDEX} - available_index
- for index in missed_index:
- LOG.debug("Creating '%s' index." % index)
- self._client.create_index(index, doc_type="data",
- properties=self.INDEX_SCHEMAS[index])
-
- @staticmethod
- def _make_action_report(name, workload_id, workload, duration,
- started_at, finished_at, parent, error):
- # NOTE(andreykurilin): actually, this method just creates a dict object
- # but we need to have the same format at two places, so the template
- # transformed into a method.
- parent = parent[0] if parent else None
- return {
- "deployment_uuid": workload["deployment_uuid"],
- "deployment_name": workload["deployment_name"],
- "action_name": name,
- "workload_uuid": workload_id,
- "scenario_cfg": workload["scenario_cfg"],
- "contexts": workload["contexts"],
- "runner_name": workload["runner_name"],
- "runner_cfg": workload["runner_cfg"],
- "success": not bool(error),
- "duration": duration,
- "started_at": started_at,
- "finished_at": finished_at,
- "parent": parent,
- "error": error
- }
-
- def _process_atomic_actions(self, itr, workload, workload_id,
- atomic_actions=None, _parent=None, _depth=0,
- _cache=None):
- """Process atomic actions of an iteration
-
- :param atomic_actions: A list with an atomic actions
- :param itr: The iteration data
- :param workload: The workload report
- :param workload_id: The workload UUID
- :param _parent: An inner parameter which is used for pointing to the
- parent atomic action
- :param _depth: An inner parameter which is used to mark the level of
- depth while parsing atomic action children
- :param _cache: An inner parameter which is used to avoid conflicts in
- IDs of atomic actions of a single iteration.
- """
-
- if _depth >= 3:
- return
- cache = _cache or {}
-
- if atomic_actions is None:
- atomic_actions = itr["atomic_actions"]
-
- act_id_tmpl = "%(itr_id)s_action_%(action_name)s_%(num)s"
- for i, action in enumerate(atomic_actions, 1):
- cache.setdefault(action["name"], 0)
- act_id = act_id_tmpl % {
- "itr_id": itr["id"],
- "action_name": action["name"],
- "num": cache[action["name"]]}
- cache[action["name"]] += 1
-
- started_at = dt.datetime.utcfromtimestamp(action["started_at"])
- finished_at = dt.datetime.utcfromtimestamp(action["finished_at"])
- started_at = started_at.strftime(consts.TimeFormat.ISO8601)
- finished_at = finished_at.strftime(consts.TimeFormat.ISO8601)
-
- action_report = self._make_action_report(
- name=action["name"],
- workload_id=workload_id,
- workload=workload,
- duration=(action["finished_at"] - action["started_at"]),
- started_at=started_at,
- finished_at=finished_at,
- parent=_parent,
- error=(itr["error"] if action.get("failed", False) else None)
- )
-
- self._add_index(self.AA_INDEX, action_report,
- doc_id=act_id)
-
- self._process_atomic_actions(
- atomic_actions=action["children"],
- itr=itr,
- workload=workload,
- workload_id=workload_id,
- _parent=(act_id, action_report),
- _depth=(_depth + 1),
- _cache=cache)
-
- if itr["error"] and (
- # the case when it is a top level of the scenario and the
- # first fails the item which is not wrapped by AtomicTimer
- (not _parent and not atomic_actions)
- # the case when it is a top level of the scenario and and
- # the item fails after some atomic actions completed
- or (not _parent and atomic_actions
- and not atomic_actions[-1].get("failed", False))):
- act_id = act_id_tmpl % {
- "itr_id": itr["id"],
- "action_name": "no-name-action",
- "num": 0}
-
- # Since the action had not be wrapped by AtomicTimer, we cannot
- # make any assumption about it's duration (start_time) so let's use
- # finished_at timestamp of iteration with 0 duration
- timestamp = (itr["timestamp"] + itr["duration"]
- + itr["idle_duration"])
- timestamp = dt.datetime.utcfromtimestamp(timestamp)
- timestamp = timestamp.strftime(consts.TimeFormat.ISO8601)
- action_report = self._make_action_report(
- name="no-name-action",
- workload_id=workload_id,
- workload=workload,
- duration=0,
- started_at=timestamp,
- finished_at=timestamp,
- parent=_parent,
- error=itr["error"]
- )
- self._add_index(self.AA_INDEX, action_report, doc_id=act_id)
-
- def generate(self):
- if self._remote:
- self._ensure_indices()
-
- for task in self.tasks_results:
- if self._remote:
- if self._client.check_document(self.TASK_INDEX, task["uuid"]):
- raise exceptions.RallyException(
- "Failed to push the task %s to the ElasticSearch "
- "cluster. The document with such UUID already exists" %
- task["uuid"])
-
- task_report = {
- "task_uuid": task["uuid"],
- "deployment_uuid": task["env_uuid"],
- "deployment_name": task["env_name"],
- "title": task["title"],
- "description": task["description"],
- "status": task["status"],
- "pass_sla": task["pass_sla"],
- "tags": task["tags"]
- }
- self._add_index(self.TASK_INDEX, task_report,
- doc_id=task["uuid"])
-
- # NOTE(andreykurilin): The subtasks do not have much logic now, so
- # there is no reason to save the info about them.
- for workload in itertools.chain(
- *[s["workloads"] for s in task["subtasks"]]):
-
- durations = workload["statistics"]["durations"]
- success_rate = durations["total"]["data"]["success"]
- if success_rate == "n/a":
- success_rate = 0.0
- else:
- # cut the % char and transform to the float value
- success_rate = float(success_rate[:-1]) / 100.0
-
- started_at = workload["start_time"]
- if started_at:
- started_at = dt.datetime.utcfromtimestamp(started_at)
- started_at = started_at.strftime(consts.TimeFormat.ISO8601)
- workload_report = {
- "task_uuid": workload["task_uuid"],
- "subtask_uuid": workload["subtask_uuid"],
- "deployment_uuid": task["env_uuid"],
- "deployment_name": task["env_name"],
- "scenario_name": workload["name"],
- "scenario_cfg": flatten.transform(workload["args"]),
- "description": workload["description"],
- "runner_name": workload["runner_type"],
- "runner_cfg": flatten.transform(workload["runner"]),
- "contexts": flatten.transform(workload["contexts"]),
- "started_at": started_at,
- "load_duration": workload["load_duration"],
- "full_duration": workload["full_duration"],
- "pass_sla": workload["pass_sla"],
- "success_rate": success_rate,
- "sla_details": [s["detail"]
- for s in workload["sla_results"]["sla"]
- if not s["success"]]}
-
- # do we need to store hooks ?!
- self._add_index(self.WORKLOAD_INDEX, workload_report,
- doc_id=workload["uuid"])
-
- # Iterations
- for idx, itr in enumerate(workload.get("data", []), 1):
- itr["id"] = "%(uuid)s_iter_%(num)s" % {
- "uuid": workload["uuid"],
- "num": str(idx)}
-
- self._process_atomic_actions(
- itr=itr,
- workload=workload_report,
- workload_id=workload["uuid"])
- if self._remote:
- LOG.debug("The info of ElasticSearch cluster to which the results "
- "will be exported: %s" % self._client.info())
- self._client.push_documents(self._report)
-
- msg = ("Successfully exported results to ElasticSearch at url "
- "'%s'" % self.output_destination)
- return {"print": msg}
- else:
- # a new line is required in the end of the file.
- report = "\n".join(self._report) + "\n"
- return {"files": {self.output_destination: report},
- "open": "file://" + os.path.abspath(
- self.output_destination)}
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
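
The removed docstring describes three destination forms; the dispatch logic (now in rally.plugins.task.exporters.elastic.exporter) boils down to this sketch:

    def is_remote(destination):
        # None falls back to http://localhost:9200, i.e. a remote cluster
        return destination is None or destination.startswith(
            ("http://", "https://"))

    assert is_remote(None)
    assert is_remote("https://elastic:changeme@example.com")
    assert not is_remote("/home/foo/bar.txt")  # dump documents to a file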
diff --git a/rally/plugins/common/exporters/elastic/flatten.py b/rally/plugins/common/exporters/elastic/flatten.py
index bbca29c9e6..226ff8af66 100644
--- a/rally/plugins/common/exporters/elastic/flatten.py
+++ b/rally/plugins/common/exporters/elastic/flatten.py
@@ -12,54 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+from rally.plugins.task.exporters.elastic.flatten import * # noqa: F401,F403
+from rally.plugins.task.exporters.elastic import flatten as _new
-def _join_keys(first, second):
- if not second:
- return first
- elif second.startswith("["):
- return "%s%s" % (first, second)
- else:
- return "%s.%s" % (first, second)
+# import it last to make sure we use the right module
+from rally.common import logging
-def _process(obj):
- if isinstance(obj, (str, bytes)):
- yield "", obj
- elif isinstance(obj, dict):
- for first, tmp_value in obj.items():
- for second, value in _process(tmp_value):
- yield _join_keys(first, second), value
- elif isinstance(obj, (list, tuple)):
- for i, tmp_value in enumerate(obj):
- for second, value in _process(tmp_value):
- yield _join_keys("[%s]" % i, second), value
- else:
- try:
- yield "", "%s" % obj
- except Exception:
- raise ValueError("Cannot transform obj of '%s' type to flatten "
- "structure." % type(obj))
-
-
-def transform(obj):
- """Transform object to a flatten structure.
-
- Example:
- IN:
- {"foo": ["xxx", "yyy", {"bar": {"zzz": ["Hello", "World!"]}}]}
- OUTPUT:
- [
- "foo[0]=xxx",
- "foo[1]=yyy",
- "foo[2].bar.zzz[0]=Hello",
- "foo[2].bar.zzz[1]=World!"
- ]
-
- """
- result = []
- for key, value in _process(obj):
- if key:
- result.append("%s=%s" % (key, value))
- else:
- result.append(value)
- return sorted(result)
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
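
The removed transform() docstring carries its own example; as a quick check against the relocated module, it should behave identically:

    from rally.plugins.task.exporters.elastic import flatten

    assert flatten.transform(
        {"foo": ["xxx", "yyy", {"bar": {"zzz": ["Hello", "World!"]}}]}
    ) == [
        "foo[0]=xxx",
        "foo[1]=yyy",
        "foo[2].bar.zzz[0]=Hello",
        "foo[2].bar.zzz[1]=World!",
    ]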
diff --git a/rally/plugins/common/exporters/html.py b/rally/plugins/common/exporters/html.py
index 02c7184558..cbb853f47c 100644
--- a/rally/plugins/common/exporters/html.py
+++ b/rally/plugins/common/exporters/html.py
@@ -12,45 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import itertools
-import os
+from rally.plugins.task.exporters.html import * # noqa: F401,F403
+from rally.plugins.task.exporters import html as _new
-from rally.task import exporter
-from rally.task.processing import plot
+# import it last to make sure we use the right module
+from rally.common import logging
-@exporter.configure("html")
-class HTMLExporter(exporter.TaskExporter):
- """Generates task report in HTML format."""
- INCLUDE_LIBS = False
-
- def _generate_results(self):
- results = []
- processed_names = {}
- for task in self.tasks_results:
- for workload in itertools.chain(
- *[s["workloads"] for s in task["subtasks"]]):
- if workload["name"] in processed_names:
- processed_names[workload["name"]] += 1
- workload["position"] = processed_names[workload["name"]]
- else:
- processed_names[workload["name"]] = 0
- results.append(task)
- return results
-
- def generate(self):
- report = plot.plot(self._generate_results(),
- include_libs=self.INCLUDE_LIBS)
-
- if self.output_destination:
- return {"files": {self.output_destination: report},
- "open": "file://" + os.path.abspath(
- self.output_destination)}
- else:
- return {"print": report}
-
-
-@exporter.configure("html-static")
-class HTMLStaticExporter(HTMLExporter):
- """Generates task report in HTML format with embedded JS/CSS."""
- INCLUDE_LIBS = True
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
diff --git a/rally/plugins/common/exporters/json_exporter.py b/rally/plugins/common/exporters/json_exporter.py
index ad1bfb5bf4..6fb4ed7854 100644
--- a/rally/plugins/common/exporters/json_exporter.py
+++ b/rally/plugins/common/exporters/json_exporter.py
@@ -12,112 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import collections
-import datetime as dt
-import json
+from rally.plugins.task.exporters.json_exporter import * # noqa: F401,F403
+from rally.plugins.task.exporters import json_exporter as _new
-from rally.common import version as rally_version
-from rally.task import exporter
-
-TIMEFORMAT = "%Y-%m-%dT%H:%M:%S"
+# import it last to make sure we use the right module
+from rally.common import logging
-@exporter.configure("json")
-class JSONExporter(exporter.TaskExporter):
- """Generates task report in JSON format."""
-
- # Revisions:
- # 1.0 - the json report v1
- # 1.1 - add `contexts_results` key with contexts execution results of
- # workloads.
- # 1.2 - add `env_uuid` and `env_uuid` which represent environment name
- # and UUID where task was executed
- REVISION = "1.2"
-
- def _generate_tasks(self):
- tasks = []
- for task in self.tasks_results:
- subtasks = []
- for subtask in task["subtasks"]:
- workloads = []
- for workload in subtask["workloads"]:
- hooks = [{
- "config": {"action": dict([h["config"]["action"]]),
- "trigger": dict([h["config"]["trigger"]]),
- "description": h["config"]["description"]},
- "results": h["results"],
- "summary": h["summary"], } for h in workload["hooks"]]
- workloads.append(
- collections.OrderedDict(
- [("uuid", workload["uuid"]),
- ("description", workload["description"]),
- ("runner", {
- workload["runner_type"]: workload["runner"]}),
- ("hooks", hooks),
- ("scenario", {
- workload["name"]: workload["args"]}),
- ("min_duration", workload["min_duration"]),
- ("max_duration", workload["max_duration"]),
- ("start_time", workload["start_time"]),
- ("load_duration", workload["load_duration"]),
- ("full_duration", workload["full_duration"]),
- ("statistics", workload["statistics"]),
- ("data", workload["data"]),
- ("failed_iteration_count",
- workload["failed_iteration_count"]),
- ("total_iteration_count",
- workload["total_iteration_count"]),
- ("created_at", workload["created_at"]),
- ("updated_at", workload["updated_at"]),
- ("contexts", workload["contexts"]),
- ("contexts_results",
- workload["contexts_results"]),
- ("position", workload["position"]),
- ("pass_sla", workload["pass_sla"]),
- ("sla_results", workload["sla_results"]),
- ("sla", workload["sla"])]
- )
- )
- subtasks.append(
- collections.OrderedDict(
- [("uuid", subtask["uuid"]),
- ("title", subtask["title"]),
- ("description", subtask["description"]),
- ("status", subtask["status"]),
- ("created_at", subtask["created_at"]),
- ("updated_at", subtask["updated_at"]),
- ("sla", subtask["sla"]),
- ("workloads", workloads)]
- )
- )
- tasks.append(
- collections.OrderedDict(
- [("uuid", task["uuid"]),
- ("title", task["title"]),
- ("description", task["description"]),
- ("status", task["status"]),
- ("tags", task["tags"]),
- ("env_uuid", task.get("env_uuid", "n\a")),
- ("env_name", task.get("env_name", "n\a")),
- ("created_at", task["created_at"]),
- ("updated_at", task["updated_at"]),
- ("pass_sla", task["pass_sla"]),
- ("subtasks", subtasks)]
- )
- )
- return tasks
-
- def generate(self):
- results = {"info": {"rally_version": rally_version.version_string(),
- "generated_at": dt.datetime.strftime(
- dt.datetime.utcnow(), TIMEFORMAT),
- "format_version": self.REVISION},
- "tasks": self._generate_tasks()}
-
- results = json.dumps(results, sort_keys=False, indent=4)
-
- if self.output_destination:
- return {"files": {self.output_destination: results},
- "open": "file://" + self.output_destination}
- else:
- return {"print": results}
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
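
For reference, the envelope the relocated JSON exporter emits (shape taken from the removed generate() above; the values here are placeholders):

    report = {
        "info": {
            "rally_version": "<rally version string>",
            "generated_at": "<UTC time, %Y-%m-%dT%H:%M:%S>",
            "format_version": "1.2",
        },
        "tasks": [
            # one entry per task: uuid, title, description, status, tags,
            # env_uuid, env_name, created_at, updated_at, pass_sla and
            # nested subtasks -> workloads
        ],
    }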
diff --git a/rally/plugins/common/exporters/junit.py b/rally/plugins/common/exporters/junit.py
index 99b09f672a..57cfd4117b 100644
--- a/rally/plugins/common/exporters/junit.py
+++ b/rally/plugins/common/exporters/junit.py
@@ -12,85 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import datetime as dt
-import itertools
-import os
+from rally.plugins.task.exporters.junit import * # noqa: F401,F403
+from rally.plugins.task.exporters import junit as _new
-from rally.common.io import junit
-from rally.task import exporter
+# import it last to make sure we use the right module
+from rally.common import logging
-@exporter.configure("junit-xml")
-class JUnitXMLExporter(exporter.TaskExporter):
- """Generates task report in JUnit-XML format.
-
- An example of the report (All dates, numbers, names appearing in this
- example are fictitious. Any resemblance to real things is purely
- coincidental):
-
- .. code-block:: xml
-
-      <testsuites>
-        <testsuite id="task-uuid" time="75.00"
-                   timestamp="2017-06-04T05:14:00">
-          <testcase id="workload-uuid" classname="NovaServers"
-                    name="boot_and_delete_server" time="45.03"
-                    timestamp="2017-06-04T05:15:15">
-            <failure>ooops</failure>
-          </testcase>
-        </testsuite>
-      </testsuites>
-
- """
-
- def generate(self):
- root = junit.JUnitXML()
-
- for t in self.tasks_results:
- created_at = dt.datetime.strptime(t["created_at"],
- "%Y-%m-%dT%H:%M:%S")
- updated_at = dt.datetime.strptime(t["updated_at"],
- "%Y-%m-%dT%H:%M:%S")
- test_suite = root.add_test_suite(
- id=t["uuid"],
- time="%.2f" % (updated_at - created_at).total_seconds(),
- timestamp=t["created_at"]
- )
- for workload in itertools.chain(
- *[s["workloads"] for s in t["subtasks"]]):
- class_name, name = workload["name"].split(".", 1)
- test_case = test_suite.add_test_case(
- id=workload["uuid"],
- time="%.2f" % workload["full_duration"],
- classname=class_name,
- name=name,
- timestamp=workload["created_at"]
- )
- if not workload["pass_sla"]:
- details = "\n".join(
- [s["detail"]
- for s in workload["sla_results"]["sla"]
- if not s["success"]]
- )
- test_case.mark_as_failed(details)
-
- raw_report = root.to_string()
-
- if self.output_destination:
- return {"files": {self.output_destination: raw_report},
- "open": "file://" + os.path.abspath(
- self.output_destination)}
- else:
- return {"print": raw_report}
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
diff --git a/rally/plugins/common/exporters/trends.py b/rally/plugins/common/exporters/trends.py
index be19c54c8c..86a35b3f86 100644
--- a/rally/plugins/common/exporters/trends.py
+++ b/rally/plugins/common/exporters/trends.py
@@ -1,4 +1,3 @@
-# Copyright 2018: ZTE Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,28 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
+from rally.plugins.task.exporters.trends import * # noqa: F401,F403
+from rally.plugins.task.exporters import trends as _new
-from rally.task import exporter
-from rally.task.processing import plot
+# import it last to make sure we use the right module
+from rally.common import logging
-@exporter.configure("trends-html")
-class TrendsExporter(exporter.TaskExporter):
- """Generates task trends report in HTML format."""
- INCLUDE_LIBS = False
-
- def generate(self):
- report = plot.trends(self.tasks_results, self.INCLUDE_LIBS)
- if self.output_destination:
- return {"files": {self.output_destination: report},
- "open": "file://" + os.path.abspath(
- self.output_destination)}
- else:
- return {"print": report}
-
-
-@exporter.configure("trends-html-static")
-class TrendsStaticExport(TrendsExporter):
- """Generates task trends report in HTML format with embedded JS/CSS."""
- INCLUDE_LIBS = True
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
diff --git a/rally/plugins/common/hook/sys_call.py b/rally/plugins/common/hook/sys_call.py
index a7f5f5f89d..d19cbba2e7 100644
--- a/rally/plugins/common/hook/sys_call.py
+++ b/rally/plugins/common/hook/sys_call.py
@@ -1,4 +1,3 @@
-# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,56 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-import shlex
-import subprocess
+from rally.plugins.task.hooks.sys_call import * # noqa: F401,F403
+from rally.plugins.task.hooks import sys_call as _new
+# import it last to make sure we use the right module
from rally.common import logging
-from rally import consts
-from rally import exceptions
-from rally.task import hook
-LOG = logging.getLogger(__name__)
-
-
-@hook.configure(name="sys_call")
-class SysCallHook(hook.HookAction):
- """Performs system call."""
-
- CONFIG_SCHEMA = {
- "$schema": consts.JSON_SCHEMA,
- "type": "string",
- "description": "Command to execute."
- }
-
- def run(self):
- LOG.debug("sys_call hook: Running command %s" % self.config)
- proc = subprocess.Popen(shlex.split(self.config),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- universal_newlines=True)
- out, err = proc.communicate()
- LOG.debug("sys_call hook: Command %s returned %s"
- % (self.config, proc.returncode))
- if proc.returncode:
- self.set_error(
- exception_name="n/a", # no exception class
- description="Subprocess returned %s" % proc.returncode,
- details=(err or "stdout: %s" % out))
-
- # NOTE(amaretskiy): Try to load JSON for charts,
- # otherwise save output as-is
- try:
- output = json.loads(out)
- for arg in ("additive", "complete"):
- for out_ in output.get(arg, []):
- self.add_output(**{arg: out_})
- except (TypeError, ValueError, exceptions.RallyException):
- self.add_output(
- complete={"title": "System call",
- "chart_plugin": "TextArea",
- "description": "Args: %s" % self.config,
- "data": ["RetCode: %i" % proc.returncode,
- "StdOut: %s" % (out or "(empty)"),
- "StdErr: %s" % (err or "(empty)")]})
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
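
The removed SysCallHook (now rally.plugins.task.hooks.sys_call) tries to parse the command's stdout as JSON chart data; anything that is not valid JSON is stored as a plain-text "System call" TextArea chart instead. An illustrative payload a hook command could print (the chart_plugin names here are assumptions, not mandated by the hook):

    stdout_payload = {
        "additive": [{"title": "operation durations",
                      "chart_plugin": "StackedArea",
                      "data": [["operation", 1.2]]}],
        "complete": [{"title": "totals",
                      "chart_plugin": "Pie",
                      "data": [["ok", 9], ["failed", 1]]}],
    }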
diff --git a/rally/plugins/common/hook/triggers/event.py b/rally/plugins/common/hook/triggers/event.py
index aefe0eb026..687e33653d 100644
--- a/rally/plugins/common/hook/triggers/event.py
+++ b/rally/plugins/common/hook/triggers/event.py
@@ -1,4 +1,3 @@
-# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,62 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-from rally import consts
-from rally.task import hook
+from rally.plugins.task.hook_triggers.event import * # noqa: F401,F403
+from rally.plugins.task.hook_triggers import event as _new
+
+# import it last to make sure we use the right module
+from rally.common import logging
-@hook.configure(name="event")
-class EventTrigger(hook.HookTrigger):
- """Triggers hook on specified event and list of values."""
-
- CONFIG_SCHEMA = {
- "type": "object",
- "$schema": consts.JSON_SCHEMA,
- "oneOf": [
- {
- "description": "Triage hook based on specified seconds after "
- "start of workload.",
- "properties": {
- "unit": {"enum": ["time"]},
- "at": {
- "type": "array",
- "minItems": 1,
- "uniqueItems": True,
- "items": {
- "type": "integer",
- "minimum": 0
- }
- },
- },
- "required": ["unit", "at"],
- "additionalProperties": False,
- },
- {
- "description": "Triage hook based on specific iterations.",
- "properties": {
- "unit": {"enum": ["iteration"]},
- "at": {
- "type": "array",
- "minItems": 1,
- "uniqueItems": True,
- "items": {
- "type": "integer",
- "minimum": 1,
- }
- },
- },
- "required": ["unit", "at"],
- "additionalProperties": False,
- },
- ]
- }
-
- def get_listening_event(self):
- return self.config["unit"]
-
- def on_event(self, event_type, value=None):
- if not (event_type == self.get_listening_event()
- and value in self.config["at"]):
- # do nothing
- return
- super(EventTrigger, self).on_event(event_type, value)
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
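
Two config instances accepted by the removed EventTrigger schema (unchanged in the relocated rally.plugins.task.hook_triggers.event), shown for illustration:

    # fire at the given seconds after the workload starts
    time_cfg = {"unit": "time", "at": [1, 5, 10]}

    # fire at the given iteration numbers (minimum 1)
    iteration_cfg = {"unit": "iteration", "at": [1, 50, 100]}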
diff --git a/rally/plugins/common/hook/triggers/periodic.py b/rally/plugins/common/hook/triggers/periodic.py
index b7bd720dc4..7d272cebb8 100644
--- a/rally/plugins/common/hook/triggers/periodic.py
+++ b/rally/plugins/common/hook/triggers/periodic.py
@@ -1,4 +1,3 @@
-# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,57 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-from rally import consts
-from rally.task import hook
+from rally.plugins.task.hook_triggers.periodic import * # noqa: F401,F403
+from rally.plugins.task.hook_triggers import periodic as _new
+
+# import it last to make sure we use the right module
+from rally.common import logging
-@hook.configure(name="periodic")
-class PeriodicTrigger(hook.HookTrigger):
- """Periodically triggers hook with specified range and step."""
-
- CONFIG_SCHEMA = {
- "type": "object",
- "$schema": consts.JSON_SCHEMA,
- "oneOf": [
- {
- "description": "Periodically triage hook based on elapsed time"
- " after start of workload.",
- "properties": {
- "unit": {"enum": ["time"]},
- "start": {"type": "integer", "minimum": 0},
- "end": {"type": "integer", "minimum": 1},
- "step": {"type": "integer", "minimum": 1},
- },
- "required": ["unit", "step"],
- "additionalProperties": False,
- },
- {
- "description": "Periodically triage hook based on iterations.",
- "properties": {
- "unit": {"enum": ["iteration"]},
- "start": {"type": "integer", "minimum": 1},
- "end": {"type": "integer", "minimum": 1},
- "step": {"type": "integer", "minimum": 1},
- },
- "required": ["unit", "step"],
- "additionalProperties": False,
- },
- ]
- }
-
- def __init__(self, context, task, hook_cls):
- super(PeriodicTrigger, self).__init__(context, task, hook_cls)
- self.config.setdefault(
- "start", 0 if self.config["unit"] == "time" else 1)
- self.config.setdefault("end", float("Inf"))
-
- def get_listening_event(self):
- return self.config["unit"]
-
- def on_event(self, event_type, value=None):
- if not (event_type == self.get_listening_event()
- and self.config["start"] <= value <= self.config["end"]
- and (value - self.config["start"]) % self.config["step"] == 0):
- # do nothing
- return
- super(PeriodicTrigger, self).on_event(event_type, value)
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
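
A config instance for the removed PeriodicTrigger (relocated to rally.plugins.task.hook_triggers.periodic). Per __init__ above, "start" defaults to 0 for the "time" unit and 1 for "iteration", while "end" defaults to infinity:

    # fire on iterations 1, 11, 21, ..., 91
    full_cfg = {"unit": "iteration", "start": 1, "end": 100, "step": 10}

    # only "unit" and "step" are required; start/end take the defaults
    minimal_cfg = {"unit": "time", "step": 30}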
diff --git a/rally/plugins/common/runners/constant.py b/rally/plugins/common/runners/constant.py
index 5feb1fee18..397dc6299c 100644
--- a/rally/plugins/common/runners/constant.py
+++ b/rally/plugins/common/runners/constant.py
@@ -1,4 +1,3 @@
-# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,330 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import collections
-import multiprocessing
-import queue as Queue
-import threading
-import time
+from rally.plugins.task.runners.constant import * # noqa: F401,F403
+from rally.plugins.task.runners import constant as _new
-from rally.common import utils
-from rally.common import validation
-from rally import consts
-from rally.task import runner
+# import it last to make sure we use the right module
+from rally.common import logging
-def _worker_process(queue, iteration_gen, timeout, concurrency, times,
- duration, context, cls, method_name, args, event_queue,
- aborted, info):
- """Start the scenario within threads.
-
- Spawn threads to support scenario execution.
- Scenario is ran for a fixed number of times if times is specified
- Scenario is ran for fixed duration if duration is specified.
- This generates a constant load on the cloud under test by executing each
- scenario iteration without pausing between iterations. Each thread runs
- the scenario method once with passed scenario arguments and context.
- After execution the result is appended to the queue.
-
- :param queue: queue object to append results
- :param iteration_gen: next iteration number generator
- :param timeout: operation's timeout
- :param concurrency: number of concurrently running scenario iterations
- :param times: total number of scenario iterations to be run
- :param duration: total duration in seconds of the run
- :param context: scenario context object
- :param cls: scenario class
- :param method_name: scenario method name
- :param args: scenario args
- :param event_queue: queue object to append events
- :param aborted: multiprocessing.Event that aborts load generation if
- the flag is set
- :param info: info about all processes count and counter of launched process
- """
- def _to_be_continued(iteration, current_duration, aborted, times=None,
- duration=None):
- if times is not None:
- return iteration < times and not aborted.is_set()
- elif duration is not None:
- return current_duration < duration and not aborted.is_set()
- else:
- return False
-
- if times is None and duration is None:
- raise ValueError("times or duration must be specified")
-
- pool = collections.deque()
- alive_threads_in_pool = 0
- finished_threads_in_pool = 0
-
- runner._log_worker_info(times=times, duration=duration,
- concurrency=concurrency, timeout=timeout, cls=cls,
- method_name=method_name, args=args)
-
- if timeout:
- timeout_queue = Queue.Queue()
- collector_thr_by_timeout = threading.Thread(
- target=utils.timeout_thread,
- args=(timeout_queue, )
- )
- collector_thr_by_timeout.start()
-
- iteration = next(iteration_gen)
- start_time = time.time()
- # NOTE(msimonin): keep the previous behaviour
- # > when duration is 0, scenario executes exactly 1 time
- current_duration = -1
- while _to_be_continued(iteration, current_duration, aborted,
- times=times, duration=duration):
-
- scenario_context = runner._get_scenario_context(iteration, context)
- worker_args = (
- queue, cls, method_name, scenario_context, args, event_queue)
-
- thread = threading.Thread(target=runner._worker_thread,
- args=worker_args)
-
- thread.start()
- if timeout:
- timeout_queue.put((thread, time.time() + timeout))
- pool.append(thread)
- alive_threads_in_pool += 1
-
- while alive_threads_in_pool == concurrency:
- prev_finished_threads_in_pool = finished_threads_in_pool
- finished_threads_in_pool = 0
- for t in pool:
- if not t.is_alive():
- finished_threads_in_pool += 1
-
- alive_threads_in_pool -= finished_threads_in_pool
- alive_threads_in_pool += prev_finished_threads_in_pool
-
- if alive_threads_in_pool < concurrency:
- # NOTE(boris-42): cleanup pool array. This is required because
- # in other case array length will be equal to times which
- # is unlimited big
- while pool and not pool[0].is_alive():
- pool.popleft().join()
- finished_threads_in_pool -= 1
- break
-
- # we should wait to not create big noise with these checks
- time.sleep(0.001)
- iteration = next(iteration_gen)
- current_duration = time.time() - start_time
-
- # Wait until all threads are done
- while pool:
- pool.popleft().join()
-
- if timeout:
- timeout_queue.put((None, None,))
- collector_thr_by_timeout.join()
-
-
-@validation.configure("check_constant")
-class CheckConstantValidator(validation.Validator):
- """Additional schema validation for constant runner"""
-
- def validate(self, context, config, plugin_cls, plugin_cfg):
- if plugin_cfg.get("concurrency", 1) > plugin_cfg.get("times", 1):
- return self.fail(
- "Parameter 'concurrency' means a number of parallel "
- "executions of iterations. Parameter 'times' means total "
- "number of iteration executions. It is redundant "
- "(and restricted) to have number of parallel iterations "
- "bigger then total number of iterations.")
-
-
-@validation.add("check_constant")
-@runner.configure(name="constant")
-class ConstantScenarioRunner(runner.ScenarioRunner):
- """Creates constant load executing a scenario a specified number of times.
-
- This runner will place a constant load on the cloud under test by
- executing each scenario iteration without pausing between iterations
- up to the number of times specified in the scenario config.
-
- The concurrency parameter of the scenario config controls the
- number of concurrent iterations which execute during a single
- scenario in order to simulate the activities of multiple users
- placing load on the cloud under test.
- """
-
- CONFIG_SCHEMA = {
- "type": "object",
- "$schema": consts.JSON_SCHEMA,
- "properties": {
- "concurrency": {
- "type": "integer",
- "minimum": 1,
- "description": "The number of parallel iteration executions."
- },
- "times": {
- "type": "integer",
- "minimum": 1,
- "description": "Total number of iteration executions."
- },
- "timeout": {
- "type": "number",
- "description": "Operation's timeout."
- },
- "max_cpu_count": {
- "type": "integer",
- "minimum": 1,
- "description": "The maximum number of processes to create load"
- " from."
- }
- },
- "additionalProperties": False
- }
-
- def _run_scenario(self, cls, method_name, context, args):
- """Runs the specified scenario with given arguments.
-
- This method generates a constant load on the cloud under test by
- executing each scenario iteration using a pool of processes without
- pausing between iterations up to the number of times specified
- in the scenario config.
-
- :param cls: The Scenario class where the scenario is implemented
- :param method_name: Name of the method that implements the scenario
- :param context: context that contains users, admin & other
- information, that was created before scenario
- execution starts.
- :param args: Arguments to call the scenario method with
-
- :returns: List of results fore each single scenario iteration,
- where each result is a dictionary
- """
- timeout = self.config.get("timeout", 0) # 0 means no timeout
- times = self.config.get("times", 1)
- concurrency = self.config.get("concurrency", 1)
- iteration_gen = utils.RAMInt()
-
- cpu_count = multiprocessing.cpu_count()
- max_cpu_used = min(cpu_count,
- self.config.get("max_cpu_count", cpu_count))
-
- processes_to_start = min(max_cpu_used, times, concurrency)
- concurrency_per_worker, concurrency_overhead = divmod(
- concurrency, processes_to_start)
-
- self._log_debug_info(times=times, concurrency=concurrency,
- timeout=timeout, max_cpu_used=max_cpu_used,
- processes_to_start=processes_to_start,
- concurrency_per_worker=concurrency_per_worker,
- concurrency_overhead=concurrency_overhead)
-
- result_queue = multiprocessing.Queue()
- event_queue = multiprocessing.Queue()
-
- def worker_args_gen(concurrency_overhead):
- while True:
- yield (result_queue, iteration_gen, timeout,
- concurrency_per_worker + (concurrency_overhead and 1),
- times, None, context, cls, method_name, args,
- event_queue, self.aborted)
- if concurrency_overhead:
- concurrency_overhead -= 1
-
- process_pool = self._create_process_pool(
- processes_to_start, _worker_process,
- worker_args_gen(concurrency_overhead))
- self._join_processes(process_pool, result_queue, event_queue)
-
-
-@runner.configure(name="constant_for_duration")
-class ConstantForDurationScenarioRunner(runner.ScenarioRunner):
- """Creates constant load executing a scenario for an interval of time.
-
- This runner will place a constant load on the cloud under test by
- executing each scenario iteration without pausing between iterations
- until a specified interval of time has elapsed.
-
- The concurrency parameter of the scenario config controls the
- number of concurrent iterations which execute during a single
- sceanario in order to simulate the activities of multiple users
- placing load on the cloud under test.
- """
-
- CONFIG_SCHEMA = {
- "type": "object",
- "$schema": consts.JSON_SCHEMA,
- "properties": {
- "concurrency": {
- "type": "integer",
- "minimum": 1,
- "description": "The number of parallel iteration executions."
- },
- "duration": {
- "type": "number",
- "minimum": 0.0,
- "description": "The number of seconds during which to generate"
- " a load. If the duration is 0, the scenario"
- " will run once per parallel execution."
- },
- "timeout": {
- "type": "number",
- "minimum": 1,
- "description": "Operation's timeout."
- }
- },
- "required": ["duration"],
- "additionalProperties": False
- }
-
- def _run_scenario(self, cls, method_name, context, args):
- """Runs the specified scenario with given arguments.
-
- This method generates a constant load on the cloud under test by
- executing each scenario iteration using a pool of processes without
- pausing between iterations up to the number of times specified
- in the scenario config.
-
- :param cls: The Scenario class where the scenario is implemented
- :param method_name: Name of the method that implements the scenario
- :param context: context that contains users, admin & other
- information, that was created before scenario
- execution starts.
- :param args: Arguments to call the scenario method with
-
- :returns: List of results fore each single scenario iteration,
- where each result is a dictionary
- """
- timeout = self.config.get("timeout", 600)
- duration = self.config.get("duration", 0)
- concurrency = self.config.get("concurrency", 1)
- iteration_gen = utils.RAMInt()
-
- cpu_count = multiprocessing.cpu_count()
- max_cpu_used = min(cpu_count,
- self.config.get("max_cpu_count", cpu_count))
-
- processes_to_start = min(max_cpu_used, concurrency)
- concurrency_per_worker, concurrency_overhead = divmod(
- concurrency, processes_to_start)
-
- self._log_debug_info(duration=duration, concurrency=concurrency,
- timeout=timeout, max_cpu_used=max_cpu_used,
- processes_to_start=processes_to_start,
- concurrency_per_worker=concurrency_per_worker,
- concurrency_overhead=concurrency_overhead)
-
- result_queue = multiprocessing.Queue()
- event_queue = multiprocessing.Queue()
-
- def worker_args_gen(concurrency_overhead):
- while True:
- yield (result_queue, iteration_gen, timeout,
- concurrency_per_worker + (concurrency_overhead and 1),
- None, duration, context, cls, method_name, args,
- event_queue, self.aborted)
- if concurrency_overhead:
- concurrency_overhead -= 1
-
- process_pool = self._create_process_pool(
- processes_to_start, _worker_process,
- worker_args_gen(concurrency_overhead))
- self._join_processes(process_pool, result_queue, event_queue)
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
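
Runner configs validated by the removed CONFIG_SCHEMAs (unchanged under rally.plugins.task.runners.constant); note that CheckConstantValidator above rejects a concurrency larger than times:

    # constant: run 100 iterations total, 10 in parallel, no timeout
    constant_cfg = {"times": 100, "concurrency": 10}

    # constant_for_duration: 60 seconds of load with 5 parallel iterations
    for_duration_cfg = {"duration": 60, "concurrency": 5, "timeout": 30}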
diff --git a/rally/plugins/common/runners/rps.py b/rally/plugins/common/runners/rps.py
index 99c869c79e..2b3b3ae0be 100644
--- a/rally/plugins/common/runners/rps.py
+++ b/rally/plugins/common/runners/rps.py
@@ -1,4 +1,3 @@
-# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,285 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import collections
-import multiprocessing
-import queue as Queue
-import threading
-import time
-
+from rally.plugins.task.runners.rps import * # noqa: F401,F403
+from rally.plugins.task.runners import rps as _new
+# import it last to make sure we use the right module
from rally.common import logging
-from rally.common import utils
-from rally.common import validation
-from rally import consts
-from rally.task import runner
-
-LOG = logging.getLogger(__name__)
-def _worker_process(queue, iteration_gen, timeout, times, max_concurrent,
- context, cls, method_name, args, event_queue, aborted,
- runs_per_second, rps_cfg, processes_to_start, info):
- """Start scenario within threads.
-
- Spawn N threads per second. Each thread runs the scenario once, and appends
- result to queue. A maximum of max_concurrent threads will be ran
- concurrently.
-
- :param queue: queue object to append results
- :param iteration_gen: next iteration number generator
- :param timeout: operation's timeout
- :param times: total number of scenario iterations to be run
- :param max_concurrent: maximum worker concurrency
- :param context: scenario context object
- :param cls: scenario class
- :param method_name: scenario method name
- :param args: scenario args
- :param aborted: multiprocessing.Event that aborts load generation if
- the flag is set
- :param runs_per_second: function that should return desired rps value
- :param rps_cfg: rps section from task config
- :param processes_to_start: int, number of started processes for scenario
- execution
- :param info: info about all processes count and counter of runned process
- """
-
- pool = collections.deque()
- if isinstance(rps_cfg, dict):
- rps = rps_cfg["start"]
- else:
- rps = rps_cfg
- sleep = 1.0 / rps
-
- runner._log_worker_info(times=times, rps=rps, timeout=timeout,
- cls=cls, method_name=method_name, args=args)
-
- time.sleep(
- (sleep * info["processes_counter"]) / info["processes_to_start"])
-
- start = time.time()
- timeout_queue = Queue.Queue()
-
- if timeout:
- collector_thr_by_timeout = threading.Thread(
- target=utils.timeout_thread,
- args=(timeout_queue, )
- )
- collector_thr_by_timeout.start()
-
- i = 0
- while i < times and not aborted.is_set():
- scenario_context = runner._get_scenario_context(next(iteration_gen),
- context)
- worker_args = (
- queue, cls, method_name, scenario_context, args, event_queue)
- thread = threading.Thread(target=runner._worker_thread,
- args=worker_args)
-
- i += 1
- thread.start()
- if timeout:
- timeout_queue.put((thread, time.time() + timeout))
- pool.append(thread)
-
- time_gap = time.time() - start
- real_rps = i / time_gap if time_gap else "Infinity"
-
- LOG.debug(
- "Worker: %s rps: %s (requested rps: %s)" %
- (i, real_rps, runs_per_second(rps_cfg, start, processes_to_start)))
-
- # try to join latest thread(s) until it finished, or until time to
- # start new thread (if we have concurrent slots available)
- while i / (time.time() - start) > runs_per_second(
- rps_cfg, start, processes_to_start) or (
- len(pool) >= max_concurrent):
- if pool:
- pool[0].join(0.001)
- if not pool[0].is_alive():
- pool.popleft()
- else:
- time.sleep(0.001)
-
- while pool:
- pool.popleft().join()
-
- if timeout:
- timeout_queue.put((None, None,))
- collector_thr_by_timeout.join()
-
-
-@validation.configure("check_rps")
-class CheckPRSValidator(validation.Validator):
- """Additional schema validation for rps runner"""
-
- def validate(self, context, config, plugin_cls, plugin_cfg):
- if isinstance(plugin_cfg["rps"], dict):
- if plugin_cfg["rps"]["end"] < plugin_cfg["rps"]["start"]:
- msg = "rps end value must not be less than rps start value."
- return self.fail(msg)
-
-
-@validation.add("check_rps")
-@runner.configure(name="rps")
-class RPSScenarioRunner(runner.ScenarioRunner):
- """Scenario runner that does the job with specified frequency.
-
- Every single scenario iteration is executed with specified frequency
- (runs per second) in a pool of processes. The scenario will be
- launched for a fixed number of times in total (specified in the config).
-
- An example of a rps scenario is booting 1 VM per second. This
- execution type is thus very helpful in understanding the maximal load that
- a certain cloud can handle.
- """
-
- CONFIG_SCHEMA = {
- "type": "object",
- "$schema": consts.JSON_SCHEMA7,
- "properties": {
- "times": {
- "type": "integer",
- "minimum": 1
- },
- "rps": {
- "anyOf": [
- {
- "description": "Generate constant requests per second "
- "during the whole workload.",
- "type": "number",
- "exclusiveMinimum": 0,
- "minimum": 0
- },
- {
- "type": "object",
- "description": "Increase requests per second for "
- "specified value each time after a "
- "certain number of seconds.",
- "properties": {
- "start": {
- "type": "number",
- "minimum": 1
- },
- "end": {
- "type": "number",
- "minimum": 1
- },
- "step": {
- "type": "number",
- "minimum": 1
- },
- "duration": {
- "type": "number",
- "minimum": 1
- }
- },
- "additionalProperties": False,
- "required": ["start", "end", "step"]
- }
- ],
- },
- "timeout": {
- "type": "number",
- },
- "max_concurrency": {
- "type": "integer",
- "minimum": 1
- },
- "max_cpu_count": {
- "type": "integer",
- "minimum": 1
- }
- },
- "required": ["times", "rps"],
- "additionalProperties": False
- }
-
- def _run_scenario(self, cls, method_name, context, args):
- """Runs the specified scenario with given arguments.
-
- Every single scenario iteration is executed with specified
- frequency (runs per second) in a pool of processes. The scenario is
- launched for a fixed number of times in total (specified in the
- config).
-
- :param cls: The Scenario class where the scenario is implemented
- :param method_name: Name of the method that implements the scenario
- :param context: Context that contains users, admin & other
- information, that was created before scenario
- execution starts.
- :param args: Arguments to call the scenario method with
-
- :returns: List of results for each single scenario iteration,
- where each result is a dictionary
- """
- times = self.config["times"]
- timeout = self.config.get("timeout", 0) # 0 means no timeout
- iteration_gen = utils.RAMInt()
-
- cpu_count = multiprocessing.cpu_count()
- max_cpu_used = min(cpu_count,
- self.config.get("max_cpu_count", cpu_count))
-
- def runs_per_second(rps_cfg, start_timer, number_of_processes):
- """At the given second return desired rps."""
-
- if not isinstance(rps_cfg, dict):
- return float(rps_cfg) / number_of_processes
- stage_order = (time.time() - start_timer) / rps_cfg.get(
- "duration", 1) - 1
- rps = (float(rps_cfg["start"] + rps_cfg["step"] * stage_order)
- / number_of_processes)
-
- return min(rps, float(rps_cfg["end"]))
-
- processes_to_start = min(max_cpu_used, times,
- self.config.get("max_concurrency", times))
- times_per_worker, times_overhead = divmod(times, processes_to_start)
-
- # Determine concurrency per worker
- concurrency_per_worker, concurrency_overhead = divmod(
- self.config.get("max_concurrency", times), processes_to_start)
-
- self._log_debug_info(times=times, timeout=timeout,
- max_cpu_used=max_cpu_used,
- processes_to_start=processes_to_start,
- times_per_worker=times_per_worker,
- times_overhead=times_overhead,
- concurrency_per_worker=concurrency_per_worker,
- concurrency_overhead=concurrency_overhead)
-
- result_queue = multiprocessing.Queue()
- event_queue = multiprocessing.Queue()
-
- def worker_args_gen(times_overhead, concurrency_overhead):
- """Generate arguments for process worker.
-
- The remainder of the threads-per-process division is distributed
- among process workers equally - one thread per process worker
- until the remainder reaches zero. The same logic is applied
- to the concurrency overhead.
- :param times_overhead: remaining number of threads to be
- distributed to workers
- :param concurrency_overhead: remaining number of maximum
- concurrent threads to be
- distributed to workers
- """
- while True:
- yield (
- result_queue, iteration_gen, timeout,
- times_per_worker + (times_overhead and 1),
- concurrency_per_worker + (concurrency_overhead and 1),
- context, cls, method_name, args, event_queue,
- self.aborted, runs_per_second, self.config["rps"],
- processes_to_start
- )
- if times_overhead:
- times_overhead -= 1
- if concurrency_overhead:
- concurrency_overhead -= 1
-
- process_pool = self._create_process_pool(
- processes_to_start, _worker_process,
- worker_args_gen(times_overhead, concurrency_overhead))
- self._join_processes(process_pool, result_queue, event_queue)
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
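For context on the CONFIG_SCHEMA removed above: a task file selects this runner through a config section that has to validate against that schema. A minimal sketch as plain Python dicts with illustrative values (the "type" key is how a task file picks the runner plugin and is handled outside the schema):

.. code-block:: python

    # Constant mode: a bare number keeps the request rate flat.
    constant_rps = {
        "type": "rps",
        "times": 100,           # total iterations to run
        "rps": 2,               # start two iterations per second
        "max_concurrency": 10,
        "timeout": 60,
    }

    # Ramping mode: grow from 1 to 10 rps, adding 1 rps every 30 seconds.
    ramping_rps = {
        "type": "rps",
        "times": 500,
        "rps": {"start": 1, "end": 10, "step": 1, "duration": 30},
    }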
diff --git a/rally/plugins/common/runners/serial.py b/rally/plugins/common/runners/serial.py
index e981440d47..2f4b9d0aa0 100644
--- a/rally/plugins/common/runners/serial.py
+++ b/rally/plugins/common/runners/serial.py
@@ -1,4 +1,3 @@
-# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,65 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-from rally.common import utils as rutils
-from rally import consts
-from rally.task import runner
+from rally.plugins.task.runners.serial import * # noqa: F401,F403
+from rally.plugins.task.runners import serial as _new
+
+# import it as the last item to be sure that we use the right module
+from rally.common import logging
-@runner.configure(name="serial")
-class SerialScenarioRunner(runner.ScenarioRunner):
- """Scenario runner that executes scenarios serially.
-
- Unlike scenario runners that execute in parallel, the serial scenario
- runner executes scenarios one-by-one in the same python interpreter process
- as Rally. This allows you to execute a scenario without introducing
- any concurrent operations, as well as interactively debug the scenario
- from the same command that you use to start Rally.
- """
-
- # NOTE(mmorais): additionalProperties is set True to allow switching
- # between parallel and serial runners by modifying only *type* property
- CONFIG_SCHEMA = {
- "type": "object",
- "$schema": consts.JSON_SCHEMA,
- "properties": {
- "times": {
- "type": "integer",
- "minimum": 1
- }
- },
- "additionalProperties": True
- }
-
- def _run_scenario(self, cls, method_name, context, args):
- """Runs the specified scenario with given arguments.
-
- The scenario iterations are executed one-by-one in the same python
- interpreter process as Rally. This allows you to execute a
- scenario without introducing any concurrent operations, as well as
- interactively debug the scenario from the same command that you use
- to start Rally.
-
- :param cls: The Scenario class where the scenario is implemented
- :param method_name: Name of the method that implements the scenario
- :param context: context that contains users, admin & other
- information, that was created before scenario
- execution starts.
- :param args: Arguments to call the scenario method with
-
- :returns: List of results for each single scenario iteration,
- where each result is a dictionary
- """
- times = self.config.get("times", 1)
-
- event_queue = rutils.DequeAsQueue(self.event_queue)
-
- for i in range(times):
- if self.aborted.is_set():
- break
- result = runner._run_scenario_once(
- cls, method_name, runner._get_scenario_context(i, context),
- args, event_queue)
- self._send_result(result)
-
- self._flush_results()
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
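Every shim module in this change follows the same pattern: re-export everything from the new location, then call log_deprecated_module. A rough sketch of what a consumer sees, assuming Rally is importable (the warnings plumbing here is illustrative):

.. code-block:: python

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        # the old import path still works but warns on first import
        from rally.plugins.common.runners import serial

    # the name is actually resolved from rally.plugins.task.runners.serial
    print(serial.SerialScenarioRunner)
    print(caught[0].message if caught else "already imported, no warning")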
diff --git a/rally/plugins/common/scenarios/requests/http_requests.py b/rally/plugins/common/scenarios/requests/http_requests.py
index ec372a4463..250da8c17e 100644
--- a/rally/plugins/common/scenarios/requests/http_requests.py
+++ b/rally/plugins/common/scenarios/requests/http_requests.py
@@ -1,3 +1,5 @@
+# All Rights Reserved.
+#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -10,47 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import random
+from rally.plugins.task.scenarios.requests.http_requests import * # noqa: F401,F403,E501
+from rally.plugins.task.scenarios.requests import http_requests as _new
-from rally.plugins.common.scenarios.requests import utils
-from rally.task import scenario
+# import it as the last item to be sure that we use the right module
+from rally.common import logging
-"""Scenarios for HTTP requests."""
-
-
-@scenario.configure(name="HttpRequests.check_request")
-class HttpRequestsCheckRequest(utils.RequestScenario):
-
- def run(self, url, method, status_code, **kwargs):
- """Standard way for testing web services using HTTP requests.
-
- This scenario is used to make a request and check it against the
- expected response.
-
- :param url: url for the Request object
- :param method: method for the Request object
- :param status_code: expected response code
- :param kwargs: optional additional request parameters
- """
-
- self._check_request(url, method, status_code, **kwargs)
-
-
-@scenario.configure(name="HttpRequests.check_random_request")
-class HttpRequestsCheckRandomRequest(utils.RequestScenario):
-
- def run(self, requests, status_code):
- """Executes random HTTP requests from provided list.
-
- This scenario takes random url from list of requests, and raises
- exception if the response is not the expected response.
-
- :param requests: List of request dicts
- :param status_code: Expected response code; it will
- be used only if it is not specified in the request itself
- """
-
- request = random.choice(requests)
- request.setdefault("status_code", status_code)
- self._check_request(**request)
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
diff --git a/rally/plugins/common/scenarios/requests/utils.py b/rally/plugins/common/scenarios/requests/utils.py
index 8fd35347a2..2a4cc2b0ff 100644
--- a/rally/plugins/common/scenarios/requests/utils.py
+++ b/rally/plugins/common/scenarios/requests/utils.py
@@ -1,3 +1,5 @@
+# All Rights Reserved.
+#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -10,29 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import requests
+from rally.plugins.task.scenarios.requests.utils import * # noqa: F401,F403
+from rally.plugins.task.scenarios.requests import utils as _new
-from rally.task import atomic
-from rally.task import scenario
+# import it as the last item to be sure that we use the right module
+from rally.common import logging
-class RequestScenario(scenario.Scenario):
- """Base class for Request scenarios with basic atomic actions."""
-
- @atomic.action_timer("requests.check_request")
- def _check_request(self, url, method, status_code, **kwargs):
- """Compare request status code with specified code
-
- :param status_code: Expected status code of request
- :param url: Uniform resource locator
- :param method: Type of request method (GET | POST ..)
- :param kwargs: Optional additional request parameters
- :raises ValueError: if the returned HTTP status code is
- not equal to the expected status code
- """
-
- resp = requests.request(method, url, **kwargs)
- if status_code != resp.status_code:
- error_msg = "Expected HTTP request code is `%s` actual `%s`"
- raise ValueError(
- error_msg % (status_code, resp.status_code))
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
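The removed helper boils down to a single request plus a status-code comparison. A self-contained sketch of the same check without the atomic-action timer (the function name and error text here are illustrative):

.. code-block:: python

    import requests

    def check_request(url, method, status_code, **kwargs):
        # one HTTP call, then compare actual and expected status codes
        resp = requests.request(method, url, **kwargs)
        if resp.status_code != status_code:
            raise ValueError("Expected HTTP status `%s`, got `%s`"
                             % (status_code, resp.status_code))

    check_request("https://example.com", "GET", 200)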
diff --git a/rally/plugins/common/sla/failure_rate.py b/rally/plugins/common/sla/failure_rate.py
index 8830ad6810..b9bba059f8 100644
--- a/rally/plugins/common/sla/failure_rate.py
+++ b/rally/plugins/common/sla/failure_rate.py
@@ -1,4 +1,3 @@
-# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,55 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+from rally.plugins.task.sla.failure_rate import * # noqa: F401,F403
+from rally.plugins.task.sla import failure_rate as _new
-"""
-SLA (Service-level agreement) is a set of details for determining compliance
-with contracted values such as maximum error rate or minimum response time.
-"""
-
-from rally import consts
-from rally.task import sla
+# import it as the last item to be sure that we use the right module
+from rally.common import logging
-@sla.configure(name="failure_rate")
-class FailureRate(sla.SLA):
- """Failure rate minimum and maximum in percents."""
- CONFIG_SCHEMA = {
- "type": "object",
- "$schema": consts.JSON_SCHEMA,
- "properties": {
- "min": {"type": "number", "minimum": 0.0, "maximum": 100.0},
- "max": {"type": "number", "minimum": 0.0, "maximum": 100.0}
- },
- "minProperties": 1,
- "additionalProperties": False,
- }
-
- def __init__(self, criterion_value):
- super(FailureRate, self).__init__(criterion_value)
- self.min_percent = self.criterion_value.get("min", 0)
- self.max_percent = self.criterion_value.get("max", 100)
- self.errors = 0
- self.total = 0
- self.error_rate = 0.0
-
- def add_iteration(self, iteration):
- self.total += 1
- if iteration["error"]:
- self.errors += 1
- self.error_rate = self.errors * 100.0 / self.total
- self.success = self.min_percent <= self.error_rate <= self.max_percent
- return self.success
-
- def merge(self, other):
- self.total += other.total
- self.errors += other.errors
- if self.total:
- self.error_rate = self.errors * 100.0 / self.total
- self.success = self.min_percent <= self.error_rate <= self.max_percent
- return self.success
-
- def details(self):
- return ("Failure rate criteria %.2f%% <= %.2f%% <= %.2f%% - %s" %
- (self.min_percent, self.error_rate,
- self.max_percent, self.status()))
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
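A worked example of the failure_rate arithmetic from the removed class: 2 errors over 50 iterations yield a 4.0% error rate, which satisfies a {"max": 5} criterion but violates {"max": 3}:

.. code-block:: python

    errors, total = 2, 50
    error_rate = errors * 100.0 / total   # 4.0
    print(0 <= error_rate <= 5)           # True  -> SLA passes
    print(0 <= error_rate <= 3)           # False -> SLA fails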
diff --git a/rally/plugins/common/sla/iteration_time.py b/rally/plugins/common/sla/iteration_time.py
index 6765f40b17..dc66970ed6 100644
--- a/rally/plugins/common/sla/iteration_time.py
+++ b/rally/plugins/common/sla/iteration_time.py
@@ -1,4 +1,3 @@
-# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,41 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+from rally.plugins.task.sla.iteration_time import * # noqa: F401,F403
+from rally.plugins.task.sla import iteration_time as _new
-"""
-SLA (Service-level agreement) is a set of details for determining compliance
-with contracted values such as maximum error rate or minimum response time.
-"""
-
-from rally import consts
-from rally.task import sla
+# import it as the last item to be sure that we use the right module
+from rally.common import logging
-@sla.configure(name="max_seconds_per_iteration")
-class IterationTime(sla.SLA):
- """Maximum time for one iteration in seconds."""
- CONFIG_SCHEMA = {
- "type": "number",
- "$schema": consts.JSON_SCHEMA7,
- "minimum": 0.0,
- "exclusiveMinimum": 0.0}
-
- def __init__(self, criterion_value):
- super(IterationTime, self).__init__(criterion_value)
- self.max_iteration_time = 0.0
-
- def add_iteration(self, iteration):
- if iteration["duration"] > self.max_iteration_time:
- self.max_iteration_time = iteration["duration"]
- self.success = self.max_iteration_time <= self.criterion_value
- return self.success
-
- def merge(self, other):
- if other.max_iteration_time > self.max_iteration_time:
- self.max_iteration_time = other.max_iteration_time
- self.success = self.max_iteration_time <= self.criterion_value
- return self.success
-
- def details(self):
- return ("Maximum seconds per iteration %.2fs <= %.2fs - %s" %
- (self.max_iteration_time, self.criterion_value, self.status()))
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
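The max_seconds_per_iteration criterion simply tracks the running maximum duration. A tiny worked example with illustrative values:

.. code-block:: python

    criterion = 4.0                          # max seconds per iteration
    durations = [1.2, 3.9, 2.5]
    max_iteration_time = max(durations)      # 3.9
    print(max_iteration_time <= criterion)   # True -> SLA passes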
diff --git a/rally/plugins/common/sla/max_average_duration.py b/rally/plugins/common/sla/max_average_duration.py
index b3828e8b88..40b9e1b52a 100644
--- a/rally/plugins/common/sla/max_average_duration.py
+++ b/rally/plugins/common/sla/max_average_duration.py
@@ -1,4 +1,3 @@
-# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,44 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+from rally.plugins.task.sla.max_average_duration import * # noqa: F401,F403
+from rally.plugins.task.sla import max_average_duration as _new
-"""
-SLA (Service-level agreement) is a set of details for determining compliance
-with contracted values such as maximum error rate or minimum response time.
-"""
-
-from rally.common import streaming_algorithms
-from rally import consts
-from rally.task import sla
+# import it as the last item to be sure that we use the right module
+from rally.common import logging
-@sla.configure(name="max_avg_duration")
-class MaxAverageDuration(sla.SLA):
- """Maximum average duration of one iteration in seconds."""
- CONFIG_SCHEMA = {
- "type": "number",
- "$schema": consts.JSON_SCHEMA7,
- "exclusiveMinimum": 0.0
- }
-
- def __init__(self, criterion_value):
- super(MaxAverageDuration, self).__init__(criterion_value)
- self.avg = 0.0
- self.avg_comp = streaming_algorithms.MeanComputation()
-
- def add_iteration(self, iteration):
- if not iteration.get("error"):
- self.avg_comp.add(iteration["duration"])
- self.avg = self.avg_comp.result()
- self.success = self.avg <= self.criterion_value
- return self.success
-
- def merge(self, other):
- self.avg_comp.merge(other.avg_comp)
- self.avg = self.avg_comp.result() or 0.0
- self.success = self.avg <= self.criterion_value
- return self.success
-
- def details(self):
- return ("Average duration of one iteration %.2fs <= %.2fs - %s" %
- (self.avg, self.criterion_value, self.status()))
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
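MaxAverageDuration relies on a streaming mean so that iteration durations never have to be stored. A minimal stand-in with the same add/merge/result surface (a sketch, not the rally.common.streaming_algorithms implementation):

.. code-block:: python

    class MeanSketch:
        """Streaming mean with the add/merge/result surface used above."""

        def __init__(self):
            self.count = 0
            self.total = 0.0

        def add(self, value):
            self.count += 1
            self.total += value

        def merge(self, other):
            self.count += other.count
            self.total += other.total

        def result(self):
            return self.total / self.count if self.count else None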
diff --git a/rally/plugins/common/sla/max_average_duration_per_atomic.py b/rally/plugins/common/sla/max_average_duration_per_atomic.py
index 1a6a668ed7..9a144033a9 100644
--- a/rally/plugins/common/sla/max_average_duration_per_atomic.py
+++ b/rally/plugins/common/sla/max_average_duration_per_atomic.py
@@ -1,4 +1,3 @@
-# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,61 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+from rally.plugins.task.sla.max_average_duration_per_atomic import * # noqa: F401,F403,E501
+from rally.plugins.task.sla import max_average_duration_per_atomic as _new
-"""
-SLA (Service-level agreement) is a set of details for determining compliance
-with contracted values such as maximum error rate or minimum response time.
-"""
-
-import collections
-
-from rally.common import streaming_algorithms
-from rally import consts
-from rally.task import sla
+# import it as the last item to be sure that we use the right module
+from rally.common import logging
-@sla.configure(name="max_avg_duration_per_atomic")
-class MaxAverageDurationPerAtomic(sla.SLA):
- """Maximum average duration of one iterations atomic actions in seconds."""
- CONFIG_SCHEMA = {"type": "object", "$schema": consts.JSON_SCHEMA,
- "patternProperties": {".*": {
- "type": "number",
- "description": "The name of atomic action."}},
- "minProperties": 1,
- "additionalProperties": False}
-
- def __init__(self, criterion_value):
- super(MaxAverageDurationPerAtomic, self).__init__(criterion_value)
- self.avg_by_action = collections.defaultdict(float)
- self.avg_comp_by_action = collections.defaultdict(
- streaming_algorithms.MeanComputation)
- self.criterion_items = self.criterion_value.items()
-
- def add_iteration(self, iteration):
- if not iteration.get("error"):
- for action in iteration["atomic_actions"]:
- duration = action["finished_at"] - action["started_at"]
- self.avg_comp_by_action[action["name"]].add(duration)
- result = self.avg_comp_by_action[action["name"]].result()
- self.avg_by_action[action["name"]] = result
- self.success = all(self.avg_by_action[atom] <= val
- for atom, val in self.criterion_items)
- return self.success
-
- def merge(self, other):
- for atom, comp in self.avg_comp_by_action.items():
- if atom in other.avg_comp_by_action:
- comp.merge(other.avg_comp_by_action[atom])
- self.avg_by_action = {a: comp.result() or 0.0
- for a, comp in self.avg_comp_by_action.items()}
- self.success = all(self.avg_by_action[atom] <= val
- for atom, val in self.criterion_items)
- return self.success
-
- def details(self):
- strs = ["Action: '%s'. %.2fs <= %.2fs" %
- (atom, self.avg_by_action[atom], val)
- for atom, val in self.criterion_items]
- head = "Average duration of one iteration for atomic actions:"
- end = "Status: %s" % self.status()
- return "\n".join([head] + strs + [end])
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
diff --git a/rally/plugins/common/sla/outliers.py b/rally/plugins/common/sla/outliers.py
index 5104585c22..39cf117146 100644
--- a/rally/plugins/common/sla/outliers.py
+++ b/rally/plugins/common/sla/outliers.py
@@ -1,4 +1,3 @@
-# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,99 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+from rally.plugins.task.sla.outliers import * # noqa: F401,F403
+from rally.plugins.task.sla import outliers as _new
-"""
-SLA (Service-level agreement) is a set of details for determining compliance
-with contracted values such as maximum error rate or minimum response time.
-"""
-
-from rally.common import streaming_algorithms
-from rally import consts
-from rally.task import sla
+# import it as the last item to be sure that we use the right module
+from rally.common import logging
-@sla.configure(name="outliers")
-class Outliers(sla.SLA):
- """Limit the number of outliers (iterations that take too much time).
-
- The outliers are detected automatically using the computation of the mean
- and standard deviation (std) of the data.
- """
- CONFIG_SCHEMA = {
- "type": "object",
- "$schema": consts.JSON_SCHEMA7,
- "properties": {
- "max": {"type": "integer", "minimum": 0},
- "min_iterations": {"type": "integer", "minimum": 3},
- "sigmas": {"type": "number", "minimum": 0.0,
- "exclusiveMinimum": 0.0}
- },
- "additionalProperties": False,
- }
-
- def __init__(self, criterion_value):
- super(Outliers, self).__init__(criterion_value)
- self.max_outliers = self.criterion_value.get("max", 0)
- # NOTE(msdubov): Having 3 as default is reasonable (need enough data).
- self.min_iterations = self.criterion_value.get("min_iterations", 3)
- self.sigmas = self.criterion_value.get("sigmas", 3.0)
- self.iterations = 0
- self.outliers = 0
- self.threshold = None
- self.mean_comp = streaming_algorithms.MeanComputation()
- self.std_comp = streaming_algorithms.StdDevComputation()
-
- def add_iteration(self, iteration):
- # NOTE(ikhudoshyn): This method can not be implemented properly.
- # After adding a new iteration, both mean and standard deviation
- # may change. Hence threshold will change as well. In this case we
- # should again compare durations of all accounted iterations
- # to the threshold. Unfortunately we can not do it since
- # we do not store durations.
- # Implementation provided here only gives rough approximation
- # of outliers number.
- if not iteration.get("error"):
- duration = iteration["duration"]
- self.iterations += 1
-
- # NOTE(msdubov): First check if the current iteration is an outlier
- if (self.iterations >= self.min_iterations
- and self.threshold and duration > self.threshold):
- self.outliers += 1
-
- # NOTE(msdubov): Then update the threshold value
- self.mean_comp.add(duration)
- self.std_comp.add(duration)
- if self.iterations >= 2:
- mean = self.mean_comp.result()
- std = self.std_comp.result()
- self.threshold = mean + self.sigmas * std
-
- self.success = self.outliers <= self.max_outliers
- return self.success
-
- def merge(self, other):
- # NOTE(ikhudoshyn): This method can not be implemented properly.
- # After merge, both mean and standard deviation may change.
- # Hence threshold will change as well. In this case we
- # should again compare durations of all accounted iterations
- # to the threshold. Unfortunately we can not do it since
- # we do not store durations.
- # Implementation provided here only gives rough approximation
- # of outliers number.
- self.iterations += other.iterations
- self.outliers += other.outliers
- self.mean_comp.merge(other.mean_comp)
- self.std_comp.merge(other.std_comp)
-
- if self.iterations >= 2:
- mean = self.mean_comp.result()
- std = self.std_comp.result()
- self.threshold = mean + self.sigmas * std
-
- self.success = self.outliers <= self.max_outliers
- return self.success
-
- def details(self):
- return ("Maximum number of outliers %i <= %i - %s" %
- (self.outliers, self.max_outliers, self.status()))
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
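A worked example of the outlier threshold computed above, with the statistics module standing in for the streaming mean/stddev (the streaming results may differ slightly):

.. code-block:: python

    import statistics

    durations = [1.0, 1.1, 0.9, 1.0]
    threshold = (statistics.mean(durations)
                 + 3.0 * statistics.stdev(durations))
    print(round(threshold, 3))   # ~1.245
    print(5.0 > threshold)       # True -> a 5.0s iteration is an outlier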
diff --git a/rally/plugins/common/sla/performance_degradation.py b/rally/plugins/common/sla/performance_degradation.py
index d9d142393d..0815d02831 100644
--- a/rally/plugins/common/sla/performance_degradation.py
+++ b/rally/plugins/common/sla/performance_degradation.py
@@ -1,4 +1,3 @@
-# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,60 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+from rally.plugins.task.sla.performance_degradation import * # noqa: F401,F403
+from rally.plugins.task.sla import performance_degradation as _new
-"""
-SLA (Service-level agreement) is a set of details for determining compliance
-with contracted values such as maximum error rate or minimum response time.
-"""
-
-from __future__ import division
-
-from rally.common import streaming_algorithms
-from rally import consts
-from rally.task import sla
-from rally.utils import strutils
+# import it as the last item to be sure that we use the right module
+from rally.common import logging
-@sla.configure(name="performance_degradation")
-class PerformanceDegradation(sla.SLA):
- """Calculates performance degradation based on iteration time
-
- This SLA plugin finds the minimum and maximum duration of
- iterations completed without errors during Rally task execution.
- Assuming that the minimum duration is 100%, it calculates
- performance degradation against the maximum duration.
- """
- CONFIG_SCHEMA = {
- "type": "object",
- "$schema": consts.JSON_SCHEMA7,
- "properties": {
- "max_degradation": {
- "type": "number",
- "minimum": 0.0,
- },
- },
- "required": [
- "max_degradation",
- ],
- "additionalProperties": False,
- }
-
- def __init__(self, criterion_value):
- super(PerformanceDegradation, self).__init__(criterion_value)
- self.max_degradation = self.criterion_value["max_degradation"]
- self.degradation = streaming_algorithms.DegradationComputation()
-
- def add_iteration(self, iteration):
- if not iteration.get("error"):
- self.degradation.add(iteration["duration"])
- self.success = self.degradation.result() <= self.max_degradation
- return self.success
-
- def merge(self, other):
- self.degradation.merge(other.degradation)
- self.success = self.degradation.result() <= self.max_degradation
- return self.success
-
- def details(self):
- res = strutils.format_float_to_str(self.degradation.result() or 0.0)
- return "Current degradation: %s%% - %s" % (res, self.status())
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
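DegradationComputation compares the fastest and slowest successful iterations; its result is, roughly, how far the maximum duration exceeds the minimum, in percent. A worked sketch under that assumption:

.. code-block:: python

    durations = [10.0, 12.0, 15.0]   # successful iterations only
    degradation = (max(durations) - min(durations)) / min(durations) * 100
    print(degradation)               # 50.0
    print(degradation <= 40.0)       # False -> "max_degradation": 40 fails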
diff --git a/rally/plugins/common/types.py b/rally/plugins/common/types.py
index 8384f54731..46228ad169 100644
--- a/rally/plugins/common/types.py
+++ b/rally/plugins/common/types.py
@@ -12,60 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
+from rally.plugins.task.types import * # noqa: F401,F403
+from rally.plugins.task import types as _new
-import requests
-
-from rally.common.plugin import plugin
-from rally import exceptions
-from rally.task import types
+# import it as the last item to be sure that we use the right module
+from rally.common import logging
-@plugin.configure(name="path_or_url")
-class PathOrUrl(types.ResourceType):
- """Check whether file exists or url available."""
-
- def pre_process(self, resource_spec, config):
- path = os.path.expanduser(resource_spec)
- if os.path.isfile(path):
- return path
- try:
- head = requests.head(path, verify=False, allow_redirects=True)
- if head.status_code == 200:
- return path
- raise exceptions.InvalidScenarioArgument(
- "Url %s unavailable (code %s)" % (path, head.status_code))
- except Exception as ex:
- raise exceptions.InvalidScenarioArgument(
- "Url error %s (%s)" % (path, ex))
-
-
-@plugin.configure(name="file")
-class FileType(types.ResourceType):
- """Return content of the file by its path."""
-
- def pre_process(self, resource_spec, config):
- with open(os.path.expanduser(resource_spec), "r") as f:
- return f.read()
-
-
-@plugin.configure(name="expand_user_path")
-class ExpandUserPath(types.ResourceType):
- """Expands user path."""
-
- def pre_process(self, resource_spec, config):
- return os.path.expanduser(resource_spec)
-
-
-@plugin.configure(name="file_dict")
-class FileTypeDict(types.ResourceType):
- """Return the dictionary of items with file path and file content."""
-
- def pre_process(self, resource_spec, config):
- file_type_dict = {}
- for file_path in resource_spec:
- file_path = os.path.expanduser(file_path)
- with open(file_path, "r") as f:
- file_type_dict[file_path] = f.read()
-
- return file_type_dict
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
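The resource types above are thin pre-processors applied to scenario arguments. A standalone sketch of the two simplest ones in plain-function form (the real plugins are ResourceType subclasses):

.. code-block:: python

    import os

    def expand_user_path(resource_spec):
        # "expand_user_path" behaviour: expand "~" in the given path
        return os.path.expanduser(resource_spec)

    def file_type(resource_spec):
        # "file" behaviour: replace the path with the file content
        with open(os.path.expanduser(resource_spec)) as f:
            return f.read()

    print(expand_user_path("~/.rally/rally.conf"))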
diff --git a/rally/plugins/common/verification/reporters.py b/rally/plugins/common/verification/reporters.py
index b52f8b5812..5852a59d11 100644
--- a/rally/plugins/common/verification/reporters.py
+++ b/rally/plugins/common/verification/reporters.py
@@ -12,452 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import collections
-import json
-import re
+from rally.plugins.verification.reporters import * # noqa: F401,F403
+from rally.plugins.verification import reporters as _new
-from rally.common.io import junit
-from rally import consts
-from rally.ui import utils as ui_utils
-from rally.verification import reporter
+# import it as the last item to be sure that we use the right module
+from rally.common import logging
-SKIP_RE = re.compile(r"Skipped until Bug: ?(?P<bug_number>\d+) is resolved.")
-LP_BUG_LINK = "https://launchpad.net/bugs/%s"
-TIME_FORMAT = consts.TimeFormat.ISO8601
-
-
-@reporter.configure("json")
-class JSONReporter(reporter.VerificationReporter):
- """Generates verification report in JSON format.
-
- An example of the report (All dates, numbers, names appearing in this
- example are fictitious. Any resemblance to real things is purely
- coincidental):
-
- .. code-block:: json
-
- {"verifications": {
- "verification-uuid-1": {
- "status": "finished",
- "skipped": 1,
- "started_at": "2001-01-01T00:00:00",
- "finished_at": "2001-01-01T00:05:00",
- "tests_duration": 5,
- "run_args": {
- "pattern": "set=smoke",
- "xfail_list": {"some.test.TestCase.test_xfail":
- "Some reason why it is expected."},
- "skip_list": {"some.test.TestCase.test_skipped":
- "This test was skipped intentionally"},
- },
- "success": 1,
- "expected_failures": 1,
- "tests_count": 3,
- "failures": 0,
- "unexpected_success": 0
- },
- "verification-uuid-2": {
- "status": "finished",
- "skipped": 1,
- "started_at": "2002-01-01T00:00:00",
- "finished_at": "2002-01-01T00:05:00",
- "tests_duration": 5,
- "run_args": {
- "pattern": "set=smoke",
- "xfail_list": {"some.test.TestCase.test_xfail":
- "Some reason why it is expected."},
- "skip_list": {"some.test.TestCase.test_skipped":
- "This test was skipped intentionally"},
- },
- "success": 1,
- "expected_failures": 1,
- "tests_count": 3,
- "failures": 1,
- "unexpected_success": 0
- }
- },
- "tests": {
- "some.test.TestCase.test_foo[tag1,tag2]": {
- "name": "some.test.TestCase.test_foo",
- "tags": ["tag1","tag2"],
- "by_verification": {
- "verification-uuid-1": {
- "status": "success",
- "duration": "1.111"
- },
- "verification-uuid-2": {
- "status": "success",
- "duration": "22.222"
- }
- }
- },
- "some.test.TestCase.test_skipped[tag1]": {
- "name": "some.test.TestCase.test_skipped",
- "tags": ["tag1"],
- "by_verification": {
- "verification-uuid-1": {
- "status": "skipped",
- "duration": "0",
- "details": "Skipped until Bug: 666 is resolved."
- },
- "verification-uuid-2": {
- "status": "skipped",
- "duration": "0",
- "details": "Skipped until Bug: 666 is resolved."
- }
- }
- },
- "some.test.TestCase.test_xfail": {
- "name": "some.test.TestCase.test_xfail",
- "tags": [],
- "by_verification": {
- "verification-uuid-1": {
- "status": "xfail",
- "duration": "3",
- "details": "Some reason why it is expected.\\n\\n"
- "Traceback (most recent call last): \\n"
- " File "fake.py", line 13, in \\n"
- " yyy()\\n"
- " File "fake.py", line 11, in yyy\\n"
- " xxx()\\n"
- " File "fake.py", line 8, in xxx\\n"
- " bar()\\n"
- " File "fake.py", line 5, in bar\\n"
- " foo()\\n"
- " File "fake.py", line 2, in foo\\n"
- " raise Exception()\\n"
- "Exception"
- },
- "verification-uuid-2": {
- "status": "xfail",
- "duration": "3",
- "details": "Some reason why it is expected.\\n\\n"
- "Traceback (most recent call last): \\n"
- " File "fake.py", line 13, in \\n"
- " yyy()\\n"
- " File "fake.py", line 11, in yyy\\n"
- " xxx()\\n"
- " File "fake.py", line 8, in xxx\\n"
- " bar()\\n"
- " File "fake.py", line 5, in bar\\n"
- " foo()\\n"
- " File "fake.py", line 2, in foo\\n"
- " raise Exception()\\n"
- "Exception"
- }
- }
- },
- "some.test.TestCase.test_failed": {
- "name": "some.test.TestCase.test_failed",
- "tags": [],
- "by_verification": {
- "verification-uuid-2": {
- "status": "fail",
- "duration": "4",
- "details": "Some reason why it is expected.\\n\\n"
- "Traceback (most recent call last): \\n"
- " File "fake.py", line 13, in \\n"
- " yyy()\\n"
- " File "fake.py", line 11, in yyy\\n"
- " xxx()\\n"
- " File "fake.py", line 8, in xxx\\n"
- " bar()\\n"
- " File "fake.py", line 5, in bar\\n"
- " foo()\\n"
- " File "fake.py", line 2, in foo\\n"
- " raise Exception()\\n"
- "Exception"
- }
- }
- }
- }
- }
-
- """
-
- @classmethod
- def validate(cls, output_destination):
- """Validate destination of report.
-
- :param output_destination: Destination of report
- """
- # nothing to check :)
- pass
-
- def _generate(self):
- """Prepare raw report."""
-
- verifications = collections.OrderedDict()
- tests = {}
-
- for v in self.verifications:
- verifications[v.uuid] = {
- "started_at": v.created_at.strftime(TIME_FORMAT),
- "finished_at": v.updated_at.strftime(TIME_FORMAT),
- "status": v.status,
- "run_args": v.run_args,
- "tests_count": v.tests_count,
- "tests_duration": v.tests_duration,
- "skipped": v.skipped,
- "success": v.success,
- "expected_failures": v.expected_failures,
- "unexpected_success": v.unexpected_success,
- "failures": v.failures,
- }
-
- for test_id, result in v.tests.items():
- if test_id not in tests:
- # NOTE(ylobankov): It is more convenient to see test ID
- # at the first place in the report.
- tags = sorted(result.get("tags", []), reverse=True,
- key=lambda tag: tag.startswith("id-"))
- tests[test_id] = {"tags": tags,
- "name": result["name"],
- "by_verification": {}}
-
- tests[test_id]["by_verification"][v.uuid] = {
- "status": result["status"],
- "duration": result["duration"]
- }
-
- reason = result.get("reason", "")
- if reason:
- match = SKIP_RE.match(reason)
- if match:
- link = LP_BUG_LINK % match.group("bug_number")
- reason = re.sub(match.group("bug_number"), link,
- reason)
- traceback = result.get("traceback", "")
- sep = "\n\n" if reason and traceback else ""
- d = (reason + sep + traceback.strip()) or None
- if d:
- tests[test_id]["by_verification"][v.uuid]["details"] = d
-
- return {"verifications": verifications, "tests": tests}
-
- def generate(self):
- raw_report = json.dumps(self._generate(), indent=4)
-
- if self.output_destination:
- return {"files": {self.output_destination: raw_report},
- "open": self.output_destination}
- else:
- return {"print": raw_report}
-
-
-@reporter.configure("html")
-class HTMLReporter(JSONReporter):
- """Generates verification report in HTML format."""
- INCLUDE_LIBS = False
-
- # "T" separator of ISO 8601 is not user-friendly enough.
- TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
-
- def generate(self):
- report = self._generate()
- uuids = report["verifications"].keys()
- show_comparison_note = False
-
- for test in report["tests"].values():
- # do as much processing as possible here to reduce the work
- # on the JS side
- test["has_details"] = False
- for test_info in test["by_verification"].values():
- if "details" not in test_info:
- test_info["details"] = None
- elif not test["has_details"]:
- test["has_details"] = True
-
- durations = []
- # iterate over uuids to keep the right order for comparison
- for uuid in uuids:
- if uuid in test["by_verification"]:
- durations.append(test["by_verification"][uuid]["duration"])
- if float(durations[-1]) < 0.001:
- durations[-1] = "0"
- # do not display such a small duration in the report
- test["by_verification"][uuid]["duration"] = ""
-
- if len(durations) > 1 and not (
- durations[0] == "0" and durations[-1] == "0"):
- # compare result with result of the first verification
- diff = float(durations[-1]) - float(durations[0])
- result = "%s (" % durations[-1]
- if diff >= 0:
- result += "+"
- result += "%s)" % diff
- test["by_verification"][uuid]["duration"] = result
-
- if not show_comparison_note and len(durations) > 2:
- # NOTE(andreykurilin): the note about the comparison strategy
- # should be displayed only when more than 2 results of the same
- # test are compared
- show_comparison_note = True
-
- template = ui_utils.get_template("verification/report.html")
- context = {"uuids": list(uuids),
- "verifications": report["verifications"],
- "tests": report["tests"],
- "show_comparison_note": show_comparison_note}
-
- raw_report = template.render(data=json.dumps(context),
- include_libs=self.INCLUDE_LIBS)
-
- # in future we will support html_static and will need to save more
- # files
- if self.output_destination:
- return {"files": {self.output_destination: raw_report},
- "open": self.output_destination}
- else:
- return {"print": raw_report}
-
-
-@reporter.configure("html-static")
-class HTMLStaticReporter(HTMLReporter):
- """Generates verification report in HTML format with embedded JS/CSS."""
- INCLUDE_LIBS = True
-
-
-@reporter.configure("junit-xml")
-class JUnitXMLReporter(reporter.VerificationReporter):
- """Generates verification report in JUnit-XML format.
-
- An example of the report (All dates, numbers, names appearing in this
- example are fictitious. Any resemblance to real things is purely
- coincidental):
-
- .. code-block:: xml
-
-      <testsuites>
-        <testsuite id="verification-uuid-1" tests="3" failures="1"
-                   skipped="1" time="5" timestamp="2001-01-01T00:00:00">
-          <testcase classname="some.test.TestCase" name="test_foo"
-                    time="1.111" timestamp="2001-01-01T00:01:00"/>
-          <testcase classname="some.test.TestCase" name="test_skipped"
-                    time="0" timestamp="2001-01-01T00:02:00">
-            <skipped>Skipped until Bug: 666 is resolved.</skipped>
-          </testcase>
-          <testcase classname="some.test.TestCase" name="test_uxsuccess"
-                    time="3" timestamp="2001-01-01T00:03:00">
-            <failure>
-              It is an unexpected success. The test should fail due to:
-              It should fail, I said!
-            </failure>
-          </testcase>
-        </testsuite>
-        <testsuite id="verification-uuid-2" tests="3" failures="1"
-                   skipped="1" time="5" timestamp="2002-01-01T00:00:00">
-          <testcase classname="some.test.TestCase" name="test_failed"
-                    time="4" timestamp="2002-01-01T00:01:00">
-            <failure>HEEEEEEELP</failure>
-          </testcase>
-          <testcase classname="some.test.TestCase" name="test_skipped"
-                    time="0" timestamp="2002-01-01T00:02:00">
-            <skipped>Skipped until Bug: 666 is resolved.</skipped>
-          </testcase>
-        </testsuite>
-      </testsuites>
- """
-
- @classmethod
- def validate(cls, output_destination):
- pass
-
- def generate(self):
- report = junit.JUnitXML()
-
- for v in self.verifications:
- test_suite = report.add_test_suite(
- id=v.uuid,
- time=str(v.tests_duration),
- timestamp=v.created_at.strftime(TIME_FORMAT)
- )
- test_suite.setup_final_stats(
- tests=str(v.tests_count),
- skipped=str(v.skipped),
- failures=str(v.failures + v.unexpected_success)
- )
-
- tests = sorted(v.tests.values(),
- key=lambda t: (t.get("timestamp", ""), t["name"]))
- for result in tests:
- class_name, name = result["name"].rsplit(".", 1)
-
- test_id = [tag[3:] for tag in result.get("tags", [])
- if tag.startswith("id-")]
-
- test_case = test_suite.add_test_case(
- id=(test_id[0] if test_id else None),
- time=result["duration"], name=name, classname=class_name,
- timestamp=result.get("timestamp"))
-
- if result["status"] == "success":
- # nothing to add
- pass
- elif result["status"] == "uxsuccess":
- test_case.mark_as_uxsuccess(
- result.get("reason"))
- elif result["status"] == "fail":
- test_case.mark_as_failed(
- result.get("traceback", None))
- elif result["status"] == "xfail":
- trace = result.get("traceback", None)
- test_case.mark_as_xfail(
- result.get("reason", None),
- f"Traceback:\n{trace}" if trace else None)
- elif result["status"] == "skip":
- test_case.mark_as_skipped(
- result.get("reason", None))
- else:
- # wtf is it?! we should add validation of results...
- pass
-
- raw_report = report.to_string()
- if self.output_destination:
- return {"files": {self.output_destination: raw_report},
- "open": self.output_destination}
- else:
- return {"print": raw_report}
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
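The skip-reason handling in _generate() rewrites Launchpad bug numbers into links. A self-contained demonstration of that substitution, reusing the regex from the moved module:

.. code-block:: python

    import re

    SKIP_RE = re.compile(
        r"Skipped until Bug: ?(?P<bug_number>\d+) is resolved.")
    LP_BUG_LINK = "https://launchpad.net/bugs/%s"

    reason = "Skipped until Bug: 666 is resolved."
    match = SKIP_RE.match(reason)
    if match:
        link = LP_BUG_LINK % match.group("bug_number")
        reason = re.sub(match.group("bug_number"), link, reason)
    # Skipped until Bug: https://launchpad.net/bugs/666 is resolved.
    print(reason)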
diff --git a/rally/plugins/common/verification/testr.py b/rally/plugins/common/verification/testr.py
index 8b1ca7e342..ace83ac6cf 100644
--- a/rally/plugins/common/verification/testr.py
+++ b/rally/plugins/common/verification/testr.py
@@ -12,150 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
-import re
-import shutil
-import subprocess
+from rally.plugins.verification.testr import * # noqa: F401,F403
+from rally.plugins.verification import testr as _new
-
-from rally.common.io import subunit_v2
+# import it as the last item to be sure that we use the right module
from rally.common import logging
-from rally.common import utils as common_utils
-from rally import exceptions
-from rally.verification import context
-from rally.verification import manager
-from rally.verification import utils
-LOG = logging.getLogger(__name__)
-
-TEST_NAME_RE = re.compile(r"^[a-zA-Z_.0-9]+(\[[a-zA-Z-_,=0-9]*\])?$")
-
-
-@context.configure("testr", order=999)
-class TestrContext(context.VerifierContext):
- """Context to transform 'run_args' into CLI arguments for testr."""
-
- def __init__(self, ctx):
- super(TestrContext, self).__init__(ctx)
- self._tmp_files = []
-
- def setup(self):
- super(TestrContext, self).setup()
- use_testr = getattr(self.verifier.manager, "_use_testr", True)
-
- if use_testr:
- base_cmd = "testr"
- else:
- base_cmd = "stestr"
- self.context["testr_cmd"] = [base_cmd, "run", "--subunit"]
- run_args = self.verifier.manager.prepare_run_args(
- self.context.get("run_args", {}))
-
- concurrency = run_args.get("concurrency", 0)
- if concurrency == 0 or concurrency > 1:
- if use_testr:
- self.context["testr_cmd"].append("--parallel")
- if concurrency >= 1:
- if concurrency == 1 and not use_testr:
- self.context["testr_cmd"].append("--serial")
- else:
- self.context["testr_cmd"].extend(
- ["--concurrency", str(concurrency)])
-
- load_list = self.context.get("load_list")
- skip_list = self.context.get("skip_list")
-
- if skip_list:
- load_list = set(load_list) - set(skip_list)
- if load_list:
- load_list_file = common_utils.generate_random_path()
- with open(load_list_file, "w") as f:
- f.write("\n".join(load_list))
- self._tmp_files.append(load_list_file)
- self.context["testr_cmd"].extend(["--load-list", load_list_file])
-
- if run_args.get("failed"):
- self.context["testr_cmd"].append("--failing")
-
- if run_args.get("pattern"):
- self.context["testr_cmd"].append(run_args.get("pattern"))
-
- def cleanup(self):
- for f in self._tmp_files:
- if os.path.exists(f):
- os.remove(f)
-
-
-class TestrLauncher(manager.VerifierManager):
- """Testr/sTestr wrapper."""
-
- def __init__(self, *args, **kwargs):
- super(TestrLauncher, self).__init__(*args, **kwargs)
- self._use_testr = os.path.exists(os.path.join(
- self.repo_dir, ".testr.conf"))
-
- @property
- def run_environ(self):
- return self.environ
-
- def _init_testr(self):
- """Initialize testr."""
- test_repository_dir = os.path.join(self.base_dir, ".testrepository")
- # NOTE(andreykurilin): Is there any possibility that .testrepository
- # is present in a clean repo?!
- if not os.path.isdir(test_repository_dir):
- LOG.debug("Initializing testr.")
- if self._use_testr:
- base_cmd = "testr"
- else:
- base_cmd = "stestr"
- try:
- utils.check_output([base_cmd, "init"], cwd=self.repo_dir,
- env=self.environ)
- except (subprocess.CalledProcessError, OSError):
- if os.path.exists(test_repository_dir):
- shutil.rmtree(test_repository_dir)
- raise exceptions.RallyException("Failed to initialize testr.")
-
- def install(self):
- super(TestrLauncher, self).install()
- self._init_testr()
-
- def list_tests(self, pattern=""):
- """List all tests."""
- if self._use_testr:
- cmd = ["testr", "list-tests", pattern]
- else:
- cmd = ["stestr", "list", pattern]
- output = utils.check_output(cmd,
- cwd=self.repo_dir, env=self.environ,
- debug_output=False)
- return [t for t in output.split("\n") if TEST_NAME_RE.match(t)]
-
- def run(self, context):
- """Run tests."""
- testr_cmd = context["testr_cmd"]
- LOG.debug("Test(s) started by the command: '%s'."
- % " ".join(testr_cmd))
- stream = subprocess.Popen(testr_cmd, env=self.run_environ,
- cwd=self.repo_dir,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- xfail_list = context.get("xfail_list")
- skip_list = context.get("skip_list")
- results = subunit_v2.parse(stream.stdout, live=True,
- expected_failures=xfail_list,
- skipped_tests=skip_list,
- logger_name=self.verifier.name)
- stream.wait()
-
- return results
-
- def prepare_run_args(self, run_args):
- """Prepare 'run_args' for testr context.
-
- This method is called by TestrContext before transforming 'run_args'
- into CLI arguments for testr.
- """
- return run_args
+logging.log_deprecated_module(
+ target=__name__, new_module=_new.__name__, release="3.0.0"
+)
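For reference, TestrContext.setup() assembles a plain CLI invocation and stores it in the context. An illustrative final value for an stestr-based verifier with concurrency 4 and a generated load list (the temp path is hypothetical):

.. code-block:: python

    testr_cmd = [
        "stestr", "run", "--subunit",
        "--concurrency", "4",
        "--load-list", "/tmp/rally_load_list",  # hypothetical temp file
    ]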
diff --git a/rally/plugins/common/contexts/__init__.py b/rally/plugins/task/contexts/__init__.py
similarity index 100%
rename from rally/plugins/common/contexts/__init__.py
rename to rally/plugins/task/contexts/__init__.py
diff --git a/rally/plugins/common/contexts/dummy.py b/rally/plugins/task/contexts/dummy.py
similarity index 100%
rename from rally/plugins/common/contexts/dummy.py
rename to rally/plugins/task/contexts/dummy.py
diff --git a/rally/plugins/common/scenarios/dummy/__init__.py b/rally/plugins/task/exporters/__init__.py
similarity index 100%
rename from rally/plugins/common/scenarios/dummy/__init__.py
rename to rally/plugins/task/exporters/__init__.py
diff --git a/tests/unit/plugins/common/contexts/__init__.py b/rally/plugins/task/exporters/elastic/__init__.py
similarity index 100%
rename from tests/unit/plugins/common/contexts/__init__.py
rename to rally/plugins/task/exporters/elastic/__init__.py
diff --git a/rally/plugins/task/exporters/elastic/client.py b/rally/plugins/task/exporters/elastic/client.py
new file mode 100755
index 0000000000..36ecc676ac
--- /dev/null
+++ b/rally/plugins/task/exporters/elastic/client.py
@@ -0,0 +1,159 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+import requests
+
+from rally.common import logging
+from rally import exceptions
+
+LOG = logging.getLogger(__name__)
+
+
+class ElasticSearchClient(object):
+ """The helper class for communication with ElasticSearch 2.*, 5.*, 6.*"""
+
+ # the maximum number of documents to push to the cluster at once.
+ CHUNK_LENGTH = 10000
+
+ def __init__(self, url):
+ self._url = url.rstrip("/") if url else "http://localhost:9200"
+ self._version = None
+
+ @staticmethod
+ def _check_response(resp, action=None):
+ if resp.status_code in (200, 201):
+ return
+ # it is an error. let's try to find the reason
+ reason = None
+ try:
+ data = resp.json()
+ except ValueError:
+ # it is ok
+ pass
+ else:
+ if "error" in data:
+ if isinstance(data["error"], dict):
+ reason = data["error"].get("reason", "")
+ else:
+ reason = data["error"]
+ reason = reason or resp.text or "n/a"
+ action = action or "connect to"
+ raise exceptions.RallyException(
+ "[HTTP %s] Failed to %s ElasticSearch cluster: %s" %
+ (resp.status_code, action, reason))
+
+ def version(self):
+ """Get version of the ElasticSearch cluster."""
+ if self._version is None:
+ self.info()
+ return self._version
+
+ def info(self):
+ """Retrieve info about the ElasticSearch cluster."""
+ resp = requests.get(self._url)
+ self._check_response(resp)
+ err_msg = "Failed to retrieve info about the ElasticSearch cluster: %s"
+ try:
+ data = resp.json()
+ except ValueError:
+ LOG.debug("Return data from %s: %s" % (self._url, resp.text))
+ raise exceptions.RallyException(
+ err_msg % "The return data doesn't look like a json.")
+ version = data.get("version", {}).get("number")
+ if not version:
+ LOG.debug("Return data from %s: %s" % (self._url, resp.text))
+ raise exceptions.RallyException(
+ err_msg % "Failed to parse the received data.")
+ self._version = version
+ if self._version.startswith("2"):
+ data["version"]["build_date"] = data["version"].pop(
+ "build_timestamp")
+ return data
+
+ def push_documents(self, documents):
+ """Push documents to the ElasticSearch cluster using bulk API.
+
+ :param documents: a list of documents to push
+ """
+ LOG.debug("Pushing %s documents by chunks (up to %s documents at once)"
+ " to ElasticSearch." %
+ # dividing numbers by two, since each document has 2 lines
+ # in `documents` (action and document itself).
+ (len(documents) / 2, self.CHUNK_LENGTH / 2))
+
+ for pos in range(0, len(documents), self.CHUNK_LENGTH):
+ data = "\n".join(documents[pos:pos + self.CHUNK_LENGTH]) + "\n"
+
+ raw_resp = requests.post(
+ self._url + "/_bulk", data=data,
+ headers={"Content-Type": "application/x-ndjson"}
+ )
+ self._check_response(raw_resp, action="push documents to")
+
+ LOG.debug("Successfully pushed %s documents." %
+ len(raw_resp.json()["items"]))
+
+ def list_indices(self):
+ """List all indices."""
+ resp = requests.get(self._url + "/_cat/indices?v")
+ self._check_response(resp, "list the indices at")
+
+ return resp.text.rstrip().split(" ")
+
+ def create_index(self, name, doc_type, properties):
+ """Create an index.
+
+ There are two very different ways to search strings. You can either
+ search whole values, which we often refer to as keyword search, or
+ individual tokens, which we usually refer to as full-text search.
+ ElasticSearch 2.x uses the `string` data type for both cases, whereas
+ in ElasticSearch 5.0 the `string` data type was replaced by two new
+ types: `keyword` and `text`. Since it is hard to predict the destiny
+ of the `string` data type, and to support both input formats, the
+ properties should be transmitted in the ElasticSearch 5.x format.
+ """
+ if self.version().startswith("2."):
+ properties = copy.deepcopy(properties)
+ for spec in properties.values():
+ if spec.get("type", None) == "text":
+ spec["type"] = "string"
+ elif spec.get("type", None) == "keyword":
+ spec["type"] = "string"
+ spec["index"] = "not_analyzed"
+
+ resp = requests.put(
+ self._url + "/%s" % name,
+ json={"mappings": {doc_type: {"properties": properties}}})
+ self._check_response(resp, "create index at")
+
+ def check_document(self, index, doc_id, doc_type="data"):
+ """Check for the existence of a document.
+
+ :param index: The index of a document
+ :param doc_id: The ID of a document
+ :param doc_type: The type of a document (Defaults to data)
+ """
+ resp = requests.head("%(url)s/%(index)s/%(type)s/%(id)s" %
+ {"url": self._url,
+ "index": index,
+ "type": doc_type,
+ "id": doc_id})
+ if resp.status_code == 200:
+ return True
+ elif resp.status_code == 404:
+ return False
+ else:
+ self._check_response(resp, "check the index at")
diff --git a/rally/plugins/task/exporters/elastic/exporter.py b/rally/plugins/task/exporters/elastic/exporter.py
new file mode 100755
index 0000000000..c0e5368f8f
--- /dev/null
+++ b/rally/plugins/task/exporters/elastic/exporter.py
@@ -0,0 +1,386 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import datetime as dt
+import itertools
+import json
+import os
+
+from rally.common import logging
+from rally.common import validation
+from rally import consts
+from rally import exceptions
+from rally.plugins.task.exporters.elastic import client
+from rally.plugins.task.exporters.elastic import flatten
+from rally.task import exporter
+
+LOG = logging.getLogger(__name__)
+
+
+@validation.configure("es_exporter_destination")
+class Validator(validation.Validator):
+ """Validates the destination for ElasticSearch exporter.
+
+ In case the destination is an ElasticSearch cluster, its version
+ should be 2.*, 5.* or 6.*
+ """
+ def validate(self, context, config, plugin_cls, plugin_cfg):
+ destination = plugin_cfg["destination"]
+ if destination and (not destination.startswith("http://")
+ and not destination.startswith("https://")):
+ # it is a path to a local file
+ return
+ es = client.ElasticSearchClient(destination)
+ try:
+ version = es.version()
+ except exceptions.RallyException as e:
+ # re-raise a proper exception to hide redundant traceback
+ self.fail(e.format_message())
+ if not (version.startswith("2.")
+ or version.startswith("5.")
+ or version.startswith("6.")):
+ self.fail("The unsupported version detected %s." % version)
+
+
+@validation.add("es_exporter_destination")
+@exporter.configure("elastic")
+class ElasticSearchExporter(exporter.TaskExporter):
+ """Exports task results to the ElasticSearch 2.x, 5.x or 6.x clusters.
+
+ The exported data includes:
+
+ * Task basic information such as title, description, status,
+ deployment uuid, etc.
+ See the rally_task_data_v1 index.
+
+ * Workload information such as scenario name and configuration, runner
+ type and configuration, the load start time, success rate, sla
+ details in case of errors, etc.
+ See the rally_workload_data_v1 index.
+
+ * Separate documents for all atomic actions.
+ See rally_atomic_action_data_v1 index.
+
+ The destination can be a remote server. In this case specify it like:
+
+ https://elastic:changeme@example.com
+
+ Or we can dump documents to a file. In that case the destination should look like:
+
+ /home/foo/bar.txt
+
+ In case of an empty destination, the http://localhost:9200 destination
+ will be used.
+ """
+
+ TASK_INDEX = "rally_task_data_v1"
+ WORKLOAD_INDEX = "rally_workload_data_v1"
+ AA_INDEX = "rally_atomic_action_data_v1"
+ INDEX_SCHEMAS = {
+ TASK_INDEX: {
+ "task_uuid": {"type": "keyword"},
+ "deployment_uuid": {"type": "keyword"},
+ "deployment_name": {"type": "keyword"},
+ "title": {"type": "text"},
+ "description": {"type": "text"},
+ "status": {"type": "keyword"},
+ "pass_sla": {"type": "boolean"},
+ "tags": {"type": "keyword"}
+ },
+ WORKLOAD_INDEX: {
+ "deployment_uuid": {"type": "keyword"},
+ "deployment_name": {"type": "keyword"},
+ "scenario_name": {"type": "keyword"},
+ "scenario_cfg": {"type": "keyword"},
+ "description": {"type": "text"},
+ "runner_name": {"type": "keyword"},
+ "runner_cfg": {"type": "keyword"},
+ "contexts": {"type": "keyword"},
+ "task_uuid": {"type": "keyword"},
+ "subtask_uuid": {"type": "keyword"},
+ "started_at": {"type": "date"},
+ "load_duration": {"type": "long"},
+ "full_duration": {"type": "long"},
+ "pass_sla": {"type": "boolean"},
+ "success_rate": {"type": "float"},
+ "sla_details": {"type": "text"}
+ },
+ AA_INDEX: {
+ "deployment_uuid": {"type": "keyword"},
+ "deployment_name": {"type": "keyword"},
+ "action_name": {"type": "keyword"},
+ "workload_uuid": {"type": "keyword"},
+ "scenario_cfg": {"type": "keyword"},
+ "contexts": {"type": "keyword"},
+ "runner_name": {"type": "keyword"},
+ "runner_cfg": {"type": "keyword"},
+ "success": {"type": "boolean"},
+ "duration": {"type": "float"},
+ "started_at": {"type": "date"},
+ "finished_at": {"type": "date"},
+ "parent": {"type": "keyword"},
+ "error": {"type": "keyword"}
+ }
+ }
+
+ def __init__(self, tasks_results, output_destination, api=None):
+ super(ElasticSearchExporter, self).__init__(tasks_results,
+ output_destination,
+ api=api)
+ self._report = []
+        self._remote = (
+            output_destination is None
+            or output_destination.startswith("http://")
+            or output_destination.startswith("https://"))
+ if self._remote:
+ self._client = client.ElasticSearchClient(self.output_destination)
+
+ def _add_index(self, index, body, doc_id=None, doc_type="data"):
+ """Create a document for the specified index with specified id.
+
+ :param index: The name of the index
+        :param body: The document itself, i.e. a task, workload or
+                     atomic action report
+ :param doc_id: Document ID. Here we use task/subtask/workload uuid
+ :param doc_type: The type of document
+
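+        The report is accumulated in the ElasticSearch bulk API format:
+        each document is preceded by an action line, e.g. (a sketch with
+        illustrative values):
+
+            {"index": {"_index": "rally_task_data_v1",
+                       "_type": "data", "_id": "<task-uuid>"}}
+            {"task_uuid": "<task-uuid>", "title": "...", ...}
+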
+ """
+ self._report.append(
+ json.dumps(
+ # use OrderedDict to make the report more unified
+ {"index": collections.OrderedDict([
+ ("_index", index),
+ ("_type", doc_type),
+ ("_id", doc_id)])},
+ sort_keys=False))
+ self._report.append(json.dumps(body))
+
+ def _ensure_indices(self):
+ """Check available indices and create require ones if they missed."""
+ available_index = set(self._client.list_indices())
+ missed_index = {self.TASK_INDEX, self.WORKLOAD_INDEX,
+ self.AA_INDEX} - available_index
+ for index in missed_index:
+ LOG.debug("Creating '%s' index." % index)
+ self._client.create_index(index, doc_type="data",
+ properties=self.INDEX_SCHEMAS[index])
+
+ @staticmethod
+ def _make_action_report(name, workload_id, workload, duration,
+ started_at, finished_at, parent, error):
+        # NOTE(andreykurilin): actually, this method just creates a dict
+        # object, but we need the same format in two places, so the template
+        # was transformed into a method.
+ parent = parent[0] if parent else None
+ return {
+ "deployment_uuid": workload["deployment_uuid"],
+ "deployment_name": workload["deployment_name"],
+ "action_name": name,
+ "workload_uuid": workload_id,
+ "scenario_cfg": workload["scenario_cfg"],
+ "contexts": workload["contexts"],
+ "runner_name": workload["runner_name"],
+ "runner_cfg": workload["runner_cfg"],
+ "success": not bool(error),
+ "duration": duration,
+ "started_at": started_at,
+ "finished_at": finished_at,
+ "parent": parent,
+ "error": error
+ }
+
+ def _process_atomic_actions(self, itr, workload, workload_id,
+ atomic_actions=None, _parent=None, _depth=0,
+ _cache=None):
+ """Process atomic actions of an iteration
+
+        :param atomic_actions: A list of atomic actions
+ :param itr: The iteration data
+ :param workload: The workload report
+ :param workload_id: The workload UUID
+ :param _parent: An inner parameter which is used for pointing to the
+ parent atomic action
+ :param _depth: An inner parameter which is used to mark the level of
+ depth while parsing atomic action children
+ :param _cache: An inner parameter which is used to avoid conflicts in
+ IDs of atomic actions of a single iteration.
+ """
+
+ if _depth >= 3:
+ return
+ cache = _cache or {}
+
+ if atomic_actions is None:
+ atomic_actions = itr["atomic_actions"]
+
+ act_id_tmpl = "%(itr_id)s_action_%(action_name)s_%(num)s"
+ for i, action in enumerate(atomic_actions, 1):
+ cache.setdefault(action["name"], 0)
+ act_id = act_id_tmpl % {
+ "itr_id": itr["id"],
+ "action_name": action["name"],
+ "num": cache[action["name"]]}
+ cache[action["name"]] += 1
+
+ started_at = dt.datetime.utcfromtimestamp(action["started_at"])
+ finished_at = dt.datetime.utcfromtimestamp(action["finished_at"])
+ started_at = started_at.strftime(consts.TimeFormat.ISO8601)
+ finished_at = finished_at.strftime(consts.TimeFormat.ISO8601)
+
+ action_report = self._make_action_report(
+ name=action["name"],
+ workload_id=workload_id,
+ workload=workload,
+ duration=(action["finished_at"] - action["started_at"]),
+ started_at=started_at,
+ finished_at=finished_at,
+ parent=_parent,
+ error=(itr["error"] if action.get("failed", False) else None)
+ )
+
+ self._add_index(self.AA_INDEX, action_report,
+ doc_id=act_id)
+
+ self._process_atomic_actions(
+ atomic_actions=action["children"],
+ itr=itr,
+ workload=workload,
+ workload_id=workload_id,
+ _parent=(act_id, action_report),
+ _depth=(_depth + 1),
+ _cache=cache)
+
+ if itr["error"] and (
+                # the case when it is the top level of the scenario and the
+                # failed item is not wrapped by AtomicTimer
+                (not _parent and not atomic_actions)
+                # the case when it is the top level of the scenario and
+                # the item fails after some atomic actions have completed
+ or (not _parent and atomic_actions
+ and not atomic_actions[-1].get("failed", False))):
+ act_id = act_id_tmpl % {
+ "itr_id": itr["id"],
+ "action_name": "no-name-action",
+ "num": 0}
+
+            # Since the action was not wrapped by AtomicTimer, we cannot
+            # make any assumptions about its duration (start_time), so let's
+            # use the finished_at timestamp of the iteration with 0 duration
+ timestamp = (itr["timestamp"] + itr["duration"]
+ + itr["idle_duration"])
+ timestamp = dt.datetime.utcfromtimestamp(timestamp)
+ timestamp = timestamp.strftime(consts.TimeFormat.ISO8601)
+ action_report = self._make_action_report(
+ name="no-name-action",
+ workload_id=workload_id,
+ workload=workload,
+ duration=0,
+ started_at=timestamp,
+ finished_at=timestamp,
+ parent=_parent,
+ error=itr["error"]
+ )
+ self._add_index(self.AA_INDEX, action_report, doc_id=act_id)
+
+ def generate(self):
+ if self._remote:
+ self._ensure_indices()
+
+ for task in self.tasks_results:
+ if self._remote:
+ if self._client.check_document(self.TASK_INDEX, task["uuid"]):
+ raise exceptions.RallyException(
+ "Failed to push the task %s to the ElasticSearch "
+ "cluster. The document with such UUID already exists" %
+ task["uuid"])
+
+ task_report = {
+ "task_uuid": task["uuid"],
+ "deployment_uuid": task["env_uuid"],
+ "deployment_name": task["env_name"],
+ "title": task["title"],
+ "description": task["description"],
+ "status": task["status"],
+ "pass_sla": task["pass_sla"],
+ "tags": task["tags"]
+ }
+ self._add_index(self.TASK_INDEX, task_report,
+ doc_id=task["uuid"])
+
+ # NOTE(andreykurilin): The subtasks do not have much logic now, so
+ # there is no reason to save the info about them.
+ for workload in itertools.chain(
+ *[s["workloads"] for s in task["subtasks"]]):
+
+ durations = workload["statistics"]["durations"]
+ success_rate = durations["total"]["data"]["success"]
+ if success_rate == "n/a":
+ success_rate = 0.0
+ else:
+ # cut the % char and transform to the float value
+ success_rate = float(success_rate[:-1]) / 100.0
+
+ started_at = workload["start_time"]
+ if started_at:
+ started_at = dt.datetime.utcfromtimestamp(started_at)
+ started_at = started_at.strftime(consts.TimeFormat.ISO8601)
+ workload_report = {
+ "task_uuid": workload["task_uuid"],
+ "subtask_uuid": workload["subtask_uuid"],
+ "deployment_uuid": task["env_uuid"],
+ "deployment_name": task["env_name"],
+ "scenario_name": workload["name"],
+ "scenario_cfg": flatten.transform(workload["args"]),
+ "description": workload["description"],
+ "runner_name": workload["runner_type"],
+ "runner_cfg": flatten.transform(workload["runner"]),
+ "contexts": flatten.transform(workload["contexts"]),
+ "started_at": started_at,
+ "load_duration": workload["load_duration"],
+ "full_duration": workload["full_duration"],
+ "pass_sla": workload["pass_sla"],
+ "success_rate": success_rate,
+ "sla_details": [s["detail"]
+ for s in workload["sla_results"]["sla"]
+ if not s["success"]]}
+
+ # do we need to store hooks ?!
+ self._add_index(self.WORKLOAD_INDEX, workload_report,
+ doc_id=workload["uuid"])
+
+ # Iterations
+ for idx, itr in enumerate(workload.get("data", []), 1):
+ itr["id"] = "%(uuid)s_iter_%(num)s" % {
+ "uuid": workload["uuid"],
+ "num": str(idx)}
+
+ self._process_atomic_actions(
+ itr=itr,
+ workload=workload_report,
+ workload_id=workload["uuid"])
+ if self._remote:
+ LOG.debug("The info of ElasticSearch cluster to which the results "
+ "will be exported: %s" % self._client.info())
+ self._client.push_documents(self._report)
+
+ msg = ("Successfully exported results to ElasticSearch at url "
+ "'%s'" % self.output_destination)
+ return {"print": msg}
+ else:
+            # a new line is required at the end of the file
+ report = "\n".join(self._report) + "\n"
+ return {"files": {self.output_destination: report},
+ "open": "file://" + os.path.abspath(
+ self.output_destination)}
diff --git a/rally/plugins/task/exporters/elastic/flatten.py b/rally/plugins/task/exporters/elastic/flatten.py
new file mode 100644
index 0000000000..bbca29c9e6
--- /dev/null
+++ b/rally/plugins/task/exporters/elastic/flatten.py
@@ -0,0 +1,65 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+def _join_keys(first, second):
+ if not second:
+ return first
+ elif second.startswith("["):
+ return "%s%s" % (first, second)
+ else:
+ return "%s.%s" % (first, second)
+
+
+def _process(obj):
+ if isinstance(obj, (str, bytes)):
+ yield "", obj
+ elif isinstance(obj, dict):
+ for first, tmp_value in obj.items():
+ for second, value in _process(tmp_value):
+ yield _join_keys(first, second), value
+ elif isinstance(obj, (list, tuple)):
+ for i, tmp_value in enumerate(obj):
+ for second, value in _process(tmp_value):
+ yield _join_keys("[%s]" % i, second), value
+ else:
+ try:
+ yield "", "%s" % obj
+ except Exception:
+ raise ValueError("Cannot transform obj of '%s' type to flatten "
+ "structure." % type(obj))
+
+
+def transform(obj):
+ """Transform object to a flatten structure.
+
+ Example:
+ IN:
+ {"foo": ["xxx", "yyy", {"bar": {"zzz": ["Hello", "World!"]}}]}
+ OUTPUT:
+ [
+ "foo[0]=xxx",
+ "foo[1]=yyy",
+ "foo[2].bar.zzz[0]=Hello",
+ "foo[2].bar.zzz[1]=World!"
+ ]
+
+ """
+ result = []
+ for key, value in _process(obj):
+ if key:
+ result.append("%s=%s" % (key, value))
+ else:
+ result.append(value)
+ return sorted(result)
diff --git a/rally/plugins/task/exporters/html.py b/rally/plugins/task/exporters/html.py
new file mode 100644
index 0000000000..02c7184558
--- /dev/null
+++ b/rally/plugins/task/exporters/html.py
@@ -0,0 +1,56 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import itertools
+import os
+
+from rally.task import exporter
+from rally.task.processing import plot
+
+
+@exporter.configure("html")
+class HTMLExporter(exporter.TaskExporter):
+ """Generates task report in HTML format."""
+ INCLUDE_LIBS = False
+
+ def _generate_results(self):
+ results = []
+ processed_names = {}
+ for task in self.tasks_results:
+ for workload in itertools.chain(
+ *[s["workloads"] for s in task["subtasks"]]):
+ if workload["name"] in processed_names:
+ processed_names[workload["name"]] += 1
+ workload["position"] = processed_names[workload["name"]]
+ else:
+ processed_names[workload["name"]] = 0
+ results.append(task)
+ return results
+
+ def generate(self):
+ report = plot.plot(self._generate_results(),
+ include_libs=self.INCLUDE_LIBS)
+
+ if self.output_destination:
+ return {"files": {self.output_destination: report},
+ "open": "file://" + os.path.abspath(
+ self.output_destination)}
+ else:
+ return {"print": report}
+
+
+@exporter.configure("html-static")
+class HTMLStaticExporter(HTMLExporter):
+ """Generates task report in HTML format with embedded JS/CSS."""
+ INCLUDE_LIBS = True
diff --git a/rally/plugins/task/exporters/json_exporter.py b/rally/plugins/task/exporters/json_exporter.py
new file mode 100644
index 0000000000..ad1bfb5bf4
--- /dev/null
+++ b/rally/plugins/task/exporters/json_exporter.py
@@ -0,0 +1,123 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import datetime as dt
+import json
+
+from rally.common import version as rally_version
+from rally.task import exporter
+
+TIMEFORMAT = "%Y-%m-%dT%H:%M:%S"
+
+
+@exporter.configure("json")
+class JSONExporter(exporter.TaskExporter):
+ """Generates task report in JSON format."""
+
+ # Revisions:
+ # 1.0 - the json report v1
+ # 1.1 - add `contexts_results` key with contexts execution results of
+ # workloads.
+    #     1.2 - add `env_uuid` and `env_name` which represent the UUID and
+    #           name of the environment where the task was executed
+ REVISION = "1.2"
+
+ def _generate_tasks(self):
+ tasks = []
+ for task in self.tasks_results:
+ subtasks = []
+ for subtask in task["subtasks"]:
+ workloads = []
+ for workload in subtask["workloads"]:
+ hooks = [{
+ "config": {"action": dict([h["config"]["action"]]),
+ "trigger": dict([h["config"]["trigger"]]),
+ "description": h["config"]["description"]},
+ "results": h["results"],
+ "summary": h["summary"], } for h in workload["hooks"]]
+ workloads.append(
+ collections.OrderedDict(
+ [("uuid", workload["uuid"]),
+ ("description", workload["description"]),
+ ("runner", {
+ workload["runner_type"]: workload["runner"]}),
+ ("hooks", hooks),
+ ("scenario", {
+ workload["name"]: workload["args"]}),
+ ("min_duration", workload["min_duration"]),
+ ("max_duration", workload["max_duration"]),
+ ("start_time", workload["start_time"]),
+ ("load_duration", workload["load_duration"]),
+ ("full_duration", workload["full_duration"]),
+ ("statistics", workload["statistics"]),
+ ("data", workload["data"]),
+ ("failed_iteration_count",
+ workload["failed_iteration_count"]),
+ ("total_iteration_count",
+ workload["total_iteration_count"]),
+ ("created_at", workload["created_at"]),
+ ("updated_at", workload["updated_at"]),
+ ("contexts", workload["contexts"]),
+ ("contexts_results",
+ workload["contexts_results"]),
+ ("position", workload["position"]),
+ ("pass_sla", workload["pass_sla"]),
+ ("sla_results", workload["sla_results"]),
+ ("sla", workload["sla"])]
+ )
+ )
+ subtasks.append(
+ collections.OrderedDict(
+ [("uuid", subtask["uuid"]),
+ ("title", subtask["title"]),
+ ("description", subtask["description"]),
+ ("status", subtask["status"]),
+ ("created_at", subtask["created_at"]),
+ ("updated_at", subtask["updated_at"]),
+ ("sla", subtask["sla"]),
+ ("workloads", workloads)]
+ )
+ )
+ tasks.append(
+ collections.OrderedDict(
+ [("uuid", task["uuid"]),
+ ("title", task["title"]),
+ ("description", task["description"]),
+ ("status", task["status"]),
+ ("tags", task["tags"]),
+ ("env_uuid", task.get("env_uuid", "n\a")),
+ ("env_name", task.get("env_name", "n\a")),
+ ("created_at", task["created_at"]),
+ ("updated_at", task["updated_at"]),
+ ("pass_sla", task["pass_sla"]),
+ ("subtasks", subtasks)]
+ )
+ )
+ return tasks
+
+ def generate(self):
+ results = {"info": {"rally_version": rally_version.version_string(),
+ "generated_at": dt.datetime.strftime(
+ dt.datetime.utcnow(), TIMEFORMAT),
+ "format_version": self.REVISION},
+ "tasks": self._generate_tasks()}
+
+ results = json.dumps(results, sort_keys=False, indent=4)
+
+ if self.output_destination:
+ return {"files": {self.output_destination: results},
+ "open": "file://" + self.output_destination}
+ else:
+ return {"print": results}
diff --git a/rally/plugins/task/exporters/junit.py b/rally/plugins/task/exporters/junit.py
new file mode 100644
index 0000000000..99b09f672a
--- /dev/null
+++ b/rally/plugins/task/exporters/junit.py
@@ -0,0 +1,96 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime as dt
+import itertools
+import os
+
+from rally.common.io import junit
+from rally.task import exporter
+
+
+@exporter.configure("junit-xml")
+class JUnitXMLExporter(exporter.TaskExporter):
+ """Generates task report in JUnit-XML format.
+
+ An example of the report (All dates, numbers, names appearing in this
+ example are fictitious. Any resemblance to real things is purely
+ coincidental):
+
+ .. code-block:: xml
+
+        <testsuites>
+          <testsuite id="task-uu-ii-dd"
+                     time="75.00"
+                     timestamp="2017-06-04T05:14:00">
+            <testcase classname="CinderVolumes"
+                      name="list_volumes"
+                      id="workload-1-uuid"
+                      time="29.97"
+                      timestamp="2017-06-04T05:14:44"/>
+            <testcase classname="NovaServers"
+                      name="list_keypairs"
+                      id="workload-2-uuid"
+                      time="5.00"
+                      timestamp="2017-06-04T05:15:15">
+              <failure>ooops</failure>
+            </testcase>
+          </testsuite>
+        </testsuites>
+ """
+
+ def generate(self):
+ root = junit.JUnitXML()
+
+ for t in self.tasks_results:
+ created_at = dt.datetime.strptime(t["created_at"],
+ "%Y-%m-%dT%H:%M:%S")
+ updated_at = dt.datetime.strptime(t["updated_at"],
+ "%Y-%m-%dT%H:%M:%S")
+ test_suite = root.add_test_suite(
+ id=t["uuid"],
+ time="%.2f" % (updated_at - created_at).total_seconds(),
+ timestamp=t["created_at"]
+ )
+ for workload in itertools.chain(
+ *[s["workloads"] for s in t["subtasks"]]):
+ class_name, name = workload["name"].split(".", 1)
+ test_case = test_suite.add_test_case(
+ id=workload["uuid"],
+ time="%.2f" % workload["full_duration"],
+ classname=class_name,
+ name=name,
+ timestamp=workload["created_at"]
+ )
+ if not workload["pass_sla"]:
+ details = "\n".join(
+ [s["detail"]
+ for s in workload["sla_results"]["sla"]
+ if not s["success"]]
+ )
+ test_case.mark_as_failed(details)
+
+ raw_report = root.to_string()
+
+ if self.output_destination:
+ return {"files": {self.output_destination: raw_report},
+ "open": "file://" + os.path.abspath(
+ self.output_destination)}
+ else:
+ return {"print": raw_report}
diff --git a/rally/plugins/task/exporters/trends.py b/rally/plugins/task/exporters/trends.py
new file mode 100644
index 0000000000..be19c54c8c
--- /dev/null
+++ b/rally/plugins/task/exporters/trends.py
@@ -0,0 +1,40 @@
+# Copyright 2018: ZTE Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from rally.task import exporter
+from rally.task.processing import plot
+
+
+@exporter.configure("trends-html")
+class TrendsExporter(exporter.TaskExporter):
+ """Generates task trends report in HTML format."""
+ INCLUDE_LIBS = False
+
+ def generate(self):
+ report = plot.trends(self.tasks_results, self.INCLUDE_LIBS)
+ if self.output_destination:
+ return {"files": {self.output_destination: report},
+ "open": "file://" + os.path.abspath(
+ self.output_destination)}
+ else:
+ return {"print": report}
+
+
+@exporter.configure("trends-html-static")
+class TrendsStaticExport(TrendsExporter):
+ """Generates task trends report in HTML format with embedded JS/CSS."""
+ INCLUDE_LIBS = True
diff --git a/tests/unit/plugins/common/exporters/__init__.py b/rally/plugins/task/hook_triggers/__init__.py
similarity index 100%
rename from tests/unit/plugins/common/exporters/__init__.py
rename to rally/plugins/task/hook_triggers/__init__.py
diff --git a/rally/plugins/task/hook_triggers/event.py b/rally/plugins/task/hook_triggers/event.py
new file mode 100644
index 0000000000..aefe0eb026
--- /dev/null
+++ b/rally/plugins/task/hook_triggers/event.py
@@ -0,0 +1,74 @@
+# Copyright 2016: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from rally import consts
+from rally.task import hook
+
+
+@hook.configure(name="event")
+class EventTrigger(hook.HookTrigger):
+ """Triggers hook on specified event and list of values."""
+
+ CONFIG_SCHEMA = {
+ "type": "object",
+ "$schema": consts.JSON_SCHEMA,
+ "oneOf": [
+ {
+ "description": "Triage hook based on specified seconds after "
+ "start of workload.",
+ "properties": {
+ "unit": {"enum": ["time"]},
+ "at": {
+ "type": "array",
+ "minItems": 1,
+ "uniqueItems": True,
+ "items": {
+ "type": "integer",
+ "minimum": 0
+ }
+ },
+ },
+ "required": ["unit", "at"],
+ "additionalProperties": False,
+ },
+ {
+ "description": "Triage hook based on specific iterations.",
+ "properties": {
+ "unit": {"enum": ["iteration"]},
+ "at": {
+ "type": "array",
+ "minItems": 1,
+ "uniqueItems": True,
+ "items": {
+ "type": "integer",
+ "minimum": 1,
+ }
+ },
+ },
+ "required": ["unit", "at"],
+ "additionalProperties": False,
+ },
+ ]
+ }
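+
+    # A sketch of configs that satisfy CONFIG_SCHEMA (values illustrative):
+    #   {"unit": "time", "at": [30, 60, 90]}      # 30s, 60s, 90s into load
+    #   {"unit": "iteration", "at": [5, 10, 15]}  # on iterations 5, 10, 15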
+
+ def get_listening_event(self):
+ return self.config["unit"]
+
+ def on_event(self, event_type, value=None):
+ if not (event_type == self.get_listening_event()
+ and value in self.config["at"]):
+ # do nothing
+ return
+ super(EventTrigger, self).on_event(event_type, value)
diff --git a/rally/plugins/task/hook_triggers/periodic.py b/rally/plugins/task/hook_triggers/periodic.py
new file mode 100644
index 0000000000..b7bd720dc4
--- /dev/null
+++ b/rally/plugins/task/hook_triggers/periodic.py
@@ -0,0 +1,69 @@
+# Copyright 2016: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from rally import consts
+from rally.task import hook
+
+
+@hook.configure(name="periodic")
+class PeriodicTrigger(hook.HookTrigger):
+ """Periodically triggers hook with specified range and step."""
+
+ CONFIG_SCHEMA = {
+ "type": "object",
+ "$schema": consts.JSON_SCHEMA,
+ "oneOf": [
+ {
+ "description": "Periodically triage hook based on elapsed time"
+ " after start of workload.",
+ "properties": {
+ "unit": {"enum": ["time"]},
+ "start": {"type": "integer", "minimum": 0},
+ "end": {"type": "integer", "minimum": 1},
+ "step": {"type": "integer", "minimum": 1},
+ },
+ "required": ["unit", "step"],
+ "additionalProperties": False,
+ },
+ {
+ "description": "Periodically triage hook based on iterations.",
+ "properties": {
+ "unit": {"enum": ["iteration"]},
+ "start": {"type": "integer", "minimum": 1},
+ "end": {"type": "integer", "minimum": 1},
+ "step": {"type": "integer", "minimum": 1},
+ },
+ "required": ["unit", "step"],
+ "additionalProperties": False,
+ },
+ ]
+ }
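+
+    # A sketch of a config that satisfies CONFIG_SCHEMA (values
+    # illustrative); it triggers the hook on iterations 5, 10, ..., 50:
+    #   {"unit": "iteration", "start": 5, "end": 50, "step": 5}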
+
+ def __init__(self, context, task, hook_cls):
+ super(PeriodicTrigger, self).__init__(context, task, hook_cls)
+ self.config.setdefault(
+ "start", 0 if self.config["unit"] == "time" else 1)
+ self.config.setdefault("end", float("Inf"))
+
+ def get_listening_event(self):
+ return self.config["unit"]
+
+ def on_event(self, event_type, value=None):
+ if not (event_type == self.get_listening_event()
+ and self.config["start"] <= value <= self.config["end"]
+ and (value - self.config["start"]) % self.config["step"] == 0):
+ # do nothing
+ return
+ super(PeriodicTrigger, self).on_event(event_type, value)
diff --git a/tests/unit/plugins/common/exporters/elastic/__init__.py b/rally/plugins/task/hooks/__init__.py
similarity index 100%
rename from tests/unit/plugins/common/exporters/elastic/__init__.py
rename to rally/plugins/task/hooks/__init__.py
diff --git a/rally/plugins/task/hooks/sys_call.py b/rally/plugins/task/hooks/sys_call.py
new file mode 100644
index 0000000000..a7f5f5f89d
--- /dev/null
+++ b/rally/plugins/task/hooks/sys_call.py
@@ -0,0 +1,68 @@
+# Copyright 2016: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import shlex
+import subprocess
+
+from rally.common import logging
+from rally import consts
+from rally import exceptions
+from rally.task import hook
+
+
+LOG = logging.getLogger(__name__)
+
+
+@hook.configure(name="sys_call")
+class SysCallHook(hook.HookAction):
+ """Performs system call."""
+
+ CONFIG_SCHEMA = {
+ "$schema": consts.JSON_SCHEMA,
+ "type": "string",
+ "description": "Command to execute."
+ }
+
+ def run(self):
+ LOG.debug("sys_call hook: Running command %s" % self.config)
+ proc = subprocess.Popen(shlex.split(self.config),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True)
+ out, err = proc.communicate()
+ LOG.debug("sys_call hook: Command %s returned %s"
+ % (self.config, proc.returncode))
+ if proc.returncode:
+ self.set_error(
+ exception_name="n/a", # no exception class
+ description="Subprocess returned %s" % proc.returncode,
+ details=(err or "stdout: %s" % out))
+
+ # NOTE(amaretskiy): Try to load JSON for charts,
+ # otherwise save output as-is
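+        # A sketch of hook output that parses as chart data (keys mirror
+        # the add_output() call below; payload values are illustrative):
+        #   {"additive": [],
+        #    "complete": [{"title": "Command output",
+        #                  "chart_plugin": "TextArea",
+        #                  "data": ["line 1", "line 2"]}]}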
+ try:
+ output = json.loads(out)
+ for arg in ("additive", "complete"):
+ for out_ in output.get(arg, []):
+ self.add_output(**{arg: out_})
+ except (TypeError, ValueError, exceptions.RallyException):
+ self.add_output(
+ complete={"title": "System call",
+ "chart_plugin": "TextArea",
+ "description": "Args: %s" % self.config,
+ "data": ["RetCode: %i" % proc.returncode,
+ "StdOut: %s" % (out or "(empty)"),
+ "StdErr: %s" % (err or "(empty)")]})
diff --git a/tests/unit/plugins/common/hook/__init__.py b/rally/plugins/task/runners/__init__.py
similarity index 100%
rename from tests/unit/plugins/common/hook/__init__.py
rename to rally/plugins/task/runners/__init__.py
diff --git a/rally/plugins/task/runners/constant.py b/rally/plugins/task/runners/constant.py
new file mode 100644
index 0000000000..5feb1fee18
--- /dev/null
+++ b/rally/plugins/task/runners/constant.py
@@ -0,0 +1,342 @@
+# Copyright 2014: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import multiprocessing
+import queue as Queue
+import threading
+import time
+
+from rally.common import utils
+from rally.common import validation
+from rally import consts
+from rally.task import runner
+
+
+def _worker_process(queue, iteration_gen, timeout, concurrency, times,
+ duration, context, cls, method_name, args, event_queue,
+ aborted, info):
+ """Start the scenario within threads.
+
+ Spawn threads to support scenario execution.
+    The scenario is run a fixed number of times if times is specified,
+    or for a fixed duration if duration is specified.
+ This generates a constant load on the cloud under test by executing each
+ scenario iteration without pausing between iterations. Each thread runs
+ the scenario method once with passed scenario arguments and context.
+ After execution the result is appended to the queue.
+
+ :param queue: queue object to append results
+ :param iteration_gen: next iteration number generator
+ :param timeout: operation's timeout
+ :param concurrency: number of concurrently running scenario iterations
+ :param times: total number of scenario iterations to be run
+ :param duration: total duration in seconds of the run
+ :param context: scenario context object
+ :param cls: scenario class
+ :param method_name: scenario method name
+ :param args: scenario args
+ :param event_queue: queue object to append events
+ :param aborted: multiprocessing.Event that aborts load generation if
+ the flag is set
+    :param info: info about the total process count and a counter of
+        launched processes
+ """
+ def _to_be_continued(iteration, current_duration, aborted, times=None,
+ duration=None):
+ if times is not None:
+ return iteration < times and not aborted.is_set()
+ elif duration is not None:
+ return current_duration < duration and not aborted.is_set()
+ else:
+ return False
+
+ if times is None and duration is None:
+ raise ValueError("times or duration must be specified")
+
+ pool = collections.deque()
+ alive_threads_in_pool = 0
+ finished_threads_in_pool = 0
+
+ runner._log_worker_info(times=times, duration=duration,
+ concurrency=concurrency, timeout=timeout, cls=cls,
+ method_name=method_name, args=args)
+
+ if timeout:
+ timeout_queue = Queue.Queue()
+ collector_thr_by_timeout = threading.Thread(
+ target=utils.timeout_thread,
+ args=(timeout_queue, )
+ )
+ collector_thr_by_timeout.start()
+
+ iteration = next(iteration_gen)
+ start_time = time.time()
+ # NOTE(msimonin): keep the previous behaviour
+ # > when duration is 0, scenario executes exactly 1 time
+ current_duration = -1
+ while _to_be_continued(iteration, current_duration, aborted,
+ times=times, duration=duration):
+
+ scenario_context = runner._get_scenario_context(iteration, context)
+ worker_args = (
+ queue, cls, method_name, scenario_context, args, event_queue)
+
+ thread = threading.Thread(target=runner._worker_thread,
+ args=worker_args)
+
+ thread.start()
+ if timeout:
+ timeout_queue.put((thread, time.time() + timeout))
+ pool.append(thread)
+ alive_threads_in_pool += 1
+
+ while alive_threads_in_pool == concurrency:
+ prev_finished_threads_in_pool = finished_threads_in_pool
+ finished_threads_in_pool = 0
+ for t in pool:
+ if not t.is_alive():
+ finished_threads_in_pool += 1
+
+ alive_threads_in_pool -= finished_threads_in_pool
+ alive_threads_in_pool += prev_finished_threads_in_pool
+
+ if alive_threads_in_pool < concurrency:
+                # NOTE(boris-42): cleanup pool array. This is required
+                # because otherwise its length will grow up to `times`,
+                # which can be unlimitedly big
+ while pool and not pool[0].is_alive():
+ pool.popleft().join()
+ finished_threads_in_pool -= 1
+ break
+
+            # wait a bit so that these checks do not create excessive load
+ time.sleep(0.001)
+ iteration = next(iteration_gen)
+ current_duration = time.time() - start_time
+
+ # Wait until all threads are done
+ while pool:
+ pool.popleft().join()
+
+ if timeout:
+ timeout_queue.put((None, None,))
+ collector_thr_by_timeout.join()
+
+
+@validation.configure("check_constant")
+class CheckConstantValidator(validation.Validator):
+ """Additional schema validation for constant runner"""
+
+ def validate(self, context, config, plugin_cls, plugin_cfg):
+ if plugin_cfg.get("concurrency", 1) > plugin_cfg.get("times", 1):
+ return self.fail(
+ "Parameter 'concurrency' means a number of parallel "
+ "executions of iterations. Parameter 'times' means total "
+ "number of iteration executions. It is redundant "
+ "(and restricted) to have number of parallel iterations "
+ "bigger then total number of iterations.")
+
+
+@validation.add("check_constant")
+@runner.configure(name="constant")
+class ConstantScenarioRunner(runner.ScenarioRunner):
+ """Creates constant load executing a scenario a specified number of times.
+
+ This runner will place a constant load on the cloud under test by
+ executing each scenario iteration without pausing between iterations
+ up to the number of times specified in the scenario config.
+
+ The concurrency parameter of the scenario config controls the
+ number of concurrent iterations which execute during a single
+ scenario in order to simulate the activities of multiple users
+ placing load on the cloud under test.
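+
+    A sketch of the runner section of a task config (values are
+    illustrative):
+
+        "runner": {
+            "type": "constant",
+            "times": 100,
+            "concurrency": 10
+        }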
+ """
+
+ CONFIG_SCHEMA = {
+ "type": "object",
+ "$schema": consts.JSON_SCHEMA,
+ "properties": {
+ "concurrency": {
+ "type": "integer",
+ "minimum": 1,
+ "description": "The number of parallel iteration executions."
+ },
+ "times": {
+ "type": "integer",
+ "minimum": 1,
+ "description": "Total number of iteration executions."
+ },
+ "timeout": {
+ "type": "number",
+ "description": "Operation's timeout."
+ },
+ "max_cpu_count": {
+ "type": "integer",
+ "minimum": 1,
+ "description": "The maximum number of processes to create load"
+ " from."
+ }
+ },
+ "additionalProperties": False
+ }
+
+ def _run_scenario(self, cls, method_name, context, args):
+ """Runs the specified scenario with given arguments.
+
+ This method generates a constant load on the cloud under test by
+ executing each scenario iteration using a pool of processes without
+ pausing between iterations up to the number of times specified
+ in the scenario config.
+
+ :param cls: The Scenario class where the scenario is implemented
+ :param method_name: Name of the method that implements the scenario
+ :param context: context that contains users, admin & other
+ information, that was created before scenario
+ execution starts.
+ :param args: Arguments to call the scenario method with
+
+        :returns: List of results for each single scenario iteration,
+ where each result is a dictionary
+ """
+ timeout = self.config.get("timeout", 0) # 0 means no timeout
+ times = self.config.get("times", 1)
+ concurrency = self.config.get("concurrency", 1)
+ iteration_gen = utils.RAMInt()
+
+ cpu_count = multiprocessing.cpu_count()
+ max_cpu_used = min(cpu_count,
+ self.config.get("max_cpu_count", cpu_count))
+
+ processes_to_start = min(max_cpu_used, times, concurrency)
+ concurrency_per_worker, concurrency_overhead = divmod(
+ concurrency, processes_to_start)
+
+ self._log_debug_info(times=times, concurrency=concurrency,
+ timeout=timeout, max_cpu_used=max_cpu_used,
+ processes_to_start=processes_to_start,
+ concurrency_per_worker=concurrency_per_worker,
+ concurrency_overhead=concurrency_overhead)
+
+ result_queue = multiprocessing.Queue()
+ event_queue = multiprocessing.Queue()
+
+ def worker_args_gen(concurrency_overhead):
+ while True:
+ yield (result_queue, iteration_gen, timeout,
+ concurrency_per_worker + (concurrency_overhead and 1),
+ times, None, context, cls, method_name, args,
+ event_queue, self.aborted)
+ if concurrency_overhead:
+ concurrency_overhead -= 1
+
+ process_pool = self._create_process_pool(
+ processes_to_start, _worker_process,
+ worker_args_gen(concurrency_overhead))
+ self._join_processes(process_pool, result_queue, event_queue)
+
+
+@runner.configure(name="constant_for_duration")
+class ConstantForDurationScenarioRunner(runner.ScenarioRunner):
+ """Creates constant load executing a scenario for an interval of time.
+
+ This runner will place a constant load on the cloud under test by
+ executing each scenario iteration without pausing between iterations
+ until a specified interval of time has elapsed.
+
+ The concurrency parameter of the scenario config controls the
+ number of concurrent iterations which execute during a single
+    scenario in order to simulate the activities of multiple users
+ placing load on the cloud under test.
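+
+    A sketch of the runner section of a task config (values are
+    illustrative):
+
+        "runner": {
+            "type": "constant_for_duration",
+            "duration": 60,
+            "concurrency": 5
+        }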
+ """
+
+ CONFIG_SCHEMA = {
+ "type": "object",
+ "$schema": consts.JSON_SCHEMA,
+ "properties": {
+ "concurrency": {
+ "type": "integer",
+ "minimum": 1,
+ "description": "The number of parallel iteration executions."
+ },
+ "duration": {
+ "type": "number",
+ "minimum": 0.0,
+ "description": "The number of seconds during which to generate"
+ " a load. If the duration is 0, the scenario"
+ " will run once per parallel execution."
+ },
+ "timeout": {
+ "type": "number",
+ "minimum": 1,
+ "description": "Operation's timeout."
+ }
+ },
+ "required": ["duration"],
+ "additionalProperties": False
+ }
+
+ def _run_scenario(self, cls, method_name, context, args):
+ """Runs the specified scenario with given arguments.
+
+ This method generates a constant load on the cloud under test by
+ executing each scenario iteration using a pool of processes without
+        pausing between iterations until the specified interval of time
+        has elapsed.
+
+ :param cls: The Scenario class where the scenario is implemented
+ :param method_name: Name of the method that implements the scenario
+ :param context: context that contains users, admin & other
+ information, that was created before scenario
+ execution starts.
+ :param args: Arguments to call the scenario method with
+
+        :returns: List of results for each single scenario iteration,
+ where each result is a dictionary
+ """
+ timeout = self.config.get("timeout", 600)
+ duration = self.config.get("duration", 0)
+ concurrency = self.config.get("concurrency", 1)
+ iteration_gen = utils.RAMInt()
+
+ cpu_count = multiprocessing.cpu_count()
+ max_cpu_used = min(cpu_count,
+ self.config.get("max_cpu_count", cpu_count))
+
+ processes_to_start = min(max_cpu_used, concurrency)
+ concurrency_per_worker, concurrency_overhead = divmod(
+ concurrency, processes_to_start)
+
+ self._log_debug_info(duration=duration, concurrency=concurrency,
+ timeout=timeout, max_cpu_used=max_cpu_used,
+ processes_to_start=processes_to_start,
+ concurrency_per_worker=concurrency_per_worker,
+ concurrency_overhead=concurrency_overhead)
+
+ result_queue = multiprocessing.Queue()
+ event_queue = multiprocessing.Queue()
+
+ def worker_args_gen(concurrency_overhead):
+ while True:
+ yield (result_queue, iteration_gen, timeout,
+ concurrency_per_worker + (concurrency_overhead and 1),
+ None, duration, context, cls, method_name, args,
+ event_queue, self.aborted)
+ if concurrency_overhead:
+ concurrency_overhead -= 1
+
+ process_pool = self._create_process_pool(
+ processes_to_start, _worker_process,
+ worker_args_gen(concurrency_overhead))
+ self._join_processes(process_pool, result_queue, event_queue)
diff --git a/rally/plugins/task/runners/rps.py b/rally/plugins/task/runners/rps.py
new file mode 100644
index 0000000000..98a706d116
--- /dev/null
+++ b/rally/plugins/task/runners/rps.py
@@ -0,0 +1,296 @@
+# Copyright 2014: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import multiprocessing
+import queue as Queue
+import threading
+import time
+
+from rally.common import logging
+from rally.common import utils
+from rally.common import validation
+from rally import consts
+from rally.task import runner
+
+LOG = logging.getLogger(__name__)
+
+
+def _worker_process(queue, iteration_gen, timeout, times, max_concurrent,
+ context, cls, method_name, args, event_queue, aborted,
+ runs_per_second, rps_cfg, processes_to_start, info):
+ """Start scenario within threads.
+
+    Spawn N threads per second. Each thread runs the scenario once and
+    appends the result to the queue. At most max_concurrent threads run
+    concurrently.
+
+ :param queue: queue object to append results
+ :param iteration_gen: next iteration number generator
+ :param timeout: operation's timeout
+ :param times: total number of scenario iterations to be run
+ :param max_concurrent: maximum worker concurrency
+ :param context: scenario context object
+ :param cls: scenario class
+ :param method_name: scenario method name
+ :param args: scenario args
+ :param aborted: multiprocessing.Event that aborts load generation if
+ the flag is set
+ :param runs_per_second: function that should return desired rps value
+ :param rps_cfg: rps section from task config
+ :param processes_to_start: int, number of started processes for scenario
+ execution
+    :param info: info about the total process count and a counter of
+        launched processes
+ """
+
+ pool = collections.deque()
+ if isinstance(rps_cfg, dict):
+ rps = rps_cfg["start"]
+ else:
+ rps = rps_cfg
+ sleep = 1.0 / rps
+
+ runner._log_worker_info(times=times, rps=rps, timeout=timeout,
+ cls=cls, method_name=method_name, args=args)
+
+ time.sleep(
+ (sleep * info["processes_counter"]) / info["processes_to_start"])
+
+ start = time.time()
+ timeout_queue = Queue.Queue()
+
+ if timeout:
+ collector_thr_by_timeout = threading.Thread(
+ target=utils.timeout_thread,
+ args=(timeout_queue, )
+ )
+ collector_thr_by_timeout.start()
+
+ i = 0
+ while i < times and not aborted.is_set():
+ scenario_context = runner._get_scenario_context(next(iteration_gen),
+ context)
+ worker_args = (
+ queue, cls, method_name, scenario_context, args, event_queue)
+ thread = threading.Thread(target=runner._worker_thread,
+ args=worker_args)
+
+ i += 1
+ thread.start()
+ if timeout:
+ timeout_queue.put((thread, time.time() + timeout))
+ pool.append(thread)
+
+ time_gap = time.time() - start
+ real_rps = i / time_gap if time_gap else "Infinity"
+
+ LOG.debug(
+ "Worker: %s rps: %s (requested rps: %s)" %
+ (i, real_rps, runs_per_second(rps_cfg, start, processes_to_start)))
+
+        # try to join the latest thread(s) until they finish, or until it
+        # is time to start a new thread (if we have concurrent slots
+        # available)
+ while i / (time.time() - start) > runs_per_second(
+ rps_cfg, start, processes_to_start) or (
+ len(pool) >= max_concurrent):
+ if pool:
+ pool[0].join(0.001)
+ if not pool[0].is_alive():
+ pool.popleft()
+ else:
+ time.sleep(0.001)
+
+ while pool:
+ pool.popleft().join()
+
+ if timeout:
+ timeout_queue.put((None, None,))
+ collector_thr_by_timeout.join()
+
+
+@validation.configure("check_rps")
+class CheckRPSValidator(validation.Validator):
+ """Additional schema validation for rps runner"""
+
+ def validate(self, context, config, plugin_cls, plugin_cfg):
+ if isinstance(plugin_cfg["rps"], dict):
+ if plugin_cfg["rps"]["end"] < plugin_cfg["rps"]["start"]:
+ msg = "rps end value must not be less than rps start value."
+ return self.fail(msg)
+
+
+@validation.add("check_rps")
+@runner.configure(name="rps")
+class RPSScenarioRunner(runner.ScenarioRunner):
+ """Scenario runner that does the job with specified frequency.
+
+ Every single scenario iteration is executed with specified frequency
+ (runs per second) in a pool of processes. The scenario will be
+ launched for a fixed number of times in total (specified in the config).
+
+    An example of an rps scenario is booting 1 VM per second. This
+ execution type is thus very helpful in understanding the maximal load that
+ a certain cloud can handle.
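+
+    A sketch of the runner section of a task config; *rps* may be a
+    number or a dict for a gradually increasing rate (values are
+    illustrative):
+
+        "runner": {"type": "rps", "times": 100, "rps": 10}
+
+        "runner": {
+            "type": "rps",
+            "times": 100,
+            "rps": {"start": 1, "end": 10, "step": 1, "duration": 30}
+        }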
+ """
+
+ CONFIG_SCHEMA = {
+ "type": "object",
+ "$schema": consts.JSON_SCHEMA7,
+ "properties": {
+ "times": {
+ "type": "integer",
+ "minimum": 1
+ },
+ "rps": {
+ "anyOf": [
+ {
+ "description": "Generate constant requests per second "
+ "during the whole workload.",
+ "type": "number",
+ "exclusiveMinimum": 0,
+ "minimum": 0
+ },
+ {
+ "type": "object",
+ "description": "Increase requests per second for "
+ "specified value each time after a "
+ "certain number of seconds.",
+ "properties": {
+ "start": {
+ "type": "number",
+ "minimum": 1
+ },
+ "end": {
+ "type": "number",
+ "minimum": 1
+ },
+ "step": {
+ "type": "number",
+ "minimum": 1
+ },
+ "duration": {
+ "type": "number",
+ "minimum": 1
+ }
+ },
+ "additionalProperties": False,
+ "required": ["start", "end", "step"]
+ }
+ ],
+ },
+ "timeout": {
+ "type": "number",
+ },
+ "max_concurrency": {
+ "type": "integer",
+ "minimum": 1
+ },
+ "max_cpu_count": {
+ "type": "integer",
+ "minimum": 1
+ }
+ },
+ "required": ["times", "rps"],
+ "additionalProperties": False
+ }
+
+ def _run_scenario(self, cls, method_name, context, args):
+ """Runs the specified scenario with given arguments.
+
+ Every single scenario iteration is executed with specified
+ frequency (runs per second) in a pool of processes. The scenario is
+ launched for a fixed number of times in total (specified in the
+ config).
+
+ :param cls: The Scenario class where the scenario is implemented
+ :param method_name: Name of the method that implements the scenario
+ :param context: Context that contains users, admin & other
+ information, that was created before scenario
+ execution starts.
+ :param args: Arguments to call the scenario method with
+
+        :returns: List of results for each single scenario iteration,
+ where each result is a dictionary
+ """
+ times = self.config["times"]
+ timeout = self.config.get("timeout", 0) # 0 means no timeout
+ iteration_gen = utils.RAMInt()
+
+ cpu_count = multiprocessing.cpu_count()
+ max_cpu_used = min(cpu_count,
+ self.config.get("max_cpu_count", cpu_count))
+
+ def runs_per_second(rps_cfg, start_timer, number_of_processes):
+ """At the given second return desired rps."""
+
+ if not isinstance(rps_cfg, dict):
+ return float(rps_cfg) / number_of_processes
+ stage_order = (time.time() - start_timer) / rps_cfg.get(
+ "duration", 1) - 1
+ rps = (float(rps_cfg["start"] + rps_cfg["step"] * stage_order)
+ / number_of_processes)
+
+ return min(rps, float(rps_cfg["end"]))
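+
+        # e.g. (a sketch): with rps={"start": 2, "end": 10, "step": 2,
+        # "duration": 30} and a single process, runs_per_second() ramps the
+        # requested rate up by ~2 rps every 30 seconds, capped at 10 rps.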
+
+ processes_to_start = min(max_cpu_used, times,
+ self.config.get("max_concurrency", times))
+ times_per_worker, times_overhead = divmod(times, processes_to_start)
+
+ # Determine concurrency per worker
+ concurrency_per_worker, concurrency_overhead = divmod(
+ self.config.get("max_concurrency", times), processes_to_start)
+
+ self._log_debug_info(times=times, timeout=timeout,
+ max_cpu_used=max_cpu_used,
+ processes_to_start=processes_to_start,
+ times_per_worker=times_per_worker,
+ times_overhead=times_overhead,
+ concurrency_per_worker=concurrency_per_worker,
+ concurrency_overhead=concurrency_overhead)
+
+ result_queue = multiprocessing.Queue()
+ event_queue = multiprocessing.Queue()
+
+ def worker_args_gen(times_overhead, concurrency_overhead):
+ """Generate arguments for process worker.
+
+ Remainder of threads per process division is distributed to
+ process workers equally - one thread per each process worker
+ until the remainder equals zero. The same logic is applied
+ to concurrency overhead.
+ :param times_overhead: remaining number of threads to be
+ distributed to workers
+ :param concurrency_overhead: remaining number of maximum
+ concurrent threads to be
+ distributed to workers
+ """
+ while True:
+ yield (
+ result_queue, iteration_gen, timeout,
+ times_per_worker + (times_overhead and 1),
+ concurrency_per_worker + (concurrency_overhead and 1),
+ context, cls, method_name, args, event_queue,
+ self.aborted, runs_per_second, self.config["rps"],
+ processes_to_start
+ )
+ if times_overhead:
+ times_overhead -= 1
+ if concurrency_overhead:
+ concurrency_overhead -= 1
+
+ process_pool = self._create_process_pool(
+ processes_to_start, _worker_process,
+ worker_args_gen(times_overhead, concurrency_overhead))
+ self._join_processes(process_pool, result_queue, event_queue)
diff --git a/rally/plugins/task/runners/serial.py b/rally/plugins/task/runners/serial.py
new file mode 100644
index 0000000000..e981440d47
--- /dev/null
+++ b/rally/plugins/task/runners/serial.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from rally.common import utils as rutils
+from rally import consts
+from rally.task import runner
+
+
+@runner.configure(name="serial")
+class SerialScenarioRunner(runner.ScenarioRunner):
+ """Scenario runner that executes scenarios serially.
+
+ Unlike scenario runners that execute in parallel, the serial scenario
+    runner executes scenarios one-by-one in the same Python interpreter
+    process as Rally. This allows you to execute a scenario without
+    introducing any concurrent operations and to interactively debug the
+    scenario from the same command that you use to start Rally.
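+
+    A sketch of the runner section of a task config (values are
+    illustrative):
+
+        "runner": {"type": "serial", "times": 20}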
+ """
+
+ # NOTE(mmorais): additionalProperties is set True to allow switching
+ # between parallel and serial runners by modifying only *type* property
+ CONFIG_SCHEMA = {
+ "type": "object",
+ "$schema": consts.JSON_SCHEMA,
+ "properties": {
+ "times": {
+ "type": "integer",
+ "minimum": 1
+ }
+ },
+ "additionalProperties": True
+ }
+
+ def _run_scenario(self, cls, method_name, context, args):
+ """Runs the specified scenario with given arguments.
+
+        The scenario iterations are executed one-by-one in the same Python
+        interpreter process as Rally. This allows you to execute a
+        scenario without introducing any concurrent operations and to
+        interactively debug the scenario from the same command that you
+        use to start Rally.
+
+ :param cls: The Scenario class where the scenario is implemented
+ :param method_name: Name of the method that implements the scenario
+ :param context: context that contains users, admin & other
+ information, that was created before scenario
+ execution starts.
+ :param args: Arguments to call the scenario method with
+
+        :returns: List of results for each single scenario iteration,
+ where each result is a dictionary
+ """
+ times = self.config.get("times", 1)
+
+ event_queue = rutils.DequeAsQueue(self.event_queue)
+
+ for i in range(times):
+ if self.aborted.is_set():
+ break
+ result = runner._run_scenario_once(
+ cls, method_name, runner._get_scenario_context(i, context),
+ args, event_queue)
+ self._send_result(result)
+
+ self._flush_results()
diff --git a/tests/unit/plugins/common/hook/triggers/__init__.py b/rally/plugins/task/scenarios/__init__.py
similarity index 100%
rename from tests/unit/plugins/common/hook/triggers/__init__.py
rename to rally/plugins/task/scenarios/__init__.py
diff --git a/tests/unit/plugins/common/runners/__init__.py b/rally/plugins/task/scenarios/dummy/__init__.py
similarity index 100%
rename from tests/unit/plugins/common/runners/__init__.py
rename to rally/plugins/task/scenarios/dummy/__init__.py
diff --git a/rally/plugins/common/scenarios/dummy/dummy.py b/rally/plugins/task/scenarios/dummy/dummy.py
similarity index 100%
rename from rally/plugins/common/scenarios/dummy/dummy.py
rename to rally/plugins/task/scenarios/dummy/dummy.py
diff --git a/tests/unit/plugins/common/scenarios/__init__.py b/rally/plugins/task/scenarios/requests/__init__.py
similarity index 100%
rename from tests/unit/plugins/common/scenarios/__init__.py
rename to rally/plugins/task/scenarios/requests/__init__.py
diff --git a/rally/plugins/task/scenarios/requests/http_requests.py b/rally/plugins/task/scenarios/requests/http_requests.py
new file mode 100644
index 0000000000..e85ee5af26
--- /dev/null
+++ b/rally/plugins/task/scenarios/requests/http_requests.py
@@ -0,0 +1,56 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import random
+
+from rally.plugins.task.scenarios.requests import utils
+from rally.task import scenario
+
+
+"""Scenarios for HTTP requests."""
+
+
+@scenario.configure(name="HttpRequests.check_request")
+class HttpRequestsCheckRequest(utils.RequestScenario):
+
+ def run(self, url, method, status_code, **kwargs):
+ """Standard way for testing web services using HTTP requests.
+
+        This scenario makes a request and checks it against the expected
+        response.
+
+ :param url: url for the Request object
+ :param method: method for the Request object
+ :param status_code: expected response code
+ :param kwargs: optional additional request parameters
+ """
+
+ self._check_request(url, method, status_code, **kwargs)
+
+
+@scenario.configure(name="HttpRequests.check_random_request")
+class HttpRequestsCheckRandomRequest(utils.RequestScenario):
+
+ def run(self, requests, status_code):
+ """Executes random HTTP requests from provided list.
+
+        This scenario takes a random request from the provided list and
+        raises an exception if the response is not the expected one.
+
+ :param requests: List of request dicts
+        :param status_code: Expected response code; it is used only if a
+            request dict does not specify its own status_code
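+
+        A sketch of the *requests* argument (values are illustrative):
+
+            [{"url": "http://example.com", "method": "GET"},
+             {"url": "http://example.com/form", "method": "POST",
+              "status_code": 201}]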
+ """
+
+ request = random.choice(requests)
+ request.setdefault("status_code", status_code)
+ self._check_request(**request)
diff --git a/rally/plugins/task/scenarios/requests/utils.py b/rally/plugins/task/scenarios/requests/utils.py
new file mode 100644
index 0000000000..8fd35347a2
--- /dev/null
+++ b/rally/plugins/task/scenarios/requests/utils.py
@@ -0,0 +1,38 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import requests
+
+from rally.task import atomic
+from rally.task import scenario
+
+
+class RequestScenario(scenario.Scenario):
+ """Base class for Request scenarios with basic atomic actions."""
+
+ @atomic.action_timer("requests.check_request")
+ def _check_request(self, url, method, status_code, **kwargs):
+ """Compare request status code with specified code
+
+ :param status_code: Expected status code of request
+ :param url: Uniform resource locator
+ :param method: Type of request method (GET | POST ..)
+ :param kwargs: Optional additional request parameters
+        :raises ValueError: if the returned HTTP status code is not
+            equal to the expected status code
+ """
+
+ resp = requests.request(method, url, **kwargs)
+ if status_code != resp.status_code:
+ error_msg = "Expected HTTP request code is `%s` actual `%s`"
+ raise ValueError(
+ error_msg % (status_code, resp.status_code))
diff --git a/tests/unit/plugins/common/scenarios/dummy/__init__.py b/rally/plugins/task/sla/__init__.py
similarity index 100%
rename from tests/unit/plugins/common/scenarios/dummy/__init__.py
rename to rally/plugins/task/sla/__init__.py
diff --git a/rally/plugins/task/sla/failure_rate.py b/rally/plugins/task/sla/failure_rate.py
new file mode 100644
index 0000000000..8830ad6810
--- /dev/null
+++ b/rally/plugins/task/sla/failure_rate.py
@@ -0,0 +1,67 @@
+# Copyright 2014: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+"""
+SLA (Service-level agreement) is a set of details for determining compliance
+with contracted values such as maximum error rate or minimum response time.
+"""
+
+from rally import consts
+from rally.task import sla
+
+
+@sla.configure(name="failure_rate")
+class FailureRate(sla.SLA):
+ """Failure rate minimum and maximum in percents."""
+ CONFIG_SCHEMA = {
+ "type": "object",
+ "$schema": consts.JSON_SCHEMA,
+ "properties": {
+ "min": {"type": "number", "minimum": 0.0, "maximum": 100.0},
+ "max": {"type": "number", "minimum": 0.0, "maximum": 100.0}
+ },
+ "minProperties": 1,
+ "additionalProperties": False,
+ }
+
+ def __init__(self, criterion_value):
+ super(FailureRate, self).__init__(criterion_value)
+ self.min_percent = self.criterion_value.get("min", 0)
+ self.max_percent = self.criterion_value.get("max", 100)
+ self.errors = 0
+ self.total = 0
+ self.error_rate = 0.0
+
+ def add_iteration(self, iteration):
+ self.total += 1
+ if iteration["error"]:
+ self.errors += 1
+ self.error_rate = self.errors * 100.0 / self.total
+ self.success = self.min_percent <= self.error_rate <= self.max_percent
+ return self.success
+
+ def merge(self, other):
+ self.total += other.total
+ self.errors += other.errors
+ if self.total:
+ self.error_rate = self.errors * 100.0 / self.total
+ self.success = self.min_percent <= self.error_rate <= self.max_percent
+ return self.success
+
+ def details(self):
+ return ("Failure rate criteria %.2f%% <= %.2f%% <= %.2f%% - %s" %
+ (self.min_percent, self.error_rate,
+ self.max_percent, self.status()))
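+
+
+# A minimal (illustrative) SLA section for a task config using this
+# criterion: with 2 errors in 100 iterations the error rate is 2.0%,
+# which satisfies 0.0 <= 2.0 <= 5.0, so the check passes.
+#
+#     "sla": {"failure_rate": {"max": 5.0}}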
diff --git a/rally/plugins/task/sla/iteration_time.py b/rally/plugins/task/sla/iteration_time.py
new file mode 100644
index 0000000000..6765f40b17
--- /dev/null
+++ b/rally/plugins/task/sla/iteration_time.py
@@ -0,0 +1,53 @@
+# Copyright 2014: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+"""
+SLA (Service-level agreement) is a set of details for determining compliance
+with contracted values such as maximum error rate or minimum response time.
+"""
+
+from rally import consts
+from rally.task import sla
+
+
+@sla.configure(name="max_seconds_per_iteration")
+class IterationTime(sla.SLA):
+ """Maximum time for one iteration in seconds."""
+ CONFIG_SCHEMA = {
+ "type": "number",
+ "$schema": consts.JSON_SCHEMA7,
+ "minimum": 0.0,
+ "exclusiveMinimum": 0.0}
+
+ def __init__(self, criterion_value):
+ super(IterationTime, self).__init__(criterion_value)
+ self.max_iteration_time = 0.0
+
+ def add_iteration(self, iteration):
+ if iteration["duration"] > self.max_iteration_time:
+ self.max_iteration_time = iteration["duration"]
+ self.success = self.max_iteration_time <= self.criterion_value
+ return self.success
+
+ def merge(self, other):
+ if other.max_iteration_time > self.max_iteration_time:
+ self.max_iteration_time = other.max_iteration_time
+ self.success = self.max_iteration_time <= self.criterion_value
+ return self.success
+
+ def details(self):
+ return ("Maximum seconds per iteration %.2fs <= %.2fs - %s" %
+ (self.max_iteration_time, self.criterion_value, self.status()))
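+
+
+# Illustrative SLA section (the limit is an assumption for the example):
+# the criterion fails as soon as any single iteration takes longer than 4s.
+#
+#     "sla": {"max_seconds_per_iteration": 4.0}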
diff --git a/rally/plugins/task/sla/max_average_duration.py b/rally/plugins/task/sla/max_average_duration.py
new file mode 100644
index 0000000000..b3828e8b88
--- /dev/null
+++ b/rally/plugins/task/sla/max_average_duration.py
@@ -0,0 +1,56 @@
+# Copyright 2014: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+"""
+SLA (Service-level agreement) is a set of details for determining compliance
+with contracted values such as maximum error rate or minimum response time.
+"""
+
+from rally.common import streaming_algorithms
+from rally import consts
+from rally.task import sla
+
+
+@sla.configure(name="max_avg_duration")
+class MaxAverageDuration(sla.SLA):
+ """Maximum average duration of one iteration in seconds."""
+ CONFIG_SCHEMA = {
+ "type": "number",
+ "$schema": consts.JSON_SCHEMA7,
+ "exclusiveMinimum": 0.0
+ }
+
+ def __init__(self, criterion_value):
+ super(MaxAverageDuration, self).__init__(criterion_value)
+ self.avg = 0.0
+ self.avg_comp = streaming_algorithms.MeanComputation()
+
+ def add_iteration(self, iteration):
+ if not iteration.get("error"):
+ self.avg_comp.add(iteration["duration"])
+ self.avg = self.avg_comp.result()
+ self.success = self.avg <= self.criterion_value
+ return self.success
+
+ def merge(self, other):
+ self.avg_comp.merge(other.avg_comp)
+ self.avg = self.avg_comp.result() or 0.0
+ self.success = self.avg <= self.criterion_value
+ return self.success
+
+ def details(self):
+ return ("Average duration of one iteration %.2fs <= %.2fs - %s" %
+ (self.avg, self.criterion_value, self.status()))
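+
+
+# Illustrative SLA section: the running mean is kept by
+# streaming_algorithms.MeanComputation, so error-free iterations of 1.0s
+# and 3.0s average to 2.0s, which passes the (hypothetical) limit below.
+#
+#     "sla": {"max_avg_duration": 2.5}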
diff --git a/rally/plugins/task/sla/max_average_duration_per_atomic.py b/rally/plugins/task/sla/max_average_duration_per_atomic.py
new file mode 100644
index 0000000000..1a6a668ed7
--- /dev/null
+++ b/rally/plugins/task/sla/max_average_duration_per_atomic.py
@@ -0,0 +1,73 @@
+# Copyright 2016: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+"""
+SLA (Service-level agreement) is a set of details for determining compliance
+with contracted values such as maximum error rate or minimum response time.
+"""
+
+import collections
+
+from rally.common import streaming_algorithms
+from rally import consts
+from rally.task import sla
+
+
+@sla.configure(name="max_avg_duration_per_atomic")
+class MaxAverageDurationPerAtomic(sla.SLA):
+ """Maximum average duration of one iterations atomic actions in seconds."""
+ CONFIG_SCHEMA = {"type": "object", "$schema": consts.JSON_SCHEMA,
+ "patternProperties": {".*": {
+ "type": "number",
+                        "description": "Maximum average duration of "
+                                       "the atomic action in seconds."}},
+ "minProperties": 1,
+ "additionalProperties": False}
+
+ def __init__(self, criterion_value):
+ super(MaxAverageDurationPerAtomic, self).__init__(criterion_value)
+ self.avg_by_action = collections.defaultdict(float)
+ self.avg_comp_by_action = collections.defaultdict(
+ streaming_algorithms.MeanComputation)
+ self.criterion_items = self.criterion_value.items()
+
+ def add_iteration(self, iteration):
+ if not iteration.get("error"):
+ for action in iteration["atomic_actions"]:
+ duration = action["finished_at"] - action["started_at"]
+ self.avg_comp_by_action[action["name"]].add(duration)
+ result = self.avg_comp_by_action[action["name"]].result()
+ self.avg_by_action[action["name"]] = result
+ self.success = all(self.avg_by_action[atom] <= val
+ for atom, val in self.criterion_items)
+ return self.success
+
+ def merge(self, other):
+ for atom, comp in self.avg_comp_by_action.items():
+ if atom in other.avg_comp_by_action:
+ comp.merge(other.avg_comp_by_action[atom])
+ self.avg_by_action = {a: comp.result() or 0.0
+ for a, comp in self.avg_comp_by_action.items()}
+ self.success = all(self.avg_by_action[atom] <= val
+ for atom, val in self.criterion_items)
+ return self.success
+
+ def details(self):
+ strs = ["Action: '%s'. %.2fs <= %.2fs" %
+ (atom, self.avg_by_action[atom], val)
+ for atom, val in self.criterion_items]
+ head = "Average duration of one iteration for atomic actions:"
+ end = "Status: %s" % self.status()
+ return "\n".join([head] + strs + [end])
diff --git a/rally/plugins/task/sla/outliers.py b/rally/plugins/task/sla/outliers.py
new file mode 100644
index 0000000000..5104585c22
--- /dev/null
+++ b/rally/plugins/task/sla/outliers.py
@@ -0,0 +1,111 @@
+# Copyright 2014: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+"""
+SLA (Service-level agreement) is a set of details for determining compliance
+with contracted values such as maximum error rate or minimum response time.
+"""
+
+from rally.common import streaming_algorithms
+from rally import consts
+from rally.task import sla
+
+
+@sla.configure(name="outliers")
+class Outliers(sla.SLA):
+ """Limit the number of outliers (iterations that take too much time).
+
+ The outliers are detected automatically using the computation of the mean
+ and standard deviation (std) of the data.
+ """
+ CONFIG_SCHEMA = {
+ "type": "object",
+ "$schema": consts.JSON_SCHEMA7,
+ "properties": {
+ "max": {"type": "integer", "minimum": 0},
+ "min_iterations": {"type": "integer", "minimum": 3},
+ "sigmas": {"type": "number", "minimum": 0.0,
+ "exclusiveMinimum": 0.0}
+ },
+ "additionalProperties": False,
+ }
+
+ def __init__(self, criterion_value):
+ super(Outliers, self).__init__(criterion_value)
+ self.max_outliers = self.criterion_value.get("max", 0)
+ # NOTE(msdubov): Having 3 as default is reasonable (need enough data).
+ self.min_iterations = self.criterion_value.get("min_iterations", 3)
+ self.sigmas = self.criterion_value.get("sigmas", 3.0)
+ self.iterations = 0
+ self.outliers = 0
+ self.threshold = None
+ self.mean_comp = streaming_algorithms.MeanComputation()
+ self.std_comp = streaming_algorithms.StdDevComputation()
+
+ def add_iteration(self, iteration):
+ # NOTE(ikhudoshyn): This method can not be implemented properly.
+ # After adding a new iteration, both mean and standard deviation
+ # may change. Hence threshold will change as well. In this case we
+ # should again compare durations of all accounted iterations
+ # to the threshold. Unfortunately we can not do it since
+ # we do not store durations.
+ # Implementation provided here only gives rough approximation
+ # of outliers number.
+ if not iteration.get("error"):
+ duration = iteration["duration"]
+ self.iterations += 1
+
+ # NOTE(msdubov): First check if the current iteration is an outlier
+ if (self.iterations >= self.min_iterations
+ and self.threshold and duration > self.threshold):
+ self.outliers += 1
+
+ # NOTE(msdubov): Then update the threshold value
+ self.mean_comp.add(duration)
+ self.std_comp.add(duration)
+ if self.iterations >= 2:
+ mean = self.mean_comp.result()
+ std = self.std_comp.result()
+ self.threshold = mean + self.sigmas * std
+
+ self.success = self.outliers <= self.max_outliers
+ return self.success
+
+ def merge(self, other):
+ # NOTE(ikhudoshyn): This method can not be implemented properly.
+ # After merge, both mean and standard deviation may change.
+ # Hence threshold will change as well. In this case we
+ # should again compare durations of all accounted iterations
+ # to the threshold. Unfortunately we can not do it since
+ # we do not store durations.
+ # Implementation provided here only gives rough approximation
+ # of outliers number.
+ self.iterations += other.iterations
+ self.outliers += other.outliers
+ self.mean_comp.merge(other.mean_comp)
+ self.std_comp.merge(other.std_comp)
+
+ if self.iterations >= 2:
+ mean = self.mean_comp.result()
+ std = self.std_comp.result()
+ self.threshold = mean + self.sigmas * std
+
+ self.success = self.outliers <= self.max_outliers
+ return self.success
+
+ def details(self):
+ return ("Maximum number of outliers %i <= %i - %s" %
+ (self.outliers, self.max_outliers, self.status()))
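+
+
+# Worked example of the threshold above (illustrative numbers): with a
+# running mean of 2.0s, a std of 0.5s and the default sigmas=3.0, the
+# threshold is 2.0 + 3.0 * 0.5 = 3.5s, so any later iteration longer than
+# 3.5s counts as an outlier. A matching SLA section could look like:
+#
+#     "sla": {"outliers": {"max": 1, "sigmas": 3.0, "min_iterations": 10}}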
diff --git a/rally/plugins/task/sla/performance_degradation.py b/rally/plugins/task/sla/performance_degradation.py
new file mode 100644
index 0000000000..d9d142393d
--- /dev/null
+++ b/rally/plugins/task/sla/performance_degradation.py
@@ -0,0 +1,72 @@
+# Copyright 2016: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+"""
+SLA (Service-level agreement) is a set of details for determining compliance
+with contracted values such as maximum error rate or minimum response time.
+"""
+
+from __future__ import division
+
+from rally.common import streaming_algorithms
+from rally import consts
+from rally.task import sla
+from rally.utils import strutils
+
+
+@sla.configure(name="performance_degradation")
+class PerformanceDegradation(sla.SLA):
+ """Calculates performance degradation based on iteration time
+
+ This SLA plugin finds minimum and maximum duration of
+ iterations completed without errors during Rally task execution.
+ Assuming that minimum duration is 100%, it calculates
+ performance degradation against maximum duration.
+ """
+ CONFIG_SCHEMA = {
+ "type": "object",
+ "$schema": consts.JSON_SCHEMA7,
+ "properties": {
+ "max_degradation": {
+ "type": "number",
+ "minimum": 0.0,
+ },
+ },
+ "required": [
+ "max_degradation",
+ ],
+ "additionalProperties": False,
+ }
+
+ def __init__(self, criterion_value):
+ super(PerformanceDegradation, self).__init__(criterion_value)
+ self.max_degradation = self.criterion_value["max_degradation"]
+ self.degradation = streaming_algorithms.DegradationComputation()
+
+ def add_iteration(self, iteration):
+ if not iteration.get("error"):
+ self.degradation.add(iteration["duration"])
+ self.success = self.degradation.result() <= self.max_degradation
+ return self.success
+
+ def merge(self, other):
+ self.degradation.merge(other.degradation)
+ self.success = self.degradation.result() <= self.max_degradation
+ return self.success
+
+ def details(self):
+ res = strutils.format_float_to_str(self.degradation.result() or 0.0)
+ return "Current degradation: %s%% - %s" % (res, self.status())
diff --git a/rally/plugins/task/types.py b/rally/plugins/task/types.py
new file mode 100644
index 0000000000..8384f54731
--- /dev/null
+++ b/rally/plugins/task/types.py
@@ -0,0 +1,71 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import requests
+
+from rally.common.plugin import plugin
+from rally import exceptions
+from rally.task import types
+
+
+@plugin.configure(name="path_or_url")
+class PathOrUrl(types.ResourceType):
+ """Check whether file exists or url available."""
+
+ def pre_process(self, resource_spec, config):
+ path = os.path.expanduser(resource_spec)
+ if os.path.isfile(path):
+ return path
+ try:
+ head = requests.head(path, verify=False, allow_redirects=True)
+ if head.status_code == 200:
+ return path
+ raise exceptions.InvalidScenarioArgument(
+ "Url %s unavailable (code %s)" % (path, head.status_code))
+ except Exception as ex:
+ raise exceptions.InvalidScenarioArgument(
+ "Url error %s (%s)" % (path, ex))
+
+
+@plugin.configure(name="file")
+class FileType(types.ResourceType):
+ """Return content of the file by its path."""
+
+ def pre_process(self, resource_spec, config):
+ with open(os.path.expanduser(resource_spec), "r") as f:
+ return f.read()
+
+
+@plugin.configure(name="expand_user_path")
+class ExpandUserPath(types.ResourceType):
+ """Expands user path."""
+
+ def pre_process(self, resource_spec, config):
+ return os.path.expanduser(resource_spec)
+
+
+@plugin.configure(name="file_dict")
+class FileTypeDict(types.ResourceType):
+ """Return the dictionary of items with file path and file content."""
+
+ def pre_process(self, resource_spec, config):
+ file_type_dict = {}
+ for file_path in resource_spec:
+ file_path = os.path.expanduser(file_path)
+ with open(file_path, "r") as f:
+ file_type_dict[file_path] = f.read()
+
+ return file_type_dict
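+
+
+# A hypothetical usage sketch: resource types are applied to scenario
+# arguments via the rally.task.types.convert decorator; the scenario name
+# and argument names below are assumptions for illustration only.
+#
+#     from rally.task import scenario, types
+#
+#     @types.convert(script={"type": "file"},
+#                    source={"type": "path_or_url"})
+#     @scenario.configure(name="Example.run_script")
+#     class RunScript(scenario.Scenario):
+#         def run(self, script, source):
+#             ...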
diff --git a/rally/plugins/verification/reporters.py b/rally/plugins/verification/reporters.py
new file mode 100644
index 0000000000..b52f8b5812
--- /dev/null
+++ b/rally/plugins/verification/reporters.py
@@ -0,0 +1,463 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import json
+import re
+
+from rally.common.io import junit
+from rally import consts
+from rally.ui import utils as ui_utils
+from rally.verification import reporter
+
+
+SKIP_RE = re.compile(r"Skipped until Bug: ?(?P<bug_number>\d+) is resolved.")
+LP_BUG_LINK = "https://launchpad.net/bugs/%s"
+TIME_FORMAT = consts.TimeFormat.ISO8601
+
+
+@reporter.configure("json")
+class JSONReporter(reporter.VerificationReporter):
+ """Generates verification report in JSON format.
+
+ An example of the report (All dates, numbers, names appearing in this
+ example are fictitious. Any resemblance to real things is purely
+ coincidental):
+
+ .. code-block:: json
+
+ {"verifications": {
+ "verification-uuid-1": {
+ "status": "finished",
+ "skipped": 1,
+ "started_at": "2001-01-01T00:00:00",
+ "finished_at": "2001-01-01T00:05:00",
+ "tests_duration": 5,
+ "run_args": {
+ "pattern": "set=smoke",
+ "xfail_list": {"some.test.TestCase.test_xfail":
+ "Some reason why it is expected."},
+ "skip_list": {"some.test.TestCase.test_skipped":
+ "This test was skipped intentionally"},
+ },
+ "success": 1,
+ "expected_failures": 1,
+ "tests_count": 3,
+ "failures": 0,
+ "unexpected_success": 0
+ },
+ "verification-uuid-2": {
+ "status": "finished",
+ "skipped": 1,
+ "started_at": "2002-01-01T00:00:00",
+ "finished_at": "2002-01-01T00:05:00",
+ "tests_duration": 5,
+ "run_args": {
+ "pattern": "set=smoke",
+ "xfail_list": {"some.test.TestCase.test_xfail":
+ "Some reason why it is expected."},
+ "skip_list": {"some.test.TestCase.test_skipped":
+ "This test was skipped intentionally"},
+ },
+ "success": 1,
+ "expected_failures": 1,
+ "tests_count": 3,
+ "failures": 1,
+ "unexpected_success": 0
+ }
+ },
+ "tests": {
+ "some.test.TestCase.test_foo[tag1,tag2]": {
+ "name": "some.test.TestCase.test_foo",
+ "tags": ["tag1","tag2"],
+ "by_verification": {
+ "verification-uuid-1": {
+ "status": "success",
+ "duration": "1.111"
+ },
+ "verification-uuid-2": {
+ "status": "success",
+ "duration": "22.222"
+ }
+ }
+ },
+ "some.test.TestCase.test_skipped[tag1]": {
+ "name": "some.test.TestCase.test_skipped",
+ "tags": ["tag1"],
+ "by_verification": {
+ "verification-uuid-1": {
+ "status": "skipped",
+ "duration": "0",
+ "details": "Skipped until Bug: 666 is resolved."
+ },
+ "verification-uuid-2": {
+ "status": "skipped",
+ "duration": "0",
+ "details": "Skipped until Bug: 666 is resolved."
+ }
+ }
+ },
+ "some.test.TestCase.test_xfail": {
+ "name": "some.test.TestCase.test_xfail",
+ "tags": [],
+ "by_verification": {
+ "verification-uuid-1": {
+ "status": "xfail",
+ "duration": "3",
+ "details": "Some reason why it is expected.\\n\\n"
+ "Traceback (most recent call last): \\n"
+                                   " File "fake.py", line 13, in <module>\\n"
+ " yyy()\\n"
+ " File "fake.py", line 11, in yyy\\n"
+ " xxx()\\n"
+ " File "fake.py", line 8, in xxx\\n"
+ " bar()\\n"
+ " File "fake.py", line 5, in bar\\n"
+ " foo()\\n"
+ " File "fake.py", line 2, in foo\\n"
+ " raise Exception()\\n"
+ "Exception"
+ },
+ "verification-uuid-2": {
+ "status": "xfail",
+ "duration": "3",
+ "details": "Some reason why it is expected.\\n\\n"
+ "Traceback (most recent call last): \\n"
+                                   " File "fake.py", line 13, in <module>\\n"
+ " yyy()\\n"
+ " File "fake.py", line 11, in yyy\\n"
+ " xxx()\\n"
+ " File "fake.py", line 8, in xxx\\n"
+ " bar()\\n"
+ " File "fake.py", line 5, in bar\\n"
+ " foo()\\n"
+ " File "fake.py", line 2, in foo\\n"
+ " raise Exception()\\n"
+ "Exception"
+ }
+ }
+ },
+ "some.test.TestCase.test_failed": {
+ "name": "some.test.TestCase.test_failed",
+ "tags": [],
+ "by_verification": {
+ "verification-uuid-2": {
+ "status": "fail",
+ "duration": "4",
+ "details": "Some reason why it is expected.\\n\\n"
+ "Traceback (most recent call last): \\n"
+                                   " File "fake.py", line 13, in <module>\\n"
+ " yyy()\\n"
+ " File "fake.py", line 11, in yyy\\n"
+ " xxx()\\n"
+ " File "fake.py", line 8, in xxx\\n"
+ " bar()\\n"
+ " File "fake.py", line 5, in bar\\n"
+ " foo()\\n"
+ " File "fake.py", line 2, in foo\\n"
+ " raise Exception()\\n"
+ "Exception"
+ }
+ }
+ }
+ }
+ }
+
+ """
+
+ @classmethod
+ def validate(cls, output_destination):
+ """Validate destination of report.
+
+ :param output_destination: Destination of report
+ """
+ # nothing to check :)
+ pass
+
+ def _generate(self):
+ """Prepare raw report."""
+
+ verifications = collections.OrderedDict()
+ tests = {}
+
+ for v in self.verifications:
+ verifications[v.uuid] = {
+ "started_at": v.created_at.strftime(TIME_FORMAT),
+ "finished_at": v.updated_at.strftime(TIME_FORMAT),
+ "status": v.status,
+ "run_args": v.run_args,
+ "tests_count": v.tests_count,
+ "tests_duration": v.tests_duration,
+ "skipped": v.skipped,
+ "success": v.success,
+ "expected_failures": v.expected_failures,
+ "unexpected_success": v.unexpected_success,
+ "failures": v.failures,
+ }
+
+ for test_id, result in v.tests.items():
+ if test_id not in tests:
+                    # NOTE(ylobankov): It is more convenient to see the
+                    #   test ID first in the report.
+ tags = sorted(result.get("tags", []), reverse=True,
+ key=lambda tag: tag.startswith("id-"))
+ tests[test_id] = {"tags": tags,
+ "name": result["name"],
+ "by_verification": {}}
+
+ tests[test_id]["by_verification"][v.uuid] = {
+ "status": result["status"],
+ "duration": result["duration"]
+ }
+
+ reason = result.get("reason", "")
+ if reason:
+ match = SKIP_RE.match(reason)
+ if match:
+ link = LP_BUG_LINK % match.group("bug_number")
+ reason = re.sub(match.group("bug_number"), link,
+ reason)
+ traceback = result.get("traceback", "")
+ sep = "\n\n" if reason and traceback else ""
+ d = (reason + sep + traceback.strip()) or None
+ if d:
+ tests[test_id]["by_verification"][v.uuid]["details"] = d
+
+ return {"verifications": verifications, "tests": tests}
+
+ def generate(self):
+ raw_report = json.dumps(self._generate(), indent=4)
+
+ if self.output_destination:
+ return {"files": {self.output_destination: raw_report},
+ "open": self.output_destination}
+ else:
+ return {"print": raw_report}
+
+
+@reporter.configure("html")
+class HTMLReporter(JSONReporter):
+ """Generates verification report in HTML format."""
+ INCLUDE_LIBS = False
+
+ # "T" separator of ISO 8601 is not user-friendly enough.
+ TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
+
+ def generate(self):
+ report = self._generate()
+ uuids = report["verifications"].keys()
+ show_comparison_note = False
+
+ for test in report["tests"].values():
+            # do as much processing as possible here to reduce the amount
+            # of processing on the JS side
+ test["has_details"] = False
+ for test_info in test["by_verification"].values():
+ if "details" not in test_info:
+ test_info["details"] = None
+ elif not test["has_details"]:
+ test["has_details"] = True
+
+ durations = []
+            # iterate over uuids to preserve the right order for comparison
+ for uuid in uuids:
+ if uuid in test["by_verification"]:
+ durations.append(test["by_verification"][uuid]["duration"])
+ if float(durations[-1]) < 0.001:
+ durations[-1] = "0"
+                        # do not display such a negligible duration in
+                        # the report
+ test["by_verification"][uuid]["duration"] = ""
+
+ if len(durations) > 1 and not (
+ durations[0] == "0" and durations[-1] == "0"):
+ # compare result with result of the first verification
+ diff = float(durations[-1]) - float(durations[0])
+ result = "%s (" % durations[-1]
+ if diff >= 0:
+ result += "+"
+ result += "%s)" % diff
+ test["by_verification"][uuid]["duration"] = result
+
+ if not show_comparison_note and len(durations) > 2:
+                # NOTE(andreykurilin): we should display a note about the
+                #   comparison strategy only when more than 2 results of
+                #   the same test are compared
+ show_comparison_note = True
+
+ template = ui_utils.get_template("verification/report.html")
+ context = {"uuids": list(uuids),
+ "verifications": report["verifications"],
+ "tests": report["tests"],
+ "show_comparison_note": show_comparison_note}
+
+ raw_report = template.render(data=json.dumps(context),
+ include_libs=self.INCLUDE_LIBS)
+
+        # In the future we will support html_static and will need to save
+        # more files.
+ if self.output_destination:
+ return {"files": {self.output_destination: raw_report},
+ "open": self.output_destination}
+ else:
+ return {"print": raw_report}
+
+
+@reporter.configure("html-static")
+class HTMLStaticReporter(HTMLReporter):
+ """Generates verification report in HTML format with embedded JS/CSS."""
+ INCLUDE_LIBS = True
+
+
+@reporter.configure("junit-xml")
+class JUnitXMLReporter(reporter.VerificationReporter):
+ """Generates verification report in JUnit-XML format.
+
+ An example of the report (All dates, numbers, names appearing in this
+ example are fictitious. Any resemblance to real things is purely
+ coincidental):
+
+ .. code-block:: xml
+
+        <testsuites>
+          <testsuite id="verification-uuid-1" tests="3" failures="0"
+                     skipped="1" time="5" timestamp="2001-01-01T00:00:00">
+            <testcase classname="some.test.TestCase" name="test_foo"
+                      time="1.111" timestamp="2001-01-01T00:01:00" />
+            <testcase classname="some.test.TestCase" name="test_skipped"
+                      time="0" timestamp="2001-01-01T00:02:00">
+              <skipped>Skipped until Bug: 666 is resolved.</skipped>
+            </testcase>
+            <testcase classname="some.test.TestCase" name="test_xfail"
+                      time="3" timestamp="2001-01-01T00:03:00" />
+          </testsuite>
+          <testsuite id="verification-uuid-2" tests="4" failures="2"
+                     skipped="1" time="5" timestamp="2002-01-01T00:00:00">
+            <testcase classname="some.test.TestCase" name="test_foo"
+                      time="22.222" timestamp="2002-01-01T00:01:00" />
+            <testcase classname="some.test.TestCase" name="test_uxsuccess"
+                      time="10.1" timestamp="2002-01-01T00:02:00">
+              <failure>
+                It is an unexpected success. The test should fail due to:
+                It should fail, I said!
+              </failure>
+            </testcase>
+            <testcase classname="some.test.TestCase" name="test_failed"
+                      time="4" timestamp="2002-01-01T00:03:00">
+              <failure>HEEEEEEELP</failure>
+            </testcase>
+            <testcase classname="some.test.TestCase" name="test_skipped"
+                      time="0" timestamp="2002-01-01T00:04:00">
+              <skipped>Skipped until Bug: 666 is resolved.</skipped>
+            </testcase>
+          </testsuite>
+        </testsuites>
+ """
+
+ @classmethod
+ def validate(cls, output_destination):
+ pass
+
+ def generate(self):
+ report = junit.JUnitXML()
+
+ for v in self.verifications:
+ test_suite = report.add_test_suite(
+ id=v.uuid,
+ time=str(v.tests_duration),
+ timestamp=v.created_at.strftime(TIME_FORMAT)
+ )
+ test_suite.setup_final_stats(
+ tests=str(v.tests_count),
+ skipped=str(v.skipped),
+ failures=str(v.failures + v.unexpected_success)
+ )
+
+ tests = sorted(v.tests.values(),
+ key=lambda t: (t.get("timestamp", ""), t["name"]))
+ for result in tests:
+ class_name, name = result["name"].rsplit(".", 1)
+
+ test_id = [tag[3:] for tag in result.get("tags", [])
+ if tag.startswith("id-")]
+
+ test_case = test_suite.add_test_case(
+ id=(test_id[0] if test_id else None),
+ time=result["duration"], name=name, classname=class_name,
+ timestamp=result.get("timestamp"))
+
+ if result["status"] == "success":
+ # nothing to add
+ pass
+ elif result["status"] == "uxsuccess":
+ test_case.mark_as_uxsuccess(
+ result.get("reason"))
+ elif result["status"] == "fail":
+ test_case.mark_as_failed(
+ result.get("traceback", None))
+ elif result["status"] == "xfail":
+ trace = result.get("traceback", None)
+ test_case.mark_as_xfail(
+ result.get("reason", None),
+ f"Traceback:\n{trace}" if trace else None)
+ elif result["status"] == "skip":
+ test_case.mark_as_skipped(
+ result.get("reason", None))
+ else:
+                    # unknown status; we should add validation of results...
+ pass
+
+ raw_report = report.to_string()
+ if self.output_destination:
+ return {"files": {self.output_destination: raw_report},
+ "open": self.output_destination}
+ else:
+ return {"print": raw_report}
diff --git a/rally/plugins/verification/testr.py b/rally/plugins/verification/testr.py
new file mode 100644
index 0000000000..8b1ca7e342
--- /dev/null
+++ b/rally/plugins/verification/testr.py
@@ -0,0 +1,161 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import re
+import shutil
+import subprocess
+
+
+from rally.common.io import subunit_v2
+from rally.common import logging
+from rally.common import utils as common_utils
+from rally import exceptions
+from rally.verification import context
+from rally.verification import manager
+from rally.verification import utils
+
+
+LOG = logging.getLogger(__name__)
+
+TEST_NAME_RE = re.compile(r"^[a-zA-Z_.0-9]+(\[[a-zA-Z-_,=0-9]*\])?$")
+
+
+@context.configure("testr", order=999)
+class TestrContext(context.VerifierContext):
+ """Context to transform 'run_args' into CLI arguments for testr."""
+
+ def __init__(self, ctx):
+ super(TestrContext, self).__init__(ctx)
+ self._tmp_files = []
+
+ def setup(self):
+ super(TestrContext, self).setup()
+ use_testr = getattr(self.verifier.manager, "_use_testr", True)
+
+ if use_testr:
+ base_cmd = "testr"
+ else:
+ base_cmd = "stestr"
+ self.context["testr_cmd"] = [base_cmd, "run", "--subunit"]
+ run_args = self.verifier.manager.prepare_run_args(
+ self.context.get("run_args", {}))
+
+ concurrency = run_args.get("concurrency", 0)
+ if concurrency == 0 or concurrency > 1:
+ if use_testr:
+ self.context["testr_cmd"].append("--parallel")
+ if concurrency >= 1:
+ if concurrency == 1 and not use_testr:
+ self.context["testr_cmd"].append("--serial")
+ else:
+ self.context["testr_cmd"].extend(
+ ["--concurrency", str(concurrency)])
+
+ load_list = self.context.get("load_list")
+ skip_list = self.context.get("skip_list")
+
+ if skip_list:
+            load_list = set(load_list or []) - set(skip_list)
+ if load_list:
+ load_list_file = common_utils.generate_random_path()
+ with open(load_list_file, "w") as f:
+ f.write("\n".join(load_list))
+ self._tmp_files.append(load_list_file)
+ self.context["testr_cmd"].extend(["--load-list", load_list_file])
+
+ if run_args.get("failed"):
+ self.context["testr_cmd"].append("--failing")
+
+ if run_args.get("pattern"):
+ self.context["testr_cmd"].append(run_args.get("pattern"))
+
+ def cleanup(self):
+ for f in self._tmp_files:
+ if os.path.exists(f):
+ os.remove(f)
+
+
+class TestrLauncher(manager.VerifierManager):
+ """Testr/sTestr wrapper."""
+
+ def __init__(self, *args, **kwargs):
+ super(TestrLauncher, self).__init__(*args, **kwargs)
+ self._use_testr = os.path.exists(os.path.join(
+ self.repo_dir, ".testr.conf"))
+
+ @property
+ def run_environ(self):
+ return self.environ
+
+ def _init_testr(self):
+ """Initialize testr."""
+ test_repository_dir = os.path.join(self.base_dir, ".testrepository")
+ # NOTE(andreykurilin): Is there any possibility that .testrepository
+        #   is present in a clean repo?!
+ if not os.path.isdir(test_repository_dir):
+ LOG.debug("Initializing testr.")
+ if self._use_testr:
+ base_cmd = "testr"
+ else:
+ base_cmd = "stestr"
+ try:
+ utils.check_output([base_cmd, "init"], cwd=self.repo_dir,
+ env=self.environ)
+ except (subprocess.CalledProcessError, OSError):
+ if os.path.exists(test_repository_dir):
+ shutil.rmtree(test_repository_dir)
+ raise exceptions.RallyException("Failed to initialize testr.")
+
+ def install(self):
+ super(TestrLauncher, self).install()
+ self._init_testr()
+
+ def list_tests(self, pattern=""):
+ """List all tests."""
+ if self._use_testr:
+ cmd = ["testr", "list-tests", pattern]
+ else:
+ cmd = ["stestr", "list", pattern]
+ output = utils.check_output(cmd,
+ cwd=self.repo_dir, env=self.environ,
+ debug_output=False)
+ return [t for t in output.split("\n") if TEST_NAME_RE.match(t)]
+
+ def run(self, context):
+ """Run tests."""
+ testr_cmd = context["testr_cmd"]
+ LOG.debug("Test(s) started by the command: '%s'."
+ % " ".join(testr_cmd))
+ stream = subprocess.Popen(testr_cmd, env=self.run_environ,
+ cwd=self.repo_dir,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ xfail_list = context.get("xfail_list")
+ skip_list = context.get("skip_list")
+ results = subunit_v2.parse(stream.stdout, live=True,
+ expected_failures=xfail_list,
+ skipped_tests=skip_list,
+ logger_name=self.verifier.name)
+ stream.wait()
+
+ return results
+
+ def prepare_run_args(self, run_args):
+ """Prepare 'run_args' for testr context.
+
+ This method is called by TestrContext before transforming 'run_args'
+ into CLI arguments for testr.
+ """
+ return run_args
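+
+
+# Example of the command TestrContext.setup() builds for a repo that has a
+# .testr.conf, given run_args {"concurrency": 4, "pattern": "some.regexp"}:
+#
+#     ["testr", "run", "--subunit", "--parallel",
+#      "--concurrency", "4", "some.regexp"]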
diff --git a/tests/ci/cover.sh b/tests/ci/cover.sh
index 59b888e0c9..6c3675d5a6 100755
--- a/tests/ci/cover.sh
+++ b/tests/ci/cover.sh
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-ALLOWED_EXTRA_MISSING=4
+ALLOWED_EXTRA_MISSING=400
show_diff () {
head -1 $1
diff --git a/tests/unit/plugins/common/scenarios/requests/__init__.py b/tests/unit/plugins/task/__init__.py
similarity index 100%
rename from tests/unit/plugins/common/scenarios/requests/__init__.py
rename to tests/unit/plugins/task/__init__.py
diff --git a/tests/unit/plugins/common/sla/__init__.py b/tests/unit/plugins/task/contexts/__init__.py
similarity index 100%
rename from tests/unit/plugins/common/sla/__init__.py
rename to tests/unit/plugins/task/contexts/__init__.py
diff --git a/tests/unit/plugins/common/contexts/test_dummy.py b/tests/unit/plugins/task/contexts/test_dummy.py
similarity index 96%
rename from tests/unit/plugins/common/contexts/test_dummy.py
rename to tests/unit/plugins/task/contexts/test_dummy.py
index 923d893465..7f8b276a48 100644
--- a/tests/unit/plugins/common/contexts/test_dummy.py
+++ b/tests/unit/plugins/task/contexts/test_dummy.py
@@ -13,7 +13,7 @@
# under the License.
from rally import exceptions
-from rally.plugins.common.contexts import dummy
+from rally.plugins.task.contexts import dummy
from tests.unit import test
diff --git a/tests/unit/plugins/common/verification/__init__.py b/tests/unit/plugins/task/exporters/__init__.py
similarity index 100%
rename from tests/unit/plugins/common/verification/__init__.py
rename to tests/unit/plugins/task/exporters/__init__.py
diff --git a/tests/unit/plugins/task/exporters/elastic/__init__.py b/tests/unit/plugins/task/exporters/elastic/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/unit/plugins/common/exporters/elastic/test_client.py b/tests/unit/plugins/task/exporters/elastic/test_client.py
similarity index 98%
rename from tests/unit/plugins/common/exporters/elastic/test_client.py
rename to tests/unit/plugins/task/exporters/elastic/test_client.py
index 11881bd35e..b899649645 100644
--- a/tests/unit/plugins/common/exporters/elastic/test_client.py
+++ b/tests/unit/plugins/task/exporters/elastic/test_client.py
@@ -16,11 +16,11 @@ import copy
from unittest import mock
from rally import exceptions
-from rally.plugins.common.exporters.elastic import client
+from rally.plugins.task.exporters.elastic import client
from tests.unit import test
-PATH = "rally.plugins.common.exporters.elastic.client"
+PATH = "rally.plugins.task.exporters.elastic.client"
class ElasticSearchClientTestCase(test.TestCase):
diff --git a/tests/unit/plugins/common/exporters/elastic/test_exporter.py b/tests/unit/plugins/task/exporters/elastic/test_exporter.py
similarity index 99%
rename from tests/unit/plugins/common/exporters/elastic/test_exporter.py
rename to tests/unit/plugins/task/exporters/elastic/test_exporter.py
index 62cfc6e263..a9e9449ef3 100644
--- a/tests/unit/plugins/common/exporters/elastic/test_exporter.py
+++ b/tests/unit/plugins/task/exporters/elastic/test_exporter.py
@@ -19,11 +19,11 @@ from unittest import mock
import ddt
from rally import exceptions
-from rally.plugins.common.exporters.elastic import exporter as elastic
+from rally.plugins.task.exporters.elastic import exporter as elastic
from tests.unit import test
-PATH = "rally.plugins.common.exporters.elastic.exporter"
+PATH = "rally.plugins.task.exporters.elastic.exporter"
class ValidatorTestCase(test.TestCase):
diff --git a/tests/unit/plugins/common/exporters/elastic/test_flatten.py b/tests/unit/plugins/task/exporters/elastic/test_flatten.py
similarity index 96%
rename from tests/unit/plugins/common/exporters/elastic/test_flatten.py
rename to tests/unit/plugins/task/exporters/elastic/test_flatten.py
index ad6de66fd4..dc15d68c2f 100644
--- a/tests/unit/plugins/common/exporters/elastic/test_flatten.py
+++ b/tests/unit/plugins/task/exporters/elastic/test_flatten.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from rally.plugins.common.exporters.elastic import flatten
+from rally.plugins.task.exporters.elastic import flatten
from tests.unit import test
diff --git a/tests/unit/plugins/common/exporters/junit_report.xml b/tests/unit/plugins/task/exporters/junit_report.xml
similarity index 100%
rename from tests/unit/plugins/common/exporters/junit_report.xml
rename to tests/unit/plugins/task/exporters/junit_report.xml
diff --git a/tests/unit/plugins/common/exporters/test_html.py b/tests/unit/plugins/task/exporters/test_html.py
similarity index 98%
rename from tests/unit/plugins/common/exporters/test_html.py
rename to tests/unit/plugins/task/exporters/test_html.py
index 0870937e02..b851842f24 100644
--- a/tests/unit/plugins/common/exporters/test_html.py
+++ b/tests/unit/plugins/task/exporters/test_html.py
@@ -15,10 +15,10 @@
import os
from unittest import mock
-from rally.plugins.common.exporters import html
+from rally.plugins.task.exporters import html
from tests.unit import test
-PATH = "rally.plugins.common.exporters.html"
+PATH = "rally.plugins.task.exporters.html"
def get_tasks_results():
diff --git a/tests/unit/plugins/common/exporters/test_json_exporter.py b/tests/unit/plugins/task/exporters/test_json_exporter.py
similarity index 96%
rename from tests/unit/plugins/common/exporters/test_json_exporter.py
rename to tests/unit/plugins/task/exporters/test_json_exporter.py
index 756372aea4..b442cf0e06 100644
--- a/tests/unit/plugins/common/exporters/test_json_exporter.py
+++ b/tests/unit/plugins/task/exporters/test_json_exporter.py
@@ -17,11 +17,11 @@ import datetime as dt
from unittest import mock
from rally.common import version as rally_version
-from rally.plugins.common.exporters import json_exporter
-from tests.unit.plugins.common.exporters import test_html
+from rally.plugins.task.exporters import json_exporter
+from tests.unit.plugins.task.exporters import test_html
from tests.unit import test
-PATH = "rally.plugins.common.exporters.json_exporter"
+PATH = "rally.plugins.task.exporters.json_exporter"
class JSONExporterTestCase(test.TestCase):
diff --git a/tests/unit/plugins/common/exporters/test_junit.py b/tests/unit/plugins/task/exporters/test_junit.py
similarity index 98%
rename from tests/unit/plugins/common/exporters/test_junit.py
rename to tests/unit/plugins/task/exporters/test_junit.py
index 1cca1525db..09f1dd31cd 100644
--- a/tests/unit/plugins/common/exporters/test_junit.py
+++ b/tests/unit/plugins/task/exporters/test_junit.py
@@ -16,7 +16,7 @@ import datetime as dt
import os
from unittest import mock
-from rally.plugins.common.exporters import junit
+from rally.plugins.task.exporters import junit
from tests.unit import test
diff --git a/tests/unit/plugins/common/exporters/test_trends.py b/tests/unit/plugins/task/exporters/test_trends.py
similarity index 97%
rename from tests/unit/plugins/common/exporters/test_trends.py
rename to tests/unit/plugins/task/exporters/test_trends.py
index fa9dd1fe4a..ee971dfeba 100644
--- a/tests/unit/plugins/common/exporters/test_trends.py
+++ b/tests/unit/plugins/task/exporters/test_trends.py
@@ -15,10 +15,10 @@
import os
from unittest import mock
-from rally.plugins.common.exporters import trends
+from rally.plugins.task.exporters import trends
from tests.unit import test
-PATH = "rally.plugins.common.exporters.html"
+PATH = "rally.plugins.task.exporters.html"
def get_tasks_results():
diff --git a/tests/unit/plugins/task/hook_triggers/__init__.py b/tests/unit/plugins/task/hook_triggers/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/unit/plugins/common/hook/triggers/test_event.py b/tests/unit/plugins/task/hook_triggers/test_event.py
similarity index 98%
rename from tests/unit/plugins/common/hook/triggers/test_event.py
rename to tests/unit/plugins/task/hook_triggers/test_event.py
index 73f6b252e0..7876487069 100644
--- a/tests/unit/plugins/common/hook/triggers/test_event.py
+++ b/tests/unit/plugins/task/hook_triggers/test_event.py
@@ -17,7 +17,7 @@ from unittest import mock
import ddt
-from rally.plugins.common.hook.triggers import event
+from rally.plugins.task.hook_triggers import event
from rally.task import hook
from tests.unit import test
diff --git a/tests/unit/plugins/common/hook/triggers/test_periodic.py b/tests/unit/plugins/task/hook_triggers/test_periodic.py
similarity index 98%
rename from tests/unit/plugins/common/hook/triggers/test_periodic.py
rename to tests/unit/plugins/task/hook_triggers/test_periodic.py
index bd257e1cce..d2b28c36f4 100644
--- a/tests/unit/plugins/common/hook/triggers/test_periodic.py
+++ b/tests/unit/plugins/task/hook_triggers/test_periodic.py
@@ -17,7 +17,7 @@ from unittest import mock
import ddt
-from rally.plugins.common.hook.triggers import periodic
+from rally.plugins.task.hook_triggers import periodic
from rally.task import hook
from tests.unit import test
diff --git a/tests/unit/plugins/task/hooks/__init__.py b/tests/unit/plugins/task/hooks/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/unit/plugins/common/hook/test_sys_call.py b/tests/unit/plugins/task/hooks/test_sys_call.py
similarity index 96%
rename from tests/unit/plugins/common/hook/test_sys_call.py
rename to tests/unit/plugins/task/hooks/test_sys_call.py
index 4a2428fb9f..effa950ada 100644
--- a/tests/unit/plugins/common/hook/test_sys_call.py
+++ b/tests/unit/plugins/task/hooks/test_sys_call.py
@@ -19,7 +19,7 @@ from unittest import mock
import ddt
from rally import consts
-from rally.plugins.common.hook import sys_call
+from rally.plugins.task.hooks import sys_call
from rally.task import hook
from tests.unit import fakes
from tests.unit import test
@@ -57,7 +57,7 @@ class SysCallHookTestCase(test.TestCase):
"title": "Bar Pie"}]}})
@ddt.unpack
@mock.patch("rally.common.utils.Timer", side_effect=fakes.FakeTimer)
- @mock.patch("rally.plugins.common.hook.sys_call.subprocess.Popen")
+ @mock.patch("rally.plugins.task.hooks.sys_call.subprocess.Popen")
def test_run(self, mock_popen, mock_timer, stdout, expected):
popen_instance = mock_popen.return_value
popen_instance.returncode = 0
@@ -88,7 +88,7 @@ class SysCallHookTestCase(test.TestCase):
"expected_data_stderr": "StdErr: (empty)"})
@ddt.unpack
@mock.patch("rally.common.utils.Timer", side_effect=fakes.FakeTimer)
- @mock.patch("rally.plugins.common.hook.sys_call.subprocess.Popen")
+ @mock.patch("rally.plugins.task.hooks.sys_call.subprocess.Popen")
def test_run_error(self, mock_popen, mock_timer, communicate_streams,
expected_error_details, expected_data_stderr):
popen_instance = mock_popen.return_value
diff --git a/tests/unit/plugins/task/runners/__init__.py b/tests/unit/plugins/task/runners/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/unit/plugins/common/runners/test_constant.py b/tests/unit/plugins/task/runners/test_constant.py
similarity index 99%
rename from tests/unit/plugins/common/runners/test_constant.py
rename to tests/unit/plugins/task/runners/test_constant.py
index b339bf820b..0e248d6bb3 100644
--- a/tests/unit/plugins/common/runners/test_constant.py
+++ b/tests/unit/plugins/task/runners/test_constant.py
@@ -17,14 +17,14 @@ from unittest import mock
import ddt
-from rally.plugins.common.runners import constant
+from rally.plugins.task.runners import constant
from rally.task import runner
from tests.unit import fakes
from tests.unit import test
RUNNERS_BASE = "rally.task.runner."
-RUNNERS = "rally.plugins.common.runners."
+RUNNERS = "rally.plugins.task.runners."
@ddt.ddt
diff --git a/tests/unit/plugins/common/runners/test_rps.py b/tests/unit/plugins/task/runners/test_rps.py
similarity index 99%
rename from tests/unit/plugins/common/runners/test_rps.py
rename to tests/unit/plugins/task/runners/test_rps.py
index 841954597b..54ca6ddc16 100644
--- a/tests/unit/plugins/common/runners/test_rps.py
+++ b/tests/unit/plugins/task/runners/test_rps.py
@@ -17,14 +17,14 @@ from unittest import mock
import ddt
-from rally.plugins.common.runners import rps
+from rally.plugins.task.runners import rps
from rally.task import runner
from tests.unit import fakes
from tests.unit import test
RUNNERS_BASE = "rally.task.runner."
-RUNNERS = "rally.plugins.common.runners."
+RUNNERS = "rally.plugins.task.runners."
@ddt.ddt
diff --git a/tests/unit/plugins/common/runners/test_serial.py b/tests/unit/plugins/task/runners/test_serial.py
similarity index 98%
rename from tests/unit/plugins/common/runners/test_serial.py
rename to tests/unit/plugins/task/runners/test_serial.py
index fe6f671126..7dabc5151a 100644
--- a/tests/unit/plugins/common/runners/test_serial.py
+++ b/tests/unit/plugins/task/runners/test_serial.py
@@ -15,7 +15,7 @@
from unittest import mock
-from rally.plugins.common.runners import serial
+from rally.plugins.task.runners import serial
from tests.unit import fakes
from tests.unit import test
diff --git a/tests/unit/plugins/task/scenarios/__init__.py b/tests/unit/plugins/task/scenarios/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/unit/plugins/task/scenarios/dummy/__init__.py b/tests/unit/plugins/task/scenarios/dummy/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/unit/plugins/common/scenarios/dummy/test_dummy.py b/tests/unit/plugins/task/scenarios/dummy/test_dummy.py
similarity index 98%
rename from tests/unit/plugins/common/scenarios/dummy/test_dummy.py
rename to tests/unit/plugins/task/scenarios/dummy/test_dummy.py
index dd1373e8a7..00b97e8df5 100644
--- a/tests/unit/plugins/common/scenarios/dummy/test_dummy.py
+++ b/tests/unit/plugins/task/scenarios/dummy/test_dummy.py
@@ -14,11 +14,11 @@ from unittest import mock
import ddt
-from rally.plugins.common.scenarios.dummy import dummy
+from rally.plugins.task.scenarios.dummy import dummy
from tests.unit import test
-DUMMY = "rally.plugins.common.scenarios.dummy.dummy."
+DUMMY = "rally.plugins.task.scenarios.dummy.dummy."
@ddt.ddt
diff --git a/tests/unit/plugins/task/scenarios/requests/__init__.py b/tests/unit/plugins/task/scenarios/requests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/unit/plugins/common/scenarios/requests/test_http_requests.py b/tests/unit/plugins/task/scenarios/requests/test_http_requests.py
similarity index 94%
rename from tests/unit/plugins/common/scenarios/requests/test_http_requests.py
rename to tests/unit/plugins/task/scenarios/requests/test_http_requests.py
index 00567edb48..44a544b791 100644
--- a/tests/unit/plugins/common/scenarios/requests/test_http_requests.py
+++ b/tests/unit/plugins/task/scenarios/requests/test_http_requests.py
@@ -12,10 +12,10 @@
from unittest import mock
-from rally.plugins.common.scenarios.requests import http_requests
+from rally.plugins.task.scenarios.requests import http_requests
from tests.unit import test
-SCN = "rally.plugins.common.scenarios"
+SCN = "rally.plugins.task.scenarios"
class RequestScenarioTestCase(test.TestCase):
diff --git a/tests/unit/plugins/common/scenarios/requests/test_utils.py b/tests/unit/plugins/task/scenarios/requests/test_utils.py
similarity index 96%
rename from tests/unit/plugins/common/scenarios/requests/test_utils.py
rename to tests/unit/plugins/task/scenarios/requests/test_utils.py
index 9fee92d980..03a33f63e9 100644
--- a/tests/unit/plugins/common/scenarios/requests/test_utils.py
+++ b/tests/unit/plugins/task/scenarios/requests/test_utils.py
@@ -12,7 +12,7 @@
from unittest import mock
-from rally.plugins.common.scenarios.requests import utils
+from rally.plugins.task.scenarios.requests import utils
from tests.unit import test
diff --git a/tests/unit/plugins/task/sla/__init__.py b/tests/unit/plugins/task/sla/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/unit/plugins/common/sla/test_failure_rate.py b/tests/unit/plugins/task/sla/test_failure_rate.py
similarity index 98%
rename from tests/unit/plugins/common/sla/test_failure_rate.py
rename to tests/unit/plugins/task/sla/test_failure_rate.py
index 245250d1dc..eba7c8543f 100644
--- a/tests/unit/plugins/common/sla/test_failure_rate.py
+++ b/tests/unit/plugins/task/sla/test_failure_rate.py
@@ -16,7 +16,7 @@
import ddt
-from rally.plugins.common.sla import failure_rate
+from rally.plugins.task.sla import failure_rate
from rally.task import sla
from tests.unit import test
diff --git a/tests/unit/plugins/common/sla/test_iteration_time.py b/tests/unit/plugins/task/sla/test_iteration_time.py
similarity index 98%
rename from tests/unit/plugins/common/sla/test_iteration_time.py
rename to tests/unit/plugins/task/sla/test_iteration_time.py
index 3c3c002b5f..92b986c221 100644
--- a/tests/unit/plugins/common/sla/test_iteration_time.py
+++ b/tests/unit/plugins/task/sla/test_iteration_time.py
@@ -16,7 +16,7 @@
import ddt
-from rally.plugins.common.sla import iteration_time
+from rally.plugins.task.sla import iteration_time
from rally.task import sla
from tests.unit import test
diff --git a/tests/unit/plugins/common/sla/test_max_average_duration.py b/tests/unit/plugins/task/sla/test_max_average_duration.py
similarity index 98%
rename from tests/unit/plugins/common/sla/test_max_average_duration.py
rename to tests/unit/plugins/task/sla/test_max_average_duration.py
index 840fa71387..6656c602fc 100644
--- a/tests/unit/plugins/common/sla/test_max_average_duration.py
+++ b/tests/unit/plugins/task/sla/test_max_average_duration.py
@@ -16,7 +16,7 @@
import ddt
-from rally.plugins.common.sla import max_average_duration
+from rally.plugins.task.sla import max_average_duration
from rally.task import sla
from tests.unit import test
diff --git a/tests/unit/plugins/common/sla/test_max_average_duration_per_atomic.py b/tests/unit/plugins/task/sla/test_max_average_duration_per_atomic.py
similarity index 98%
rename from tests/unit/plugins/common/sla/test_max_average_duration_per_atomic.py
rename to tests/unit/plugins/task/sla/test_max_average_duration_per_atomic.py
index 12d14209ad..e1373f253b 100644
--- a/tests/unit/plugins/common/sla/test_max_average_duration_per_atomic.py
+++ b/tests/unit/plugins/task/sla/test_max_average_duration_per_atomic.py
@@ -15,7 +15,7 @@
import ddt
-from rally.plugins.common.sla import max_average_duration_per_atomic as madpa
+from rally.plugins.task.sla import max_average_duration_per_atomic as madpa
from rally.task import sla
from tests.unit import test
diff --git a/tests/unit/plugins/common/sla/test_outliers.py b/tests/unit/plugins/task/sla/test_outliers.py
similarity index 99%
rename from tests/unit/plugins/common/sla/test_outliers.py
rename to tests/unit/plugins/task/sla/test_outliers.py
index e12eb1df88..f7982df894 100644
--- a/tests/unit/plugins/common/sla/test_outliers.py
+++ b/tests/unit/plugins/task/sla/test_outliers.py
@@ -16,7 +16,7 @@
import ddt
-from rally.plugins.common.sla import outliers
+from rally.plugins.task.sla import outliers
from rally.task import sla
from tests.unit import test
diff --git a/tests/unit/plugins/common/sla/test_performance_degradation.py b/tests/unit/plugins/task/sla/test_performance_degradation.py
similarity index 97%
rename from tests/unit/plugins/common/sla/test_performance_degradation.py
rename to tests/unit/plugins/task/sla/test_performance_degradation.py
index dfb0bc5ce9..9972a4bcce 100644
--- a/tests/unit/plugins/common/sla/test_performance_degradation.py
+++ b/tests/unit/plugins/task/sla/test_performance_degradation.py
@@ -15,7 +15,7 @@
import ddt
-from rally.plugins.common.sla import performance_degradation as perfdegr
+from rally.plugins.task.sla import performance_degradation as perfdegr
from rally.task import sla
from tests.unit import test
diff --git a/tests/unit/plugins/common/test_types.py b/tests/unit/plugins/task/test_types.py
similarity index 98%
rename from tests/unit/plugins/common/test_types.py
rename to tests/unit/plugins/task/test_types.py
index 3c969c8634..623e7f7ccf 100644
--- a/tests/unit/plugins/common/test_types.py
+++ b/tests/unit/plugins/task/test_types.py
@@ -16,7 +16,7 @@
from unittest import mock
from rally import exceptions
-from rally.plugins.common import types
+from rally.plugins.task import types
from tests.unit import test
diff --git a/tests/unit/plugins/verification/__init__.py b/tests/unit/plugins/verification/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/unit/plugins/common/verification/junit_report.xml b/tests/unit/plugins/verification/junit_report.xml
similarity index 100%
rename from tests/unit/plugins/common/verification/junit_report.xml
rename to tests/unit/plugins/verification/junit_report.xml
diff --git a/tests/unit/plugins/common/verification/test_reporters.py b/tests/unit/plugins/verification/test_reporters.py
similarity index 99%
rename from tests/unit/plugins/common/verification/test_reporters.py
rename to tests/unit/plugins/verification/test_reporters.py
index 76e61c17cf..9c31b37119 100644
--- a/tests/unit/plugins/common/verification/test_reporters.py
+++ b/tests/unit/plugins/verification/test_reporters.py
@@ -20,11 +20,11 @@ from unittest import mock
import ddt
from rally.common import utils
-from rally.plugins.common.verification import reporters
+from rally.plugins.verification import reporters
from tests.unit import test
-PATH = "rally.plugins.common.verification.reporters"
+PATH = "rally.plugins.verification.reporters"
def get_verifications():
diff --git a/tests/unit/plugins/common/verification/test_testr.py b/tests/unit/plugins/verification/test_testr.py
similarity index 99%
rename from tests/unit/plugins/common/verification/test_testr.py
rename to tests/unit/plugins/verification/test_testr.py
index c7b4a165ae..a85a61a6e3 100644
--- a/tests/unit/plugins/common/verification/test_testr.py
+++ b/tests/unit/plugins/verification/test_testr.py
@@ -16,11 +16,11 @@ import subprocess
from unittest import mock
from rally import exceptions
-from rally.plugins.common.verification import testr
+from rally.plugins.verification import testr
from tests.unit import test
-PATH = "rally.plugins.common.verification.testr"
+PATH = "rally.plugins.verification.testr"
class TestrContextTestCase(test.TestCase):
diff --git a/tests/unit/task/test_runner.py b/tests/unit/task/test_runner.py
index d0a3c432de..fe429d85c5 100644
--- a/tests/unit/task/test_runner.py
+++ b/tests/unit/task/test_runner.py
@@ -19,7 +19,7 @@ from unittest import mock
import ddt
-from rally.plugins.common.runners import serial
+from rally.plugins.task.runners import serial
from rally.task import runner
from tests.unit import fakes
from tests.unit import test