Port inner stuff to the new task format

* use new format of a runner section
  the schemas of existing plugins are changed to not include "type"

* use "contexts" key instead of "context"
  note: the database model still uses the word "context". Hopefully it
        will be fixed soon while extending the abilities of contexts.

* use new format of a hook section

Change-Id: I2ef6ba7a24b542fb001bce378cadf8c83c774b01
This commit is contained in:
Andrey Kurilin
2017-09-14 15:57:44 +03:00
parent 79dcb93957
commit 7165f31726
27 changed files with 494 additions and 224 deletions

View File

@@ -523,6 +523,7 @@ class _Task(APIGroup):
workload_obj = subtask_obj.add_workload(
name=workload["name"], description=workload["description"],
position=workload["position"], runner=workload["runner"],
runner_type=workload["runner_type"],
context=workload["context"], hooks=workload["hooks"],
sla=workload["sla"], args=workload["args"])
chunk_size = CONF.raw_result_chunk_size

View File

@@ -578,8 +578,17 @@ class TaskCommands(object):
itr["atomic_actions"]).items()
)
results = [
{
results = []
for w in itertools.chain(*[s["workloads"] for s in task["subtasks"]]):
w["runner"]["type"] = w["runner_type"]
hooks = [
{"name": h["config"]["action"][0],
"args": h["config"]["action"][1],
"description": h["config"].get("description"),
"trigger": {"name": h["config"]["trigger"][0],
"args": h["config"]["trigger"][1]}}
for h in w["hooks"]]
results.append({
"key": {
"name": w["name"],
"description": w["description"],
@@ -589,7 +598,7 @@ class TaskCommands(object):
"runner": w["runner"],
"context": w["context"],
"sla": w["sla"],
"hooks": [r["config"] for r in w["hooks"]],
"hooks": hooks,
}
},
"result": w["data"],
@@ -597,9 +606,7 @@ class TaskCommands(object):
"hooks": w["hooks"],
"load_duration": w["load_duration"],
"full_duration": w["full_duration"],
"created_at": w["created_at"]}
for w in itertools.chain(
*[s["workloads"] for s in task["subtasks"]])]
"created_at": w["created_at"]})
print(json.dumps(results, sort_keys=False, indent=4))
@@ -742,6 +749,13 @@ class TaskCommands(object):
updated_at += dt.timedelta(seconds=result["full_duration"])
updated_at = updated_at.strftime(consts.TimeFormat.ISO8601)
pass_sla = all(s.get("success") for s in result["sla"])
runner_type = result["key"]["kw"]["runner"].pop("type")
for h in result["hooks"]:
trigger = h["config"]["trigger"]
h["config"] = {
"description": h["config"].get("description"),
"action": (h["config"]["name"], h["config"]["args"]),
"trigger": (trigger["name"], trigger["args"])}
workload = {"uuid": "n/a",
"name": result["key"]["name"],
"position": result["key"]["pos"],
@@ -757,6 +771,7 @@ class TaskCommands(object):
"created_at": result["created_at"],
"updated_at": updated_at,
"args": result["key"]["kw"]["args"],
"runner_type": runner_type,
"runner": result["key"]["kw"]["runner"],
"hooks": result["hooks"],
"sla": result["key"]["kw"]["sla"],

View File

@@ -0,0 +1,72 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""port-configs-to-new-formats
Revision ID: 046a38742e89
Revises: fab4f4f31f8a
Create Date: 2017-09-14 15:58:28.950132
"""
from alembic import op
import json
import sqlalchemy as sa
from rally import exceptions
# revision identifiers, used by Alembic.
revision = "046a38742e89"
down_revision = "fab4f4f31f8a"
branch_labels = None
depends_on = None
workload_helper = sa.Table(
"workloads",
sa.MetaData(),
sa.Column("id", sa.Integer, primary_key=True, autoincrement=True),
sa.Column("uuid", sa.String(36), nullable=False),
sa.Column("runner", sa.Text),
sa.Column("hooks", sa.Text)
)
def upgrade():
    """Port stored workload configs to the new task format.

    For every workload row:
      * drop the "type" key from the runner config (the type now lives in
        a separate "runner_type" column);
      * rewrite each hook config so "action" and "trigger" become
        (name, args) pairs instead of nested one-key dicts.
    """
    conn = op.get_bind()
    for row in conn.execute(workload_helper.select()):
        # Runner: strip the embedded type marker.
        runner_cfg = json.loads(row["runner"])
        runner_cfg.pop("type")
        updates = {"runner": json.dumps(runner_cfg)}

        raw_hooks = row["hooks"]
        if raw_hooks:
            migrated_hooks = []
            for hook in json.loads(raw_hooks):
                old_cfg = hook["config"]
                old_trigger = old_cfg["trigger"]
                # Collapse the old {"name": ..., "args": ...} layout into
                # (name, args) tuples for both the action and the trigger.
                hook["config"] = {
                    "description": old_cfg.get("description"),
                    "action": (old_cfg["name"], old_cfg["args"]),
                    "trigger": (old_trigger["name"], old_trigger["args"])}
                migrated_hooks.append(hook)
            updates["hooks"] = json.dumps(migrated_hooks)

        conn.execute(workload_helper.update().where(
            workload_helper.c.uuid == row.uuid).values(**updates))
def downgrade():
    """Data migrations of this kind are not reversible."""
    raise exceptions.DowngradeNotSupported()

View File

@@ -14,7 +14,6 @@
# under the License.
import collections
import copy
import datetime as dt
import uuid
@@ -285,24 +284,27 @@ class Subtask(object):
def update_status(self, status):
self._update({"status": status})
def add_workload(self, name, description, position, runner, context, hooks,
sla, args):
def add_workload(self, name, description, position, runner, runner_type,
context, hooks, sla, args):
# store hooks config as it will look after adding results
if hooks:
hooks = [{"config": hook} for hook in hooks]
return Workload(task_uuid=self.subtask["task_uuid"],
subtask_uuid=self.subtask["uuid"], name=name,
description=description, position=position,
runner=runner, hooks=hooks, context=context, sla=sla,
args=args)
runner=runner, runner_type=runner_type, hooks=hooks,
context=context, sla=sla, args=args)
class Workload(object):
"""Represents a workload object."""
def __init__(self, task_uuid, subtask_uuid, name, description, position,
runner, hooks, context, sla, args):
runner, runner_type, hooks, context, sla, args):
self.workload = db.workload_create(
task_uuid=task_uuid, subtask_uuid=subtask_uuid, name=name,
description=description, position=position, runner=runner,
runner_type=runner["type"], hooks=hooks, context=context, sla=sla,
runner_type=runner_type, hooks=hooks, context=context, sla=sla,
args=args)
def __getitem__(self, key):
@@ -326,6 +328,11 @@ class Workload(object):
@classmethod
def to_task(cls, workload):
"""Format a single workload as a full Task to launch.
:param workload: A workload config as it stores in database or like in
input file (the difference in hook format).
"""
task = collections.OrderedDict()
task["version"] = 2
task["title"] = "A cropped version of a bigger task."
@@ -337,9 +344,22 @@ class Workload(object):
subtask["title"] = workload["name"]
subtask["description"] = workload["description"]
subtask["scenario"] = {workload["name"]: workload["args"]}
subtask["contexts"] = workload["context"]
runner = copy.copy(workload["runner"])
subtask["runner"] = {runner.pop("type"): runner}
subtask["hooks"] = [h["config"] for h in workload["hooks"]]
# TODO(andreykurilin): fix database model as soon as the work related
# contexts execution stats will start.
if "context" in workload:
# it is an object from database
subtask["contexts"] = workload["context"]
else:
subtask["contexts"] = workload["contexts"]
subtask["runner"] = {workload["runner_type"]: workload["runner"]}
subtask["hooks"] = []
for hook in workload["hooks"]:
if "config" in hook:
# it is an object from database
hook = hook["config"]
subtask["hooks"].append({
"description": hook.get("description"),
"action": dict([hook["action"]]),
"trigger": dict([hook["trigger"]])})
subtask["sla"] = workload["sla"]
return task

View File

@@ -149,10 +149,6 @@ class ConstantScenarioRunner(runner.ScenarioRunner):
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"type": {
"type": "string",
"description": "Type of Runner."
},
"concurrency": {
"type": "integer",
"minimum": 1,
@@ -174,7 +170,6 @@ class ConstantScenarioRunner(runner.ScenarioRunner):
" from."
}
},
"required": ["type"],
"additionalProperties": False
}
@@ -263,10 +258,6 @@ class ConstantForDurationScenarioRunner(runner.ScenarioRunner):
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"type": {
"type": "string",
"description": "Type of Runner."
},
"concurrency": {
"type": "integer",
"minimum": 1,
@@ -284,7 +275,7 @@ class ConstantForDurationScenarioRunner(runner.ScenarioRunner):
"description": "Operation's timeout."
}
},
"required": ["type", "duration"],
"required": ["duration"],
"additionalProperties": False
}

View File

@@ -150,9 +150,6 @@ class RPSScenarioRunner(runner.ScenarioRunner):
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"type": {
"type": "string"
},
"times": {
"type": "integer",
"minimum": 1
@@ -205,7 +202,7 @@ class RPSScenarioRunner(runner.ScenarioRunner):
"minimum": 1
}
},
"required": ["type", "times", "rps"],
"required": ["times", "rps"],
"additionalProperties": False
}

View File

@@ -35,9 +35,6 @@ class SerialScenarioRunner(runner.ScenarioRunner):
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"type": {
"type": "string"
},
"times": {
"type": "integer",
"minimum": 1

View File

@@ -270,7 +270,7 @@ class RequiredContextsValidator(validation.Validator):
def validate(self, context, config, plugin_cls, plugin_cfg):
missing_contexts = []
input_context = config.get("context", {})
input_context = config.get("contexts", {})
for name in self.contexts:
if isinstance(name, tuple):
@@ -304,7 +304,7 @@ class RequiredParamOrContextValidator(validation.Validator):
msg = ("You should specify either scenario argument %s or"
" use context %s." % (self.param_name, self.ctx_name))
if self.ctx_name in config.get("context", {}):
if self.ctx_name in config.get("contexts", {}):
return
if self.param_name in config.get("args", {}):
return

View File

@@ -55,7 +55,7 @@ class ImageExistsValidator(validation.Validator):
if not image_args and self.nullable:
return
image_context = config.get("context", {}).get("images", {})
image_context = config.get("contexts", {}).get("images", {})
image_ctx_name = image_context.get("image_name")
if not image_args:
@@ -162,11 +162,11 @@ class FlavorExistsValidator(validation.Validator):
self.param_name = param_name
def _get_flavor_from_context(self, config, flavor_value):
if "flavors" not in config.get("context", {}):
if "flavors" not in config.get("contexts", {}):
self.fail("No flavors context")
flavors = [flavors_ctx.FlavorConfig(**f)
for f in config["context"]["flavors"]]
for f in config["contexts"]["flavors"]]
resource = types.obj_from_name(resource_config=flavor_value,
resources=flavors, typename="flavor")
flavor = flavors_ctx.FlavorConfig(**resource)
@@ -223,7 +223,7 @@ class ImageValidOnFlavorValidator(FlavorExistsValidator):
self.validate_disk = validate_disk
def _get_validated_image(self, config, clients, param_name):
image_context = config.get("context", {}).get("images", {})
image_context = config.get("contexts", {}).get("images", {})
image_args = config.get("args", {}).get(param_name)
image_ctx_name = image_context.get("image_name")
@@ -387,7 +387,7 @@ class RequiredServicesValidator(validation.Validator):
for service in self.services:
# NOTE(andreykurilin): validator should ignore services configured
# via context(a proper validation should be in context)
service_config = config.get("context", {}).get(
service_config = config.get("contexts", {}).get(
"api_versions@openstack", {}).get(service, {})
if (service not in available_services and
@@ -507,7 +507,7 @@ class RequiredAPIVersionsValidator(validation.Validator):
"version": versions_str,
"found_version": "3"})
else:
av_ctx = config.get("context", {}).get(
av_ctx = config.get("contexts", {}).get(
"api_versions@openstack", {})
default_version = getattr(clients,
self.component).choose_version()
@@ -540,10 +540,10 @@ class VolumeTypeExistsValidator(validation.Validator):
def validate(self, context, config, plugin_cls, plugin_cfg):
volume_type = config.get("args", {}).get(self.param, False)
if not volume_type and self.nullable:
return
if not volume_type:
if self.nullable:
return
self.fail("The parameter '%s' is required and should not be empty."
% self.param)
@@ -551,7 +551,7 @@ class VolumeTypeExistsValidator(validation.Validator):
clients = user["credential"].clients()
vt_names = [vt.name for vt in
clients.cinder().volume_types.list()]
ctx = config.get("context", {}).get("volume_types", [])
ctx = config.get("contexts", {}).get("volume_types", [])
vt_names += ctx
if volume_type not in vt_names:
self.fail("Specified volume type %s not found for user %s."

View File

@@ -276,15 +276,15 @@ class TaskEngine(object):
plugin_cfg=None,
vtype=vtype))
if workload["runner"]:
if workload["runner_type"]:
results.extend(runner.ScenarioRunner.validate(
name=workload["runner"]["type"],
name=workload["runner_type"],
context=vcontext,
config=None,
plugin_cfg=workload["runner"],
vtype=vtype))
for context_name, context_conf in workload["context"].items():
for context_name, context_conf in workload["contexts"].items():
results.extend(context.Context.validate(
name=context_name,
context=vcontext,
@@ -310,8 +310,7 @@ class TaskEngine(object):
vtype=vtype))
for hook_conf in workload["hooks"]:
action_name, action_cfg = list(
hook_conf["config"]["action"].items())[0]
action_name, action_cfg = hook_conf["action"]
results.extend(hook.HookAction.validate(
name=action_name,
context=vcontext,
@@ -319,8 +318,7 @@ class TaskEngine(object):
plugin_cfg=action_cfg,
vtype=vtype))
trigger_name, trigger_cfg = list(
hook_conf["config"]["trigger"].items())[0]
trigger_name, trigger_cfg = hook_conf["trigger"]
results.extend(hook.HookTrigger.validate(
name=trigger_name,
context=vcontext,
@@ -461,8 +459,9 @@ class TaskEngine(object):
description=workload["description"],
position=workload["position"],
runner=workload["runner"],
runner_type=workload["runner_type"],
hooks=workload["hooks"],
context=workload["context"],
context=workload["contexts"],
sla=workload["sla"],
args=workload["args"])
workload["uuid"] = workload_obj["uuid"]
@@ -474,10 +473,10 @@ class TaskEngine(object):
% {"position": workload["position"],
"cfg": json.dumps(workload_cfg, indent=3)})
runner_cls = runner.ScenarioRunner.get(workload["runner"]["type"])
runner_cls = runner.ScenarioRunner.get(workload["runner_type"])
runner_obj = runner_cls(self.task, workload["runner"])
context_obj = self._prepare_context(
workload["context"], workload["name"], workload_obj["uuid"])
workload["contexts"], workload["name"], workload_obj["uuid"])
try:
with ResultConsumer(workload, self.task, subtask_obj, workload_obj,
runner_obj, self.abort_on_sla_failure):
@@ -746,14 +745,15 @@ class TaskConfig(object):
# validation step
pass
wconf["context"] = wconf.pop("contexts", {})
wconf.setdefault("contexts", {})
if "runner" in wconf:
runner_type, runner_cfg = list(wconf["runner"].items())[0]
runner_cfg["type"] = runner_type
wconf["runner"] = runner_cfg
runner = list(wconf["runner"].items())[0]
wconf["runner_type"], wconf["runner"] = runner
else:
wconf["runner"] = {"serial": {}}
wconf["runner_type"] = "serial"
wconf["runner"] = {}
wconf.setdefault("sla", {"failure_rate": {"max": 0}})
hooks = wconf.get("hooks", [])
@@ -764,14 +764,17 @@ class TaskConfig(object):
"Check task format documentation for more "
"details.")
trigger_cfg = hook_cfg["trigger"]
wconf["hooks"].append({"config": {
wconf["hooks"].append({
"description": hook_cfg["description"],
"action": {hook_cfg["name"]: hook_cfg["args"]},
"trigger": {
trigger_cfg["name"]: trigger_cfg["args"]}}
})
"action": (hook_cfg["name"], hook_cfg["args"]),
"trigger": (
trigger_cfg["name"], trigger_cfg["args"])})
else:
wconf["hooks"].append({"config": hook_cfg})
hook_cfg["action"] = list(
hook_cfg["action"].items())[0]
hook_cfg["trigger"] = list(
hook_cfg["trigger"].items())[0]
wconf["hooks"].append(hook_cfg)
workloads.append(wconf)
sconf["workloads"] = workloads
@@ -815,7 +818,7 @@ class TaskConfig(object):
for hook_cfg in hooks:
trigger_cfg = hook_cfg["trigger"]
subtask["hooks"].append(
{"description": hook_cfg["description"],
{"description": hook_cfg.get("description"),
"action": {
hook_cfg["name"]: hook_cfg["args"]},
"trigger": {

View File

@@ -43,10 +43,9 @@ class HookExecutor(object):
self.task = task
self.triggers = collections.defaultdict(list)
for hook in config.get("hooks", []):
hook_cfg = hook["config"]
action_name = list(hook_cfg["action"].keys())[0]
trigger_name = list(hook_cfg["trigger"].keys())[0]
for hook_cfg in config.get("hooks", []):
action_name = hook_cfg["action"][0]
trigger_name = hook_cfg["trigger"][0]
action_cls = HookAction.get(action_name)
trigger_obj = HookTrigger.get(
trigger_name)(hook_cfg, self.task, action_cls)
@@ -216,7 +215,7 @@ class HookTrigger(plugin.Plugin, validation.ValidatablePluginMixin):
def __init__(self, hook_cfg, task, hook_cls):
self.hook_cfg = hook_cfg
self.config = self.hook_cfg["trigger"][self.get_name()]
self.config = self.hook_cfg["trigger"][1]
self.task = task
self.hook_cls = hook_cls
self._runs = []
@@ -230,7 +229,7 @@ class HookTrigger(plugin.Plugin, validation.ValidatablePluginMixin):
LOG.info("Hook action %s is triggered for Task %s by %s=%s"
% (self.hook_cls.get_name(), self.task["uuid"],
event_type, value))
action_cfg = list(self.hook_cfg["action"].values())[0]
action_cfg = self.hook_cfg["action"][1]
action = self.hook_cls(self.task, action_cfg,
{"event_type": event_type, "value": value})
action.run_async()

View File

@@ -14,7 +14,6 @@
# under the License.
import collections
import copy
import datetime as dt
import hashlib
import itertools
@@ -33,7 +32,7 @@ def _process_hooks(hooks):
"""Prepare hooks data for report."""
hooks_ctx = []
for hook in hooks:
hook_ctx = {"name": list(hook["config"]["action"].keys())[0],
hook_ctx = {"name": hook["config"]["action"][0],
"desc": hook["config"].get("description", ""),
"additive": [], "complete": []}
@@ -138,7 +137,7 @@ def _process_workload(workload, workload_cfg, pos):
"met": method,
"pos": str(pos),
"name": method + (pos and " [%d]" % (pos + 1) or ""),
"runner": workload["runner"]["type"],
"runner": workload["runner_type"],
"config": json.dumps(workload_cfg, indent=2),
"hooks": _process_hooks(workload["hooks"]),
"description": workload.get("description", ""),
@@ -212,8 +211,8 @@ def _make_source(tasks):
workload_cfg["scenario"] = {workload["name"]: workload["args"]}
workload_cfg["description"] = workload["description"]
workload_cfg["contexts"] = workload["context"]
runner = copy.copy(workload["runner"])
workload_cfg["runner"] = {runner.pop("type"): runner}
workload_cfg["runner"] = {
workload["runner_type"]: workload["runner"]}
workload_cfg["hooks"] = [h["config"]
for h in workload["hooks"]]
workload_cfg["sla"] = workload["sla"]

View File

@@ -31,8 +31,8 @@ class Trigger(hook.HookTrigger):
@property
def context(self):
action_name, action_cfg = list(self.hook_cfg["action"].items())[0]
trigger_name, trigger_cfg = list(self.hook_cfg["trigger"].items())[0]
action_name, action_cfg = self.hook_cfg["action"]
trigger_name, trigger_cfg = self.hook_cfg["trigger"]
return {"description": self.hook_cfg["description"],
"name": action_name,
"args": action_cfg,

View File

@@ -553,7 +553,8 @@ class TaskCommandsTestCase(test.TestCase):
"name": "Foo.bar", "description": "descr",
"position": 2,
"args": {"key1": "value1"},
"runner": {"type": "rruunneerr"},
"runner_type": "rruunneerr",
"runner": {"arg1": "args2"},
"hooks": [],
"sla": {"failure_rate": {"max": 0}},
"sla_results": {"sla": [{"success": True}]},
@@ -565,10 +566,16 @@ class TaskCommandsTestCase(test.TestCase):
task_id = "foo_task_id"
task_obj = self._make_task(data=[{"atomic_actions": {"foo": 1.1}}])
def fix_r(workload):
cfg = workload["runner"]
cfg["type"] = workload["runner_type"]
return cfg
result = map(lambda x: {"key": {"kw": {"sla": x["sla"],
"args": x["args"],
"context": x["context"],
"runner": x["runner"],
"runner": fix_r(x),
"hooks": x["hooks"]},
"pos": x["position"],
"name": x["name"],
@@ -1089,7 +1096,8 @@ class TaskCommandsTestCase(test.TestCase):
"subtasks": [{"workloads": [{
"name": "fake_name",
"position": "fake_pos",
"args": {}, "runner": {}, "context": {}, "sla": {},
"args": {}, "runner_type": "foo",
"runner": {}, "context": {}, "sla": {},
"hooks": {},
"load_duration": 3.2,
"full_duration": 3.5,
@@ -1181,8 +1189,12 @@ class TaskCommandsTestCase(test.TestCase):
"name": "Foo.bar", "description": "descr",
"position": 2,
"args": {"key1": "value1"},
"runner": {"type": "rruunneerr"},
"hooks": [{"config": {"type": "hookk"}}],
"runner_type": "constant",
"runner": {"time": 3},
"hooks": [{"config": {
"description": "descr",
"action": ("foo", {"arg1": "v1"}),
"trigger": ("t", {"a2", "v2"})}}],
"pass_sla": True,
"sla": {"failure_rate": {"max": 0}},
"sla_results": {"sla": [{"success": True}]},
@@ -1196,14 +1208,25 @@ class TaskCommandsTestCase(test.TestCase):
}
results = [
{"hooks": workload["hooks"],
{"hooks": [{"config": {
"name": "foo",
"args": {"arg1": "v1"},
"description": "descr",
"trigger": {"name": "t",
"args": {"a2", "v2"}}}}],
"key": {"name": workload["name"],
"description": workload["description"],
"pos": workload["position"],
"kw": {
"args": workload["args"],
"runner": workload["runner"],
"hooks": [h["config"] for h in workload["hooks"]],
"runner": {"type": "constant", "time": 3},
"hooks": [{"name": "foo",
"args": {"arg1": "v1"},
"description": "descr",
"trigger": {
"name": "t",
"args": {"a2", "v2"}
}}],
"sla": workload["sla"],
"context": workload["context"]}},
"sla": workload["sla_results"]["sla"],

View File

@@ -1965,3 +1965,135 @@ class MigrationWalkTestCase(rtest.DBTestCase,
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid == deployment_uuid))
def _pre_upgrade_046a38742e89(self, engine):
    """Seed the database with old-format workload rows.

    Creates one deployment, one task, one subtask and two workloads whose
    "runner" configs still embed a "type" key (the pre-migration format);
    the second workload also carries a hook in the old nested-dict format.
    The created deployment/task uuids are stashed on ``self`` so that
    ``_check_046a38742e89`` can find and clean them up after the migration
    runs.
    """
    deployment_table = db_utils.get_table(engine, "deployments")
    task_table = db_utils.get_table(engine, "tasks")
    subtask_table = db_utils.get_table(engine, "subtasks")
    workload_table = db_utils.get_table(engine, "workloads")
    self._046a38742e89_deployment_uuid = str(uuid.uuid4())
    self._046a38742e89_task_uuid = str(uuid.uuid4())
    subtask_uuid = str(uuid.uuid4())
    # Old-format fixtures: runner still contains "type"; hook config uses
    # the nested {"name": ..., "args": ..., "trigger": {...}} layout.
    workloads = [
        {
            "runner": {"type": "constant",
                       "times": 1000}},
        {
            "runner": {"type": "rps",
                       "rps": 300},
            "hooks": [
                {
                    "config": {"args": {"arg1": "v1"},
                               "description": "descr",
                               "name": "foo",
                               "trigger": {"name": "bar",
                                           "args": {"arg2": "v2"}}}}
            ]
        }
    ]
    with engine.connect() as conn:
        conn.execute(
            deployment_table.insert(),
            [{
                "uuid": self._046a38742e89_deployment_uuid,
                "name": str(uuid.uuid4()),
                "config": "{}",
                "enum_deployments_status": consts.DeployStatus.DEPLOY_INIT,
                "credentials": six.b(json.dumps([])),
                "users": six.b(json.dumps([]))
            }]
        )
        conn.execute(
            task_table.insert(),
            [{
                "uuid": self._046a38742e89_task_uuid,
                "created_at": timeutils.utcnow(),
                "updated_at": timeutils.utcnow(),
                "status": consts.TaskStatus.FINISHED,
                "validation_result": six.b(json.dumps({})),
                "deployment_uuid": self._046a38742e89_deployment_uuid
            }]
        )
        conn.execute(
            subtask_table.insert(),
            [{
                "uuid": subtask_uuid,
                "created_at": timeutils.utcnow(),
                "updated_at": timeutils.utcnow(),
                "task_uuid": self._046a38742e89_task_uuid,
                "context": six.b(json.dumps([])),
                "sla": six.b(json.dumps([])),
                "run_in_parallel": False
            }]
        )
        for workload in workloads:
            conn.execute(
                workload_table.insert(),
                [{
                    "uuid": str(uuid.uuid4()),
                    "name": "foo",
                    "task_uuid": self._046a38742e89_task_uuid,
                    "subtask_uuid": subtask_uuid,
                    "created_at": timeutils.utcnow(),
                    "updated_at": timeutils.utcnow(),
                    "position": 0,
                    "runner": json.dumps(workload["runner"]),
                    # runner_type is left empty here: the column exists but
                    # the old format kept the type inside "runner" itself.
                    "runner_type": "",
                    "context": "",
                    "context_execution": "",
                    "statistics": "",
                    # NOTE(review): json.dumps("") is stored when a workload
                    # has no hooks, which json.loads back to a falsy "".
                    "hooks": json.dumps(workload.get("hooks", "")),
                    "sla": "",
                    "sla_results": "",
                    "args": "",
                    "load_duration": 0,
                    "pass_sla": True,
                    "min_duration": 0,
                    "max_duration": 1
                }]
            )
def _check_046a38742e89(self, engine, data):
    """Verify the 046a38742e89 migration and remove the fixture rows.

    For every workload seeded by ``_pre_upgrade_046a38742e89`` this checks
    that "type" was stripped from the runner config and that each hook's
    "action" and "trigger" were converted to two-element pairs, then
    deletes the workload, subtask, task and deployment rows it created.
    """
    deployment_table = db_utils.get_table(engine, "deployments")
    task_table = db_utils.get_table(engine, "tasks")
    subtask_table = db_utils.get_table(engine, "subtasks")
    workload_table = db_utils.get_table(engine, "workloads")
    subtask_uuid = None
    with engine.connect() as conn:
        task_uuid = self._046a38742e89_task_uuid
        for workload in conn.execute(workload_table.select().where(
                workload_table.c.task_uuid == task_uuid)).fetchall():
            # Both fixture workloads share one subtask; grab its uuid from
            # the first row so it can be deleted after the loop.
            if subtask_uuid is None:
                subtask_uuid = workload.subtask_uuid
            runner = json.loads(workload.runner)
            self.assertNotIn("type", runner)
            hooks = json.loads(workload.hooks)
            if hooks:
                for hook in hooks:
                    hook_cfg = hook["config"]
                    # New format: action/trigger are (name, args) pairs.
                    self.assertEqual(2, len(hook_cfg["action"]))
                    self.assertEqual(2, len(hook_cfg["trigger"]))
            conn.execute(
                workload_table.delete().where(
                    workload_table.c.uuid == workload.uuid))
        # Clean up fixtures in dependency order: subtask, task, deployment.
        conn.execute(
            subtask_table.delete().where(
                subtask_table.c.uuid == subtask_uuid))
        conn.execute(
            task_table.delete().where(task_table.c.uuid == task_uuid))
        deployment_uuid = self._046a38742e89_deployment_uuid
        conn.execute(
            deployment_table.delete().where(
                deployment_table.c.uuid == deployment_uuid))

View File

@@ -311,21 +311,24 @@ class SubtaskTestCase(test.TestCase):
name = "w"
description = "descr"
position = 0
runner = {"type": "runner"}
runner_type = "runner"
runner = {}
context = {"users": {}}
sla = {"failure_rate": {"max": 0}}
args = {"arg": "xxx"}
hooks = [{"config": {"foo": "bar"}}]
hooks = [{"foo": "bar"}]
workload = subtask.add_workload(name, description=description,
position=position, runner=runner,
context=context, sla=sla, args=args,
hooks=hooks)
workload = subtask.add_workload(
name, description=description, position=position,
runner_type=runner_type, runner=runner, context=context, sla=sla,
args=args, hooks=hooks)
mock_workload.assert_called_once_with(
task_uuid=self.subtask["task_uuid"],
subtask_uuid=self.subtask["uuid"], name=name,
description=description, position=position, runner=runner,
context=context, sla=sla, args=args, hooks=hooks)
description=description, position=position,
runner_type=runner_type, runner=runner,
context=context, sla=sla, args=args,
hooks=[{"config": h} for h in hooks])
self.assertIs(workload, mock_workload.return_value)
@@ -345,14 +348,16 @@ class WorkloadTestCase(test.TestCase):
name = "w"
description = "descr"
position = 0
runner = {"type": "constant"}
runner_type = "constant"
runner = {"times": 3}
context = {"users": {}}
sla = {"failure_rate": {"max": 0}}
args = {"arg": "xxx"}
hooks = [{"config": {"foo": "bar"}}]
workload = objects.Workload("uuid1", "uuid2", name=name,
description=description, position=position,
runner=runner, context=context, sla=sla,
runner=runner, runner_type=runner_type,
context=context, sla=sla,
args=args, hooks=hooks)
mock_workload_create.assert_called_once_with(
task_uuid="uuid1", subtask_uuid="uuid2", name=name, hooks=hooks,
@@ -367,7 +372,7 @@ class WorkloadTestCase(test.TestCase):
mock_workload_create.return_value = self.workload
workload = objects.Workload("uuid1", "uuid2", name="w",
description="descr", position=0,
runner={"type": "foo"}, context=None,
runner_type="foo", runner={}, context=None,
sla=None, args=None, hooks=[])
workload.add_workload_data(0, {"data": "foo"})
@@ -383,7 +388,8 @@ class WorkloadTestCase(test.TestCase):
name = "w"
description = "descr"
position = 0
runner = {"type": "constant"}
runner_type = "constant"
runner = {"times": 3}
context = {"users": {}}
sla = {"failure_rate": {"max": 0}}
args = {"arg": "xxx"}
@@ -394,8 +400,9 @@ class WorkloadTestCase(test.TestCase):
hooks = []
workload = objects.Workload("uuid1", "uuid2", name=name,
description=description, position=position,
runner=runner, context=context, sla=sla,
args=args, hooks=hooks)
runner=runner, runner_type=runner_type,
context=context, sla=sla, args=args,
hooks=hooks)
workload.set_results(load_duration=load_duration,
full_duration=full_duration,
@@ -417,11 +424,15 @@ class WorkloadTestCase(test.TestCase):
"name": "Foo.bar",
"description": "Make something useful (or not).",
"position": 3,
"runner": {"type": "constant", "times": 3},
"context": {"users": {}},
"runner_type": "constant",
"runner": {"times": 3},
"contexts": {"users": {}},
"sla": {"failure_rate": {"max": 0}},
"args": {"key1": "value1"},
"hooks": [{"config": {"hook1": "xxx"}}],
"hooks": [{"config": {
"action": ["foo", {"arg1": "v1"}],
"trigger": ["bar", {"arg2": "v2"}]
}}],
"sla_results": {"sla": []},
"context_execution": {},
"start_time": "2997.23.12",
@@ -444,8 +455,10 @@ class WorkloadTestCase(test.TestCase):
("title", workload["name"]),
("description", workload["description"]),
("scenario", {workload["name"]: workload["args"]}),
("contexts", workload["context"]),
("contexts", workload["contexts"]),
("runner", {"constant": {"times": 3}}),
("hooks", [h["config"] for h in workload["hooks"]]),
("hooks", [{"action": {"foo": {"arg1": "v1"}},
"trigger": {"bar": {"arg2": "v2"}},
"description": None}]),
("sla", workload["sla"])])])])
self.assertEqual(expected_task, objects.Workload.to_task(workload))

View File

@@ -28,8 +28,8 @@ class EventTriggerTestCase(test.TestCase):
super(EventTriggerTestCase, self).setUp()
self.hook_cls = mock.MagicMock(__name__="name")
self.trigger = event.EventTrigger(
{"trigger": {"event": {"unit": "iteration", "at": [1, 4, 5]}},
"action": {"foo": {}}},
{"trigger": ("event", {"unit": "iteration", "at": [1, 4, 5]}),
"action": ("foo", {})},
mock.MagicMock(), self.hook_cls)
@ddt.data((dict(unit="time", at=[0, 3, 5]), True),

View File

@@ -28,8 +28,8 @@ class PeriodicTriggerTestCase(test.TestCase):
super(PeriodicTriggerTestCase, self).setUp()
self.hook_cls = mock.MagicMock(__name__="name")
self.trigger = periodic.PeriodicTrigger(
{"trigger": {"periodic": {"unit": "iteration", "step": 2}},
"action": {"foo": {}}},
{"trigger": ("periodic", {"unit": "iteration", "step": 2}),
"action": ("foo", {})},
mock.MagicMock(), self.hook_cls)
@ddt.data((dict(unit="time", step=1), True),
@@ -74,9 +74,9 @@ class PeriodicTriggerTestCase(test.TestCase):
@ddt.unpack
def test_on_event_start_end(self, value, should_call):
trigger = periodic.PeriodicTrigger(
{"trigger": {"periodic": {"unit": "time",
"step": 3, "start": 2, "end": 9}},
"action": {"foo": {}}},
{"trigger": ("periodic", {"unit": "time",
"step": 3, "start": 2, "end": 9}),
"action": ("foo", {})},
mock.MagicMock(), self.hook_cls)
trigger.on_event("time", value)
self.assertEqual(should_call, self.hook_cls.called)

View File

@@ -38,11 +38,13 @@ class ConstantScenarioRunnerTestCase(test.TestCase):
self.args = {"a": 1}
self.task = mock.MagicMock()
@ddt.data(({"times": 4, "concurrency": 2,
"timeout": 2, "type": "constant",
@ddt.data(({"times": 4,
"concurrency": 2,
"timeout": 2,
"max_cpu_count": 2}, True),
({"times": 4, "concurrency": 5,
"timeout": 2, "type": "constant",
({"times": 4,
"concurrency": 5,
"timeout": 2,
"max_cpu_count": 2}, False),
({"foo": "bar"}, False))
@ddt.unpack
@@ -267,8 +269,9 @@ class ConstantForDurationScenarioRunnerTestCase(test.TestCase):
self.context["iteration"] = 14
self.args = {"a": 1}
@ddt.data(({"duration": 0, "concurrency": 2,
"timeout": 2, "type": "constant_for_duration"}, True),
@ddt.data(({"duration": 0,
"concurrency": 2,
"timeout": 2}, True),
({"foo": "bar"}, False))
@ddt.unpack
def test_validate(self, config, valid):

View File

@@ -36,7 +36,6 @@ class RPSScenarioRunnerTestCase(test.TestCase):
@ddt.data(
{
"config": {
"type": "rps",
"rps": {
"start": 1,
"end": 3,
@@ -47,7 +46,6 @@ class RPSScenarioRunnerTestCase(test.TestCase):
},
{
"config": {
"type": "rps",
"rps": {
"start": 1,
"end": 10,
@@ -58,7 +56,6 @@ class RPSScenarioRunnerTestCase(test.TestCase):
},
{
"config": {
"type": "rps",
"rps": {
"start": 1,
"end": 2,
@@ -69,7 +66,6 @@ class RPSScenarioRunnerTestCase(test.TestCase):
},
{
"config": {
"type": "rps",
"rps": {
"start": 2,
"end": 1,
@@ -81,7 +77,6 @@ class RPSScenarioRunnerTestCase(test.TestCase):
},
{
"config": {
"type": "rps",
"rps": {
"start": 2,
"end": 1,
@@ -93,7 +88,6 @@ class RPSScenarioRunnerTestCase(test.TestCase):
},
{
"config": {
"type": "rps",
"times": 1,
"rps": 100,
"max_concurrency": 50,
@@ -103,14 +97,12 @@ class RPSScenarioRunnerTestCase(test.TestCase):
},
{
"config": {
"type": "rps",
"rps": 0.000001
},
"valid": False
},
{
"config": {
"type": "rps",
"rps": {
"start": 1,
"end": 10,
@@ -121,7 +113,6 @@ class RPSScenarioRunnerTestCase(test.TestCase):
},
{
"config": {
"type": "rps",
"rps": 0,
"times": 55
},
@@ -129,7 +120,6 @@ class RPSScenarioRunnerTestCase(test.TestCase):
},
{
"config": {
"type": "rps",
"rps": 2,
"times": 55,
"foo": "bar"

View File

@@ -247,8 +247,8 @@ class RequiredContextsValidatorTestCase(test.TestCase):
"users": [mock.MagicMock()], })
@ddt.data(
{"config": {"context": {"c1": 1, "c2": 2, "c3": 3}}},
{"config": {"context": {"c1": 1, "c2": 2, "c3": 3, "a": 1}}}
{"config": {"contexts": {"c1": 1, "c2": 2, "c3": 3}}},
{"config": {"contexts": {"c1": 1, "c2": 2, "c3": 3, "a": 1}}}
)
@ddt.unpack
def test_validate(self, config):
@@ -261,16 +261,19 @@ class RequiredContextsValidatorTestCase(test.TestCase):
contexts=("c1", "c2", "c3"))
e = self.assertRaises(
validation.ValidationError,
validator.validate, self.credentials, {"context": {"a": 1}},
validator.validate, self.credentials, {"contexts": {"a": 1}},
None, None)
self.assertEqual(
"The following context(s) are required but missing from "
"the input task file: c1, c2, c3", e.message)
@ddt.data(
{"config": {"context": {"c1": 1, "c2": 2, "c3": 3, "b1": 1, "a1": 1}}},
{"config": {"context": {"c1": 1, "c2": 2, "c3": 3,
"b1": 1, "b2": 2, "a1": 1}}},
{"config": {
"contexts": {"c1": 1, "c2": 2, "c3": 3,
"b1": 1, "a1": 1}}},
{"config": {
"contexts": {"c1": 1, "c2": 2, "c3": 3,
"b1": 1, "b2": 2, "a1": 1}}},
)
@ddt.unpack
def test_validate_with_or(self, config):
@@ -284,7 +287,7 @@ class RequiredContextsValidatorTestCase(test.TestCase):
e = self.assertRaises(
validation.ValidationError,
validator.validate, self.credentials,
{"context": {"c1": 1, "c2": 2}}, None, None)
{"contexts": {"c1": 1, "c2": 2}}, None, None)
self.assertEqual(
"The following context(s) are required but missing "
"from the input task file: 'a1 or a2', 'b1 or b2'", e.message)
@@ -302,20 +305,20 @@ class RequiredParamOrContextValidatorTestCase(test.TestCase):
@ddt.data(
{"config": {"args": {"image": {"name": ""}},
"context": {"custom_image": {"name": "fake_image"}}}},
{"config": {"context": {"custom_image": {"name": "fake_image"}}}},
"contexts": {"custom_image": {"name": "fake_image"}}}},
{"config": {"contexts": {"custom_image": {"name": "fake_image"}}}},
{"config": {"args": {"image": {"name": "fake_image"}},
"context": {"custom_image": ""}}},
"contexts": {"custom_image": ""}}},
{"config": {"args": {"image": {"name": "fake_image"}}}},
{"config": {"args": {"image": {"name": ""}},
"context": {"custom_image": {"name": ""}}}}
"contexts": {"custom_image": {"name": ""}}}}
)
@ddt.unpack
def test_validate(self, config):
self.validator.validate(self.credentials, config, None, None)
@ddt.data(
{"config": {"args": {}, "context": {}},
{"config": {"args": {}, "contexts": {}},
"err_msg": "You should specify either scenario argument image or "
"use context custom_image."},
{"config": {},

View File

@@ -85,20 +85,19 @@ class ImageExistsValidatorTestCase(test.TestCase):
self.assertIsNone(result)
def test_validator_image_from_context(self):
config = {"args": {
"image": {"regex": r"^foo$"}}, "context": {
"images": {
"image_name": "foo"}}}
config = {
"args": {"image": {"regex": r"^foo$"}},
"contexts": {"images": {"image_name": "foo"}}}
self.validator.validate(self.context, config, None, None)
@mock.patch("%s.openstack_types.GlanceImage.transform" % PATH,
return_value="image_id")
def test_validator_image_not_in_context(self, mock_glance_image_transform):
config = {"args": {
"image": "fake_image"}, "context": {
"images": {
"fake_image_name": "foo"}}}
config = {
"args": {"image": "fake_image"},
"contexts": {
"images": {"fake_image_name": "foo"}}}
clients = self.context[
"users"][0]["credential"].clients.return_value
@@ -266,7 +265,7 @@ class FlavorExistsValidatorTestCase(test.TestCase):
def test__get_flavor_from_context(self, mock_flavor_config,
mock_obj_from_name):
config = {
"context": {"images": {"fake_parameter_name": "foo_image"}}}
"contexts": {"images": {"fake_parameter_name": "foo_image"}}}
e = self.assertRaises(
validators.validation.ValidationError,
@@ -274,8 +273,8 @@ class FlavorExistsValidatorTestCase(test.TestCase):
config, "foo_flavor")
self.assertEqual("No flavors context", e.message)
config = {"context": {"images": {"fake_parameter_name": "foo_image"},
"flavors": [{"flavor1": "fake_flavor1"}]}}
config = {"contexts": {"images": {"fake_parameter_name": "foo_image"},
"flavors": [{"flavor1": "fake_flavor1"}]}}
result = self.validator._get_flavor_from_context(config, "foo_flavor")
self.assertEqual("<context flavor: %s>" % result.name, result.id)
@@ -457,11 +456,12 @@ class ImageValidOnFlavorValidatorTestCase(test.TestCase):
"min_disk": 0
}
# Get image name from context
result = self.validator._get_validated_image({"args": {
"image": {"regex": r"^foo$"}}, "context": {
"images": {
"image_name": "foo"}
}}, mock.Mock(), "image")
result = self.validator._get_validated_image({
"args": {
"image": {"regex": r"^foo$"}},
"contexts": {
"images": {"image_name": "foo"}}},
mock.Mock(), "image")
self.assertEqual(image, result)
clients = mock.Mock()
@@ -819,7 +819,7 @@ class RequiredAPIVersionsValidatorTestCase(test.TestCase):
clients = self.context["users"][0]["credential"].clients()
clients.nova.choose_version.return_value = nova
config = {"context": {"api_versions@openstack": {}}}
config = {"contexts": {"api_versions@openstack": {}}}
if err_msg:
e = self.assertRaises(
@@ -839,7 +839,7 @@ class RequiredAPIVersionsValidatorTestCase(test.TestCase):
[version])
config = {
"context": {"api_versions@openstack": {"nova": {"version": 2}}}}
"contexts": {"api_versions@openstack": {"nova": {"version": 2}}}}
if err_msg:
e = self.assertRaises(
@@ -890,7 +890,7 @@ class VolumeTypeExistsValidatorTestCase(test.TestCase):
clients = self.context["users"][0]["credential"].clients()
clients.cinder().volume_types.list.return_value = []
ctx = {"args": {"volume_type": "fake_type"},
"context": {"volume_types": ["fake_type"]}}
"contexts": {"volume_types": ["fake_type"]}}
result = self.validator.validate(self.context, ctx, None, None)
self.assertIsNone(result)
@@ -899,7 +899,7 @@ class VolumeTypeExistsValidatorTestCase(test.TestCase):
clients = self.context["users"][0]["credential"].clients()
clients.cinder().volume_types.list.return_value = []
config = {"args": {"volume_type": "fake_type"},
"context": {"volume_types": ["fake_type_2"]}}
"contexts": {"volume_types": ["fake_type_2"]}}
e = self.assertRaises(
validators.validation.ValidationError,
self.validator.validate, self.context, config, None, None)

View File

@@ -51,7 +51,8 @@ class PlotTestCase(test.TestCase):
"sla_results": {"sla": {}}, "pass_sla": True,
"position": 0,
"name": "Foo.bar", "description": "Description!!",
"runner": {"type": "constant"},
"runner_type": "constant",
"runner": {},
"statistics": {"atomics": {
"foo_action": {"max_duration": 19, "min_duration": 10}}},
"full_duration": 40, "load_duration": 32,
@@ -90,8 +91,8 @@ class PlotTestCase(test.TestCase):
{
"config": {
"description": "Foo",
"action": {"sys_call": "foo cmd"},
"trigger": {"event": {"at": [2, 5], "unit": "iteration"}}},
"action": ("sys_call", "foo cmd"),
"trigger": ("event", {"at": [2, 5], "unit": "iteration"})},
"results": [
{
"status": "success",
@@ -120,8 +121,8 @@ class PlotTestCase(test.TestCase):
"summary": {"success": 2}},
{
"config": {
"action": {"sys_call": "bar cmd"},
"trigger": {"event": {"at": [1, 2, 4], "unit": "time"}}},
"action": ("sys_call", "bar cmd"),
"trigger": ("event", {"at": [1, 2, 4], "unit": "time"})},
"results": [
{
"status": "success",
@@ -186,11 +187,14 @@ class PlotTestCase(test.TestCase):
"name": "Foo.bar_%s" % i,
"description": "Make something useful (or not).",
"position": i,
"runner": {"type": "constant", "times": 3},
"context": {"users": {}},
"runner_type": "constant",
"runner": {"times": 3},
"contexts": {"users": {}},
"sla": {"failure_rate": {"max": 0}},
"args": {"key1": "value1"},
"hooks": [{"config": {"hook1": "xxx"}}],
"hooks": [{"config": {
"action": ("foo", {}),
"trigger": ("xxx", {})}}],
"sla_results": {"sla": []},
"start_time": "2997.23.12",
"load_duration": 42,
@@ -225,7 +229,8 @@ class PlotTestCase(test.TestCase):
"args": {},
"context": {"key": "context"},
"sla": {"key": "sla"},
"runner": {"type": "crunner"},
"runner_type": "crunner",
"runner": {},
"hooks": []}
]}
]}]
@@ -262,7 +267,8 @@ class PlotTestCase(test.TestCase):
"args": {},
"context": {"key": "context"},
"sla": {"key": "sla"},
"runner": {"type": "crunner"},
"runner_type": "crunner",
"runner": {},
"hooks": []}
]},
{"title": "subtask title3",
@@ -274,7 +280,8 @@ class PlotTestCase(test.TestCase):
"args": {},
"context": {"key": "context"},
"sla": {"key": "sla"},
"runner": {"type": "crunner"},
"runner_type": "crunner",
"runner": {},
"hooks": []}
]}
]})

View File

@@ -37,15 +37,16 @@ class MyException(exceptions.RallyException):
class TaskEngineTestCase(test.TestCase):
@staticmethod
def _make_workload(name, args=None, description=None, context=None,
def _make_workload(name, args=None, description=None, contexts=None,
sla=None, runner=None, hooks=None, position=0):
return {"uuid": "foo",
"name": name,
"position": position,
"description": description,
"args": args,
"context": context or {},
"runner": runner or {"type": "serial"},
"contexts": contexts or {},
"runner_type": runner[0] if runner else "serial",
"runner": runner[1] if runner else {},
"sla": sla or {},
"hooks": hooks or []}
@@ -153,12 +154,13 @@ class TaskEngineTestCase(test.TestCase):
scenario_name = "Foo.bar"
runner_type = "MegaRunner"
hook_conf = {"action": {"c": "c_args"},
"trigger": {"d": "d_args"}}
hook_conf = {"action": ("c", "c_args"),
"trigger": ("d", "d_args")}
workload = {"name": scenario_name,
"runner": {"type": runner_type},
"context": {"a": "a_conf"},
"hooks": [{"config": hook_conf}],
"runner_type": runner_type,
"runner": {},
"contexts": {"a": "a_conf"},
"hooks": [hook_conf],
"sla": {"foo_sla": "sla_conf"},
"position": 2}
@@ -169,7 +171,7 @@ class TaskEngineTestCase(test.TestCase):
mock_scenario_runner_validate.assert_called_once_with(
name=runner_type, context=None, config=None,
plugin_cfg={"type": runner_type}, vtype=None)
plugin_cfg={}, vtype=None)
self.assertEqual([mock.call(name="a",
context=None,
config=None,
@@ -204,7 +206,7 @@ class TaskEngineTestCase(test.TestCase):
"There is no such runner"]
scenario_cls = mock_scenario_get.return_value
scenario_cls.get_default_context.return_value = {}
workload = self._make_workload(name="sca", runner={"type": "b"})
workload = self._make_workload(name="sca", runner=("b", {}))
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock(),
mock.Mock())
@@ -230,7 +232,7 @@ class TaskEngineTestCase(test.TestCase):
mock_task_instance.subtasks = [{"workloads": [
self._make_workload(name="sca"),
self._make_workload(name="sca", position=1,
context={"a": "a_conf"})
contexts={"a": "a_conf"})
]}]
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock(),
mock.Mock())
@@ -282,12 +284,12 @@ class TaskEngineTestCase(test.TestCase):
scenario_cls = mock_scenario_get.return_value
scenario_cls.get_default_context.return_value = {}
mock_task_instance = mock.MagicMock()
hook_conf = {"action": {"c": "c_args"},
"trigger": {"d": "d_args"}}
hook_conf = {"action": ("c", "c_args"),
"trigger": ("d", "d_args")}
mock_task_instance.subtasks = [{"workloads": [
self._make_workload(name="sca"),
self._make_workload(name="sca", position=1,
hooks=[{"config": hook_conf}])
hooks=[hook_conf])
]}]
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock(),
mock.Mock())
@@ -315,12 +317,12 @@ class TaskEngineTestCase(test.TestCase):
scenario_cls = mock_scenario_get.return_value
scenario_cls.get_default_context.return_value = {}
mock_task_instance = mock.MagicMock()
hook_conf = {"action": {"c": "c_args"},
"trigger": {"d": "d_args"}}
hook_conf = {"action": ("c", "c_args"),
"trigger": ("d", "d_args")}
mock_task_instance.subtasks = [{"workloads": [
self._make_workload(name="sca"),
self._make_workload(name="sca", position=1,
hooks=[{"config": hook_conf}])
hooks=[hook_conf])
]}]
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock(),
mock.Mock())
@@ -353,7 +355,7 @@ class TaskEngineTestCase(test.TestCase):
mock_task_instance = mock.MagicMock()
wconf1 = self._make_workload(name="SomeScen.scenario",
context={"users": {}})
contexts={"users": {}})
wconf2 = self._make_workload(name="SomeScen.scenario",
position=1)
subtask1 = {"workloads": [wconf1, wconf2]}
@@ -452,9 +454,9 @@ class TaskEngineTestCase(test.TestCase):
"context": {},
"workloads": [
self._make_workload(name="a.task", description="foo",
context={"context_a": {"a": 1}}),
contexts={"context_a": {"a": 1}}),
self._make_workload(name="a.task", description="foo",
context={"context_a": {"b": 2}},
contexts={"context_a": {"b": 2}},
position=2)]}]
mock_task_config.return_value = mock_task_instance
@@ -1064,6 +1066,6 @@ class TaskConfigTestCase(test.TestCase):
workload = task.subtasks[0]["workloads"][0]
self.assertEqual(
{"description": "descr",
"action": {"hook_action": {"k1": "v1"}},
"trigger": {"hook_trigger": {"k2": "v2"}}},
workload["hooks"][0]["config"])
"action": ("hook_action", {"k1": "v1"}),
"trigger": ("hook_trigger", {"k2": "v2"})},
workload["hooks"][0])

View File

@@ -55,18 +55,18 @@ class HookExecutorTestCase(test.TestCase):
super(HookExecutorTestCase, self).setUp()
self.conf = {
"hooks": [
{"config": {
{
"description": "dummy_action",
"action": {
"dummy_hook": {"status": consts.HookStatus.SUCCESS}
},
"trigger": {
"event": {
"action": (
"dummy_hook", {"status": consts.HookStatus.SUCCESS}
),
"trigger": (
"event", {
"unit": "iteration",
"at": [1],
}
}
}}
)
}
]
}
self.task = mock.MagicMock()
@@ -78,7 +78,7 @@ class HookExecutorTestCase(test.TestCase):
hook_executor.on_event(event_type="iteration", value=1)
self.assertEqual(
[{"config": self.conf["hooks"][0]["config"],
[{"config": self.conf["hooks"][0],
"results": [{
"triggered_by": {"event_type": "iteration", "value": 1},
"started_at": fakes.FakeTimer().timestamp(),
@@ -90,15 +90,16 @@ class HookExecutorTestCase(test.TestCase):
@mock.patch("rally.task.hook.HookExecutor._timer_method")
@mock.patch("rally.common.utils.Timer", side_effect=fakes.FakeTimer)
def test_result_optional(self, mock_timer, mock__timer_method):
hook_args = list(self.conf["hooks"][0]["config"]["action"].values())[0]
hook_args = self.conf["hooks"][0]["action"][1]
hook_args["error"] = ["Exception", "Description", "Traceback"]
hook_args["output"] = {"additive": None, "complete": None}
hook_executor = hook.HookExecutor(self.conf, self.task)
hook_executor.on_event(event_type="iteration", value=1)
self.assertEqual(
[{"config": self.conf["hooks"][0]["config"],
[{"config": self.conf["hooks"][0],
"results": [{
"triggered_by": {"event_type": "iteration", "value": 1},
"started_at": fakes.FakeTimer().timestamp(),
@@ -112,7 +113,7 @@ class HookExecutorTestCase(test.TestCase):
def test_empty_result(self):
hook_executor = hook.HookExecutor(self.conf, self.task)
self.assertEqual([{"config": self.conf["hooks"][0]["config"],
self.assertEqual([{"config": self.conf["hooks"][0],
"results": [],
"summary": {}}],
hook_executor.results())
@@ -127,7 +128,7 @@ class HookExecutorTestCase(test.TestCase):
hook_executor.on_event(event_type="iteration", value=1)
self.assertEqual(
[{"config": self.conf["hooks"][0]["config"],
[{"config": self.conf["hooks"][0],
"results": [{
"triggered_by": {"event_type": "iteration", "value": 1},
"error": {"etype": "Exception",
@@ -140,15 +141,14 @@ class HookExecutorTestCase(test.TestCase):
@mock.patch("rally.common.utils.Timer", side_effect=fakes.FakeTimer)
def test_time_event(self, mock_timer):
trigger_args = list(
self.conf["hooks"][0]["config"]["trigger"].values())[0]
trigger_args = self.conf["hooks"][0]["trigger"][1]
trigger_args["unit"] = "time"
hook_executor = hook.HookExecutor(self.conf, self.task)
hook_executor.on_event(event_type="time", value=1)
self.assertEqual(
[{"config": self.conf["hooks"][0]["config"],
[{"config": self.conf["hooks"][0],
"results": [{
"triggered_by": {"event_type": "time", "value": 1},
"started_at": fakes.FakeTimer().timestamp(),
@@ -159,8 +159,8 @@ class HookExecutorTestCase(test.TestCase):
@mock.patch("rally.common.utils.Timer", side_effect=fakes.FakeTimer)
def test_time_periodic(self, mock_timer):
self.conf["hooks"][0]["config"]["trigger"] = {
"periodic": {"unit": "time", "step": 2}}
self.conf["hooks"][0]["trigger"] = ("periodic",
{"unit": "time", "step": 2})
hook_executor = hook.HookExecutor(self.conf, self.task)
for i in range(1, 7):
@@ -168,7 +168,7 @@ class HookExecutorTestCase(test.TestCase):
self.assertEqual(
[{
"config": self.conf["hooks"][0]["config"],
"config": self.conf["hooks"][0],
"results":[
{
"triggered_by": {"event_type": "time", "value": 2},
@@ -196,8 +196,7 @@ class HookExecutorTestCase(test.TestCase):
@mock.patch("rally.common.utils.Stopwatch", autospec=True)
@mock.patch("rally.common.utils.Timer", side_effect=fakes.FakeTimer)
def test_timer_thread(self, mock_timer, mock_stopwatch):
trigger_args = list(
self.conf["hooks"][0]["config"]["trigger"].values())[0]
trigger_args = self.conf["hooks"][0]["trigger"][1]
trigger_args["unit"] = "time"
hook_executor = hook.HookExecutor(self.conf, self.task)
@@ -212,7 +211,7 @@ class HookExecutorTestCase(test.TestCase):
self.assertTrue(hook_executor._timer_stop_event.wait(1))
self.assertEqual(
[{"config": self.conf["hooks"][0]["config"],
[{"config": self.conf["hooks"][0],
"results": [{
"triggered_by": {"event_type": "time", "value": 1},
"started_at": fakes.FakeTimer().timestamp(),
@@ -316,8 +315,8 @@ class TriggerTestCase(test.TestCase):
# test_on_event and test_get_results in one test.
right_values = [5, 7, 12, 13]
cfg = {"trigger": {self.DummyTrigger.get_name(): right_values},
"action": {"fake": {}}}
cfg = {"trigger": (self.DummyTrigger.get_name(), right_values),
"action": ("fake", {})}
task = mock.MagicMock()
hook_cls = mock.MagicMock(__name__="fake")
dummy_trigger = self.DummyTrigger(cfg, task, hook_cls)

View File

@@ -36,7 +36,7 @@ class TriggerTestCase(test.TestCase):
@mock.patch("rally.task.trigger.LOG.warning")
def test_warning(self, mock_log_warning):
self.DummyTrigger({"trigger": {self.id(): {}}}, None, None)
self.DummyTrigger({"trigger": (self.id(), {})}, None, None)
mock_log_warning.assert_called_once_with(
"Please contact Rally plugin maintainer. The plugin '%s'"
@@ -52,8 +52,8 @@ class TriggerTestCase(test.TestCase):
descr = "descr"
trigger_obj = self.DummyTrigger({
"trigger": {trigger_name: trigger_cfg},
"action": {action_name: action_cfg},
"trigger": (trigger_name, trigger_cfg),
"action": (action_name, action_cfg),
"description": descr}, None, None)
self.assertEqual(

View File

@@ -477,6 +477,7 @@ class TaskAPITestCase(test.TestCase):
"start_time": 23.77,
"position": 77,
"runner": "runner-config",
"runner_type": "runner-type",
"context": "ctx-config",
"hooks": "hooks-config",
"sla": "sla-config",
@@ -507,6 +508,7 @@ class TaskAPITestCase(test.TestCase):
sub_task.add_workload.assert_called_once_with(
name=workload["name"], description=workload["description"],
position=workload["position"], runner=workload["runner"],
runner_type=workload["runner_type"],
context=workload["context"], sla=workload["sla"],
hooks=workload["hooks"], args=workload["args"]
)
@@ -538,6 +540,7 @@ class TaskAPITestCase(test.TestCase):
"start_time": 23.77,
"position": 77,
"runner": "runner-config",
"runner_type": "runner-type",
"context": "ctx-config",
"hooks": "hooks-config",
"sla": "sla-config",
@@ -569,6 +572,7 @@ class TaskAPITestCase(test.TestCase):
sub_task.add_workload.assert_called_once_with(
name=workload["name"], description=workload["description"],
position=workload["position"], runner=workload["runner"],
runner_type=workload["runner_type"],
context=workload["context"], sla=workload["sla"],
hooks=workload["hooks"], args=workload["args"]
)