Input task templates and task cmd cleanup
Implement task templates based on Jinja2. This allows us to pass a Jinja2
template as the input task and to supply its arguments via the new
--task-args and --task-args-file options, both of which take a dict in JSON
or YAML representation. The command now looks like:

    rally task start <file> --task-args <template-args-json-or-yaml> \
                            --task-args-file <file-with-args-in-json-or-yaml>

If both --task-args and --task-args-file are specified, the two dicts are
merged and the values from --task-args take priority over those read from
the file.

Extend the Rally CI performance job: template args can now be provided via a
file named ${TASK}_args.yaml.

Bonus:

 * Better message on invalid input task format
 * Remove the redundant catch of KeyboardInterrupt; it should be implemented
   in a different way
 * Replace ' -> " in rally.cmd.commands.task and tests.unit.cmd.commands.task
 * Improve the CLI messages of "rally task start" a bit
 * Remove the old plot2html command (it has been deprecated long enough)
 * Improve test coverage of rally/cmd/commands/task
 * Fix rally/cmd/commands/validate to return 1 on bad format
 * Write errors to stderr (throughout cmd/commands/task.py)

Change-Id: I7dadf2986bb10407865bc73bb2fb8c96a5162d9a
parent 3a0e9b20f8
commit a2d28d1f9f
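For illustration (the scenario name and file names below are placeholders; the {{image_name}} variable, the option names and the merge behaviour come from this change), an input task template, an args file and the matching invocations could look like:

    # task.yaml -- Jinja2 template of the input task
    ---
      SomeScenario.some_method:
        -
          args:
            image:
              name: {{image_name}}

    # task_args.yaml -- dict with template arguments, in YAML
    ---
    image_name: "^cirros.*uec$"

    rally task start task.yaml --task-args-file task_args.yaml
    rally task start task.yaml --task-args '{"image_name": "^cirros.*uec$"}'

When both options are given, the values from --task-args overwrite the ones read from --task-args-file.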
@@ -1,3 +1,4 @@
+{% set image_name = "^cirros.*uec$" %}
 ---
   Dummy.dummy:
     -
@@ -302,7 +303,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
      runner:
        type: "constant"
        times: 4
@@ -321,7 +322,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
      runner:
        type: "constant"
        times: 4
@@ -340,7 +341,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
      runner:
        type: "constant"
        times: 4
@@ -362,7 +363,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
        detailed: True
      runner:
        type: "constant"
@@ -382,7 +383,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
        actions:
          -
            hard_reboot: 1
@@ -410,7 +411,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
        volume_size: 1
      runner:
        type: "constant"
@@ -430,7 +431,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
        volume_size: 1
      runner:
        type: "constant"
@@ -450,7 +451,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
      runner:
        type: "constant"
        times: 3
@@ -469,7 +470,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
        to_flavor:
          name: "m1.small"
        confirm: true
@@ -549,7 +550,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
        security_group_count: 5
        rules_per_security_group: 5
      runner:
@@ -658,7 +659,7 @@
      args:
        size: 1
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
        flavor:
          name: "m1.tiny"
      runner:
@@ -690,7 +691,7 @@
          users_per_tenant: 1
        servers:
          image:
-           name: "^cirros.*uec$"
+           name: {{image_name}}
          flavor:
            name: "m1.tiny"
          servers_per_tenant: 2
@@ -713,7 +714,7 @@
          users_per_tenant: 1
        servers:
          image:
-           name: "^cirros.*uec$"
+           name: {{image_name}}
          flavor:
            name: "m1.tiny"
          servers_per_tenant: 1
@@ -741,7 +742,7 @@
          users_per_tenant: 1
        servers:
          image:
-           name: "^cirros.*uec$"
+           name: {{image_name}}
          flavor:
            name: "m1.tiny"
          servers_per_tenant: 2

@@ -746,7 +746,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
      runner:
        type: "constant"
        times: 3
@@ -764,7 +764,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
      runner:
        type: "constant"
        times: 3
@@ -785,7 +785,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
        force_delete: true
      runner:
        type: "constant"
@@ -805,7 +805,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
        detailed: True
      runner:
        type: "constant"
@@ -835,7 +835,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
        servers_per_tenant: 2
      sla:
        failure_rate:
@@ -847,7 +847,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
        to_flavor:
          name: "m1.small"
        confirm: true
@@ -869,7 +869,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
        actions:
          -
            hard_reboot: 1
@@ -895,7 +895,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
        volume_size: 1
      runner:
        type: "constant"
@@ -915,7 +915,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
        volume_size: 1
      runner:
        type: "constant"
@@ -935,7 +935,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
      runner:
        type: "constant"
        times: 2
@@ -954,7 +954,7 @@
        flavor:
          name: "^ram64$"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
        auto_assign_nics: false
      runner:
        type: "constant"
@@ -976,7 +976,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
      runner:
        type: "constant"
        times: 3
@@ -995,7 +995,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
        fixed_network: "private"
        floating_network: "public"
        use_floatingip: true
@@ -1019,7 +1019,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
        volume_args:
          size: 2
        fixed_network: "private"
@@ -1045,7 +1045,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
        fixed_network: "private"
        use_floatingip: false
        script: "/home/jenkins/.rally/extra/instance_dd_test.sh"
@@ -1122,7 +1122,7 @@
        flavor:
          name: "m1.tiny"
        image:
-         name: "^cirros.*uec$"
+         name: {{image_name}}
        security_group_count: 5
        rules_per_security_group: 5
      runner:
rally-jobs/rally_args.yaml (new file, 3 lines)
@@ -0,0 +1,3 @@
+---
+
+image_name: "^cirros.*uec$"
rally/api.py (35 lines changed)
@@ -13,6 +13,11 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import re
+
+import jinja2
+import jinja2.meta
 import jsonschema

 from rally.benchmark import engine
@@ -90,6 +95,36 @@ def recreate_deploy(deployment):
     deployment.update_endpoints(endpoints)


+def task_template_render(task_template, **kwargs):
+    """Render jinja2 task template to Rally input task.
+
+    :param task_template: String that contains template
+    :param kwargs: Dict with template arguments
+    :returns: rendered template str
+    """
+    ast = jinja2.Environment().parse(task_template)
+    required_kwargs = jinja2.meta.find_undeclared_variables(ast)
+
+    missing = set(required_kwargs) - set(kwargs)
+    # NOTE(boris-42): Removing variables that have default values from missing.
+    #                 Construction that won't be properly checked is
+    #                 {% set x = x or 1}
+    real_missing = []
+    for mis in missing:
+        if not re.search(mis.join(["{%\s*set\s+", "\s*=\s*", "[^\w]+"]),
+                         task_template):
+            real_missing.append(mis)
+
+    if real_missing:
+        multi_msg = _("Please specify next template task arguments: %s")
+        single_msg = _("Please specify template task argument: %s")
+
+        raise TypeError((len(real_missing) > 1 and multi_msg or single_msg) %
+                        ", ".join(real_missing))
+
+    return jinja2.Template(task_template).render(**kwargs)
+
+
 def create_task(deployment, tag):
     """Create a task without starting it.
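A quick sketch of how the new template helper behaves; the expressions are taken from the unit tests added later in this commit:

    from rally import api

    # All undeclared template variables are supplied as kwargs.
    api.task_template_render("{{a + b}} = {{c}}", a=1, b=2, c=3)                  # -> "3 = 3"

    # Variables given a default via "{% set a = a or 1 %}" may be omitted.
    api.task_template_render("{% set a = a or 1 %}{{a + b}} = {{c}}", b=2, c=3)   # -> "3 = 3"

    # A variable with neither a value nor a default raises TypeError.
    api.task_template_render("{{a}}")   # TypeError: Please specify template task argument: a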
@@ -65,9 +65,9 @@ def _run_scenario_once(args):

     context["iteration"] = iteration
     scenario = cls(
-            context=context,
-            admin_clients=osclients.Clients(context["admin"]["endpoint"]),
-            clients=osclients.Clients(context["user"]["endpoint"]))
+        context=context,
+        admin_clients=osclients.Clients(context["admin"]["endpoint"]),
+        clients=osclients.Clients(context["user"]["endpoint"]))

     error = []
     scenario_output = {"errors": "", "data": {}}
@@ -40,6 +40,19 @@ LOG = logging.getLogger(__name__)
 MARGIN = 3


+def make_header(text, size=80, symbol="-"):
+    """Unified way to make header message to CLI.
+
+    :param text: what text to write
+    :param size: Length of header decorative line
+    :param symbol: What symbol to use to create header
+    """
+    header = symbol * size + "\n"
+    header += " %s\n" % text
+    header += symbol * size + "\n"
+    return header
+
+
 class CategoryParser(argparse.ArgumentParser):

     """Customized arguments parser
@@ -190,9 +203,9 @@ def _add_command_parsers(categories, subparsers):
         for action, action_fn in _methods_of(command_object):
             descr = _compose_action_description(action_fn)
             parser = category_subparsers.add_parser(
-                    action,
-                    formatter_class=argparse.RawDescriptionHelpFormatter,
-                    description=descr, help=descr)
+                action,
+                formatter_class=argparse.RawDescriptionHelpFormatter,
+                description=descr, help=descr)

             action_kwargs = []
             for args, kwargs in getattr(action_fn, 'args', []):
@@ -209,8 +222,9 @@ def _add_command_parsers(categories, subparsers):


 def validate_deprecated_args(argv, fn):
-    if len(argv) > 3 and argv[2] == fn.func_name and not getattr(
-            fn, "deprecated_args", []) == list():
+    if (len(argv) > 3
+            and (argv[2] == fn.func_name)
+            and getattr(fn, "deprecated_args", None)):
         for item in fn.deprecated_args:
             if item in argv[3:]:
                 LOG.warning("Deprecated argument %s for %s." % (item,
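As a quick reference for the new helper (assuming, as the call sites in this commit suggest, that it lives in rally.cmd.cliutils):

    from rally.cmd import cliutils   # assumed import path
    print(cliutils.make_header("Preparing input task"))
    # prints an 80-character line of "-", then " Preparing input task", then another line of "-"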
rally/cmd/commands/task.py
@@ -19,6 +19,7 @@ from __future__ import print_function
 import json
 import os
 import pprint
+import sys
 import webbrowser

 import jsonschema
@@ -41,79 +42,179 @@ from rally import objects
 from rally.openstack.common import cliutils as common_cliutils


+class FailedToLoadTask(exceptions.RallyException):
+    msg_fmt = _("Failed to load task")
+
+
 class TaskCommands(object):
     """Task management.

     Set of commands that allow you to manage benchmarking tasks and results.
     """

+    def _load_task(self, task_file, task_args=None, task_args_file=None):
+        """Load tasks template from file and render it with passed args.
+
+        :param task_file: Path to file with input task
+        :param task_args: JSON or YAML representation of dict with args that
+                          will be used to render input task with jinja2
+        :param task_args_file: Path to file with JSON or YAML representation
+                               of dict, that will be used to render input
+                               with jinja2. If both specified task_args and
+                               task_args_file they will be merged. task_args
+                               has bigger priority so it will update values
+                               from task_args_file.
+        :returns: Str with loaded and rendered task
+        """
+        print(cliutils.make_header("Preparing input task"))
+
+        def print_invalid_header(source_name, args):
+            print(_("Invalid %(source)s passed: \n\n %(args)s \n")
+                  % {"source": source_name, "args": args},
+                  file=sys.stderr)
+
+        def parse_task_args(src_name, args):
+            try:
+                kw = args and yaml.safe_load(args)
+                kw = {} if kw is None else kw
+            except yaml.parser.ParserError as e:
+                print_invalid_header(src_name, args)
+                print(_("%(source)s has to be YAML or JSON. Details:"
+                        "\n\n%(err)s\n")
+                      % {"source": src_name, "err": e},
+                      file=sys.stderr)
+                raise TypeError()
+
+            if not isinstance(kw, dict):
+                print_invalid_header(src_name, args)
+                print(_("%(src)s has to be dict, actually %(src_type)s\n")
+                      % {"src": src_name, "src_type": type(kw)},
+                      file=sys.stderr)
+                raise TypeError()
+            return kw
+
+        try:
+            kw = {}
+            if task_args_file:
+                with open(task_args_file) as f:
+                    kw.update(parse_task_args("task_args_file", f.read()))
+            kw.update(parse_task_args("task_args", task_args))
+        except TypeError:
+            raise FailedToLoadTask()
+
+        with open(task_file) as f:
+            try:
+                input_task = f.read()
+                rendered_task = api.task_template_render(input_task, **kw)
+            except Exception as e:
+                print(_("Failed to render task template:\n%(task)s\n%(err)s\n")
+                      % {"task": input_task, "err": e},
+                      file=sys.stderr)
+                raise FailedToLoadTask()
+
+            print(_("Input task is:\n%s\n") % rendered_task)
+            try:
+                return yaml.safe_load(rendered_task)
+            except Exception as e:
+                print(_("Wrong format of rendered input task. It should be "
+                        "YAML or JSON.\n%s") % e,
+                      file=sys.stderr)
+                raise FailedToLoadTask()
+
     @cliutils.deprecated_args(
         "--deploy-id", dest="deployment", type=str,
         required=False, help="UUID of the deployment.")
-    @cliutils.args('--deployment', type=str, dest='deployment',
-                   required=False, help='UUID or name of the deployment')
-    @cliutils.args('--task', '--filename',
-                   help='Path to the file with full configuration of task')
+    @cliutils.args("--deployment", type=str, dest="deployment",
+                   required=False, help="UUID or name of the deployment")
+    @cliutils.args("--task", "--filename",
+                   help="Path to the file with full configuration of task")
+    @cliutils.args("--task-args", dest="task_args",
+                   help="Input task args (dict in json). These args are used "
+                        "to render input task that is jinja2 template.")
+    @cliutils.args("--task-args-file", dest="task_args_file",
+                   help="Path to the file with input task args (dict in "
+                        "json/yaml). These args are used to render input "
+                        "task that is jinja2 template.")
     @envutils.with_default_deployment
-    def validate(self, task, deployment=None):
+    def validate(self, task, deployment=None, task_args=None,
+                 task_args_file=None):
         """Validate a task configuration file.

         This will check that task configuration file has valid syntax and
         all required options of scenarios, contexts, SLA and runners are set.

-        :param task: a file with yaml/json configration
+        :param task: a file with yaml/json task
+        :param task_args: Input task args (dict in json/yaml). These args are
+                          used to render input task that is jinja2 template.
+        :param task_args_file: File with input task args (dict in json/yaml).
+                               These args are used to render input task that
+                               is jinja2 template.
         :param deployment: UUID or name of a deployment
         """

-        task = os.path.expanduser(task)
-        with open(task, "rb") as task_file:
-            config_dict = yaml.safe_load(task_file.read())
         try:
-            api.task_validate(deployment, config_dict)
+            input_task = self._load_task(task, task_args, task_args_file)
+        except FailedToLoadTask:
+            return(1)
+
+        try:
+            api.task_validate(deployment, input_task)
             print("Task config is valid :)")
         except exceptions.InvalidTaskException as e:
             print("Task config is invalid: \n")
             print(e)
             return(1)

     @cliutils.deprecated_args(
         "--deploy-id", dest="deployment", type=str,
         required=False, help="UUID of the deployment.")
-    @cliutils.args('--deployment', type=str, dest='deployment',
-                   required=False, help='UUID or name of the deployment')
-    @cliutils.args('--task', '--filename',
-                   help='Path to the file with full configuration of task')
-    @cliutils.args('--tag',
-                   help='Tag for this task')
-    @cliutils.args('--no-use', action='store_false', dest='do_use',
-                   help='Don\'t set new task as default for future operations')
+    @cliutils.args("--deployment", type=str, dest="deployment",
+                   required=False, help="UUID or name of the deployment")
+    @cliutils.args("--task", "--filename", help="Path to the input task file")
+    @cliutils.args("--task-args", dest="task_args",
+                   help="Input task args (dict in json). These args are used "
+                        "to render input task that is jinja2 template.")
+    @cliutils.args("--task-args-file", dest="task_args_file",
+                   help="Path to the file with input task args (dict in "
+                        "json/yaml). These args are used to render input "
+                        "task that is jinja2 template.")
+    @cliutils.args("--tag", help="Tag for this task")
+    @cliutils.args("--no-use", action="store_false", dest="do_use",
+                   help="Don't set new task as default for future operations")
     @envutils.with_default_deployment
-    def start(self, task, deployment=None, tag=None, do_use=False):
+    def start(self, task, deployment=None, task_args=None, task_args_file=None,
+              tag=None, do_use=False):
         """Start benchmark task.

-        :param task: a file with yaml/json configration
+        :param task: a file with yaml/json task
+        :param task_args: Input task args (dict in json/yaml). These args are
+                          used to render input task that is jinja2 template.
+        :param task_args_file: File with input task args (dict in json/yaml).
+                               These args are used to render input task that
+                               is jinja2 template.
         :param deployment: UUID or name of a deployment
         :param tag: optional tag for this task
         """
-        task = os.path.expanduser(task)
-        with open(task, 'rb') as task_file:
-            config_dict = yaml.safe_load(task_file.read())
         try:
-            task = api.create_task(deployment, tag)
-            print("=" * 80)
-            print(_("Task %(tag)s %(uuid)s is started")
-                  % {"uuid": task["uuid"], "tag": task["tag"]})
-            print("-" * 80)
-            api.start_task(deployment, config_dict, task=task)
-            self.detailed(task_id=task['uuid'])
-            if do_use:
-                use.UseCommands().task(task['uuid'])
-        except exceptions.InvalidConfigException:
-            return(1)
-        except KeyboardInterrupt:
-            api.abort_task(task['uuid'])
-            raise
+            input_task = self._load_task(task, task_args, task_args_file)
+        except FailedToLoadTask:
+            return(1)

-    @cliutils.args('--uuid', type=str, dest='task_id', help='UUID of task')
+        try:
+            task = api.create_task(deployment, tag)
+            print(cliutils.make_header(
+                  _("Task %(tag)s %(uuid)s: started")
+                  % {"uuid": task["uuid"], "tag": task["tag"]}))
+            print("Benchmarking... This can take a while...\n")
+            print("To track task status use:\n")
+            print("\trally task status\n\tor\n\trally task detailed\n")
+            api.start_task(deployment, input_task, task=task)
+            self.detailed(task_id=task["uuid"])
+            if do_use:
+                use.UseCommands().task(task["uuid"])
+        except exceptions.InvalidConfigException:
+            return(1)
+
+    @cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task")
     @envutils.with_default_task_id
     def abort(self, task_id=None):
         """Abort started benchmarking task.

@@ -123,7 +224,7 @@ class TaskCommands(object):

         api.abort_task(task_id)

-    @cliutils.args('--uuid', type=str, dest='task_id', help='UUID of task')
+    @cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task")
     @envutils.with_default_task_id
     def status(self, task_id=None):
         """Display current status of task.

@@ -133,16 +234,15 @@ class TaskCommands(object):
         """

         task = db.task_get(task_id)
-        print(_("Task %(task_id)s is %(status)s.")
-              % {'task_id': task_id, 'status': task['status']})
+        print(_("Task %(task_id)s: %(status)s")
+              % {"task_id": task_id, "status": task["status"]})

-    @cliutils.args(
-        '--uuid', type=str, dest='task_id',
-        help=('uuid of task, if --uuid is "last" results of most '
-              'recently created task will be displayed.'))
-    @cliutils.args('--iterations-data', dest='iterations_data',
-                   action='store_true',
-                   help='print detailed results for each iteration')
+    @cliutils.args("--uuid", type=str, dest="task_id",
+                   help=("uuid of task, if --uuid is \"last\" results of most "
+                         "recently created task will be displayed."))
+    @cliutils.args("--iterations-data", dest="iterations_data",
+                   action="store_true",
+                   help="print detailed results for each iteration")
     @envutils.with_default_task_id
     def detailed(self, task_id=None, iterations_data=False):
         """Display results table.

@@ -188,19 +288,15 @@ class TaskCommands(object):
                                        formatters=formatters)
             print()

-        if task_id == "last":
-            task = db.task_get_detailed_last()
-            task_id = task.uuid
-        else:
-            task = db.task_get_detailed(task_id)
+        task = db.task_get_detailed(task_id)

         if task is None:
             print("The task %s can not be found" % task_id)
             return(1)

         print()
-        print("=" * 80)
-        print(_("Task %(task_id)s is %(status)s.")
+        print("-" * 80)
+        print(_("Task %(task_id)s: %(status)s")
               % {"task_id": task_id, "status": task["status"]})

         if task["failed"]:
@@ -226,7 +322,6 @@ class TaskCommands(object):
             print("args values:")
             pprint.pprint(key["kw"])

-            scenario_time = result["data"]["load_duration"]
             raw = result["data"]["raw"]
             table_cols = ["action", "min (sec)", "avg (sec)", "max (sec)",
                           "90 percentile", "95 percentile", "success",
@@ -261,8 +356,8 @@ class TaskCommands(object):
             if iterations_data:
                 _print_iterations_data(raw)

-            print(_("Whole scenario time without context preparation: "),
-                  scenario_time)
+            print(_("Load duration: %s") % result["data"]["load_duration"])
+            print(_("Full duration: %s") % result["data"]["full_duration"])

             # NOTE(hughsaunders): ssrs=scenario specific results
             ssrs = []
@@ -293,7 +388,7 @@ class TaskCommands(object):
                            utils.percentile(values, 0.90),
                            utils.percentile(values, 0.95)]
                 else:
-                    row = [str(key)] + ['n/a'] * 5
+                    row = [str(key)] + ["n/a"] * 5
                 table_rows.append(rutils.Struct(**dict(zip(headers, row))))
             print("\nScenario Specific Results\n")
             common_cliutils.print_list(table_rows,
@@ -313,7 +408,7 @@ class TaskCommands(object):
         print(_("* To get raw JSON output of task results, run:"))
         print("\trally task results %s\n" % task["uuid"])

-    @cliutils.args('--uuid', type=str, dest='task_id', help='uuid of task')
+    @cliutils.args("--uuid", type=str, dest="task_id", help="uuid of task")
     @envutils.with_default_task_id
     def results(self, task_id=None):
         """Display raw task results.

@@ -335,15 +430,15 @@ class TaskCommands(object):
             print(_("The task %s can not be found") % task_id)
             return(1)

-    @cliutils.args('--deployment', type=str, dest='deployment',
-                   help='List tasks from specified deployment.'
-                        'By default tasks listed from active deployment.')
-    @cliutils.args('--all-deployments', action='store_true',
-                   dest='all_deployments',
-                   help='List tasks from all deployments.')
-    @cliutils.args('--status', type=str, dest='status',
-                   help='List tasks with specified status.'
-                        ' Available statuses: %s' % ', '.join(consts.TaskStatus))
+    @cliutils.args("--deployment", type=str, dest="deployment",
+                   help="List tasks from specified deployment."
+                        "By default tasks listed from active deployment.")
+    @cliutils.args("--all-deployments", action="store_true",
+                   dest="all_deployments",
+                   help="List tasks from all deployments.")
+    @cliutils.args("--status", type=str, dest="status",
+                   help="List tasks with specified status."
+                        " Available statuses: %s" % ", ".join(consts.TaskStatus))
     @envutils.with_default_deployment
     def list(self, deployment=None, all_deployments=False, status=None):
         """List tasks, started and finished.

@@ -357,16 +452,17 @@ class TaskCommands(object):
         :param all_deployments: display tasks from all deployments
         """

-        filters = dict()
+        filters = {}
         headers = ["uuid", "deployment_name", "created_at", "status",
                    "failed", "tag"]

         if status in consts.TaskStatus:
             filters.setdefault("status", status)
-        elif status is not None:
+        elif status:
             print(_("Error: Invalid task status '%s'.\n"
                     "Available statuses: %s") % (
-                  status, ", ".join(consts.TaskStatus)))
+                  status, ", ".join(consts.TaskStatus)),
+                  file=sys.stderr)
             return(1)

         if not all_deployments:
@@ -375,17 +471,16 @@ class TaskCommands(object):
         task_list = objects.Task.list(**filters)

         if task_list:
-            common_cliutils.print_list(map(lambda x: x.to_dict(), task_list),
-                                       headers,
-                                       sortby_index=headers.index(
-                                           'created_at'))
+            common_cliutils.print_list(
+                map(lambda x: x.to_dict(), task_list),
+                headers, sortby_index=headers.index("created_at"))
         else:
             if status:
                 print(_("There are no tasks in '%s' status. "
-                        "To run a new task, use:"
+                        "To run a new task, use:\n"
                         "\trally task start") % status)
             else:
-                print(_("There are no tasks. To run a new task, use:"
+                print(_("There are no tasks. To run a new task, use:\n"
                         "\trally task start"))

     @cliutils.args("--tasks", dest="tasks", nargs="+",
@@ -422,28 +517,26 @@ class TaskCommands(object):
                         result,
                         objects.task.TASK_RESULT_SCHEMA)
                 except jsonschema.ValidationError as e:
-                    msg = _("ERROR: Invalid task result format in %s"
-                            ) % task_file_or_uuid
-                    print(msg)
+                    print(_("ERROR: Invalid task result format in %s")
+                          % task_file_or_uuid, file=sys.stderr)
                     if logging.is_debug():
-                        print(e)
+                        print(e, file=sys.stderr)
                     else:
-                        print(e.message)
+                        print(e.message, file=sys.stderr)
                     return 1

         elif uuidutils.is_uuid_like(task_file_or_uuid):
-            tasks_results = map(lambda x: {"key": x["key"],
-                                           "sla": x["data"]["sla"],
-                                           "result": x["data"]["raw"],
-                                           "load_duration": x["data"][
-                                               "load_duration"],
-                                           "full_duration": x["data"][
-                                               "full_duration"]},
-                                objects.Task.get(
-                                    task_file_or_uuid).get_results())
+            tasks_results = map(
+                lambda x: {"key": x["key"],
+                           "sla": x["data"]["sla"],
+                           "result": x["data"]["raw"],
+                           "load_duration": x["data"]["load_duration"],
+                           "full_duration": x["data"]["full_duration"]},
+                objects.Task.get(task_file_or_uuid).get_results())
         else:
             print(_("ERROR: Invalid UUID or file name passed: %s"
-                    ) % task_file_or_uuid)
+                    ) % task_file_or_uuid,
+                  file=sys.stderr)
             return 1

         for task_result in tasks_results:
@@ -462,23 +555,10 @@ class TaskCommands(object):
         if open_it:
             webbrowser.open_new_tab("file://" + os.path.realpath(out))

-    # NOTE(maretskiy): plot2html is deprecated by `report'
-    #                  and should be removed later
-    @cliutils.args('--uuid', type=str, dest='task_id', help='uuid of task')
-    @cliutils.args('--out', type=str, dest='out', required=False,
-                   help='Path to output file.')
-    @cliutils.args('--open', dest='open_it', action='store_true',
-                   help='Open it in browser.')
-    @envutils.with_default_task_id
-    def plot2html(self, task_id=None, out=None, open_it=False):
-        """Deprecated, use `task report' instead."""
-        print(self.plot2html.__doc__)
-        return self.report(task_id=task_id, out=out, open_it=open_it)
-
-    @cliutils.args('--force', action='store_true', help='force delete')
-    @cliutils.args('--uuid', type=str, dest='task_id', nargs="*",
+    @cliutils.args("--force", action="store_true", help="force delete")
+    @cliutils.args("--uuid", type=str, dest="task_id", nargs="*",
                    metavar="TASK_ID",
-                   help='uuid of task or a list of task uuids')
+                   help="uuid of task or a list of task uuids")
     @envutils.with_default_task_id
     def delete(self, task_id=None, force=False):
         """Delete task and its results.
rally/exceptions.py
@@ -105,11 +105,10 @@ class NotFoundScenarios(InvalidTaskException):


 class InvalidBenchmarkConfig(InvalidTaskException):
-    msg_fmt = _("Task config is invalid.\n"
-                "\tBenchmark %(name)s has wrong configuration at"
-                " position %(pos)s"
-                "\n\tReason: %(reason)s"
-                "\n\tBenchmark configuration: %(config)s")
+    msg_fmt = _("Input task is invalid!\n\n"
+                "Benchmark %(name)s[%(pos)s] has wrong configuration"
+                "\nBenchmark configuration:\n%(config)s\n"
+                "\nReason:\n %(reason)s")


 class NotFoundException(RallyException):
requirements.txt
@@ -5,6 +5,7 @@ Babel>=1.3
 decorator>=3.4.0
 fixtures>=0.3.14
 iso8601>=0.1.9
+Jinja2>=2.6  # BSD License (3 clause)
 jsonschema>=2.0.0,<3.0.0
 netaddr>=0.7.12
 oslo.config>=1.6.0  # Apache-2.0
@@ -21,7 +21,14 @@ if [ ! -d $RALLY_JOB_DIR ]; then
     RALLY_JOB_DIR=$BASE/new/$PROJECT/rally-jobs
 fi

-SCENARIO=${RALLY_JOB_DIR}/${RALLY_SCENARIO}.yaml
+BASE_FOR_TASK=${RALLY_JOB_DIR}/${RALLY_SCENARIO}
+
+TASK=${BASE_FOR_TASK}.yaml
+TASK_ARGS=""
+if [ -f ${BASE_FOR_TASK}_args.yaml ]; then
+    TASK_ARGS=" --task-args-file ${BASE_FOR_TASK}_args.yaml"
+fi
+
 PLUGINS_DIR=${RALLY_JOB_DIR}/plugins
 EXTRA_DIR=${RALLY_JOB_DIR}/extra

@@ -48,12 +55,13 @@ rally show images
 rally show networks
 rally show secgroups
 rally show keypairs
-rally -v task start --task $SCENARIO
+
+rally -v task start --task $TASK $TASK_ARGS

 mkdir -p rally-plot/extra
 python $BASE/new/rally/rally/ui/utils.py render\
     tests/ci/rally-gate/index.mako > rally-plot/extra/index.html
-cp $SCENARIO rally-plot/task.txt
+cp $TASK rally-plot/task.txt
 tar -czf rally-plot/plugins.tar.gz -C $RALLY_PLUGINS_DIR .
 rally task report --out rally-plot/results.html
 gzip -9 rally-plot/results.html
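To illustrate the new job convention (the scenario name "nova" is only an example): with RALLY_SCENARIO=nova the gate now runs

    rally -v task start --task rally-jobs/nova.yaml --task-args-file rally-jobs/nova_args.yaml

where the --task-args-file part is appended only if rally-jobs/nova_args.yaml exists.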
@@ -91,11 +91,6 @@ class TaskTestCase(unittest.TestCase):
         self.assertRaises(utils.RallyCmdError,
                           rally, "task detailed --uuid %s" % FAKE_TASK_UUID)

-    def test_plot2html_with_wrong_task_id(self):
-        rally = utils.Rally()
-        self.assertRaises(utils.RallyCmdError,
-                          rally, "task plot2html --uuid %s" % FAKE_TASK_UUID)
-
     def test_report_with_wrong_task_id(self):
         rally = utils.Rally()
         self.assertRaises(utils.RallyCmdError,
@@ -131,12 +126,12 @@ class TaskTestCase(unittest.TestCase):
         html_file = "/tmp/test_plot.html"
         if os.path.exists(html_file):
             os.remove(html_file)
-        task_uuids = list()
+        task_uuids = []
         for i in range(3):
             res = rally("task start --task %s" % config.filename)
             for line in res.splitlines():
                 if "finished" in line:
-                    task_uuids.append(line.split(" ")[1])
+                    task_uuids.append(line.split(" ")[1][:-1])
         rally("task report --tasks %s --out %s" % (" ".join(task_uuids),
                                                    html_file))
         self.assertTrue(os.path.exists(html_file))
@@ -222,11 +217,12 @@ class TaskTestCase(unittest.TestCase):
         deployment_id = envutils.get_global("RALLY_DEPLOYMENT")
         cfg = {"invalid": "config"}
         config = utils.TaskConfig(cfg)
-        output = rally(("task validate --task %(task_file)s "
-                        "--deployment %(deployment_id)s") %
-                       {"task_file": config.filename,
-                        "deployment_id": deployment_id})
-        self.assertIn("Task config is invalid", output)
+        self.assertRaises(utils.RallyCmdError,
+                          rally,
+                          ("task validate --task %(task_file)s "
+                           "--deployment %(deployment_id)s") %
+                          {"task_file": config.filename,
+                           "deployment_id": deployment_id})

     def test_start(self):
         rally = utils.Rally()
@@ -239,7 +235,7 @@ class TaskTestCase(unittest.TestCase):
                        {"task_file": config.filename,
                         "deployment_id": deployment_id})
         result = re.search(
-            r"(?P<task_id>[0-9a-f\-]{36}) is started", output)
+            r"(?P<task_id>[0-9a-f\-]{36}): started", output)
         self.assertIsNotNone(result)

         # NOTE(oanufriev): Not implemented
@@ -283,14 +279,14 @@ class SLATestCase(unittest.TestCase):
         rally("task start --task %s" % config.filename)
         rally("task sla_check")
         expected = [
            {"benchmark": "KeystoneBasic.create_and_list_users",
             "criterion": "max_seconds_per_iteration",
             "detail": mock.ANY,
             "pos": 0, "status": "PASS"},
            {"benchmark": "KeystoneBasic.create_and_list_users",
             "criterion": "max_failure_percent",
             "detail": mock.ANY,
             "pos": 0, "status": "PASS"},
         ]
         data = rally("task sla_check --json", getjson=True)
         self.assertEqual(expected, data)

@@ -68,7 +68,7 @@ class CliUtilsTestCase(unittest.TestCase):
                        {"task_file": config.filename,
                         "deployment_id": deployment_id})
         result = re.search(
-            r"(?P<uuid>[0-9a-f\-]{36}) is started", output)
+            r"(?P<uuid>[0-9a-f\-]{36}): started", output)
         uuid = result.group("uuid")
         self.rally("use task --uuid %s" % uuid)
         current_task = envutils.get_global("RALLY_TASK")
tests/unit/cmd/commands/test_task.py
@@ -13,9 +13,12 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import copy
+
 import mock

 from rally.cmd.commands import task
+from rally import consts
 from rally import exceptions
 from tests.unit import fakes
 from tests.unit import test
@@ -27,78 +30,149 @@ class TaskCommandsTestCase(test.TestCase):
         super(TaskCommandsTestCase, self).setUp()
         self.task = task.TaskCommands()

-    @mock.patch('rally.cmd.commands.task.TaskCommands.detailed')
-    @mock.patch('rally.api.create_task')
-    @mock.patch('rally.cmd.commands.task.api.start_task')
-    @mock.patch('rally.cmd.commands.task.open',
-                mock.mock_open(read_data='{"some": "json"}'),
-                create=True)
-    def test_start(self, mock_api, mock_create_task,
+    @mock.patch("rally.cmd.commands.task.open", create=True)
+    def test_load_task(self, mock_open):
+        input_task = "{'ab': {{test}}}"
+        input_args = "{'test': 2}"
+
+        # NOTE(boris-42): Such order of files is because we are reading
+        #                 file with args before file with template.
+        mock_open.side_effect = [
+            mock.mock_open(read_data="{'test': 1}").return_value,
+            mock.mock_open(read_data=input_task).return_value
+        ]
+        result = self.task._load_task("in_task", task_args_file="in_args_path")
+        self.assertEqual(result, {"ab": 1})
+
+        mock_open.side_effect = [
+            mock.mock_open(read_data=input_task).return_value
+        ]
+        result = self.task._load_task("in_task", task_args=input_args)
+        self.assertEqual(result, {"ab": 2})
+
+        mock_open.side_effect = [
+            mock.mock_open(read_data="{'test': 1}").return_value,
+            mock.mock_open(read_data=input_task).return_value
+        ]
+        result = self.task._load_task("in_task", task_args=input_args,
+                                      task_args_file="any_file")
+        self.assertEqual(result, {"ab": 2})
+
+    @mock.patch("rally.cmd.commands.task.open", create=True)
+    def test_load_task_wrong_task_args_file(self, mock_open):
+        mock_open.side_effect = [
+            mock.mock_open(read_data="{'test': {}").return_value
+        ]
+        self.assertRaises(task.FailedToLoadTask,
+                          self.task._load_task,
+                          "in_task", task_args_file="in_args_path")
+
+    @mock.patch("rally.cmd.commands.task.open", create=True)
+    def test_load_task_wrong_task_args_file_exception(self, mock_open):
+        mock_open.side_effect = IOError
+        self.assertRaises(IOError, self.task._load_task,
+                          "in_task", task_args_file="in_args_path")
+
+    def test_load_task_wrong_input_task_args(self):
+        self.assertRaises(task.FailedToLoadTask,
+                          self.task._load_task, "in_task", "{'test': {}")
+        self.assertRaises(task.FailedToLoadTask,
+                          self.task._load_task, "in_task", "[]")
+
+    @mock.patch("rally.cmd.commands.task.open", create=True)
+    def test_load_task_task_render_raise_exc(self, mock_open):
+        mock_open.side_effect = [
+            mock.mock_open(read_data="{'test': {{t}}}").return_value
+        ]
+        self.assertRaises(task.FailedToLoadTask,
+                          self.task._load_task, "in_task")
+
+    @mock.patch("rally.cmd.commands.task.open", create=True)
+    def test_load_task_task_not_in_yaml(self, mock_open):
+        mock_open.side_effect = [
+            mock.mock_open(read_data="{'test': {}").return_value
+        ]
+        self.assertRaises(task.FailedToLoadTask,
+                          self.task._load_task, "in_task")
+
+    @mock.patch("rally.cmd.commands.task.TaskCommands.detailed")
+    @mock.patch("rally.cmd.commands.task.TaskCommands._load_task",
+                return_value={"some": "json"})
+    @mock.patch("rally.api.create_task")
+    @mock.patch("rally.cmd.commands.task.api.start_task")
+    def test_start(self, mock_api, mock_create_task, mock_load,
                    mock_task_detailed):
         mock_create_task.return_value = (
-            dict(uuid='fc1a9bbe-1ead-4740-92b5-0feecf421634',
-                 created_at='2014-01-14 09:14:45.395822',
-                 status='init', failed=False, tag=None))
-        deployment_id = 'e0617de9-77d1-4875-9b49-9d5789e29f20'
-        self.task.start('path_to_config.json', deployment_id)
-        mock_api.assert_called_once_with(deployment_id, {u'some': u'json'},
+            dict(uuid="c1a9bbe-1ead-4740-92b5-0feecf421634",
+                 created_at="2014-01-14 09:14:45.395822",
+                 status="init", failed=False, tag=None))
+        deployment_id = "e0617de9-77d1-4875-9b49-9d5789e29f20"
+        task_path = "path_to_config.json"
+        self.task.start(task_path, deployment_id)
+        mock_api.assert_called_once_with(deployment_id, {"some": "json"},
                                          task=mock_create_task.return_value)
+        mock_load.assert_called_once_with(task_path, None, None)

-    @mock.patch('rally.cmd.commands.task.envutils.get_global')
+    @mock.patch("rally.cmd.commands.task.TaskCommands._load_task",
+                side_effect=task.FailedToLoadTask)
+    def test_start_with_task_args(self, mock_load):
+        task_path = mock.MagicMock()
+        task_args = mock.MagicMock()
+        task_args_file = mock.MagicMock()
+        self.task.start(task_path, deployment="any", task_args=task_args,
+                        task_args_file=task_args_file)
+        mock_load.assert_called_once_with(task_path, task_args, task_args_file)
+
+    @mock.patch("rally.cmd.commands.task.envutils.get_global")
     def test_start_no_deployment_id(self, mock_default):
         mock_default.side_effect = exceptions.InvalidArgumentsException
         self.assertRaises(exceptions.InvalidArgumentsException,
-                          self.task.start, 'path_to_config.json', None)
+                          self.task.start, "path_to_config.json", None)

-    @mock.patch('rally.cmd.commands.task.TaskCommands.detailed')
-    @mock.patch('rally.api.create_task')
-    @mock.patch('rally.cmd.commands.task.api')
-    @mock.patch('rally.cmd.commands.task.open',
-                mock.mock_open(read_data='{"some": "json"}'),
-                create=True)
-    def test_start_kb_interuupt(self, mock_api, mock_create_task,
-                                mock_task_detailed):
-        mock_create_task.return_value = (
-            dict(uuid='fc1a9bbe-1ead-4740-92b5-0feecf421634',
-                 created_at='2014-01-14 09:14:45.395822',
-                 status='init', failed=False, tag=None))
-        mock_api.start_task.side_effect = KeyboardInterrupt
-        deployment_id = 'f586dcd7-8473-4c2e-a4d4-22be26371c10'
-        self.assertRaises(KeyboardInterrupt, self.task.start,
-                          'path_to_config.json', deployment_id)
-        mock_api.abort_task.assert_called_once_with(
-            mock_api.create_task.return_value['uuid'])
+    @mock.patch("rally.cmd.commands.task.TaskCommands._load_task")
+    @mock.patch("rally.cmd.commands.task.api")
+    def test_start_invalid_task(self, mock_api, mock_load):
+        mock_api.start_task.side_effect = exceptions.InvalidConfigException
+
+        result = self.task.start("task_path", "deployment", tag="tag")
+        self.assertEqual(1, result)
+
+        mock_api.create_task.assert_called_once_with("deployment", "tag")
+        mock_api.start_task.assert_called_once_with(
+            "deployment", mock_load.return_value,
+            task=mock_api.create_task.return_value)

     @mock.patch("rally.cmd.commands.task.api")
     def test_abort(self, mock_api):
-        test_uuid = '17860c43-2274-498d-8669-448eff7b073f'
+        test_uuid = "17860c43-2274-498d-8669-448eff7b073f"
         mock_api.abort_task = mock.MagicMock()
         self.task.abort(test_uuid)
         task.api.abort_task.assert_called_once_with(test_uuid)

-    @mock.patch('rally.cmd.commands.task.envutils.get_global')
+    @mock.patch("rally.cmd.commands.task.envutils.get_global")
     def test_abort_no_task_id(self, mock_default):
         mock_default.side_effect = exceptions.InvalidArgumentsException
         self.assertRaises(exceptions.InvalidArgumentsException,
                           self.task.abort, None)

     def test_status(self):
-        test_uuid = 'a3e7cefb-bec2-4802-89f6-410cc31f71af'
-        value = {'task_id': "task", "status": "status"}
+        test_uuid = "a3e7cefb-bec2-4802-89f6-410cc31f71af"
+        value = {"task_id": "task", "status": "status"}
         with mock.patch("rally.cmd.commands.task.db") as mock_db:
             mock_db.task_get = mock.MagicMock(return_value=value)
             self.task.status(test_uuid)
             mock_db.task_get.assert_called_once_with(test_uuid)

-    @mock.patch('rally.cmd.commands.task.envutils.get_global')
+    @mock.patch("rally.cmd.commands.task.envutils.get_global")
     def test_status_no_task_id(self, mock_default):
         mock_default.side_effect = exceptions.InvalidArgumentsException
         self.assertRaises(exceptions.InvalidArgumentsException,
                           self.task.status, None)

-    @mock.patch('rally.cmd.commands.task.db')
+    @mock.patch("rally.cmd.commands.task.db")
     def test_detailed(self, mock_db):
-        test_uuid = 'c0d874d4-7195-4fd5-8688-abe82bfad36f'
+        test_uuid = "c0d874d4-7195-4fd5-8688-abe82bfad36f"
         value = {
             "id": "task",
             "uuid": test_uuid,
@@ -112,7 +186,54 @@ class TaskCommandsTestCase(test.TestCase):
                     },
                     "data": {
                         "load_duration": 1.0,
-                        "raw": []
+                        "full_duration": 2.0,
+                        "raw": [
+                            {
+                                "duration": 0.9,
+                                "idle_duration": 0.5,
+                                "scenario_output": {
+                                    "data": {
+                                        "a": 3
+                                    },
+                                    "errors": "some"
+                                },
+                                "atomic_actions": {
+                                    "a": 0.6,
+                                    "b": 0.7
+                                },
+                                "error": ["type", "message", "traceback"]
+                            },
+                            {
+                                "duration": 0.5,
+                                "idle_duration": 0.2,
+                                "scenario_output": {
+                                    "data": {
+                                        "a": 1
+                                    },
+                                    "errors": "some"
+                                },
+                                "atomic_actions": {
+                                    "a": 0.2,
+                                    "b": 0.4
+                                },
+                                "error": None
+                            },
+                            {
+                                "duration": 0.6,
+                                "idle_duration": 0.4,
+                                "scenario_output": {
+                                    "data": {
+                                        "a": 2
+                                    },
+                                    "errors": None
+                                },
+                                "atomic_actions": {
+                                    "a": 0.3,
+                                    "b": 0.5
+                                },
+                                "error": None
+                            }
+                        ]
                     }
                 }
             ],
@@ -122,15 +243,36 @@ class TaskCommandsTestCase(test.TestCase):
         self.task.detailed(test_uuid)
         mock_db.task_get_detailed.assert_called_once_with(test_uuid)

+        self.task.detailed(test_uuid, iterations_data=True)
+
+    @mock.patch("rally.cmd.commands.task.db")
+    @mock.patch("rally.cmd.commands.task.logging")
+    def test_detailed_task_failed(self, mock_logging, mock_db):
+        value = {
+            "id": "task",
+            "uuid": "task_uuid",
+            "status": "status",
+            "results": [],
+            "verification_log": "['1', '2', '3']",
+            "failed": True
+        }
+        mock_db.task_get_detailed = mock.MagicMock(return_value=value)
+
+        mock_logging.is_debug.return_value = False
+        self.task.detailed("task_uuid")
+
+        mock_logging.is_debug.return_value = True
+        self.task.detailed("task_uuid")
+
     @mock.patch("rally.cmd.commands.task.envutils.get_global")
     def test_detailed_no_task_id(self, mock_default):
         mock_default.side_effect = exceptions.InvalidArgumentsException
         self.assertRaises(exceptions.InvalidArgumentsException,
                           self.task.detailed, None)

-    @mock.patch('rally.cmd.commands.task.db')
+    @mock.patch("rally.cmd.commands.task.db")
     def test_detailed_wrong_id(self, mock_db):
-        test_uuid = 'eb290c30-38d8-4c8f-bbcc-fc8f74b004ae'
+        test_uuid = "eb290c30-38d8-4c8f-bbcc-fc8f74b004ae"
         mock_db.task_get_detailed = mock.MagicMock(return_value=None)
         self.task.detailed(test_uuid)
         mock_db.task_get_detailed.assert_called_once_with(test_uuid)
@@ -240,14 +382,13 @@ class TaskCommandsTestCase(test.TestCase):

         results = list()
         for task_uuid in tasks:
-            results.extend(map(lambda x: {"key": x["key"],
-                                          "result": x["data"]["raw"],
-                                          "sla": x["data"]["sla"],
-                                          "load_duration": x[
-                                              "data"]["load_duration"],
-                                          "full_duration": x[
-                                              "data"]["full_duration"]},
-                               data))
+            results.extend(
+                map(lambda x: {"key": x["key"],
+                               "result": x["data"]["raw"],
+                               "sla": x["data"]["sla"],
+                               "load_duration": x["data"]["load_duration"],
+                               "full_duration": x["data"]["full_duration"]},
+                    data))

         mock_results = mock.Mock(return_value=data)
         mock_get.return_value = mock.Mock(get_results=mock_results)
@@ -343,9 +484,9 @@ class TaskCommandsTestCase(test.TestCase):
                                out="/tmp/tmp.hsml")
         self.assertEqual(ret, 1)

-    @mock.patch('rally.cmd.commands.task.common_cliutils.print_list')
-    @mock.patch('rally.cmd.commands.task.envutils.get_global',
-                return_value='123456789')
+    @mock.patch("rally.cmd.commands.task.common_cliutils.print_list")
+    @mock.patch("rally.cmd.commands.task.envutils.get_global",
+                return_value="123456789")
     @mock.patch("rally.cmd.commands.task.objects.Task.list",
                 return_value=[fakes.FakeTask(uuid="a",
                                              created_at="b",
@@ -355,9 +496,10 @@ class TaskCommandsTestCase(test.TestCase):
                                              deployment_name="some_name")])
     def test_list(self, mock_objects_list, mock_default, mock_print_list):

-        self.task.list()
+        self.task.list(status="running")
         mock_objects_list.assert_called_once_with(
-            deployment=mock_default.return_value)
+            deployment=mock_default.return_value,
+            status=consts.TaskStatus.RUNNING)

         headers = ["uuid", "deployment_name", "created_at", "status",
                    "failed", "tag"]
@@ -365,8 +507,25 @@ class TaskCommandsTestCase(test.TestCase):
             mock_objects_list.return_value, headers,
             sortby_index=headers.index('created_at'))

+    def test_list_wrong_status(self):
+        self.assertEqual(1, self.task.list(deployment="fake",
+                                           status="wrong non existing status"))
+
+    @mock.patch("rally.cmd.commands.task.objects.Task.list", return_value=[])
+    def test_list_no_results(self, mock_list):
+        self.assertIsNone(
+            self.task.list(deployment="fake", all_deployments=True))
+        mock_list.assert_called_once_with()
+        mock_list.reset_mock()
+
+        self.assertIsNone(
+            self.task.list(deployment="d", status=consts.TaskStatus.RUNNING)
+        )
+        mock_list.assert_called_once_with(deployment="d",
+                                          status=consts.TaskStatus.RUNNING)
+
     def test_delete(self):
-        task_uuid = '8dcb9c5e-d60b-4022-8975-b5987c7833f7'
+        task_uuid = "8dcb9c5e-d60b-4022-8975-b5987c7833f7"
         force = False
         with mock.patch("rally.cmd.commands.task.api") as mock_api:
             mock_api.delete_task = mock.Mock()
@@ -376,11 +535,10 @@ class TaskCommandsTestCase(test.TestCase):

     @mock.patch("rally.cmd.commands.task.api")
     def test_delete_multiple_uuid(self, mock_api):
-        task_uuids = ['4bf35b06-5916-484f-9547-12dce94902b7',
-                      '52cad69d-d3e4-47e1-b445-dec9c5858fe8',
-                      '6a3cb11c-ac75-41e7-8ae7-935732bfb48f',
-                      '018af931-0e5a-40d5-9d6f-b13f4a3a09fc',
-                      '1a4d88c9-fb68-4ff6-a246-f9122aec79b0']
+        task_uuids = ["4bf35b06-5916-484f-9547-12dce94902b7",
+                      "52cad69d-d3e4-47e1-b445-dec9c5858fe8",
+                      "6a3cb11c-ac75-41e7-8ae7-935732bfb48f",
+                      "018af931-0e5a-40d5-9d6f-b13f4a3a09fc"]
         force = False
         self.task.delete(task_uuids, force=force)
         self.assertTrue(mock_api.delete_task.call_count == len(task_uuids))
@@ -389,8 +547,8 @@ class TaskCommandsTestCase(test.TestCase):
         self.assertTrue(mock_api.delete_task.mock_calls == expected_calls)

     @mock.patch("rally.cmd.commands.task.common_cliutils.print_list")
-    @mock.patch("rally.cmd.commands.task.db")
-    def _test_sla_check(self, mock_db, mock_print_list):
+    @mock.patch("rally.cmd.commands.task.objects.Task.get")
+    def test_sla_check(self, mock_task_get, mock_print_list):
         data = [{"key": {"name": "fake_name",
                          "pos": "fake_pos",
                          "kw": "fake_kw"},
@@ -401,14 +559,43 @@ class TaskCommandsTestCase(test.TestCase):
                            "pos": 0,
                            "success": False,
                            "detail": "Max foo, actually bar"}]}}]
-        mock_db.task_result_get_all_by_uuid.return_value = data

+        mock_task_get().get_results.return_value = copy.deepcopy(data)
+        result = self.task.sla_check(task_id="fake_task_id")
+        self.assertEqual(1, result)
+        mock_task_get.assert_called_with("fake_task_id")
+
+        data[0]["data"]["sla"][0]["success"] = True
+        mock_task_get().get_results.return_value = data
+
+        result = self.task.sla_check(task_id="fake_task_id", tojson=True)
+        self.assertEqual(0, result)
+
     @mock.patch("rally.cmd.commands.task.open",
-                mock.mock_open(read_data='{"some": "json"}'),
+                mock.mock_open(read_data="{\"some\": \"json\"}"),
                 create=True)
     @mock.patch("rally.api.task_validate")
-    def test_verify(self, mock_validate):
+    def test_validate(self, mock_validate):
         self.task.validate("path_to_config.json", "fake_id")
         mock_validate.assert_called_once_with("fake_id", {"some": "json"})

+    @mock.patch("rally.cmd.commands.task.TaskCommands._load_task",
+                side_effect=task.FailedToLoadTask)
+    def test_validate_failed_to_load_task(self, mock_load):
+        args = mock.MagicMock()
+        args_file = mock.MagicMock()
+
+        result = self.task.validate("path_to_task", "fake_id",
+                                    task_args=args, task_args_file=args_file)
+        self.assertEqual(1, result)
+        mock_load.assert_called_once_with("path_to_task", args, args_file)
+
+    @mock.patch("rally.cmd.commands.task.TaskCommands._load_task")
+    @mock.patch("rally.api.task_validate")
+    def test_validate_invalid(self, mock_task_validate, mock_load):
+
+        mock_task_validate.side_effect = exceptions.InvalidTaskException
+        result = self.task.validate("path_to_task", "deployment")
+        self.assertEqual(1, result)
+        mock_task_validate.assert_called_once_with("deployment",
+                                                   mock_load.return_value)
@@ -54,6 +54,10 @@ class CliUtilsTestCase(test.TestCase):
         self._unregister_opts()
         super(CliUtilsTestCase, self).tearDown()

+    def test_make_header(self):
+        h1 = cliutils.make_header("msg", size="4", symbol="=")
+        self.assertEqual(h1, "====\n msg\n====\n")
+
     def test_pretty_float_formatter_rounding(self):
         test_table_rows = {"test_header": 6.56565}
         self.__dict__.update(**test_table_rows)
@@ -18,6 +18,7 @@ import traceback
 import mock
 import yaml

+from rally import api
 from rally.benchmark import engine
 import rally.common.utils as rutils
 from tests.unit import test
@@ -38,9 +39,22 @@ class RallyJobsTestCase(test.TestCase):

         with open(full_path) as task_file:
             try:
-                task_config = yaml.safe_load(task_file.read())
-                eng = engine.BenchmarkEngine(task_config,
-                                             mock.MagicMock())
+                args_file = os.path.join(
+                    self.rally_jobs_path,
+                    filename.rsplit(".", 1)[0] + "_args.yaml")
+
+                args = {}
+                if os.path.exists(args_file):
+                    args = yaml.safe_load(open(args_file).read())
+                    if not isinstance(args, dict):
+                        raise TypeError(
+                            "args file %s must be dict in yaml or json "
+                            "presenatation" % args_file)
+
+                task = api.task_template_render(task_file.read(), **args)
+                task = yaml.safe_load(task)
+
+                eng = engine.BenchmarkEngine(task, mock.MagicMock())
                 eng.validate()
             except Exception:
                 print(traceback.format_exc())
@@ -89,6 +89,22 @@ class APITestCase(test.TestCase):
         mock_deployment_get.assert_called_once_with(
             mock_deployment_get.return_value["uuid"])

+    def test_task_template_render(self):
+        self.assertEqual(
+            "3 = 3",
+            api.task_template_render("{{a + b}} = {{c}}", a=1, b=2, c=3))
+
+    def test_task_template_render_default_values(self):
+        template = "{% set a = a or 1 %}{{a + b}} = {{c}}"
+
+        self.assertEqual("3 = 3", api.task_template_render(template, b=2, c=3))
+
+        self.assertEqual(
+            "5 = 5", api.task_template_render(template, a=2, b=3, c=5))
+
+    def test_task_template_render_missing_args(self):
+        self.assertRaises(TypeError, api.task_template_render, "{{a}}")
+
     @mock.patch("rally.objects.Deployment.get",
                 return_value={'uuid': 'b0d9cd6c-2c94-4417-a238-35c7019d0257'})
     @mock.patch("rally.objects.Task")
@@ -20,13 +20,12 @@ _rally()
     OPTS["task_delete"]="--force --uuid"
     OPTS["task_detailed"]="--uuid --iterations-data"
     OPTS["task_list"]="--deployment --all-deployments --status"
-    OPTS["task_plot2html"]="--uuid --out --open"
     OPTS["task_report"]="--tasks --out --open"
     OPTS["task_results"]="--uuid"
     OPTS["task_sla_check"]="--uuid --json"
-    OPTS["task_start"]="--deployment --task --tag --no-use"
+    OPTS["task_start"]="--deployment --task --task-args --task-args-file --tag --no-use"
     OPTS["task_status"]="--uuid"
-    OPTS["task_validate"]="--deployment --task"
+    OPTS["task_validate"]="--deployment --task --task-args --task-args-file"
     OPTS["show_flavors"]="--deployment"
     OPTS["show_images"]="--deployment"
    OPTS["show_keypairs"]="--deployment"
@@ -76,4 +75,4 @@ _rally()
     fi
     return 0
 }
-complete -F _rally rally
+complete -F _rally rally