[CLI] group commands in help message
In subcommands (for example `rally verify`) we have several groups of methods
(management, launchers, results). It would be nice to split these groups in
the help message.

Before:

  compare          Deprecated. Use `rally verify results' instead.
  detailed         Display results table of a verification with detailed errors.
  discover         Show a list of discovered tests.
  genconfig        Generate Tempest configuration file.
  import           Import Tempest tests results into the Rally database.
  install          Install Tempest.
  installplugin    Install Tempest plugin.
  list             List verification runs.
  listplugins      List all installed Tempest plugins.
  reinstall        Uninstall Tempest and install again.
  results          Display results of verifications.
  show             Display results table of a verification.
  showconfig       Show Tempest configuration file.
  start            Start verification (run Tempest tests).
  uninstall        Remove the deployment's local Tempest installation.
  uninstallplugin  Uninstall Tempest plugin.
  use              Set active verification.

After:

  genconfig        Generate Tempest configuration file.
  install          Install Tempest.
  installplugin    Install Tempest plugin.
  listplugins      List all installed Tempest plugins.
  reinstall        Uninstall Tempest and install again.
  showconfig       Show Tempest configuration file.
  uninstall        Remove the deployment's local Tempest installation.
  uninstallplugin  Uninstall Tempest plugin.

  discover         Show a list of discovered tests.
  start            Start verification (run Tempest tests).

  compare          Deprecated. Use `rally verify results' instead.
  detailed         Display results table of a verification with detailed errors.
  import-results   Import Tempest tests results into the Rally database.
  list             List verification runs.
  results          Display results of verifications.
  show             Display results table of a verification.
  use              Set active verification.

This change also transforms all underscores ("_") to dashes ("-") in CLI
method names.

Change-Id: I292e71d159ee35e933119f7fb57209f071aa37d4
parent 11a9c09c2b
commit f7929d0f6d
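
For context, here is a minimal usage sketch of the new decorator (not part of the diff below): the `ExampleCommands` class, its methods and the group labels are invented for illustration, mirroring the unit test added in this change; only `cliutils.help_group` itself comes from the commit.

    # Hypothetical sketch: only cliutils.help_group is real; the class,
    # methods and group labels are invented for illustration.
    from rally.cli import cliutils


    class ExampleCommands(object):
        """Example command category."""

        @cliutils.help_group("1_manager")
        def install(self):
            """Install the tool."""

        @cliutils.help_group("2_launcher")
        def start(self):
            """Start a run."""

        @cliutils.help_group("3_results")
        def results(self):
            """Display results."""

        # Undecorated methods fall into the default "0" group, which sorts
        # first because groups are ordered by their label string.
        def discover(self):
            """Show a list of discovered items."""

With this in place, `_compose_category_description()` lists each group sorted by its label, separates groups with blank lines, and renders underscores in method names as dashes.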
@@ -40,6 +40,7 @@ _rally()
     OPTS["task_list"]="--deployment --all-deployments --status --uuids-only"
     OPTS["task_report"]="--tasks --out --open --html --html-static --junit"
     OPTS["task_results"]="--uuid"
+    OPTS["task_sla-check"]="--uuid --json"
     OPTS["task_sla_check"]="--uuid --json"
     OPTS["task_start"]="--deployment --task --task-args --task-args-file --tag --no-use --abort-on-sla-failure"
     OPTS["task_status"]="--uuid"
@@ -362,6 +362,24 @@ def deprecated_args(*args, **kwargs):
     return _decorator


+def help_group(uuid):
+    """Label cli method with specific group.
+
+    Joining methods by groups allows to compose more user-friendly help
+    messages in CLI.
+
+    :param uuid: Name of group to find common methods. It will be used for
+        sorting groups in help message, so you can start uuid with
+        some number (i.e "1_launcher", "2_management") to put groups in proper
+        order. Note: default group had "0" uuid.
+    """
+
+    def wrapper(func):
+        func.help_group = uuid
+        return func
+    return wrapper
+
+
 def _methods_of(cls):
     """Get all callable methods of a class that don't start with underscore.

@@ -373,6 +391,20 @@ def _methods_of(cls):
     all_methods = inspect.getmembers(
         cls, predicate=lambda x: inspect.ismethod(x) or inspect.isfunction(x))
     methods = [m for m in all_methods if not m[0].startswith("_")]
+
+    help_groups = {}
+    for m in methods:
+        group = getattr(m[1], "help_group", "0")
+        help_groups.setdefault(group, []).append(m)
+
+    if len(help_groups) > 1:
+        # we should sort methods by groups
+        methods = []
+        for group in sorted(help_groups.items(), key=lambda x: x[0]):
+            if methods:
+                # None -> empty line between groups
+                methods.append((None, None))
+            methods.extend(group[1])
     return methods


@@ -386,10 +418,13 @@ def _compose_category_description(category):
         description = doc.strip()
     if descr_pairs:
         description += "\n\nCommands:\n"
-        sublen = lambda item: len(item[0])
+        sublen = lambda item: len(item[0]) if item[0] else 0
         first_column_len = max(map(sublen, descr_pairs)) + MARGIN
         for item in descr_pairs:
-            name = getattr(item[1], "alias", item[0])
+            if item[0] is None:
+                description += "\n"
+                continue
+            name = getattr(item[1], "alias", item[0].replace("_", "-"))
             if item[1].__doc__:
                 doc = info.parse_docstring(
                     item[1].__doc__)["short_description"]
@@ -436,6 +471,9 @@ def _add_command_parsers(categories, subparsers):
         category_subparsers = parser.add_subparsers(dest="action")

         for method_name, method in _methods_of(command_object):
+            if method is None:
+                continue
+            method_name = method_name.replace("_", "-")
             descr = _compose_action_description(method)
             parser = category_subparsers.add_parser(
                 getattr(method, "alias", method_name),
@@ -647,7 +685,7 @@ complete -o filenames -F _rally rally
     completion = []
     for category, cmds in main.categories.items():
         for name, command in _methods_of(cmds):
-            command_name = getattr(command, "alias", name)
+            command_name = getattr(command, "alias", name.replace("_", "-"))
             args_list = []
             for arg in getattr(command, "args", []):
                 if getattr(command, "deprecated_args", []):
@@ -739,6 +739,16 @@ class TaskCommands(object):
         else:
             _delete_single_task(task_id, force)

+    @cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task.")
+    @cliutils.args("--json", dest="tojson",
+                   action="store_true",
+                   help="Output in JSON format.")
+    @envutils.with_default_task_id
+    @cliutils.alias("sla_check")
+    def sla_check_deprecated(self, task_id=None, tojson=False):
+        """DEPRECATED since Rally 0.8.0, use `rally task sla-check` instead."""
+        return self.sla_check(task_id=task_id, tojson=tojson)
+
     @cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task.")
     @cliutils.args("--json", dest="tojson",
                    action="store_true",
@@ -110,7 +110,7 @@ function run () {
     # NOTE(stpierre): if the sla check fails, we still want osresources.py
     # to run, so we turn off -e and save the return value
     set +e
-    rally task sla_check | tee rally-plot/sla.txt
+    rally task sla-check | tee rally-plot/sla.txt
     retval=$?
     set -e

@@ -111,7 +111,7 @@ def run_task(task, tags=None):
          "%s/%s.html" % (pub_dir, task_name)])
     run(["rally", "task", "results"],
         stdout="%s/results-%s.json" % (pub_dir, task_name))
-    status = run(["rally", "task", "sla_check"],
+    status = run(["rally", "task", "sla-check"],
                  stdout="%s/%s.sla.txt" % (pub_dir, task_name))
     run(["rally", "task", "detailed"],
         stdout="rally-plot/detailed-%s.txt" % task_name)
@@ -206,7 +206,7 @@ class TaskTestCase(unittest.TestCase):
     def test_sla_check_with_wrong_task_id(self):
         rally = utils.Rally()
         self.assertRaises(utils.RallyCliError,
-                          rally, "task sla_check --uuid %s" % FAKE_TASK_UUID)
+                          rally, "task sla-check --uuid %s" % FAKE_TASK_UUID)

     def test_status_with_wrong_task_id(self):
         rally = utils.Rally()
@@ -883,13 +883,13 @@ class SLATestCase(unittest.TestCase):
         cfg = self._get_sample_task_config(max_seconds_per_iteration=0.001)
         config = utils.TaskConfig(cfg)
         rally("task start --task %s" % config.filename)
-        self.assertRaises(utils.RallyCliError, rally, "task sla_check")
+        self.assertRaises(utils.RallyCliError, rally, "task sla-check")

     def test_sla_success(self):
         rally = utils.Rally()
         config = utils.TaskConfig(self._get_sample_task_config())
         rally("task start --task %s" % config.filename)
-        rally("task sla_check")
+        rally("task sla-check")
         expected = [
             {"benchmark": "KeystoneBasic.create_and_list_users",
              "criterion": "failure_rate",
@@ -900,7 +900,7 @@ class SLATestCase(unittest.TestCase):
              "detail": mock.ANY,
              "pos": 0, "status": "PASS"}
         ]
-        data = rally("task sla_check --json", getjson=True)
+        data = rally("task sla-check --json", getjson=True)
         self.assertEqual(expected, data)


@@ -935,11 +935,11 @@ class SLAExtraFlagsTestCase(unittest.TestCase):
              "pos": 0, "status": "FAIL"}
         ]
         try:
-            rally("task sla_check --json", getjson=True)
+            rally("task sla-check --json", getjson=True)
         except utils.RallyCliError as expected_error:
             self.assertEqual(json.loads(expected_error.output), expected)
         else:
-            self.fail("`rally task sla_check` command should return non-zero "
+            self.fail("`rally task sla-check` command should return non-zero "
                       "exit code")

     def _test_broken_context(self, runner):
@@ -963,11 +963,11 @@ class SLAExtraFlagsTestCase(unittest.TestCase):
              "pos": 0, "status": "FAIL"}
         ]
         try:
-            rally("task sla_check --json", getjson=True)
+            rally("task sla-check --json", getjson=True)
         except utils.RallyCliError as expected_error:
             self.assertEqual(json.loads(expected_error.output), expected)
         else:
-            self.fail("`rally task sla_check` command should return non-zero "
+            self.fail("`rally task sla-check` command should return non-zero "
                       "exit code")

     def test_broken_context_with_constant_runner(self):
@@ -1012,20 +1012,20 @@ class SLAPerfDegrTestCase(unittest.TestCase):
         cfg = self._get_sample_task_config(max_degradation=1)
         config = utils.TaskConfig(cfg)
         rally("task start --task %s" % config.filename)
-        self.assertRaises(utils.RallyCliError, rally, "task sla_check")
+        self.assertRaises(utils.RallyCliError, rally, "task sla-check")

     def test_sla_success(self):
         rally = utils.Rally()
         config = utils.TaskConfig(self._get_sample_task_config())
         rally("task start --task %s" % config.filename)
-        rally("task sla_check")
+        rally("task sla-check")
         expected = [
             {"benchmark": "Dummy.dummy_random_action",
              "criterion": "performance_degradation",
              "detail": mock.ANY,
              "pos": 0, "status": "PASS"},
         ]
-        data = rally("task sla_check --json", getjson=True)
+        data = rally("task sla-check --json", getjson=True)
         self.assertEqual(expected, data)


@@ -17,7 +17,7 @@ import ddt
 from keystoneclient import exceptions as keystone_exc
 import mock
 from oslo_config import cfg
-from six import moves
+import six
 import sqlalchemy.exc

 from rally.cli import cliutils
@@ -226,7 +226,7 @@ class CliUtilsTestCase(test.TestCase):
             def failed_to_open_file(self):
                 raise IOError("No such file")

-        ret = cliutils.run(["rally", "failure", "failed_to_open_file"],
+        ret = cliutils.run(["rally", "failure", "failed-to-open-file"],
                            {"failure": FailuresCommands})
         self.assertEqual(1, ret)

@@ -237,7 +237,7 @@ class CliUtilsTestCase(test.TestCase):
             def operational_failure(self):
                 raise sqlalchemy.exc.OperationalError("Can't open DB file")

-        ret = cliutils.run(["rally", "failure", "operational_failure"],
+        ret = cliutils.run(["rally", "failure", "operational-failure"],
                            {"failure": SQLAlchemyCommands})
         self.assertEqual(1, ret)

@@ -376,13 +376,13 @@ class CliUtilsTestCase(test.TestCase):
                        "+---+---+")})
     @ddt.unpack
     def test_print_list(self, args, kwargs, expected):
-        out = moves.StringIO()
+        out = six.moves.StringIO()
         kwargs["out"] = out
         cliutils.print_list(*args, **kwargs)
         self.assertEqual(expected, out.getvalue().strip())

     def test_print_list_raises(self):
-        out = moves.StringIO()
+        out = six.moves.StringIO()
         self.assertRaisesRegexp(
             ValueError,
             "Field labels list.*has different number "
@@ -391,6 +391,54 @@ class CliUtilsTestCase(test.TestCase):
             [self.TestObj()], ["x"],
             field_labels=["x", "y"], sortby_index=None, out=out)

+    def test_help_for_grouped_methods(self):
+        class SomeCommand(object):
+            @cliutils.help_group("1_manage")
+            def install(self):
+                pass
+
+            @cliutils.help_group("1_manage")
+            def uninstall(self):
+                pass
+
+            @cliutils.help_group("1_manage")
+            def reinstall(self):
+                pass
+
+            @cliutils.help_group("2_launch")
+            def run(self):
+                pass
+
+            @cliutils.help_group("2_launch")
+            def rerun(self):
+                pass
+
+            @cliutils.help_group("3_results")
+            def show(self):
+                pass
+
+            @cliutils.help_group("3_results")
+            def list(self):
+                pass
+
+            def do_do_has_do_has_mesh(self):
+                pass
+
+        self.assertEqual(
+            "\n\nCommands:\n"
+            " do-do-has-do-has-mesh \n"
+            "\n"
+            " install \n"
+            " reinstall \n"
+            " uninstall \n"
+            "\n"
+            " rerun \n"
+            " run \n"
+            "\n"
+            " list \n"
+            " show \n",
+            cliutils._compose_category_description(SomeCommand))
+

 class ValidateArgsTest(test.TestCase):
