Add benchmark-context manager
This patch mostly finishes support of benchmark-context:

* Add a context manager that sets up only the specified contexts.
  A context can be specified in two ways:
  1) @base.scenario(context={...})
  2) via the task config
* Support two types of context:
  1) __ctx_hidden__ = True  => can be set up only via @base.scenario
  2) __ctx_hidden__ = False => can be set up in both ways
* Support validation of contexts (via CONFIG_SCHEMA)
* Support ordering of context loading via __ctx_order__; the context
  manager uses this number to sort contexts and choose the order in
  which they are created
* Unify the "cleanup" context to work only via @base.scenario()
* Significant Rally performance optimization: only the required
  contexts are used
* Merge the cleanup decorator into @base.scenario
* Add logging for contexts
* Improve NovaScenario._boot_server to add the allow_ssh security
  group only when the allow_ssh context is used
* Add a unit test that checks that every scenario has a proper
  predefined context config

blueprint benchmark-context

Change-Id: I4fce466a88075a694a87e72ace72cc522605b72a
This commit is contained in:
parent a680e0ec57
commit 4f7303fe86
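A rough illustration of the two ways a context can be specified (a sketch only;
the scenario name, placeholders and numbers below are illustrative and not part
of this patch; the exact keys accepted by each context are defined by its
CONFIG_SCHEMA):

# 1) In code, via the scenario decorator -- the only way for hidden contexts
#    such as "cleanup", "keypair" or "allow_ssh" (__ctx_hidden__ = True):
#
#        @base.scenario(context={"cleanup": ["nova"]})
#        def boot_and_list_server(self, image_id, flavor_id, **kwargs):
#            ...
#
# 2) In the task config -- allowed only for non-hidden contexts such as
#    "users" and "quotas" (__ctx_hidden__ = False); the runner merges this
#    with the decorator's context before ContextManager.run() is called:

task_config = {
    "NovaServers.boot_and_list_server": [{
        "args": {"image_id": "<image>", "flavor_id": "<flavor>"},
        "runner": {"type": "continuous", "times": 10},
        "context": {"users": {"tenants": 2, "users_per_tenant": 1}}
    }]
}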
@@ -24,18 +24,20 @@ from rally import utils

 @six.add_metaclass(abc.ABCMeta)
 class Context(object):
-    """We will use this class in future as a factory for context classes.
+    """This class is a factory for context classes.

-    It will cover:
-    1) Auto discovering
-    2) Validation of input args
-    3) Common logging
+    Every context class should be a subclass of this method and implement
+    2 abstract methods: setup() and cleanup()

-    Actually the same functionality as
-    runners.base.ScenarioRunner and scenarios.base.Scenario
+    It covers:
+    1) proper setting up of context config
+    2) Auto discovering & get by name
+    3) Validation by CONFIG_SCHEMA
+    4) Order of context creation
     """

     __ctx_name__ = "base"
+    __ctx_order__ = 0
+    __ctx_hidden__ = True

     CONFIG_SCHEMA = {}

@@ -44,11 +46,11 @@ class Context(object):
         self.context = context
         self.task = context["task"]

-    @staticmethod
-    def validate(context):
-        for name, config in context.iteritems():
-            ctx = Context.get_by_name(name)
-            jsonschema.validate(config, ctx.CONFIG_SCHEMA)
+    @classmethod
+    def validate(cls, config, non_hidden=False):
+        if non_hidden and cls.__ctx_hidden__:
+            raise exceptions.NoSuchContext(name=cls.__ctx_name__)
+        jsonschema.validate(config, cls.CONFIG_SCHEMA)

     @staticmethod
     def get_by_name(name):
@@ -71,3 +73,45 @@ class Context(object):

     def __exit__(self, exc_type, exc_value, exc_traceback):
         self.cleanup()
+
+
+class ContextManager(object):
+    """Creates context environment and runs method inside it."""
+
+    @staticmethod
+    def run(context, func, *args, **kwargs):
+        ctxlst = [Context.get_by_name(name) for name in context["config"]]
+        ctxlst = map(lambda ctx: ctx(context),
+                     sorted(ctxlst, key=lambda x: x.__ctx_order__))
+
+        return ContextManager._magic(ctxlst, func, *args, **kwargs)
+
+    @staticmethod
+    def validate(context, non_hidden=False):
+        for name, config in context.iteritems():
+            Context.get_by_name(name).validate(config, non_hidden=non_hidden)
+
+    @staticmethod
+    def _magic(ctxlst, func, *args, **kwargs):
+        """Some kind of contextlib.nested but with black jack & recursion.
+
+        This method uses recursion to build nested "with" from list of context
+        objects. As it's actually a combination of dark and voodoo magic I
+        called it "_magic". Please don't repeat at home.
+
+        :param ctxlst: list of instances of subclasses of Context
+        :param func: function that will be called inside this context
+        :param args: args that will be passed to function `func`
+        :param kwargs: kwargs that will be passed to function `func`
+        :returns: result of function call
+        """
+        if not ctxlst:
+            return func(*args, **kwargs)
+
+        with ctxlst[0]:
+            # TODO(boris-42): call of setup could be moved inside __enter__
+            #                 but it should be in try-except, and in except
+            #                 we should call by hand __exit__
+            ctxlst[0].setup()
+            tmp = ContextManager._magic(ctxlst[1:], func, *args, **kwargs)
+            return tmp
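The __ctx_order__ values set later in this patch (users=100, cleanup=200,
quotas=210, keypair=300, allow_ssh=301) are what ContextManager.run() sorts on,
so setup() runs in ascending order and cleanup() unwinds in reverse. A minimal
standalone sketch of that nesting behaviour (not Rally code, just an
illustration of the recursion above):

class FakeCtx(object):
    def __init__(self, name, order):
        self.name = name
        self.__ctx_order__ = order

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        print("cleanup %s" % self.name)

    def setup(self):
        print("setup %s" % self.name)


def run(ctxlst, func):
    # Recursively nest "with" blocks, mirroring ContextManager._magic().
    if not ctxlst:
        return func()
    with ctxlst[0]:
        ctxlst[0].setup()
        return run(ctxlst[1:], func)


def scenario():
    print("run scenario")

contexts = sorted([FakeCtx("keypair", 300), FakeCtx("users", 100),
                   FakeCtx("cleanup", 200)],
                  key=lambda ctx: ctx.__ctx_order__)
run(contexts, scenario)
# -> setup users, setup cleanup, setup keypair, run scenario,
#    cleanup keypair, cleanup cleanup, cleanup users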
@@ -17,7 +17,6 @@ import functools
 import sys

 from rally.benchmark.context import base
-from rally.benchmark.scenarios import base as scenario_base
 from rally.benchmark import utils
 from rally.openstack.common.gettextutils import _
 from rally.openstack.common import log as logging
@@ -31,19 +30,24 @@ LOG = logging.getLogger(__name__)
 class ResourceCleaner(base.Context):
     """Context class for resource cleanup (both admin and non-admin)."""

-    __ctx_name__ = "cleaner"
+    __ctx_name__ = "cleanup"
+    __ctx_order__ = 200
+    __ctx_hidden__ = True

     CONFIG_SCHEMA = {
-        "type": "object",
-        "$schema": "http://json-schema.org/draft-03/schema",
-        "properties": {},
-        "additionalProperties": False
+        "type": "array",
+        "$schema": "http://json-schema.org/draft-04/schema",
+        "items": {
+            "type": "string",
+            "enum": ["nova", "glance", "cinder"]
+        },
+        "uniqueItems": True
     }

     def __init__(self, context):
         super(ResourceCleaner, self).__init__(context)
-        self.admin = None
-        self.users = None
+        self.admin = []
+        self.users = []
         if "admin" in context and context["admin"]:
             self.admin = context["admin"]["endpoint"]
         if "users" in context and context["users"]:
@@ -51,19 +55,6 @@ class ResourceCleaner(base.Context):

     @rutils.log_task_wrapper(LOG.info, _("Cleanup users resources."))
     def _cleanup_users_resources(self):
-        def _init_services_to_cleanup(cleanup_methods):
-            scenario_name = self.context.get('scenario_name')
-            if scenario_name:
-                cls_name, method_name = scenario_name.split(".", 1)
-                scenario = scenario_base.Scenario.get_by_name(cls_name)()
-                scenario_method = getattr(scenario, method_name)
-                if hasattr(scenario_method, "cleanup_services"):
-                    return getattr(scenario_method, "cleanup_services")
-            return cleanup_methods.keys()
-
-        if not self.users:
-            return
-
         for user in self.users:
             clients = osclients.Clients(user)
             cleanup_methods = {
@@ -76,7 +67,7 @@ class ResourceCleaner(base.Context):
                                             clients.cinder())
             }

-            for service in _init_services_to_cleanup(cleanup_methods):
+            for service in self.config:
                 try:
                     cleanup_methods[service]()
                 except Exception as e:
@@ -87,9 +78,6 @@ class ResourceCleaner(base.Context):

     @rutils.log_task_wrapper(LOG.info, _("Cleanup admin resources."))
     def _cleanup_admin_resources(self):
-        if not self.admin:
-            return
-
         try:
             admin = osclients.Clients(self.admin)
             utils.delete_keystone_resources(admin.keystone())
@@ -99,11 +87,13 @@ class ResourceCleaner(base.Context):
             LOG.warning(_('Unable to fully cleanup keystone service: %s') %
                         (e.message))

+    @rutils.log_task_wrapper(LOG.info, _("Enter context: `cleanup`"))
     def setup(self):
         pass

+    @rutils.log_task_wrapper(LOG.info, _("Exit context: `cleanup`"))
     def cleanup(self):
-        if self.users:
+        if self.users and self.config:
             self._cleanup_users_resources()
         if self.admin:
             self._cleanup_admin_resources()
@@ -16,8 +16,10 @@
 import novaclient.exceptions

 from rally.benchmark.context import base
+from rally.openstack.common.gettextutils import _
 from rally.openstack.common import log as logging
 from rally import osclients
+from rally import utils


 LOG = logging.getLogger(__name__)
@@ -25,6 +27,9 @@ LOG = logging.getLogger(__name__)

 class Keypair(base.Context):
     __ctx_name__ = "keypair"
+    __ctx_order__ = 300
+    __ctx_hidden__ = True
+
     KEYPAIR_NAME = "rally_ssh_key"

     def _get_nova_client(self, endpoint):
@@ -47,11 +52,13 @@ class Keypair(base.Context):
         return {"private": keypair.private_key,
                 "public": keypair.public_key}

+    @utils.log_task_wrapper(LOG.info, _("Enter context: `keypair`"))
     def setup(self):
         for user in self.context["users"]:
             keypair = self._generate_keypair(user["endpoint"])
             user["keypair"] = keypair

+    @utils.log_task_wrapper(LOG.info, _("Exit context: `keypair`"))
     def cleanup(self):
         for user in self.context["users"]:
             endpoint = user['endpoint']
@@ -14,7 +14,13 @@
 # under the License.

 from rally.benchmark.context import base
+from rally.openstack.common.gettextutils import _
+from rally.openstack.common import log as logging
 from rally import osclients
+from rally import utils


+LOG = logging.getLogger(__name__)
+
+
 class NovaQuotas(object):
@@ -124,6 +130,8 @@ class Quotas(base.Context):
     """Context class for updating benchmarks' tenants quotas."""

     __ctx_name__ = "quotas"
+    __ctx_order__ = 210
+    __ctx_hidden__ = False

     CONFIG_SCHEMA = {
         "type": "object",
@@ -141,6 +149,7 @@ class Quotas(base.Context):
         self.nova_quotas = NovaQuotas(self.clients.nova())
         self.cinder_quotas = CinderQuotas(self.clients.cinder())

+    @utils.log_task_wrapper(LOG.info, _("Enter context: `quotas`"))
     def setup(self):
         for tenant in self.context["tenants"]:
             if "nova" in self.config and len(self.config["nova"]) > 0:
@@ -151,6 +160,7 @@ class Quotas(base.Context):
                 self.cinder_quotas.update(tenant["id"],
                                           **self.config["cinder"])

+    @utils.log_task_wrapper(LOG.info, _("Exit context: `quotas`"))
     def cleanup(self):
         for tenant in self.context["tenants"]:
             # Always cleanup quotas before deleting a tenant
@@ -14,8 +14,10 @@
 # under the License.

 from rally.benchmark.context import base
+from rally.openstack.common.gettextutils import _
 from rally.openstack.common import log as logging
 from rally import osclients
+from rally import utils


 LOG = logging.getLogger(__name__)
@@ -77,11 +79,15 @@ def _prepare_open_secgroup(endpoint):

 class AllowSSH(base.Context):
     __ctx_name__ = "allow_ssh"
+    __ctx_order__ = 301
+    __ctx_hidden__ = True

     def __init__(self, context):
         super(AllowSSH, self).__init__(context)
+        self.context["allow_ssh"] = SSH_GROUP_NAME
         self.secgroup = []

+    @utils.log_task_wrapper(LOG.info, _("Exit context: `allow_ssh`"))
     def setup(self):
         used_tenants = []
         for user in self.context['users']:
@@ -92,6 +98,7 @@ class AllowSSH(base.Context):
                 self.secgroup.append(secgroup)
                 used_tenants.append(tenant)

+    @utils.log_task_wrapper(LOG.info, _("Exit context: `allow_ssh`"))
     def cleanup(self):
         for secgroup in self.secgroup:
             try:
|
@ -20,8 +20,10 @@ from rally.benchmark.context import base
|
|||||||
from rally.benchmark import utils
|
from rally.benchmark import utils
|
||||||
from rally import consts
|
from rally import consts
|
||||||
from rally.objects import endpoint
|
from rally.objects import endpoint
|
||||||
|
from rally.openstack.common.gettextutils import _
|
||||||
from rally.openstack.common import log as logging
|
from rally.openstack.common import log as logging
|
||||||
from rally import osclients
|
from rally import osclients
|
||||||
|
from rally import utils as rutils
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -43,6 +45,8 @@ class UserGenerator(base.Context):
|
|||||||
"""Context class for generating temporary users/tenants for benchmarks."""
|
"""Context class for generating temporary users/tenants for benchmarks."""
|
||||||
|
|
||||||
__ctx_name__ = "users"
|
__ctx_name__ = "users"
|
||||||
|
__ctx_order__ = 100
|
||||||
|
__ctx_hidden__ = False
|
||||||
|
|
||||||
CONFIG_SCHEMA = {
|
CONFIG_SCHEMA = {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
@ -145,6 +149,7 @@ class UserGenerator(base.Context):
|
|||||||
"Exception: %(ex)s" %
|
"Exception: %(ex)s" %
|
||||||
{"user_id": user["id"], "ex": ex})
|
{"user_id": user["id"], "ex": ex})
|
||||||
|
|
||||||
|
@rutils.log_task_wrapper(LOG.info, _("Enter context: `users`"))
|
||||||
def setup(self):
|
def setup(self):
|
||||||
"""Create tenants and users, using pool of threads."""
|
"""Create tenants and users, using pool of threads."""
|
||||||
|
|
||||||
@ -164,6 +169,7 @@ class UserGenerator(base.Context):
|
|||||||
self.context["tenants"].append(tenant)
|
self.context["tenants"].append(tenant)
|
||||||
self.context["users"] += users
|
self.context["users"] += users
|
||||||
|
|
||||||
|
@rutils.log_task_wrapper(LOG.info, _("Exit context: `users`"))
|
||||||
def cleanup(self):
|
def cleanup(self):
|
||||||
"""Delete tenants and users, using pool of threads."""
|
"""Delete tenants and users, using pool of threads."""
|
||||||
|
|
||||||
@ -182,11 +188,3 @@ class UserGenerator(base.Context):
|
|||||||
concurrent,
|
concurrent,
|
||||||
self._delete_tenants,
|
self._delete_tenants,
|
||||||
[(self.endpoint, tenants) for tenants in tenants_chunks])
|
[(self.endpoint, tenants) for tenants in tenants_chunks])
|
||||||
|
|
||||||
# NOTE(amaretskiy): Consider that after cleanup() is complete, this has
|
|
||||||
# actually deleted (all or some of) users and tenants
|
|
||||||
# in openstack, but we *STILL HAVE*
|
|
||||||
# self.context["users"] and self.context["tenants"].
|
|
||||||
# Should we ignore that, or just reset these lists
|
|
||||||
# after cleanup() is done, or actually synchronize
|
|
||||||
# for all successfully deleted objects?
|
|
||||||
|
@ -102,7 +102,8 @@ class BenchmarkEngine(object):
|
|||||||
for pos, kw in enumerate(values):
|
for pos, kw in enumerate(values):
|
||||||
try:
|
try:
|
||||||
base_runner.ScenarioRunner.validate(kw.get("runner", {}))
|
base_runner.ScenarioRunner.validate(kw.get("runner", {}))
|
||||||
base_ctx.Context.validate(kw.get("context", {}))
|
base_ctx.ContextManager.validate(kw.get("context", {}),
|
||||||
|
non_hidden=True)
|
||||||
except (exceptions.RallyException,
|
except (exceptions.RallyException,
|
||||||
jsonschema.ValidationError) as e:
|
jsonschema.ValidationError) as e:
|
||||||
raise exceptions.InvalidBenchmarkConfig(name=scenario,
|
raise exceptions.InvalidBenchmarkConfig(name=scenario,
|
||||||
@ -150,6 +151,12 @@ class BenchmarkEngine(object):
|
|||||||
self.task.set_failed(log=log)
|
self.task.set_failed(log=log)
|
||||||
raise exceptions.InvalidTaskException(message=str(e))
|
raise exceptions.InvalidTaskException(message=str(e))
|
||||||
|
|
||||||
|
def _get_runner(self, config):
|
||||||
|
runner = config.get("runner", {})
|
||||||
|
runner.setdefault("type", "continuous")
|
||||||
|
return base_runner.ScenarioRunner.get_runner(self.task, self.endpoints,
|
||||||
|
runner)
|
||||||
|
|
||||||
@rutils.log_task_wrapper(LOG.info, _("Benchmarking."))
|
@rutils.log_task_wrapper(LOG.info, _("Benchmarking."))
|
||||||
def run(self):
|
def run(self):
|
||||||
"""Runs the benchmarks according to the test configuration
|
"""Runs the benchmarks according to the test configuration
|
||||||
@ -161,12 +168,11 @@ class BenchmarkEngine(object):
|
|||||||
self.task.update_status(consts.TaskStatus.RUNNING)
|
self.task.update_status(consts.TaskStatus.RUNNING)
|
||||||
results = {}
|
results = {}
|
||||||
for name in self.config:
|
for name in self.config:
|
||||||
for n, kwargs in enumerate(self.config[name]):
|
for n, kw in enumerate(self.config[name]):
|
||||||
key = {'name': name, 'pos': n, 'kw': kwargs}
|
key = {'name': name, 'pos': n, 'kw': kw}
|
||||||
runner = kwargs.get("runner", {}).get("type", "continuous")
|
runner = self._get_runner(kw)
|
||||||
scenario_runner = base_runner.ScenarioRunner.get_runner(
|
result = runner.run(name, kw.get("context", {}),
|
||||||
self.task, self.endpoints, runner)
|
kw.get("args", {}))
|
||||||
result = scenario_runner.run(name, kwargs)
|
|
||||||
self.task.append_results(key, {"raw": result})
|
self.task.append_results(key, {"raw": result})
|
||||||
results[json.dumps(key)] = result
|
results[json.dumps(key)] = result
|
||||||
self.task.update_status(consts.TaskStatus.FINISHED)
|
self.task.update_status(consts.TaskStatus.FINISHED)
|
||||||
|
@@ -19,11 +19,7 @@ import random
 import jsonschema
 from oslo.config import cfg

-from rally.benchmark.context import cleaner as cleaner_ctx
-from rally.benchmark.context import keypair as keypair_ctx
-from rally.benchmark.context import quotas as quotas_ctx
-from rally.benchmark.context import secgroup as secgroup_ctx
-from rally.benchmark.context import users as users_ctx
+from rally.benchmark.context import base as base_ctx
 from rally.benchmark.scenarios import base
 from rally.benchmark import utils
 from rally import exceptions
@@ -145,13 +141,14 @@ class ScenarioRunner(object):

     CONFIG_SCHEMA = {}

-    def __init__(self, task, endpoints):
+    def __init__(self, task, endpoints, config):
         self.task = task
         self.endpoints = endpoints
         # NOTE(msdubov): Passing predefined user endpoints hasn't been
         #                implemented yet, so the scenario runner always gets
         #                a single admin endpoint here.
         self.admin_user = endpoints[0]
+        self.config = config

     @staticmethod
     def _get_cls(runner_type):
@@ -161,9 +158,9 @@ class ScenarioRunner(object):
         raise exceptions.NoSuchRunner(type=runner_type)

     @staticmethod
-    def get_runner(task, endpoint, runner_type):
+    def get_runner(task, endpoint, config):
         """Returns instance of a scenario runner for execution type."""
-        return ScenarioRunner._get_cls(runner_type)(task, endpoint)
+        return ScenarioRunner._get_cls(config["type"])(task, endpoint, config)

     @staticmethod
     def validate(config):
@@ -172,7 +169,7 @@ class ScenarioRunner(object):
         jsonschema.validate(config, runner.CONFIG_SCHEMA)

     @abc.abstractmethod
-    def _run_scenario(self, cls, method_name, context, args, config):
+    def _run_scenario(self, cls, method_name, context, args):
         """Runs the specified benchmark scenario with given arguments.

         :param cls: The Scenario class where the scenario is implemented
@@ -180,57 +177,33 @@ class ScenarioRunner(object):
         :param context: Benchmark context that contains users, admin & other
                         information, that was created before benchmark started.
         :param args: Arguments to call the scenario method with
-        :param config: Configuration dictionary that contains strategy-specific
-                       parameters like the number of times to run the scenario

         :returns: List of results fore each single scenario iteration,
                   where each result is a dictionary
         """

-    def _prepare_and_run_scenario(self, context, name, kwargs):
+    def run(self, name, context, args):
         cls_name, method_name = name.split(".", 1)
         cls = base.Scenario.get_by_name(cls_name)

-        args = kwargs.get('args', {})
-        config = kwargs.get('runner', {})
+        scenario_context = getattr(cls, method_name).context
+        # TODO(boris-42): We should keep default behavior for `users` context
+        #                 as a part of work on pre-created users this should be
+        #                 removed.
+        scenario_context.setdefault("users", {})
+        # merge scenario context and task context configuration
+        scenario_context.update(context)

-        with secgroup_ctx.AllowSSH(context) as allow_ssh:
-            allow_ssh.setup()
-            with keypair_ctx.Keypair(context) as keypair:
-                keypair.setup()
-                LOG.debug("Context: %s" % context)
-                return self._run_scenario(cls, method_name, context,
-                                          args, config)
-
-    def _run_as_admin(self, name, kwargs):
-        context = {
+        context_obj = {
             "task": self.task,
             "admin": {"endpoint": self.admin_user},
             "scenario_name": name,
-            "config": kwargs.get("context", {})
+            "config": scenario_context
         }

-        with users_ctx.UserGenerator(context) as generator:
-            generator.setup()
-            with quotas_ctx.Quotas(context) as quotas:
-                quotas.setup()
-                with cleaner_ctx.ResourceCleaner(context) as cleaner:
-                    cleaner.setup()
-                    return self._prepare_and_run_scenario(context,
-                                                          name, kwargs)
-
-    def _run_as_non_admin(self, name, kwargs):
-        # TODO(boris-42): It makes sense to use UserGenerator here as well
-        #                 take a look at comment in UserGenerator.__init__()
-        context = {"scenario_name": name}
-        with cleaner_ctx.ResourceCleaner(context):
-            return self._prepare_and_run_scenario(context, name, kwargs)
-
-    def run(self, name, kwargs):
-        if self.admin_user:
-            results = self._run_as_admin(name, kwargs)
-        else:
-            results = self._run_as_non_admin(name, kwargs)
+        results = base_ctx.ContextManager.run(context_obj, self._run_scenario,
+                                              cls, method_name, context_obj,
+                                              args)

         if not isinstance(results, ScenarioRunnerResult):
             name = self.__execution_type__
@@ -126,25 +126,25 @@ class ContinuousScenarioRunner(base.ScenarioRunner):

         return results

-    def _run_scenario(self, cls, method_name, context, args, config):
+    def _run_scenario(self, cls, method_name, context, args):

-        timeout = config.get("timeout", 600)
-        concurrent = config.get("active_users", 1)
+        timeout = self.config.get("timeout", 600)
+        concurrent = self.config.get("active_users", 1)

         # NOTE(msdubov): If not specified, perform single scenario run.
-        if "duration" not in config and "times" not in config:
-            config["times"] = 1
+        if "duration" not in self.config and "times" not in self.config:
+            self.config["times"] = 1

         # Continiously run a benchmark scenario the specified
         # amount of times.
-        if "times" in config:
-            times = config["times"]
+        if "times" in self.config:
+            times = self.config["times"]
             results = self._run_scenario_continuously_for_times(
                 cls, method_name, context, args, times, concurrent, timeout)
         # Continiously run a scenario as many times as needed
         # to fill up the given period of time.
-        elif "duration" in config:
-            duration = config["duration"]
+        elif "duration" in self.config:
+            duration = self.config["duration"]
             results = self._run_scenario_continuously_for_duration(
                 cls, method_name, context, args, duration, concurrent, timeout)

@@ -61,11 +61,11 @@ class PeriodicScenarioRunner(base.ScenarioRunner):
         "additionalProperties": False
     }

-    def _run_scenario(self, cls, method_name, context, args, config):
+    def _run_scenario(self, cls, method_name, context, args):

-        times = config["times"]
-        period = config["period"]
-        timeout = config.get("timeout", 600)
+        times = self.config["times"]
+        period = self.config["period"]
+        timeout = self.config.get("timeout", 600)

         async_results = []

@@ -45,9 +45,8 @@ class SerialScenarioRunner(base.ScenarioRunner):
         "additionalProperties": True
     }

-    def _run_scenario(self, cls, method_name, context, args, config):
-
-        times = config.get('times', 1)
+    def _run_scenario(self, cls, method_name, context, args):
+        times = self.config.get('times', 1)

         results = []

@@ -12,7 +12,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-from rally.benchmark.context import cleaner as context_cleaner
 from rally.benchmark.scenarios import base


@@ -22,6 +21,5 @@ class Authenticate(base.Scenario):
     """

     @base.scenario()
-    @context_cleaner.cleanup([])
     def keystone(self, **kwargs):
         self.clients("keystone")

@@ -22,7 +22,7 @@ from rally import exceptions
 from rally import utils


-def scenario(admin_only=False):
+def scenario(admin_only=False, context=None):
     """This method is used as decorator for the methods of benchmark scenarios
     and it adds following extra fields to the methods.
     'is_scenario' is set to True
@@ -31,6 +31,7 @@ def scenario(admin_only=False):
     def wrapper(func):
         func.is_scenario = True
         func.admin_only = admin_only
+        func.context = context or {}
         return func
     return wrapper

@@ -13,15 +13,13 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-from rally.benchmark.context import cleaner as context_cleaner
 from rally.benchmark.scenarios import base
 from rally.benchmark.scenarios.cinder import utils


 class CinderVolumes(utils.CinderScenario):

-    @base.scenario()
-    @context_cleaner.cleanup(['cinder'])
+    @base.scenario(context={"cleanup": ["cinder"]})
     def create_and_list_volume(self, size, detailed=True, **kwargs):
         """Tests creating a volume and listing volumes.

@@ -38,8 +36,7 @@ class CinderVolumes(utils.CinderScenario):
         self._create_volume(size, **kwargs)
         self._list_volumes(detailed)

-    @base.scenario()
-    @context_cleaner.cleanup(['cinder'])
+    @base.scenario(context={"cleanup": ["cinder"]})
     def create_and_delete_volume(self, size, min_sleep=0, max_sleep=0,
                                  **kwargs):
         """Tests creating and then deleting a volume.
@@ -51,8 +48,7 @@ class CinderVolumes(utils.CinderScenario):
         self.sleep_between(min_sleep, max_sleep)
         self._delete_volume(volume)

-    @base.scenario()
-    @context_cleaner.cleanup(['cinder'])
+    @base.scenario(context={"cleanup": ["cinder"]})
     def create_volume(self, size, **kwargs):
         """Test creating volumes perfromance.

@@ -13,7 +13,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-from rally.benchmark.context import cleaner as context_cleaner
 from rally.benchmark.scenarios import base
 from rally.benchmark.scenarios.glance import utils
 from rally.benchmark.scenarios.nova import utils as nova_utils
@@ -22,8 +21,7 @@ from rally.benchmark import validation

 class GlanceImages(utils.GlanceScenario, nova_utils.NovaScenario):

-    @base.scenario()
-    @context_cleaner.cleanup(['glance'])
+    @base.scenario(context={"cleanup": ["glance"]})
     def create_and_list_image(self, container_format,
                               image_location, disk_format, **kwargs):
         """Test adding an image and then listing all images.
@@ -46,8 +44,7 @@ class GlanceImages(utils.GlanceScenario, nova_utils.NovaScenario):
                            **kwargs)
         self._list_images()

-    @base.scenario()
-    @context_cleaner.cleanup(['glance'])
+    @base.scenario(context={"cleanup": ["glance"]})
     def create_and_delete_image(self, container_format,
                                 image_location, disk_format, **kwargs):
         """Test adds and then deletes image."""
@@ -59,9 +56,8 @@ class GlanceImages(utils.GlanceScenario, nova_utils.NovaScenario):
                            **kwargs)
         self._delete_image(image)

-    @context_cleaner.cleanup(['glance', 'nova'])
     @validation.add_validator(validation.flavor_exists("flavor_id"))
-    @base.scenario()
+    @base.scenario(context={"cleanup": ["glance", "nova"]})
     def create_image_and_boot_instances(self, container_format,
                                         image_location, disk_format,
                                         flavor_id, number_instances,
@@ -13,7 +13,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-from rally.benchmark.context import cleaner as context_cleaner
 from rally.benchmark.scenarios import base
 from rally.benchmark.scenarios.keystone import utils as kutils
 from rally.benchmark import validation as valid
@@ -21,24 +20,21 @@ from rally.benchmark import validation as valid

 class KeystoneBasic(kutils.KeystoneScenario):

-    @base.scenario(admin_only=True)
+    @base.scenario(admin_only=True, context={"cleanup": []})
     def create_user(self, name_length=10, **kwargs):
         self._user_create(name_length=name_length, **kwargs)

-    @base.scenario(admin_only=True)
-    @context_cleaner.cleanup([])
+    @base.scenario(admin_only=True, context={"cleanup": []})
     def create_delete_user(self, name_length=10, **kwargs):
         user = self._user_create(name_length=name_length, **kwargs)
         self._resource_delete(user)

-    @base.scenario(admin_only=True)
-    @context_cleaner.cleanup([])
+    @base.scenario(admin_only=True, context={"cleanup": []})
     def create_tenant(self, name_length=10, **kwargs):
         self._tenant_create(name_length=name_length, **kwargs)

-    @base.scenario(admin_only=True)
+    @base.scenario(admin_only=True, context={"cleanup": []})
     @valid.add_validator(valid.required_parameters(['users_per_tenant']))
-    @context_cleaner.cleanup([])
     def create_tenant_with_users(self, users_per_tenant, name_length=10,
                                  **kwargs):
         tenant = self._tenant_create(name_length=name_length, **kwargs)
@@ -17,7 +17,6 @@ import json
 import jsonschema
 import random

-from rally.benchmark.context import cleaner as context_cleaner
 from rally.benchmark.scenarios import base
 from rally.benchmark.scenarios.cinder import utils as cinder_utils
 from rally.benchmark.scenarios.nova import utils
@@ -38,9 +37,8 @@ class NovaServers(utils.NovaScenario,
     def __init__(self, *args, **kwargs):
         super(NovaServers, self).__init__(*args, **kwargs)

-    @context_cleaner.cleanup(['nova'])
     @valid.add_validator(valid.image_valid_on_flavor("flavor_id", "image_id"))
-    @base.scenario()
+    @base.scenario(context={"cleanup": ["nova"]})
     def boot_and_list_server(self, image_id, flavor_id,
                              detailed=True, **kwargs):
         """Tests booting an image and then listing servers.
@@ -60,9 +58,8 @@ class NovaServers(utils.NovaScenario,
         self._boot_server(server_name, image_id, flavor_id, **kwargs)
         self._list_servers(detailed)

-    @context_cleaner.cleanup(['nova'])
     @valid.add_validator(valid.image_valid_on_flavor("flavor_id", "image_id"))
-    @base.scenario()
+    @base.scenario(context={"cleanup": ["nova"]})
     def boot_and_delete_server(self, image_id, flavor_id,
                                min_sleep=0, max_sleep=0, **kwargs):
         """Tests booting and then deleting an image."""
@@ -72,9 +69,8 @@ class NovaServers(utils.NovaScenario,
         self.sleep_between(min_sleep, max_sleep)
         self._delete_server(server)

-    @context_cleaner.cleanup(['nova', 'cinder'])
     @valid.add_validator(valid.image_valid_on_flavor("flavor_id", "image_id"))
-    @base.scenario()
+    @base.scenario(context={"cleanup": ["nova", "cinder"]})
     def boot_server_from_volume_and_delete(self, image_id, flavor_id,
                                            volume_size,
                                            min_sleep=0, max_sleep=0, **kwargs):
@@ -89,9 +85,9 @@ class NovaServers(utils.NovaScenario,
         self.sleep_between(min_sleep, max_sleep)
         self._delete_server(server)

-    @context_cleaner.cleanup(['nova'])
     @valid.add_validator(valid.image_valid_on_flavor("flavor_id", "image_id"))
-    @base.scenario()
+    @base.scenario(context={"cleanup": ["nova"],
+                            "keypair": {}, "allow_ssh": {}})
     def boot_runcommand_delete_server(self, image_id, flavor_id,
                                       script, interpreter, network='private',
                                       username='ubuntu', ip_version=4,
@@ -147,9 +143,8 @@ class NovaServers(utils.NovaScenario,
                               stdout=out, stderr=err))
         return {"data": out, "errors": err}

-    @context_cleaner.cleanup(['nova'])
     @valid.add_validator(valid.image_valid_on_flavor("flavor_id", "image_id"))
-    @base.scenario()
+    @base.scenario(context={"cleanup": ["nova"]})
     def boot_and_bounce_server(self, image_id, flavor_id, **kwargs):
         """Tests booting a server then performing stop/start or hard/soft
         reboot a number of times.
@@ -168,9 +163,8 @@ class NovaServers(utils.NovaScenario,
                 action()
         self._delete_server(server)

-    @context_cleaner.cleanup(['nova', 'glance'])
     @valid.add_validator(valid.image_valid_on_flavor("flavor_id", "image_id"))
-    @base.scenario()
+    @base.scenario(context={"cleanup": ["nova", "glance"]})
     def snapshot_server(self, image_id, flavor_id, **kwargs):
         """Tests Nova instance snapshotting."""
         server_name = self._generate_random_name(16)
@@ -183,9 +177,8 @@ class NovaServers(utils.NovaScenario,
         self._delete_server(server)
         self._delete_image(image)

-    @context_cleaner.cleanup(['nova'])
     @valid.add_validator(valid.image_valid_on_flavor("flavor_id", "image_id"))
-    @base.scenario()
+    @base.scenario(context={"cleanup": ["nova"]})
     def boot_server(self, image_id, flavor_id, **kwargs):
         """Test VM boot - assumed clean-up is done elsewhere."""
         server_name = self._generate_random_name(16)
@@ -196,9 +189,8 @@ class NovaServers(utils.NovaScenario,
             kwargs['nics'] = [{'net-id': random_nic.id}]
         self._boot_server(server_name, image_id, flavor_id, **kwargs)

-    @context_cleaner.cleanup(['nova', 'cinder'])
     @valid.add_validator(valid.image_valid_on_flavor("flavor_id", "image_id"))
-    @base.scenario()
+    @base.scenario(context={"cleanup": ["nova", "cinder"]})
     def boot_server_from_volume(self, image_id, flavor_id,
                                 volume_size, **kwargs):
         """Test VM boot from volume - assumed clean-up is done elsewhere."""
|
@ -18,7 +18,6 @@ import random
|
|||||||
import string
|
import string
|
||||||
import time
|
import time
|
||||||
|
|
||||||
from rally.benchmark.context import secgroup
|
|
||||||
from rally.benchmark.scenarios import base
|
from rally.benchmark.scenarios import base
|
||||||
from rally.benchmark.scenarios import utils as scenario_utils
|
from rally.benchmark.scenarios import utils as scenario_utils
|
||||||
from rally.benchmark import utils as bench_utils
|
from rally.benchmark import utils as bench_utils
|
||||||
@ -86,12 +85,12 @@ class NovaScenario(base.Scenario):
|
|||||||
|
|
||||||
:returns: Created server object
|
:returns: Created server object
|
||||||
"""
|
"""
|
||||||
|
allow_ssh_secgroup = self.context().get("allow_ssh")
|
||||||
|
if allow_ssh_secgroup:
|
||||||
if 'security_groups' not in kwargs:
|
if 'security_groups' not in kwargs:
|
||||||
kwargs['security_groups'] = [secgroup.SSH_GROUP_NAME]
|
kwargs['security_groups'] = [allow_ssh_secgroup]
|
||||||
else:
|
elif allow_ssh_secgroup not in kwargs['security_groups']:
|
||||||
if secgroup.SSH_GROUP_NAME not in kwargs['security_groups']:
|
kwargs['security_groups'].append(allow_ssh_secgroup)
|
||||||
kwargs['security_groups'].append(secgroup.SSH_GROUP_NAME)
|
|
||||||
|
|
||||||
server = self.clients("nova").servers.create(server_name, image_id,
|
server = self.clients("nova").servers.create(server_name, image_id,
|
||||||
flavor_id, **kwargs)
|
flavor_id, **kwargs)
|
||||||
|
@@ -49,24 +49,11 @@ class BaseContextTestCase(test.TestCase):
         self.assertEqual(ctx.context, context)

     def test_validate__context(self):
-        context = {
-            "fake": {"test": 2}
-        }
-        base.Context.validate(context)
+        fakes.FakeContext.validate({"test": 2})

     def test_validate__wrong_context(self):
-        context = {
-            "fake": {"nonexisting": 2}
-        }
         self.assertRaises(jsonschema.ValidationError,
-                          base.Context.validate, context)
+                          fakes.FakeContext.validate, {"nonexisting": 2})

-    def test_validate__non_existing_context(self):
-        config = {
-            "nonexisting": {"nonexisting": 2}
-        }
-        self.assertRaises(exceptions.NoSuchContext,
-                          base.Context.validate, config)
-
     @mock.patch("rally.benchmark.context.base.utils.itersubclasses")
     def test_get_by_name(self, mock_itersubclasses):
@@ -85,6 +72,10 @@ class BaseContextTestCase(test.TestCase):
         self.assertRaises(exceptions.NoSuchContext,
                           base.Context.get_by_name, "nonexisting")

+    def test_get_by_name_hidder(self):
+        self.assertRaises(exceptions.NoSuchContext,
+                          base.Context.validate, {}, non_hidden=True)
+
     def test_setup_is_abstract(self):

         class A(base.Context):
@@ -114,3 +105,87 @@ class BaseContextTestCase(test.TestCase):
         self.assertEqual(ctx, entered_ctx)

         ctx.cleanup.assert_called_once_with()
+
+
+class ContextManagerTestCase(test.TestCase):
+
+    @mock.patch("rally.benchmark.context.base.ContextManager._magic")
+    @mock.patch("rally.benchmark.context.base.Context.get_by_name")
+    def test_run(self, mock_get, mock_magic):
+        context = {
+            "config": {
+                "a": mock.MagicMock(),
+                "b": mock.MagicMock()
+            }
+        }
+
+        cc = mock.MagicMock()
+        cc.__ctx_order__ = 10
+        mock_get.return_value = cc
+
+        mock_magic.return_value = 5
+
+        result = base.ContextManager.run(context, lambda x, y: x + y, 1, 2)
+        self.assertEqual(result, 5)
+
+        mock_get.assert_has_calls([
+            mock.call("a"),
+            mock.call("b"),
+            mock.call()(context),
+            mock.call()(context)
+        ])
+
+    @mock.patch("rally.benchmark.context.base.Context.get_by_name")
+    def test_validate(self, mock_get):
+        config = {
+            "ctx1": mock.MagicMock(),
+            "ctx2": mock.MagicMock()
+        }
+
+        base.ContextManager.validate(config)
+        mock_get.assert_has_calls([
+            mock.call("ctx1"),
+            mock.call().validate(config["ctx1"], non_hidden=False),
+            mock.call("ctx2"),
+            mock.call().validate(config["ctx2"], non_hidden=False)
+        ])
+
+    @mock.patch("rally.benchmark.context.base.Context.get_by_name")
+    def test_validate_non_hidden(self, mock_get):
+        config = {
+            "ctx1": mock.MagicMock(),
+            "ctx2": mock.MagicMock()
+        }
+
+        base.ContextManager.validate(config, non_hidden=True)
+        mock_get.assert_has_calls([
+            mock.call("ctx1"),
+            mock.call().validate(config["ctx1"], non_hidden=True),
+            mock.call("ctx2"),
+            mock.call().validate(config["ctx2"], non_hidden=True)
+        ])
+
+    def test_validate__non_existing_context(self):
+        config = {
+            "nonexisting": {"nonexisting": 2}
+        }
+        self.assertRaises(exceptions.NoSuchContext,
+                          base.ContextManager.validate, config)
+
+    def test__magic(self):
+        func = lambda x, y: x + y
+
+        result = base.ContextManager._magic([], func, 2, 3)
+        self.assertEqual(result, 5)
+
+    def test__magic_with_ctx(self):
+        ctx = [mock.MagicMock(), mock.MagicMock()]
+        func = lambda x, y: x + y
+
+        result = base.ContextManager._magic(ctx, func, 2, 3)
+        self.assertEqual(result, 5)
+
+        expected = [mock.call.__enter__(), mock.call.setup(),
+                    mock.call.__exit__(None, None, None)]
+        for c in ctx:
+            ctx[0].assert_has_calls(expected)
@@ -32,7 +32,6 @@ class ResourceCleanerTestCase(test.TestCase):
             "admin": None,
             "users": [],
             "tenants": [],
-            "scenario_name": "NovaServers.boot_server_from_volume_and_delete"
         }
         resource_cleaner = cleaner_ctx.ResourceCleaner(context)
         with resource_cleaner:
@@ -40,6 +39,7 @@ class ResourceCleanerTestCase(test.TestCase):

     def test_with_statement(self):
         fake_user_ctx = fakes.FakeUserContext({}).context
+        fake_user_ctx["config"] = {"cleanup": ["nova"]}
         res_cleaner = cleaner_ctx.ResourceCleaner(fake_user_ctx)

         res_cleaner._cleanup_users_resources = mock.MagicMock()
@@ -56,7 +56,7 @@ class ResourceCleanerTestCase(test.TestCase):
     def test_cleaner_admin(self, mock_del_keystone, mock_clients):
         context = {
             "task": mock.MagicMock(),
-            "scenario_name": 'NovaServers.boot_server_from_volume_and_delete',
+            "config": {"cleanup": ["cinder", "nova"]},
             "admin": {"endpoint": mock.MagicMock()},
         }
         res_cleaner = cleaner_ctx.ResourceCleaner(context)
@@ -70,21 +70,18 @@ class ResourceCleanerTestCase(test.TestCase):
         mock_clients.return_value.keystone.assert_called_once_with()
         mock_del_keystone.assert_called_once_with('keystone')

-    @mock.patch("rally.benchmark.scenarios.nova.servers.NovaServers."
-                "boot_server_from_volume_and_delete")
     @mock.patch("%s.osclients.Clients" % BASE)
     @mock.patch("%s.utils.delete_nova_resources" % BASE)
     @mock.patch("%s.utils.delete_glance_resources" % BASE)
     @mock.patch("%s.utils.delete_cinder_resources" % BASE)
     def test_cleaner_users_all_services(self, mock_del_cinder,
                                         mock_del_glance, mock_del_nova,
-                                        mock_clients, mock_scenario_method):
-        del mock_scenario_method.cleanup_services
+                                        mock_clients):
         context = {
             "task": mock.MagicMock(),
             "users": [{"endpoint": mock.MagicMock()},
                       {"endpoint": mock.MagicMock()}],
-            "scenario_name": "NovaServers.boot_server_from_volume_and_delete",
+            "config": {"cleanup": ["cinder", "nova", "glance"]},
             "tenants": [mock.MagicMock()]
         }
         res_cleaner = cleaner_ctx.ResourceCleaner(context)
@@ -100,18 +97,31 @@ class ResourceCleanerTestCase(test.TestCase):
         self.assertEqual(mock_del_glance.call_count, 2)
         self.assertEqual(mock_del_cinder.call_count, 2)

+    @mock.patch("%s.ResourceCleaner._cleanup_users_resources" % BASE)
+    def test_cleaner_users_default_behavior(self, mock_cleanup):
+        context = {
+            "task": mock.MagicMock(),
+            "users": [{"endpoint": mock.MagicMock()},
+                      {"endpoint": mock.MagicMock()}],
+        }
+        res_cleaner = cleaner_ctx.ResourceCleaner(context)
+
+        with res_cleaner:
+            res_cleaner.setup()
+
+        self.assertEqual(mock_cleanup.call_count, 0)
+
     @mock.patch("%s.osclients.Clients" % BASE)
     @mock.patch("%s.utils.delete_nova_resources" % BASE)
     @mock.patch("%s.utils.delete_glance_resources" % BASE)
     @mock.patch("%s.utils.delete_cinder_resources" % BASE)
     def test_cleaner_users_by_service(self, mock_del_cinder, mock_del_glance,
                                       mock_del_nova, mock_clients):

         context = {
             "task": mock.MagicMock(),
             "users": [{"endpoint": mock.MagicMock()},
                       {"endpoint": mock.MagicMock()}],
-            "scenario_name": 'NovaServers.boot_server_from_volume_and_delete',
+            "config": {"cleanup": ["cinder", "nova"]},
             "tenants": [mock.MagicMock()]
         }
         res_cleaner = cleaner_ctx.ResourceCleaner(context)
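The thread through all of these hunks is that ResourceCleaner no longer derives the services to purge from "scenario_name"; it reads them from context["config"]["cleanup"], and the new test_cleaner_users_default_behavior pins down that nothing is cleaned when that key is absent. A rough sketch of that selection logic, with placeholder helpers -- nothing below is copied from the real class:

    class ResourceCleanerSketch(object):
        def __init__(self, context):
            self.context = context

        def cleanup(self):
            services = self.context.get("config", {}).get("cleanup", [])
            if not services:
                # no "cleanup" entry in the config -> nothing to do,
                # which is what the default-behavior test asserts
                return
            for user in self.context.get("users", []):
                for service in services:   # e.g. ["cinder", "nova", "glance"]
                    self._delete_resources(service, user["endpoint"])

        def _delete_resources(self, service, endpoint):
            # stand-in for the per-service delete_*_resources helpers
            print("would clean %s resources via %s" % (service, endpoint))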
@@ -26,15 +26,16 @@ class KeyPairContextTestCase(test.TestCase):
     def setUp(self):
         super(KeyPairContextTestCase, self).setUp()
         self.users = 2
+        task = mock.MagicMock()
         self.ctx_with_keys = {
             "users": [
                 {"keypair": "key", "endpoint": "endpoint"},
             ] * self.users,
-            "task": {}
+            "task": task
         }
         self.ctx_without_keys = {
             "users": [{'endpoint': 'endpoint'}] * self.users,
-            "task": {}
+            "task": task
         }

     @mock.patch("%s.keypair.Keypair._generate_keypair" % CTX)
@@ -97,7 +97,7 @@ class QuotasTestCase(test.TestCase):
                 {"endpoint": mock.MagicMock(), "id": mock.MagicMock()}
             ],
             "admin": {"endpoint": mock.MagicMock()},
-            "task": {}
+            "task": mock.MagicMock()
         }

     def test_quotas_schemas(self):
@@ -128,7 +128,7 @@ class QuotasTestCase(test.TestCase):
                 # Test invalid values
                 ctx["config"]["quotas"][service][key] = self.unlimited - 1
                 try:
-                    quotas.Quotas.validate(ctx["config"])
+                    quotas.Quotas.validate(ctx["config"]["quotas"])
                 except jsonschema.ValidationError:
                     pass
                 else:
@@ -137,7 +137,7 @@ class QuotasTestCase(test.TestCase):

                 ctx["config"]["quotas"][service][key] = 2.5
                 try:
-                    quotas.Quotas.validate(ctx["config"])
+                    quotas.Quotas.validate(ctx["config"]["quotas"])
                 except jsonschema.ValidationError:
                     pass
                 else:
@@ -146,7 +146,7 @@ class QuotasTestCase(test.TestCase):

                 ctx["config"]["quotas"][service][key] = "-1"
                 try:
-                    quotas.Quotas.validate(ctx["config"])
+                    quotas.Quotas.validate(ctx["config"]["quotas"])
                 except jsonschema.ValidationError:
                     pass
                 else:
@@ -157,20 +157,20 @@ class QuotasTestCase(test.TestCase):
                 ctx["config"]["quotas"][service][key] = \
                     random.randint(0, 1000000)
                 try:
-                    quotas.Quotas.validate(ctx["config"])
+                    quotas.Quotas.validate(ctx["config"]["quotas"])
                 except jsonschema.ValidationError:
                     self.fail("Positive integers are valid quota values")

                 ctx["config"]["quotas"][service][key] = self.unlimited
                 try:
-                    quotas.Quotas.validate(ctx["config"])
+                    quotas.Quotas.validate(ctx["config"]["quotas"])
                 except jsonschema.ValidationError:
                     self.fail("%d is a valid quota value" % self.unlimited)

                 # Test additional keys are refused
                 ctx["config"]["quotas"][service]["additional"] = self.unlimited
                 try:
-                    quotas.Quotas.validate(ctx["config"])
+                    quotas.Quotas.validate(ctx["config"]["quotas"])
                 except jsonschema.ValidationError:
                     pass
                 else:
@@ -180,7 +180,7 @@ class QuotasTestCase(test.TestCase):
                 # Test valid keys are optional
                 ctx["config"]["quotas"][service] = {}
                 try:
-                    quotas.Quotas.validate(ctx["config"])
+                    quotas.Quotas.validate(ctx["config"]["quotas"])
                 except jsonschema.ValidationError:
                     self.fail("Valid quota keys are optional")

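Every change in the quotas hunks is the same one-line fix: Quotas.validate() is now handed only its own section of the benchmark config, which matches per-context validation against CONFIG_SCHEMA. An illustrative call shape, assuming the quotas context lives at rally.benchmark.context.quotas as the test alias suggests; the "nova"/"instances" keys and values are placeholders, not taken from the schema:

    from rally.benchmark.context import quotas

    config = {"quotas": {"nova": {"instances": 10}}}   # hypothetical values

    # before: the whole per-scenario config was passed in
    # quotas.Quotas.validate(config)

    # after: only the "quotas" section is checked against CONFIG_SCHEMA
    quotas.Quotas.validate(config["quotas"])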
@@ -18,6 +18,7 @@ import mock

 from rally.benchmark.runners import base
 from rally.benchmark.runners import continuous
+from rally.benchmark.scenarios import base as base_scenario
 from rally import consts
 from rally import exceptions
 from tests import fakes
@@ -180,17 +181,19 @@ class ScenarioRunnerTestCase(test.TestCase):

         task = mock.MagicMock()
         endpoints = [mock.MagicMock(), mock.MagicMock()]
-        runner = base.ScenarioRunner.get_runner(task, endpoints, "new_runner")
+        config = {"type": "new_runner", "a": 123}
+        runner = base.ScenarioRunner.get_runner(task, endpoints, config)

         self.assertEqual(runner.task, task)
         self.assertEqual(runner.endpoints, endpoints)
         self.assertEqual(runner.admin_user, endpoints[0])
+        self.assertEqual(runner.config, config)
         self.assertIsInstance(runner, NewRunner)

     def test_get_runner_no_such(self):
         self.assertRaises(exceptions.NoSuchRunner,
                           base.ScenarioRunner.get_runner,
-                          None, None, "NoSuchRunner")
+                          None, None, {"type": "NoSuchRunner"})

     @mock.patch("rally.benchmark.runners.base.jsonschema.validate")
     def test_validate_default_runner(self, mock_validate):
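The runner factory now takes the whole runner config and dispatches on its "type" key instead of a bare name string, and an unknown type still raises exceptions.NoSuchRunner, as the second test keeps checking. A toy dispatcher, with an explicit registry standing in for whatever subclass discovery the real get_runner uses:

    def get_runner_sketch(task, endpoints, config, registry):
        """Toy version of the new get_runner(task, endpoints, config) shape."""
        # registry maps type names to runner classes; KeyError stands in
        # for exceptions.NoSuchRunner in the real code
        runner_cls = registry[config["type"]]
        return runner_cls(task, endpoints, config)

    # mirroring the test above (NewRunner is the test's dummy runner class):
    # runner = get_runner_sketch(task, endpoints,
    #                            {"type": "new_runner", "a": 123},
    #                            {"new_runner": NewRunner})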
@@ -200,10 +203,39 @@ class ScenarioRunnerTestCase(test.TestCase):
             config,
             continuous.ContinuousScenarioRunner.CONFIG_SCHEMA)

-    @mock.patch("rally.benchmark.runners.base.ScenarioRunner._run_as_admin")
-    def test_run_scenario_runner_results_exception(self, mock_run_method):
+    @mock.patch("rally.benchmark.runners.base.base_ctx.ContextManager")
+    def test_run(self, mock_ctx_manager):
         runner = continuous.ContinuousScenarioRunner(mock.MagicMock(),
-                                                     self.fake_endpoints)
+                                                     self.fake_endpoints,
+                                                     mock.MagicMock())
+        mock_ctx_manager.run.return_value = base.ScenarioRunnerResult([])
+        scenario_name = "NovaServers.boot_server_from_volume_and_delete"
+        result = runner.run(scenario_name, {"some_ctx": 2}, [1, 2, 3])
+
+        self.assertEqual(result, mock_ctx_manager.run.return_value)
+
+        cls_name, method_name = scenario_name.split(".", 1)
+        cls = base_scenario.Scenario.get_by_name(cls_name)
+
+        context_obj = {
+            "task": runner.task,
+            "admin": {"endpoint": runner.admin_user},
+            "scenario_name": scenario_name,
+            "config": {
+                "cleanup": ["nova", "cinder"], "some_ctx": 2, "users": {}
+            }
+        }
+
+        expected = [context_obj, runner._run_scenario, cls, method_name,
+                    context_obj, [1, 2, 3]]
+        mock_ctx_manager.run.assert_called_once_with(*expected)
+
+    @mock.patch("rally.benchmark.runners.base.base_ctx.ContextManager")
+    def test_run__scenario_runner_results_exception(self, mock_ctx_manager):
+        runner = continuous.ContinuousScenarioRunner(mock.MagicMock(),
+                                                     self.fake_endpoints,
+                                                     mock.MagicMock())
         self.assertRaises(exceptions.InvalidRunnerResult,
-                          runner.run, "NovaServers.boot_server_from_volume_"
-                          "and_delete", mock.MagicMock())
+                          runner.run,
+                          "NovaServers.boot_server_from_volume_and_delete",
+                          mock.MagicMock(), {})
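test_run spells out what the reworked ScenarioRunner.run is expected to do: resolve the scenario class, build a context object around the task, admin endpoint and merged config, and hand everything to ContextManager.run together with _run_scenario and the scenario arguments. Reassembled as a sketch; the fixed "cleanup"/"users" defaults are simply what the test asserts for this scenario and presumably come from the scenario's own declared context rather than being hard-coded:

    from rally.benchmark.context import base as base_ctx
    from rally.benchmark.scenarios import base as base_scenario

    def run_sketch(runner, name, context, args):
        cls_name, method_name = name.split(".", 1)
        cls = base_scenario.Scenario.get_by_name(cls_name)

        # defaults merged with the context supplied by the caller
        config = {"cleanup": ["nova", "cinder"], "users": {}}
        config.update(context)

        context_obj = {
            "task": runner.task,
            "admin": {"endpoint": runner.admin_user},
            "scenario_name": name,
            "config": config,
        }
        return base_ctx.ContextManager.run(context_obj, runner._run_scenario,
                                           cls, method_name, context_obj, args)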
@@ -41,8 +41,9 @@ class ContinuousScenarioRunnerTestCase(test.TestCase):
     def test_run_scenario_continuously_for_times(self):
         context = fakes.FakeUserContext({"task": None}).context

         runner = continuous.ContinuousScenarioRunner(
-            None, [context["admin"]["endpoint"]])
+            None, [context["admin"]["endpoint"]], {})
         times = 4
         concurrent = 2
         timeout = 2
@@ -56,7 +57,7 @@ class ContinuousScenarioRunnerTestCase(test.TestCase):
     def test_run_scenario_continuously_for_times_exception(self):
         context = fakes.FakeUserContext({"task": None}).context
         runner = continuous.ContinuousScenarioRunner(
-            None, [context["admin"]["endpoint"]])
+            None, [context["admin"]["endpoint"]], {})
         times = 4
         concurrent = 2
         timeout = 2
@@ -72,7 +73,7 @@ class ContinuousScenarioRunnerTestCase(test.TestCase):
         self.skipTest("This test produce a lot of races so we should fix it "
                       "before running inside in gates")
         runner = continuous.ContinuousScenarioRunner(mock.MagicMock(),
-                                                     [mock.MagicMock()])
+                                                     [mock.MagicMock()], {})
         duration = 0
         active_users = 4
         timeout = 5
@@ -48,29 +48,24 @@ class PeriodicScenarioRunnerTestCase(test.TestCase):

     def test_run_scenario(self):
         context = fakes.FakeUserContext({}).context
+        config = {"times": 3, "period": 0, "timeout": 5}
         runner = periodic.PeriodicScenarioRunner(
-            None, [context["admin"]["endpoint"]])
-        times = 3
-        period = 0
+            None, [context["admin"]["endpoint"]], config)

-        result = runner._run_scenario(fakes.FakeScenario, "do_it", context, {},
-                                      {"times": times, "period": period,
-                                       "timeout": 5})
-        self.assertEqual(len(result), times)
+        result = runner._run_scenario(fakes.FakeScenario, "do_it", context, {})
+        self.assertEqual(len(result), config["times"])
         self.assertIsNotNone(base.ScenarioRunnerResult(result))

     def test_run_scenario_exception(self):
         context = fakes.FakeUserContext({}).context

+        config = {"times": 4, "period": 0}
         runner = periodic.PeriodicScenarioRunner(
-            None, [context["admin"]["endpoint"]])
-        times = 4
-        period = 0
+            None, [context["admin"]["endpoint"]], config)

         result = runner._run_scenario(fakes.FakeScenario,
-                                      "something_went_wrong", context, {},
-                                      {"times": times, "period": period,
-                                       "timeout": 5})
-        self.assertEqual(len(result), times)
+                                      "something_went_wrong", context, {})
+        self.assertEqual(len(result), config["times"])
         self.assertIsNotNone(base.ScenarioRunnerResult(result))

     @mock.patch("rally.benchmark.runners.periodic.base.ScenarioRunnerResult")
@@ -79,19 +74,17 @@ class PeriodicScenarioRunnerTestCase(test.TestCase):
     def test_run_scenario_internal_logic(self, mock_time, mock_pool,
                                          mock_result):
         context = fakes.FakeUserContext({}).context
+        config = {"times": 4, "period": 0, "timeout": 5}
         runner = periodic.PeriodicScenarioRunner(
-            None, [context["admin"]["endpoint"]])
-        times = 4
-        period = 0
+            None, [context["admin"]["endpoint"]], config)

         mock_pool_inst = mock.MagicMock()
         mock_pool.ThreadPool.return_value = mock_pool_inst

-        runner._run_scenario(fakes.FakeScenario, "do_it", context, {},
-                             {"times": times, "period": period, "timeout": 5})
+        runner._run_scenario(fakes.FakeScenario, "do_it", context, {})

         exptected_pool_inst_call = []
-        for i in range(times):
+        for i in range(config["times"]):
             args = (
                 base._run_scenario_once,
                 ((i, fakes.FakeScenario, "do_it",
@@ -99,7 +92,7 @@ class PeriodicScenarioRunnerTestCase(test.TestCase):
             )
             exptected_pool_inst_call.append(mock.call.apply_async(*args))

-        for i in range(times):
+        for i in range(config["times"]):
             call = mock.call.apply_async().get(timeout=5)
             exptected_pool_inst_call.append(call)

@@ -117,5 +110,5 @@ class PeriodicScenarioRunnerTestCase(test.TestCase):

         runner = base.ScenarioRunner.get_runner(mock.MagicMock(),
                                                 self.fake_endpoints,
-                                                "periodic")
+                                                {"type": "periodic"})
         self.assertTrue(runner is not None)
@@ -40,9 +40,9 @@ class SerialScenarioRunnerTestCase(test.TestCase):
         expected_results = [result for i in range(times)]

         runner = serial.SerialScenarioRunner(mock.MagicMock(),
-                                             self.fake_endpoints)
+                                             self.fake_endpoints,
+                                             {"times": times})
         results = runner._run_scenario(fakes.FakeScenario, "do_it",
-                                       fakes.FakeUserContext({}).context,
-                                       {}, {"type": "serial", "times": times})
+                                       fakes.FakeUserContext({}).context, {})
         self.assertEqual(mock_run_once.call_count, times)
         self.assertEqual(results, expected_results)
@@ -80,7 +80,7 @@ class NovaScenarioTestCase(test.TestCase):
     @mock.patch(NOVA_UTILS + '.NovaScenario.clients')
     def test__boot_server(self, mock_clients):
         mock_clients("nova").servers.create.return_value = self.server
-        nova_scenario = utils.NovaScenario()
+        nova_scenario = utils.NovaScenario(context={})
         return_server = nova_scenario._boot_server('server_name', 'image_id',
                                                    'flavor_id')
         self.wait_for.mock.assert_called_once_with(
@@ -14,7 +14,9 @@
 # under the License.

 import mock
+import traceback

+from rally.benchmark.context import base as base_ctx
 from rally.benchmark.scenarios import base
 from rally.benchmark import validation
 from rally import consts
@@ -173,3 +175,17 @@ class ScenarioTestCase(test.TestCase):
         scenario = base.Scenario(admin_clients=clients)
         self.assertEqual(clients.nova(), scenario.admin_clients("nova"))
         self.assertEqual(clients.glance(), scenario.admin_clients("glance"))
+
+    def test_scenario_context_are_valid(self):
+        scenarios = base.Scenario.list_benchmark_scenarios()
+
+        for scenario in scenarios:
+            cls_name, method_name = scenario.split(".", 1)
+            cls = base.Scenario.get_by_name(cls_name)
+            context = getattr(cls, method_name).context
+            try:
+                base_ctx.ContextManager.validate(context)
+            except Exception:
+                print(traceback.format_exc())
+                self.assertTrue(False,
+                                "Scenario `%s` has wrong context" % scenario)
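The new test walks every registered scenario and validates the context dict attached to its method, so a misspelled context name or a config that fails that context's CONFIG_SCHEMA now breaks the unit run. For illustration, this is the kind of attribute the test inspects; the cleanup value is an example, not copied from a real scenario:

    from rally.benchmark.context import base as base_ctx

    # what getattr(cls, method_name).context might hold for one scenario
    context = {"cleanup": ["nova", "cinder"]}
    base_ctx.ContextManager.validate(context)   # raises on a bad name or schema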
@@ -121,16 +121,17 @@ class BenchmarkEngineTestCase(test.TestCase):
                           eng._validate_config_scenarios_name, config)

     @mock.patch("rally.benchmark.engine.base_runner.ScenarioRunner.validate")
-    @mock.patch("rally.benchmark.engine.base_ctx.Context.validate")
+    @mock.patch("rally.benchmark.engine.base_ctx.ContextManager.validate")
     def test__validate_config_syntax(self, mock_context, mock_runner):
         config = {"sca": [{"context": "a"}], "scb": [{"runner": "b"}]}
         eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())
         eng._validate_config_syntax(config)
         mock_runner.assert_has_calls([mock.call({}), mock.call("b")])
-        mock_context.assert_has_calls([mock.call("a"), mock.call({})])
+        mock_context.assert_has_calls([mock.call("a", non_hidden=True),
+                                       mock.call({}, non_hidden=True)])

     @mock.patch("rally.benchmark.engine.base_runner.ScenarioRunner")
-    @mock.patch("rally.benchmark.engine.base_ctx.Context.validate")
+    @mock.patch("rally.benchmark.engine.base_ctx.ContextManager.validate")
     def test__validate_config_syntax__wrong_runner(self, mock_context,
                                                    mock_runner):
         config = {"sca": [{"context": "a"}], "scb": [{"runner": "b"}]}
@@ -142,7 +143,7 @@ class BenchmarkEngineTestCase(test.TestCase):
                           eng._validate_config_syntax, config)

     @mock.patch("rally.benchmark.engine.base_runner.ScenarioRunner.validate")
-    @mock.patch("rally.benchmark.engine.base_ctx.Context")
+    @mock.patch("rally.benchmark.engine.base_ctx.ContextManager")
     def test__validate_config_syntax__wrong_context(self, mock_context,
                                                     mock_runner):
         config = {"sca": [{"context": "a"}], "scb": [{"runner": "b"}]}
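Both engine tests now patch ContextManager instead of Context and expect validate to be called with non_hidden=True, meaning contexts flagged as hidden cannot be configured from a task file. A sketch of the validation step reverse-engineered from the asserted calls; the import aliases follow the mock targets, and the loop body is inferred rather than copied from the engine:

    from rally.benchmark.context import base as base_ctx
    from rally.benchmark.runners import base as base_runner

    def _validate_config_syntax_sketch(config):
        # config maps scenario names to lists of per-run keyword dicts
        for runs in config.values():
            for kw in runs:
                base_runner.ScenarioRunner.validate(kw.get("runner", {}))
                base_ctx.ContextManager.validate(kw.get("context", {}),
                                                 non_hidden=True)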