diff --git a/rally/benchmark/engine.py b/rally/benchmark/engine.py
index 269b90ef11..74fb7e54d0 100644
--- a/rally/benchmark/engine.py
+++ b/rally/benchmark/engine.py
@@ -74,29 +74,42 @@ CONFIG_SCHEMA = {
 class BenchmarkEngine(object):
     """The Benchmark engine class is used to execute benchmark scenarios.
 
-    An instance of class is initialized by the Orchestrator with the benchmarks
-    configuration and then is used to execute all specified scenarios.
+    An instance of this class is initialized by the API with the benchmarks
+    configuration and then is used to validate and execute all specified
+    in config benchmarks.
+
     .. note::
 
         Typical usage:
             ...
-            benchmark_engine = BenchmarkEngine(config, task)
-            # Deploying the cloud...
-            # admin - is an objects.Endpoint that actually presents admin user
-            # users - is a list of objects.Endpoint that actually presents list
-            of users.
-            with benchmark_engine.bind(admin=admin, users=users):
-                benchmark_engine.run()
+            admin = ....  # contains dict representations of objects.Endpoint
+                          # with OpenStack admin credentials
+
+            users = ....  # contains a list of dicts of representations of
+                          # objects.Endpoint with OpenStack users credentials.
+
+            engine = BenchmarkEngine(config, task, admin=admin, users=users)
+            engine.validate()  # to test config
+            engine.run()  # to run config
     """
 
-    def __init__(self, config, task):
+    def __init__(self, config, task, admin=None, users=None):
         """BenchmarkEngine constructor.
 
         :param config: The configuration with specified benchmark scenarios
         :param task: The current task which is being performed
+        :param admin: Dict with admin credentials
+        :param users: List of dicts with user credentials
         """
         self.config = config
         self.task = task
+        self.admin = admin and endpoint.Endpoint(**admin) or None
+        self.users = map(lambda u: endpoint.Endpoint(**u), users or [])
+
+    @rutils.log_task_wrapper(LOG.info, _("Task validation check cloud."))
+    def _check_cloud(self):
+        clients = osclients.Clients(self.admin)
+        clients.verified_keystone()
 
     @rutils.log_task_wrapper(LOG.info,
                              _("Task validation of scenarios names."))
@@ -141,15 +154,14 @@ class BenchmarkEngine(object):
 
     @rutils.log_task_wrapper(LOG.info, _("Task validation of semantic."))
     def _validate_config_semantic(self, config):
+        self._check_cloud()
+
         # NOTE(boris-42): In future we will have more complex context, because
         #                 we will have pre-created users mode as well.
-        context = {
-            "task": self.task,
-            "admin": {"endpoint": self.admin_endpoint}
-        }
+        context = {"task": self.task, "admin": {"endpoint": self.admin}}
         with users_ctx.UserGenerator(context) as ctx:
             ctx.setup()
-            admin = osclients.Clients(self.admin_endpoint)
+            admin = osclients.Clients(self.admin)
             user = osclients.Clients(context["users"][0]["endpoint"])
 
             for name, values in config.iteritems():
@@ -214,7 +226,11 @@ class BenchmarkEngine(object):
             consumer.start()
 
             context_obj = self._prepare_context(kw.get("context", {}),
-                                                name, self.admin_endpoint)
+                                                name, self.admin)
+
+            # NOTE(boris-42): reset duration, in case of failures during
+            #                 context creation
+            self.duration = 0
             try:
                 with base_ctx.ContextManager(context_obj):
                     self.duration = runner.run(name, context_obj,
@@ -224,22 +240,6 @@
                 consumer.join()
         self.task.update_status(consts.TaskStatus.FINISHED)
 
-    @rutils.log_task_wrapper(LOG.info, _("Check cloud."))
-    def bind(self, admin=None, users=None):
-        """Bind benchmark engine to OpenStack cloud.
-
-        This method will set self.admin_endpoint with passed values,
-        as well it will check that admin user is actually admin.
-
-        :param admin: admin credentials
-        :param users: List of users credentials
-        :returns: self
-        """
-        self.admin_endpoint = endpoint.Endpoint(**admin)
-        clients = osclients.Clients(self.admin_endpoint)
-        clients.verified_keystone()
-        return self
-
     def consume_results(self, key, task, result_queue, is_done):
         """Consume scenario runner results from queue and send them to db.
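Note: with bind() folded into the constructor, the engine lifecycle documented in the new docstring is construct, validate(), run(). A minimal usage sketch, assuming credential dicts whose keys match the objects.Endpoint constructor (all values here are illustrative):

    from rally.benchmark import engine

    # Illustrative credentials; any dict accepted by endpoint.Endpoint(**d).
    admin = {"auth_url": "http://keystone.example:5000/v2.0",
             "username": "admin",
             "password": "secret",
             "tenant_name": "admin"}

    eng = engine.BenchmarkEngine(config, task, admin=admin, users=[])
    eng.validate()  # semantic validation now calls _check_cloud() first
    eng.run()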
diff --git a/rally/orchestrator/api.py b/rally/orchestrator/api.py
index 3e1ca936b7..f882eb38e7 100644
--- a/rally/orchestrator/api.py
+++ b/rally/orchestrator/api.py
@@ -71,12 +71,12 @@ def destroy_deploy(deployment):
     deployment = objects.Deployment.get(deployment)
     deployer = deploy.EngineFactory.get_engine(deployment['config']['type'],
                                                deployment)
+
+    tempest.Tempest(deployment['uuid']).uninstall()
     with deployer:
         deployer.make_cleanup()
         deployment.delete()
 
-    tempest.Tempest(deployment['uuid']).uninstall()
-
 
 def recreate_deploy(deployment):
     """Performs a clean up and then start to deploy.
@@ -114,9 +114,8 @@ def task_validate(deployment, config):
     """
     deployment = objects.Deployment.get(deployment)
     task = objects.Task(deployment_uuid=deployment['uuid'])
-    benchmark_engine = engine.BenchmarkEngine(config, task)
-    benchmark_engine.bind(admin=deployment["admin"],
-                          users=deployment["users"])
+    benchmark_engine = engine.BenchmarkEngine(
+        config, task, admin=deployment["admin"], users=deployment["users"])
 
     benchmark_engine.validate()
@@ -133,12 +132,10 @@ def start_task(deployment, config, task=None):
     task = task or objects.Task(deployment_uuid=deployment['uuid'])
     LOG.info("Benchmark Task %s on Deployment %s" % (task['uuid'],
                                                      deployment['uuid']))
-    benchmark_engine = engine.BenchmarkEngine(config, task)
-    admin = deployment["admin"]
-    users = deployment["users"]
+    benchmark_engine = engine.BenchmarkEngine(
+        config, task, admin=deployment["admin"], users=deployment["users"])
 
     try:
-        benchmark_engine.bind(admin=admin, users=users)
         benchmark_engine.validate()
         benchmark_engine.run()
     except exceptions.InvalidTaskException:
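Note: the orchestrator surface is unchanged for callers; credentials now flow from the deployment straight into the engine constructor. A caller sketch (deployment_uuid and config are placeholders; config is a task dict matching engine.CONFIG_SCHEMA):

    from rally.orchestrator import api

    api.task_validate(deployment_uuid, config)  # build engine, validate only
    api.start_task(deployment_uuid, config)     # validate, then run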
@mock.patch("rally.benchmark.engine.endpoint.Endpoint") - def test_run__config_has_args(self, mock_endpoint, mock_osclients, - mock_setup, mock_cleanup, + def test_run__config_has_args(self, mock_setup, mock_cleanup, mock_runner, mock_scenario, mock_consume): config = { "a.benchmark": [{"args": {"a": "a", "b": 1}}], "b.benchmark": [{"args": {"a": 1}}] } task = mock.MagicMock() - eng = engine.BenchmarkEngine(config, task).bind({}) + eng = engine.BenchmarkEngine(config, task) eng.run() @mock.patch("rally.benchmark.engine.BenchmarkEngine.consume_results") @@ -265,17 +259,14 @@ class BenchmarkEngineTestCase(test.TestCase): @mock.patch("rally.benchmark.engine.base_runner.ScenarioRunner") @mock.patch("rally.benchmark.engine.base_ctx.ContextManager.cleanup") @mock.patch("rally.benchmark.engine.base_ctx.ContextManager.setup") - @mock.patch("rally.benchmark.engine.osclients") - @mock.patch("rally.benchmark.engine.endpoint.Endpoint") - def test_run__config_has_runner(self, mock_endpoint, mock_osclients, - mock_setup, mock_cleanup, + def test_run__config_has_runner(self, mock_setup, mock_cleanup, mock_runner, mock_scenario, mock_consume): config = { "a.benchmark": [{"runner": {"type": "a", "b": 1}}], "b.benchmark": [{"runner": {"a": 1}}] } task = mock.MagicMock() - eng = engine.BenchmarkEngine(config, task).bind({}) + eng = engine.BenchmarkEngine(config, task) eng.run() @mock.patch("rally.benchmark.engine.BenchmarkEngine.consume_results") @@ -283,42 +274,16 @@ class BenchmarkEngineTestCase(test.TestCase): @mock.patch("rally.benchmark.engine.base_runner.ScenarioRunner") @mock.patch("rally.benchmark.engine.base_ctx.ContextManager.cleanup") @mock.patch("rally.benchmark.engine.base_ctx.ContextManager.setup") - @mock.patch("rally.benchmark.engine.osclients") - @mock.patch("rally.benchmark.engine.endpoint.Endpoint") - def test_run__config_has_context(self, mock_endpoint, mock_osclients, - mock_ctx_setup, mock_ctx_cleanup, + def test_run__config_has_context(self, mock_ctx_setup, mock_ctx_cleanup, mock_runner, mock_scenario, mock_consume): config = { "a.benchmark": [{"context": {"context_a": {"a": 1}}}], "b.benchmark": [{"context": {"context_b": {"b": 2}}}] } task = mock.MagicMock() - eng = engine.BenchmarkEngine(config, task).bind({}) + eng = engine.BenchmarkEngine(config, task) eng.run() - @mock.patch("rally.benchmark.engine.osclients") - @mock.patch("rally.benchmark.engine.endpoint.Endpoint") - def test_bind(self, mock_endpoint, mock_osclients): - mock_endpoint.return_value = mock.MagicMock() - benchmark_engine = engine.BenchmarkEngine(mock.MagicMock(), - mock.MagicMock()) - admin = { - "auth_url": "http://valid.com", - "username": "user", - "password": "pwd", - "tenant_name": "tenant" - } - - binded_benchmark_engine = benchmark_engine.bind(admin) - self.assertEqual(mock_endpoint.return_value, - benchmark_engine.admin_endpoint) - self.assertEqual(benchmark_engine, binded_benchmark_engine) - expected_calls = [ - mock.call.Clients(mock_endpoint.return_value), - mock.call.Clients().verified_keystone() - ] - mock_osclients.assert_has_calls(expected_calls) - @mock.patch("rally.benchmark.engine.base_scenario.Scenario.meta") def test__prepare_context(self, mock_meta): default_context = {"a": 1, "b": 2} diff --git a/tests/unit/orchestrator/test_api.py b/tests/unit/orchestrator/test_api.py index 7d0155ec2a..4683c4acd8 100644 --- a/tests/unit/orchestrator/test_api.py +++ b/tests/unit/orchestrator/test_api.py @@ -76,13 +76,13 @@ class APITestCase(test.TestCase): users=[])) 
@mock.patch("rally.orchestrator.api.engine.BenchmarkEngine") def test_task_validate(self, mock_engine, mock_deployment_get, mock_task): - api.task_validate(mock_deployment_get.return_value['uuid'], "config") + api.task_validate(mock_deployment_get.return_value["uuid"], "config") mock_engine.assert_has_calls([ - mock.call("config", mock_task.return_value), - mock.call().bind(admin=mock_deployment_get.return_value["admin"], - users=[]), - mock.call().validate(), + mock.call("config", mock_task.return_value, + admin=mock_deployment_get.return_value["admin"], + users=[]), + mock.call().validate() ]) mock_task.assert_called_once_with( @@ -110,9 +110,9 @@ class APITestCase(test.TestCase): api.start_task(mock_deployment_get.return_value["uuid"], "config") mock_engine.assert_has_calls([ - mock.call("config", mock_task.return_value), - mock.call().bind(admin=mock_deployment_get.return_value["admin"], - users=[]), + mock.call("config", mock_task.return_value, + admin=mock_deployment_get.return_value["admin"], + users=[]), mock.call().validate(), mock.call().run(), ])