diff --git a/drydock_provisioner/drivers/driver.py b/drydock_provisioner/drivers/driver.py
index 15957478..5cc3dbbc 100644
--- a/drydock_provisioner/drivers/driver.py
+++ b/drydock_provisioner/drivers/driver.py
@@ -17,9 +17,10 @@ from threading import Thread
 import time

 import drydock_provisioner.objects.fields as hd_fields
-import drydock_provisioner.statemgmt as statemgmt
 import drydock_provisioner.error as errors

+from drydock_provisioner.orchestrator.actions.orchestrator import Noop
+

 class ProviderDriver(object):
     """Generic driver for executing driver actions."""
@@ -47,8 +48,9 @@ class ProviderDriver(object):
         task_action = task.action

         if task_action in self.supported_actions:
-            task_runner = DriverActionRunner(task_id, self.state_manager,
-                                             self.orchestrator)
+            # Just use the Noop action
+            task_action = Noop(task, self.orchestrator, self.state_manager)
+            task_runner = DriverActionRunner(task_action)
             task_runner.start()

             while task_runner.is_alive():
@@ -62,17 +64,12 @@ class ProviderDriver(object):

 # Execute a single task in a separate thread
 class DriverActionRunner(Thread):
-    def __init__(self, action=None, state_manager=None, orchestrator=None):
+    def __init__(self, action=None):
         super().__init__()

-        self.orchestrator = orchestrator
-
-        if isinstance(state_manager, statemgmt.DesignState):
-            self.state_manager = state_manager
-        else:
-            raise errors.DriverError("Invalid state manager specified")
-
         self.action = action
+        self.orchestrator = action.orchestrator
+        self.state_manager = action.state_manager

     def run(self):
         self.run_action()
diff --git a/drydock_provisioner/drivers/oob/driver.py b/drydock_provisioner/drivers/oob/driver.py
index 78c28596..4f9e611e 100644
--- a/drydock_provisioner/drivers/oob/driver.py
+++ b/drydock_provisioner/drivers/oob/driver.py
@@ -21,7 +21,7 @@ from drydock_provisioner.drivers.driver import ProviderDriver
 class OobDriver(ProviderDriver):
     """Genneric driver for OOB actions."""

-    oob_types_supported = ['']
+    oob_types_supported = ['ipmi']

     def __init__(self, **kwargs):
         super(OobDriver, self).__init__(**kwargs)
diff --git a/drydock_provisioner/objects/task.py b/drydock_provisioner/objects/task.py
index 1d07a8d9..ebfb2408 100644
--- a/drydock_provisioner/objects/task.py
+++ b/drydock_provisioner/objects/task.py
@@ -380,7 +380,8 @@ class Task(object):
             'request_context':
             json.dumps(self.request_context.to_dict())
             if self.request_context is not None else None,
-            'node_filter': self.node_filter,
+            'node_filter':
+            self.node_filter,
             'action':
             self.action,
             'terminated':
diff --git a/drydock_provisioner/orchestrator/actions/orchestrator.py b/drydock_provisioner/orchestrator/actions/orchestrator.py
index a7ba4f4a..ba0d12fa 100644
--- a/drydock_provisioner/orchestrator/actions/orchestrator.py
+++ b/drydock_provisioner/orchestrator/actions/orchestrator.py
@@ -86,7 +86,8 @@ class BaseAction(object):
                 self.task.register_subtask(split_task)
                 action = self.__class__(split_task, self.orchestrator,
                                         self.state_manager)
-                split_tasks[split_task.get_id().bytes] = te.submit(action.start)
+                split_tasks[split_task.get_id().bytes] = te.submit(
+                    action.start)

         return split_tasks

@@ -185,7 +186,9 @@ class Noop(BaseAction):
         else:
             self.logger.debug("Marked task as successful.")
             self.task.set_status(hd_fields.TaskStatus.Complete)
-            self.task.success()
+            target_nodes = self.orchestrator.get_target_nodes(self.task)
+            for n in target_nodes:
+                self.task.success(focus=n.name)
             self.task.add_status_msg(
                 msg="Noop action.", ctx_type='NA', ctx='NA', error=False)
             self.task.save()
diff --git a/drydock_provisioner/orchestrator/orchestrator.py b/drydock_provisioner/orchestrator/orchestrator.py
index 153cb01c..a1227d7b 100644
--- a/drydock_provisioner/orchestrator/orchestrator.py
+++ b/drydock_provisioner/orchestrator/orchestrator.py
@@ -302,7 +302,6 @@ class Orchestrator(object):

     def get_target_nodes(self, task, failures=False, successes=False):
         """Compute list of target nodes for given ``task``.
-
         If failures is true, then create a node_filter based on task result
         failures. If successes is true, then create a node_filter based on
         task result successes. If both are true, raise an exception. If neither
diff --git a/tests/integration/postgres/test_action_prepare_nodes.py b/tests/integration/postgres/test_action_prepare_nodes.py
new file mode 100644
index 00000000..de93db60
--- /dev/null
+++ b/tests/integration/postgres/test_action_prepare_nodes.py
@@ -0,0 +1,57 @@
+# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Generic testing for the orchestrator."""
+import drydock_provisioner.orchestrator.orchestrator as orch
+import drydock_provisioner.objects.fields as hd_fields
+
+from drydock_provisioner.orchestrator.actions.orchestrator import PrepareNodes
+
+
+class TestActionPrepareNodes(object):
+    def test_preparenodes(self, input_files, deckhand_ingester, setup,
+                          drydock_state):
+        input_file = input_files.join("deckhand_fullsite.yaml")
+
+        design_ref = "file://%s" % str(input_file)
+
+        # Build a dummy object that looks like an oslo_config object
+        # so the orchestrator is configured w/ Noop drivers
+        class DummyConf(object):
+            oob_driver = ['drydock_provisioner.drivers.oob.driver.OobDriver']
+            node_driver = 'drydock_provisioner.drivers.node.driver.NodeDriver'
+            network_driver = None
+
+        orchestrator = orch.Orchestrator(
+            enabled_drivers=DummyConf(),
+            state_manager=drydock_state,
+            ingester=deckhand_ingester)
+
+        task = orchestrator.create_task(
+            design_ref=design_ref,
+            action=hd_fields.OrchestratorAction.PrepareNodes)
+
+        action = PrepareNodes(task, orchestrator, drydock_state)
+        action.start()
+
+        task = drydock_state.get_task(task.get_id())
+
+        assert task.result.status == hd_fields.ActionResult.Success
+
+        # check that the PrepareNodes action was split
+        # with 2 nodes in the definition
+        assert len(task.subtask_id_list) == 2
+
+        for st_id in task.subtask_id_list:
+            st = drydock_state.get_task(st_id)
+            assert st.action == hd_fields.OrchestratorAction.PrepareNodes
diff --git a/tests/integration/postgres/test_action_prepare_site.py b/tests/integration/postgres/test_action_prepare_site.py
index 47fe0254..8a36e9d4 100644
--- a/tests/integration/postgres/test_action_prepare_site.py
+++ b/tests/integration/postgres/test_action_prepare_site.py
@@ -12,8 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
"""Generic testing for the orchestrator.""" -import pytest - import drydock_provisioner.orchestrator.orchestrator as orch import drydock_provisioner.objects.fields as hd_fields @@ -21,7 +19,6 @@ from drydock_provisioner.orchestrator.actions.orchestrator import PrepareSite class TestActionPrepareSite(object): - @pytest.mark.skip(reason="test failure fixed in next PS") def test_preparesite(self, input_files, deckhand_ingester, setup, drydock_state): input_file = input_files.join("deckhand_fullsite.yaml") diff --git a/tests/integration/postgres/test_orch_generic.py b/tests/integration/postgres/test_orch_generic.py index da12aefa..de4659ee 100644 --- a/tests/integration/postgres/test_orch_generic.py +++ b/tests/integration/postgres/test_orch_generic.py @@ -18,19 +18,17 @@ import time import drydock_provisioner.orchestrator.orchestrator as orch import drydock_provisioner.objects.fields as hd_fields -from drydock_provisioner.ingester.ingester import Ingester - class TestClass(object): - def test_task_complete(self, setup, blank_state): - ingester = Ingester() - ingester.enable_plugin( - 'drydock_provisioner.ingester.plugins.yaml.YamlIngester') + def test_task_complete(self, yaml_ingester, input_files, setup, blank_state): + input_file = input_files.join("fullsite.yaml") + design_ref = "file://%s" % str(input_file) orchestrator = orch.Orchestrator( - state_manager=blank_state, ingester=ingester) + state_manager=blank_state, ingester=yaml_ingester) orch_task = orchestrator.create_task( - action=hd_fields.OrchestratorAction.Noop) + action=hd_fields.OrchestratorAction.Noop, + design_ref=design_ref) orch_task.set_status(hd_fields.TaskStatus.Queued) orch_task.save() @@ -47,15 +45,15 @@ class TestClass(object): orchestrator.stop_orchestrator() orch_thread.join(10) - def test_task_termination(self, setup, blank_state): - ingester = Ingester() - ingester.enable_plugin( - 'drydock_provisioner.ingester.plugins.yaml.YamlIngester') + def test_task_termination(self, input_files, yaml_ingester, setup, blank_state): + input_file = input_files.join("fullsite.yaml") + design_ref = "file://%s" % str(input_file) orchestrator = orch.Orchestrator( - state_manager=blank_state, ingester=ingester) + state_manager=blank_state, ingester=yaml_ingester) orch_task = orchestrator.create_task( - action=hd_fields.OrchestratorAction.Noop) + action=hd_fields.OrchestratorAction.Noop, + design_ref=design_ref) orch_task.set_status(hd_fields.TaskStatus.Queued) orch_task.save() diff --git a/tests/integration/postgres/test_postgres_tasks.py b/tests/integration/postgres/test_postgres_tasks.py index a228fef5..9b02856b 100644 --- a/tests/integration/postgres/test_postgres_tasks.py +++ b/tests/integration/postgres/test_postgres_tasks.py @@ -38,6 +38,33 @@ class TestPostgres(object): assert result + def task_task_node_filter(self, blank_state): + """Test that a persisted task persists node filter.""" + ctx = DrydockRequestContext() + ctx.user = 'sh8121' + ctx.external_marker = str(uuid.uuid4()) + + node_filter = { + 'filter_set_type': 'union', + 'filter_set': [{ + 'node_names': ['foo'], + 'filter_type': 'union' + }] + } + task = objects.Task( + action='deploy_node', + node_filter=node_filter, + design_ref='http://foo.bar/design', + context=ctx) + + result = blank_state.post_task(task) + + assert result + + saved_task = blank_state.get_task(task.get_id()) + + assert saved_task.node_filter == node_filter + def test_subtask_append(self, blank_state): """Test that the atomic subtask append method works.""" diff --git 
new file mode 100644
index 00000000..7d8f666d
--- /dev/null
+++ b/tests/unit/test_task_node_filter.py
@@ -0,0 +1,81 @@
+# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Test the node filter logic in the orchestrator."""
+
+import drydock_provisioner.objects as objects
+import drydock_provisioner.objects.fields as hd_fields
+
+
+class TestTaskFilterGeneration(object):
+    def test_task_success_focus(self, setup):
+        """Test that marking a task successful works correctly."""
+        task = objects.Task(
+            action=hd_fields.OrchestratorAction.Noop,
+            design_ref="http://foo.com")
+
+        task.success(focus='foo')
+
+        assert task.result.status == hd_fields.ActionResult.Success
+        assert 'foo' in task.result.successes
+
+    def test_task_failure_focus(self, setup):
+        """Test that marking a task failed works correctly."""
+        task = objects.Task(
+            action=hd_fields.OrchestratorAction.Noop,
+            design_ref="http://foo.com")
+
+        task.failure(focus='foo')
+
+        assert task.result.status == hd_fields.ActionResult.Failure
+        assert 'foo' in task.result.failures
+
+    def test_task_success_nf(self, setup):
+        """Test that a task can generate a node filter based on its success."""
+        task = objects.Task(
+            action=hd_fields.OrchestratorAction.Noop,
+            design_ref="http://foo.com")
+
+        expected_nf = {
+            'filter_set_type': 'intersection',
+            'filter_set': [{
+                'node_names': ['foo'],
+                'filter_type': 'union',
+            }]
+        }
+
+        task.success(focus='foo')
+
+        nf = task.node_filter_from_successes()
+
+        assert nf == expected_nf
+
+    def test_task_failure_nf(self, setup):
+        """Test that a task can generate a node filter based on its failure."""
+        task = objects.Task(
+            action=hd_fields.OrchestratorAction.Noop,
+            design_ref="http://foo.com")
+
+        expected_nf = {
+            'filter_set_type': 'intersection',
+            'filter_set': [{
+                'node_names': ['foo'],
+                'filter_type': 'union',
+            }]
+        }
+
+        task.failure(focus='foo')
+
+        nf = task.node_filter_from_failures()
+
+        assert nf == expected_nf